commit
stringlengths 40
40
| subject
stringlengths 4
1.73k
| repos
stringlengths 5
127k
| old_file
stringlengths 2
751
| new_file
stringlengths 2
751
| new_contents
stringlengths 1
8.98k
| old_contents
stringlengths 0
6.59k
| license
stringclasses 13
values | lang
stringclasses 23
values |
---|---|---|---|---|---|---|---|---|
5fa36e781729fbfe5e3343f921e52eebf0062e75
|
Switch rackspace env variables to prettyconf
|
scottwernervt/cloudstorage
|
tests/settings.py
|
tests/settings.py
|
import hashlib
import os
from tempfile import mkdtemp
from time import time
from prettyconf.configuration import Configuration
config = Configuration()
# Append epoch to prevent test runs from clobbering each other.
CONTAINER_PREFIX = 'cloud-storage-test-' + str(int(time()))
SECRET = hashlib.sha1(os.urandom(128)).hexdigest()
SALT = hashlib.sha1(os.urandom(128)).hexdigest()
TEXT_FILENAME = 'flask.txt'
TEXT_STREAM_FILENAME = 'flask-stream.txt'
TEXT_FORM_FILENAME = 'flask-form.txt'
TEXT_MD5_CHECKSUM = '2a5a634f5c8d931350e83e41c9b3b0bb'
BINARY_FILENAME = 'avatar.png'
BINARY_FORM_FILENAME = 'avatar-form.png'
BINARY_STREAM_FILENAME = 'avatar-stream.png'
BINARY_MD5_CHECKSUM = '2f907a59924ad96b7478074ed96b05f0'
BINARY_OPTIONS = {
'meta_data': {
'owner-id': 'da17c32d-21c2-4bfe-b083-e2e78187d868',
'owner-email': '[email protected]'
},
'content_type': 'image/png',
'content_disposition': 'attachment; filename=avatar-attachment.png',
}
AMAZON_KEY = config('AMAZON_KEY', default=None)
AMAZON_SECRET = config('AMAZON_SECRET', default=None)
AMAZON_REGION = config('AMAZON_REGION', default='us-east-1')
GOOGLE_CREDENTIALS = config('GOOGLE_CREDENTIALS', default=None)
RACKSPACE_KEY = config('RACKSPACE_KEY', default=None)
RACKSPACE_SECRET = config('RACKSPACE_SECRET', default=None)
RACKSPACE_REGION = config('RACKSPACE_REGION', default='IAD')
LOCAL_KEY = config('LOCAL_KEY', default=mkdtemp(prefix='cloud-storage-test-'))
if not os.path.exists(LOCAL_KEY):
os.makedirs(LOCAL_KEY)
LOCAL_SECRET = config('LOCAL_SECRET', default='local-storage-secret')
|
import hashlib
import os
from tempfile import mkdtemp
from time import time
from prettyconf.configuration import Configuration
config = Configuration()
# Append epoch to prevent test runs from clobbering each other.
CONTAINER_PREFIX = 'cloud-storage-test-' + str(int(time()))
SECRET = hashlib.sha1(os.urandom(128)).hexdigest()
SALT = hashlib.sha1(os.urandom(128)).hexdigest()
TEXT_FILENAME = 'flask.txt'
TEXT_STREAM_FILENAME = 'flask-stream.txt'
TEXT_FORM_FILENAME = 'flask-form.txt'
TEXT_MD5_CHECKSUM = '2a5a634f5c8d931350e83e41c9b3b0bb'
BINARY_FILENAME = 'avatar.png'
BINARY_FORM_FILENAME = 'avatar-form.png'
BINARY_STREAM_FILENAME = 'avatar-stream.png'
BINARY_MD5_CHECKSUM = '2f907a59924ad96b7478074ed96b05f0'
BINARY_OPTIONS = {
'meta_data': {
'owner-id': 'da17c32d-21c2-4bfe-b083-e2e78187d868',
'owner-email': '[email protected]'
},
'content_type': 'image/png',
'content_disposition': 'attachment; filename=avatar-attachment.png',
}
AMAZON_KEY = config('AMAZON_KEY', default=None)
AMAZON_SECRET = config('AMAZON_SECRET', default=None)
AMAZON_REGION = config('AMAZON_REGION', default='us-east-1')
GOOGLE_CREDENTIALS = config('GOOGLE_CREDENTIALS', default=None)
RACKSPACE_KEY = os.environ['RACKSPACE_KEY']
RACKSPACE_SECRET = os.environ['RACKSPACE_SECRET']
RACKSPACE_REGION = os.environ['RACKSPACE_REGION']
# RACKSPACE_KEY = config('RACKSPACE_KEY', default=None)
# RACKSPACE_SECRET = config('RACKSPACE_SECRET', default=None)
# RACKSPACE_REGION = config('RACKSPACE_REGION', default='IAD')
LOCAL_KEY = config('LOCAL_KEY', default=mkdtemp(prefix='cloud-storage-test-'))
if not os.path.exists(LOCAL_KEY):
os.makedirs(LOCAL_KEY)
LOCAL_SECRET = config('LOCAL_SECRET', default='local-storage-secret')
|
mit
|
Python
|
6e3ebff613254c7e13d89cd3599e030947a5072f
|
fix coverage report
|
bulkan/robotframework-requests,bulkan/robotframework-requests
|
tests/unittest/test_calls.py
|
tests/unittest/test_calls.py
|
import os
import sys
from unittest import TestCase, mock
from unittest.mock import patch
# I hate it but I can't get the coverage report to work without it, must be before RequestsLibrary import
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../src/')))
import RequestsLibrary
lib = RequestsLibrary.RequestsLibrary()
HTTP_LOCAL_SERVER = 'http://localhost:5000'
sess_headers = {'content-type': False}
post_headers = {'Content-Type': 'application/json'}
class TestCalls(TestCase):
def test_import_defaults(self):
RequestsLibrary.RequestsLibrary()
@patch('RequestsLibrary.RequestsLibrary._common_request')
def test_post_request_with_empty_data(self, common_request):
lib.create_session('http_server', HTTP_LOCAL_SERVER, sess_headers)
lib.post_request('http_server', '/anything', data="", headers=post_headers)
common_request.assert_called_with('post', mock.ANY, '/anything', allow_redirects=True, data='',
files=None, headers={'Content-Type': 'application/json'}, json=None,
params=None, timeout=None)
|
from unittest import TestCase, mock
from unittest.mock import patch
import RequestsLibrary
lib = RequestsLibrary.RequestsLibrary()
HTTP_LOCAL_SERVER = 'http://localhost:5000'
sess_headers = {'content-type': False}
post_headers = {'Content-Type': 'application/json'}
class TestCalls(TestCase):
@patch('RequestsLibrary.RequestsLibrary._common_request')
def test_post_request_with_empty_data(self, common_request):
lib.create_session('http_server', HTTP_LOCAL_SERVER, sess_headers)
lib.post_request('http_server', '/anything', data="", headers=post_headers)
common_request.assert_called_with('post', mock.ANY, '/anything', allow_redirects=True, data='',
files=None, headers={'Content-Type': 'application/json'}, json=None,
params=None, timeout=None)
|
mit
|
Python
|
3dd0ac13a5c2a3e0dc949d60e807b438c36636a9
|
Fix for post_process.
|
cwant/tessagon
|
core/tessagon.py
|
core/tessagon.py
|
from tessagon.core.grid_tile_generator import GridTileGenerator
from tessagon.core.rotate_tile_generator import RotateTileGenerator
class Tessagon:
def __init__(self, **kwargs):
if 'function' in kwargs:
self.f = kwargs['function']
else:
raise ValueError('Must specify a function')
self.tile_class = self.init_tile_class()
if 'tile_generator' in kwargs:
self.tile_generator = kwargs['tile_generator'](self, **kwargs)
elif 'rot_factor' in kwargs:
self.tile_generator = RotateTileGenerator(self, **kwargs)
else:
self.tile_generator = GridTileGenerator(self, **kwargs)
# Optional post processing function
self.post_process = None
if 'post_process' in kwargs:
self.post_process = kwargs['post_process']
if 'adaptor_class' in kwargs:
adaptor_class = kwargs['adaptor_class']
self.mesh_adaptor = adaptor_class(**kwargs)
else:
raise ValueError('Must provide a mesh adaptor class')
self.tiles = None
self.face_types = {}
self.vert_types = {}
def create_mesh(self):
self._initialize_tiles()
self.mesh_adaptor.create_empty_mesh()
self._calculate_verts()
self._calculate_faces()
self.mesh_adaptor.finish_mesh()
if self.post_process:
# Run user defined post-processing code
# Need to pass self here (this could be designed better)
self.post_process(self)
return self.mesh_adaptor.get_mesh()
def inspect(self):
print("\n=== %s ===\n" % (self.__class__.__name__))
for i in range(len(self.tiles)):
self.tiles[i].inspect(tile_number=i)
### Below are protected
def _initialize_tiles(self):
self.tiles = self.tile_generator.create_tiles()
def _calculate_verts(self):
for tile in self.tiles:
tile.calculate_verts()
def _calculate_faces(self):
for tile in self.tiles:
tile.calculate_faces()
|
from tessagon.core.grid_tile_generator import GridTileGenerator
from tessagon.core.rotate_tile_generator import RotateTileGenerator
class Tessagon:
def __init__(self, **kwargs):
if 'function' in kwargs:
self.f = kwargs['function']
else:
raise ValueError('Must specify a function')
self.tile_class = self.init_tile_class()
if 'tile_generator' in kwargs:
self.tile_generator = kwargs['tile_generator'](self, **kwargs)
elif 'rot_factor' in kwargs:
self.tile_generator = RotateTileGenerator(self, **kwargs)
else:
self.tile_generator = GridTileGenerator(self, **kwargs)
# Optional post processing function
self.post_process = None
if 'post_process' in kwargs:
self.post_process = kwargs['post_process']
if 'adaptor_class' in kwargs:
adaptor_class = kwargs['adaptor_class']
self.mesh_adaptor = adaptor_class(**kwargs)
else:
raise ValueError('Must provide a mesh adaptor class')
self.tiles = None
self.face_types = {}
self.vert_types = {}
def create_mesh(self):
self._initialize_tiles()
self.mesh_adaptor.create_empty_mesh()
self._calculate_verts()
self._calculate_faces()
self.mesh_adaptor.finish_mesh()
if self.post_process:
self.post_process()
return self.mesh_adaptor.get_mesh()
def inspect(self):
print("\n=== %s ===\n" % (self.__class__.__name__))
for i in range(len(self.tiles)):
self.tiles[i].inspect(tile_number=i)
### Below are protected
def _initialize_tiles(self):
self.tiles = self.tile_generator.create_tiles()
def _calculate_verts(self):
for tile in self.tiles:
tile.calculate_verts()
def _calculate_faces(self):
for tile in self.tiles:
tile.calculate_faces()
|
apache-2.0
|
Python
|
d71353d8d1e0778f121c3ec07067d617ab3ce932
|
Add run() method to Backend and make start() a wrapper for it. Also set backend.running in backend.start and backend.stop. Whatever code runs in a loop in backend.run() needs to check self.running periodically to make sure it should still be running.
|
rapidsms/rapidsms-legacy,rapidsms/rapidsms-legacy,rapidsms/rapidsms-legacy
|
lib/rapidsms/backends/backend.py
|
lib/rapidsms/backends/backend.py
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
class Backend(object):
def __init__ (self, router):
self.router = router
self.running = False
def log(self, level, message):
self.router.log(level, message)
def start(self):
self.running = True
try:
self.run()
finally:
self.running = False
def run (self):
raise NotImplementedError
def stop(self):
self.running = False
def send(self):
raise NotImplementedError
def receive(self):
raise NotImplementedError
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
class Backend(object):
def __init__ (self, router):
self.router = router
def log(self, level, message):
self.router.log(level, message)
def start(self):
raise NotImplementedError
def stop(self):
raise NotImplementedError
def send(self):
raise NotImplementedError
def receive(self):
raise NotImplementedError
|
bsd-3-clause
|
Python
|
f81bc19a9627225113ff1a3fa2aa0e6446402acb
|
test that shorten is text/plain (answer no :S)
|
aa-m-sa/summer-url-py
|
tests/test_api.py
|
tests/test_api.py
|
import unittest
from flask import url_for
import summerurlapp
import appconfig
import types
class SummerApiTestCase(unittest.TestCase):
"""Test that the API works as intended"""
testurl_http1 = "http://random.org"
testurl_bad = "random.org"
def setUp(self):
summerurlapp.app.config.from_object(appconfig.TestConfig)
self.app = summerurlapp.app.test_client()
summerurlapp.init_db()
def tearDown(self):
summerurlapp.init_db()
# use init_db() to clear the test db after testcase
def post_shorten(self, link):
return self.app.post("/api/shorten", data = dict(link = link))
def test_shorten(self):
resp = self.post_shorten(self.testurl_http1)
self.assertEqual(resp.headers['Content-Type'], "text/plain")
def test_getbyid_ok(self):
respPost = self.post_shorten(self.testurl_http1)
gotid = respPost.data[0]
respId = self.app.get('/api/' + gotid)
self.assertEqual(respId.status_code, 301)
self.assertEqual(respId.location, self.testurl_http1)
def test_getbyid_appendscheme(self):
respPost = self.post_shorten(self.testurl_bad)
gotid = respPost.data[0]
respId = self.app.get('/api/' + gotid)
self.assertEqual(respId.status_code, 301)
self.assertEqual(respId.location, "http://" + self.testurl_bad)
def test_getbyid_noid(self):
resp = self.app.get('/api/9000')
self.assertEqual(resp.status_code, 404)
resp = self.app.get('/api/nonexistentid')
self.assertEqual(resp.status_code, 404)
|
import unittest
from flask import url_for
import summerurlapp
import appconfig
import types
class SummerApiTestCase(unittest.TestCase):
"""Test that the API works as intended"""
testurl_http1 = "http://random.org"
testurl_bad = "random.org"
def setUp(self):
summerurlapp.app.config.from_object(appconfig.TestConfig)
self.app = summerurlapp.app.test_client()
summerurlapp.init_db()
def tearDown(self):
summerurlapp.init_db()
# use init_db() to clear the test db after testcase
def post_shorten(self, link):
return self.app.post("/api/shorten", data = dict(link = link))
def test_shorten(self):
resp = self.post_shorten(self.testurl_http1)
self.assertEqual(resp.data[0], "1")
def test_getbyid_ok(self):
respPost = self.post_shorten(self.testurl_http1)
gotid = respPost.data[0]
respId = self.app.get('/api/' + gotid)
self.assertEqual(respId.status_code, 301)
self.assertEqual(respId.location, self.testurl_http1)
def test_getbyid_appendscheme(self):
respPost = self.post_shorten(self.testurl_bad)
gotid = respPost.data[0]
respId = self.app.get('/api/' + gotid)
self.assertEqual(respId.status_code, 301)
self.assertEqual(respId.location, "http://" + self.testurl_bad)
def test_getbyid_noid(self):
resp = self.app.get('/api/9000')
self.assertEqual(resp.status_code, 404)
resp = self.app.get('/api/nonexistentid')
self.assertEqual(resp.status_code, 404)
|
mit
|
Python
|
a396332ad66d31ac5caa1fcbf92ed564615fac85
|
Use assert_raises in test_cli
|
rspeer/python-ftfy
|
tests/test_cli.py
|
tests/test_cli.py
|
import subprocess
import os
from nose.tools import eq_, assert_raises
# Get the filename of 'halibote.txt', which contains some mojibake about
# Harry Potter in Chinese
THIS_DIR = os.path.dirname(__file__)
TEST_FILENAME = os.path.join(THIS_DIR, 'halibote.txt')
CORRECT_OUTPUT = '【更新】《哈利波特》石堧卜才新婚娶初戀今痠逝\n'
FAILED_OUTPUT = '''ftfy error:
This input couldn't be decoded as 'windows-1252'. We got the following error:
'charmap' codec can't decode byte 0x90 in position 5: character maps to <undefined>
ftfy works best when its input is in a known encoding. You can use `ftfy -g`
to guess, if you're desperate. Otherwise, give the encoding name with the
`-e` option, such as `ftfy -e latin-1`.
'''
def get_command_output(args, stdin=None):
return subprocess.check_output(args, stdin=stdin, stderr=subprocess.STDOUT, timeout=5).decode('utf-8')
def test_basic():
output = get_command_output(['ftfy', TEST_FILENAME])
eq_(output, CORRECT_OUTPUT)
def test_guess_bytes():
output = get_command_output(['ftfy', '-g', TEST_FILENAME])
eq_(output, CORRECT_OUTPUT)
def test_alternate_encoding():
# The file isn't really in Windows-1252. But that's a problem ftfy
# can fix, if it's allowed to be sloppy when reading the file.
output = get_command_output(['ftfy', '-e', 'sloppy-windows-1252', TEST_FILENAME])
eq_(output, CORRECT_OUTPUT)
def test_wrong_encoding():
# It's more of a problem when the file doesn't actually decode.
with assert_raises(subprocess.CalledProcessError) as context:
get_command_output(['ftfy', '-e', 'windows-1252', TEST_FILENAME])
e = context.exception
eq_(e.output.decode('utf-8'), FAILED_OUTPUT)
def test_stdin():
with open(TEST_FILENAME, 'rb') as infile:
output = get_command_output(['ftfy'], stdin=infile)
eq_(output, CORRECT_OUTPUT)
|
import subprocess
import os
from nose.tools import eq_
# Get the filename of 'halibote.txt', which contains some mojibake about
# Harry Potter in Chinese
THIS_DIR = os.path.dirname(__file__)
TEST_FILENAME = os.path.join(THIS_DIR, 'halibote.txt')
CORRECT_OUTPUT = '【更新】《哈利波特》石堧卜才新婚娶初戀今痠逝\n'
FAILED_OUTPUT = '''ftfy error:
This input couldn't be decoded as 'windows-1252'. We got the following error:
'charmap' codec can't decode byte 0x90 in position 5: character maps to <undefined>
ftfy works best when its input is in a known encoding. You can use `ftfy -g`
to guess, if you're desperate. Otherwise, give the encoding name with the
`-e` option, such as `ftfy -e latin-1`.
'''
def get_command_output(args, stdin=None):
return subprocess.check_output(args, stdin=stdin, stderr=subprocess.STDOUT, timeout=5).decode('utf-8')
def test_basic():
output = get_command_output(['ftfy', TEST_FILENAME])
eq_(output, CORRECT_OUTPUT)
def test_guess_bytes():
output = get_command_output(['ftfy', '-g', TEST_FILENAME])
eq_(output, CORRECT_OUTPUT)
def test_alternate_encoding():
# The file isn't really in Windows-1252. But that's a problem ftfy
# can fix, if it's allowed to be sloppy when reading the file.
output = get_command_output(['ftfy', '-e', 'sloppy-windows-1252', TEST_FILENAME])
eq_(output, CORRECT_OUTPUT)
def test_wrong_encoding():
# It's more of a problem when the file doesn't actually decode.
try:
get_command_output(['ftfy', '-e', 'windows-1252', TEST_FILENAME])
assert False, "Should have raised a CalledProcessError"
except subprocess.CalledProcessError as e:
eq_(e.output.decode('utf-8'), FAILED_OUTPUT)
def test_stdin():
with open(TEST_FILENAME, 'rb') as infile:
output = get_command_output(['ftfy'], stdin=infile)
eq_(output, CORRECT_OUTPUT)
|
mit
|
Python
|
83dc9b5f80268a5bd23a737d66a219067353f2b7
|
change parameter handling
|
fpbattaglia/ophys_io,fpbattaglia/ophys_io,wonkoderverstaendige/ophys_io,wonkoderverstaendige/ophys_io
|
test_files.py
|
test_files.py
|
#!/usr/bin/env python
# Generate test directories to mess with from a list of filenames.
import argparse
import os
import sys
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input')
parser.add_argument('-t', '--target')
args = parser.parse_args()
base_dir = args.target if args.target else 'testing'
input_file = args.input or 'filenames.tsv'
with open(input_file) as fh:
[os.makedirs(os.path.join(base_dir, row.split()[0])) for row in fh.readlines()]
|
#!/usr/bin/env python
# Generate test directories to mess with from a list of filenames.
import argparse
import os
import sys
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input')
parser.add_argument('-t', '--target')
args = parser.parse_args()
base_dir = args.target if args.target else 'testing'
input_file = args.input if args.input is not None else 'filenames.tsv'
with open(input_file) as fh:
[os.makedirs(os.path.join(base_dir, row.split()[0])) for row in fh.readlines()]
|
mit
|
Python
|
e94b2593424518632c704f4a440df3bc51cbcd3e
|
fix failing tests.
|
core/uricore
|
tests/test_uri.py
|
tests/test_uri.py
|
# encoding: utf-8
import unittest
from resources import URI
from resources import IRI
class TestURISnowman(unittest.TestCase):
def setUp(self):
idna = u"\N{SNOWMAN}".encode('idna')
uri = "http://u:p@www.%s:80/path" % idna
self.uri = URI(uri)
def testFail(self):
self.assertRaises(TypeError, URI, u"http://\u2603/")
def test_repr(self):
expect = "URI('http://www.xn--n3h/path', encoding='idna')".encode('ascii')
self.assertEquals(repr(self.uri), expect)
def test_netloc(self):
expect = "u:[email protected]:80".encode('ascii')
self.assertEquals(self.uri.netloc, expect)
def test_hostname(self):
expect = "www.xn--n3h".encode('ascii')
self.assertEquals(self.uri.hostname, expect)
def test_port(self):
expect = "80"
self.assertEquals(self.uri.port, expect)
def test_path(self):
expect = "/path".encode('ascii')
self.assertEquals(self.uri.path, expect)
|
# encoding: utf-8
import unittest
from resources import URI
from resources import IRI
class TestURISnowman(unittest.TestCase):
def setUp(self):
uri = "http://u:p@" + "www.\N{SNOWMAN}".encode('idna') + ":80/path"
self.uri = URI(uri)
def testFail(self):
self.assertRaises(TypeError, URI, u"http://\u2603/")
def test_repr(self):
expect = "URI('http://www.xn--n3h/path', encoding='idna')".encode('ascii')
self.assertEquals(repr(self.uri), expect)
def test_netloc(self):
expect = "u:[email protected]:80".encode('ascii')
self.assertEquals(self.uri.netloc, expect)
def test_hostname(self):
expect = "www.xn--n3h".encode('ascii')
self.assertEquals(self.uri.hostname, expect)
def test_port(self):
expect = "80"
self.assertEquals(self.uri.port, expect)
def test_path(self):
expect = "/path".encode('ascii')
self.assertEquals(self.uri.path, expect)
|
bsd-2-clause
|
Python
|
4104ea04d75b400e7a2a4d71c259ceb0957f8992
|
include the absolute url to the onsite page
|
crate-archive/crate-site,crateio/crate.pypi,crate-archive/crate-site
|
crate_project/apps/packages/api.py
|
crate_project/apps/packages/api.py
|
from tastypie import fields
from tastypie.resources import ModelResource
from packages.models import Package, Release
class PackageResource(ModelResource):
releases = fields.ToManyField("packages.api.ReleaseResource", "releases")
class Meta:
allowed_methods = ["get"]
include_absolute_url = True
queryset = Package.objects.all()
resource_name = "package"
class ReleaseResource(ModelResource):
package = fields.ForeignKey(PackageResource, "package")
class Meta:
allowed_methods = ["get"]
fields = [
"author", "author_email", "created", "description", "download_uri",
"license", "maintainer", "maintainer_email", "package", "platform",
"requires_python", "summary", "version"
]
include_absolute_url = True
queryset = Release.objects.all()
resource_name = "release"
|
from tastypie import fields
from tastypie.resources import ModelResource
from packages.models import Package, Release
class PackageResource(ModelResource):
releases = fields.ToManyField("packages.api.ReleaseResource", "releases")
class Meta:
allowed_methods = ["get"]
queryset = Package.objects.all()
resource_name = "package"
class ReleaseResource(ModelResource):
package = fields.ForeignKey(PackageResource, "package")
class Meta:
allowed_methods = ["get"]
fields = [
"author", "author_email", "created", "description", "download_uri",
"license", "maintainer", "maintainer_email", "package", "platform",
"requires_python", "summary", "version"
]
queryset = Release.objects.all()
resource_name = "release"
|
bsd-2-clause
|
Python
|
6bc3e784828c1f339ab4fd5fe3ca6dc80a07bb46
|
Enable logs
|
UPOLSearch/UPOL-Search-Engine,UPOLSearch/UPOL-Search-Engine,UPOLSearch/UPOL-Search-Engine,UPOLSearch/UPOL-Search-Engine
|
crawler/tasks.py
|
crawler/tasks.py
|
from __future__ import absolute_import, unicode_literals
from .celery import app
from celery.utils.log import get_task_logger
from .crawler import crawl_url
logger = get_task_logger(__name__)
@app.task(rate_limit="6/s", queue='crawler')
def crawl_url_task(url, value):
crawl_url(url, value)
response, status, redirected = crawl_url(url)
if response is not None:
logger.info(str(url) + " | " + str(response.status_code) + " | " + str(response.reason) +
" | " + str(response.headers['Content-Type']) + " | " + str(status) + " | Redirected: " + str(redirected))
else:
logger.info(url + " | " + str(status) + " | Redirected: " + str(redirected))
|
from __future__ import absolute_import, unicode_literals
from .celery import app
# from celery.utils.log import get_task_logger
from .crawler import crawl_url
# logger = get_task_logger(__name__)
@app.task(rate_limit="6/s", queue='crawler')
def crawl_url_task(url, value):
crawl_url(url, value)
# response, status, redirected = crawl_url(url)
# if response is not None:
# logger.info(str(url) + " | " + str(response.status_code) + " | " + str(response.reason) +
# " | " + str(response.headers['Content-Type']) + " | " + str(status) + " | Redirected: " + str(redirected))
# else:
# logger.info(url + " | " + str(status) + " | Redirected: " + str(redirected))
|
mit
|
Python
|
89d70f5794969cb8d71201504b8645a8359f5b70
|
read config file strings as unicode
|
edx/credentials,edx/credentials,edx/credentials,edx/credentials
|
credentials/settings/production.py
|
credentials/settings/production.py
|
from os import environ
import yaml
from credentials.settings.base import *
from credentials.settings.utils import get_env_setting, get_logger_config
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = ['*']
LOGGING = get_logger_config()
# Keep track of the names of settings that represent dicts. Instead of overriding the values in base.py,
# the values read from disk should UPDATE the pre-configured dicts.
DICT_UPDATE_KEYS = ('JWT_AUTH',)
# AMAZON S3 STORAGE CONFIGURATION
# See: https://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
# This may be overridden by the yaml in CREDENTIALS_CFG, but it should
# be here as a default.
FILE_STORAGE_BACKEND = {}
CONFIG_FILE = get_env_setting('CREDENTIALS_CFG')
with open(CONFIG_FILE, encoding='utf-8') as f:
config_from_yaml = yaml.load(f)
# Remove the items that should be used to update dicts, and apply them separately rather
# than pumping them into the local vars.
dict_updates = {key: config_from_yaml.pop(key, None) for key in DICT_UPDATE_KEYS}
for key, value in list(dict_updates.items()):
if value:
vars()[key].update(value)
vars().update(config_from_yaml)
# Load the files storage backend settings for django storages
vars().update(FILE_STORAGE_BACKEND)
if 'EXTRA_APPS' in locals():
INSTALLED_APPS += EXTRA_APPS
DB_OVERRIDES = dict(
PASSWORD=environ.get('DB_MIGRATION_PASS', DATABASES['default']['PASSWORD']),
ENGINE=environ.get('DB_MIGRATION_ENGINE', DATABASES['default']['ENGINE']),
USER=environ.get('DB_MIGRATION_USER', DATABASES['default']['USER']),
NAME=environ.get('DB_MIGRATION_NAME', DATABASES['default']['NAME']),
HOST=environ.get('DB_MIGRATION_HOST', DATABASES['default']['HOST']),
PORT=environ.get('DB_MIGRATION_PORT', DATABASES['default']['PORT']),
)
for override, value in DB_OVERRIDES.items():
DATABASES['default'][override] = value
|
from os import environ
import yaml
from credentials.settings.base import *
from credentials.settings.utils import get_env_setting, get_logger_config
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = ['*']
LOGGING = get_logger_config()
# Keep track of the names of settings that represent dicts. Instead of overriding the values in base.py,
# the values read from disk should UPDATE the pre-configured dicts.
DICT_UPDATE_KEYS = ('JWT_AUTH',)
# AMAZON S3 STORAGE CONFIGURATION
# See: https://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
# This may be overridden by the yaml in CREDENTIALS_CFG, but it should
# be here as a default.
FILE_STORAGE_BACKEND = {}
CONFIG_FILE = get_env_setting('CREDENTIALS_CFG')
with open(CONFIG_FILE) as f:
config_from_yaml = yaml.load(f)
# Remove the items that should be used to update dicts, and apply them separately rather
# than pumping them into the local vars.
dict_updates = {key: config_from_yaml.pop(key, None) for key in DICT_UPDATE_KEYS}
for key, value in list(dict_updates.items()):
if value:
vars()[key].update(value)
vars().update(config_from_yaml)
# Load the files storage backend settings for django storages
vars().update(FILE_STORAGE_BACKEND)
if 'EXTRA_APPS' in locals():
INSTALLED_APPS += EXTRA_APPS
DB_OVERRIDES = dict(
PASSWORD=environ.get('DB_MIGRATION_PASS', DATABASES['default']['PASSWORD']),
ENGINE=environ.get('DB_MIGRATION_ENGINE', DATABASES['default']['ENGINE']),
USER=environ.get('DB_MIGRATION_USER', DATABASES['default']['USER']),
NAME=environ.get('DB_MIGRATION_NAME', DATABASES['default']['NAME']),
HOST=environ.get('DB_MIGRATION_HOST', DATABASES['default']['HOST']),
PORT=environ.get('DB_MIGRATION_PORT', DATABASES['default']['PORT']),
)
for override, value in DB_OVERRIDES.items():
DATABASES['default'][override] = value
|
agpl-3.0
|
Python
|
8257411a58f03d8a353129f2813cbc516a0e40c6
|
Make sure API tests are registered
|
editorsnotes/editorsnotes,editorsnotes/editorsnotes
|
editorsnotes/api/tests/__init__.py
|
editorsnotes/api/tests/__init__.py
|
from .serializers import *
from .views import *
|
agpl-3.0
|
Python
|
|
2982d38d863e6f7654c4939a526d6e783525f8d6
|
refactor compare_players
|
wroberts/cribbage,wroberts/cribbage
|
cribbage/main.py
|
cribbage/main.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
from cribbage.game import Game
from cribbage.randomplayer import RandomCribbagePlayer
from cribbage.simpleplayer import SimpleCribbagePlayer
def compare_players(players, num_games=1000):
stats = [0, 0]
for i in range(num_games):
g = Game(players)
g.play()
stats[g.winner] += 1
return stats
# ------------------------------------------------------------
# Cribbage Game
stats = compare_players([RandomCribbagePlayer(), RandomCribbagePlayer()])
# stats
# [487, 513]
stats = compare_players([RandomCribbagePlayer(), SimpleCribbagePlayer()], 500)
# with discard()
# stats
# [16, 484]
# with play_card()
# stats
# [12, 488]
# 0.976 success against random player
# http://www.socscistatistics.com/tests/chisquare/Default2.aspx
# The chi-square statistic is 0.5879. The p-value is .443236.
stats = compare_players([RandomCribbagePlayer(),
SimpleCribbagePlayer(estimate_discard=False)],
500)
# stats
# [161, 339]
stats = compare_players([SimpleCribbagePlayer(),
SimpleCribbagePlayer(estimate_playcard=False)],
500)
# stats
# [326, 174]
# stats (after optimizing code)
# [298, 202]
# [325, 175]
def myfunc():
stats = compare_players([SimpleCribbagePlayer(),
SimpleCribbagePlayer(estimate_playcard=False)],
100)
import cProfile
cProfile.run('myfunc()', sort='time')
# deck=make_deck()
# random.shuffle(deck)
# p=SimpleCribbagePlayer()
# hand=deck[:6]
# def wrap_discard():
# for i in range(1000):
# p.discard(hand,False)
# import hotshot
# prof = hotshot.Profile("stones.prof")
# prof.runcall(wrap_discard)
# prof.close()
# import hotshot.stats
# stats = hotshot.stats.load("stones.prof")
# stats.sort_stats('time', 'calls')
# stats.print_stats(20)
stats = compare_players([SimpleCribbagePlayer(estimate_discard=False),
SimpleCribbagePlayer(estimate_playcard=False)],
500)
# stats
# [48, 452]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
from cribbage.game import Game
from cribbage.randomplayer import RandomCribbagePlayer
from cribbage.simpleplayer import SimpleCribbagePlayer
# ------------------------------------------------------------
# Cribbage Game
stats = [0,0]
for i in range(1000):
g = Game([RandomCribbagePlayer(), RandomCribbagePlayer()])
g.play()
stats[g.winner] += 1
# stats
# [487, 513]
stats = [0,0]
for i in range(500):
g = Game([RandomCribbagePlayer(), SimpleCribbagePlayer()])
g.play()
stats[g.winner] += 1
# with discard()
# stats
# [16, 484]
# with play_card()
# stats
# [12, 488]
# 0.976 success against random player
# http://www.socscistatistics.com/tests/chisquare/Default2.aspx
# The chi-square statistic is 0.5879. The p-value is .443236.
stats = [0,0]
for i in range(500):
g = Game([RandomCribbagePlayer(), SimpleCribbagePlayer(estimate_discard=False)])
g.play()
stats[g.winner] += 1
# stats
# [161, 339]
stats = [0,0]
for i in range(500):
g = Game([SimpleCribbagePlayer(), SimpleCribbagePlayer(estimate_playcard=False)])
g.play()
stats[g.winner] += 1
# stats
# [326, 174]
# stats (after optimizing code)
# [298, 202]
# [325, 175]
def myfunc():
stats = [0,0]
for i in range(100):
g = Game([SimpleCribbagePlayer(), SimpleCribbagePlayer(estimate_playcard=False)])
g.play()
stats[g.winner] += 1
import cProfile
cProfile.run('myfunc()', sort='time')
# deck=make_deck()
# random.shuffle(deck)
# p=SimpleCribbagePlayer()
# hand=deck[:6]
# def wrap_discard():
# for i in range(1000):
# p.discard(hand,False)
# import hotshot
# prof = hotshot.Profile("stones.prof")
# prof.runcall(wrap_discard)
# prof.close()
# import hotshot.stats
# stats = hotshot.stats.load("stones.prof")
# stats.sort_stats('time', 'calls')
# stats.print_stats(20)
stats = [0,0]
for i in range(500):
g = Game([SimpleCribbagePlayer(estimate_discard=False), SimpleCribbagePlayer(estimate_playcard=False)])
g.play()
stats[g.winner] += 1
# stats
# [48, 452]
|
mit
|
Python
|
0415361dcd6171f0f407ee528fa0761bf1e914b0
|
Add proc name to gunicorn conf.
|
jerivas/mezzanine,tuxinhang1989/mezzanine,joshcartme/mezzanine,dsanders11/mezzanine,orlenko/sfpirg,Cicero-Zhao/mezzanine,dovydas/mezzanine,promil23/mezzanine,gbosh/mezzanine,Kniyl/mezzanine,dsanders11/mezzanine,wrwrwr/mezzanine,christianwgd/mezzanine,agepoly/mezzanine,orlenko/sfpirg,saintbird/mezzanine,scarcry/snm-mezzanine,dustinrb/mezzanine,stbarnabas/mezzanine,dovydas/mezzanine,frankier/mezzanine,promil23/mezzanine,sjdines/mezzanine,spookylukey/mezzanine,geodesign/mezzanine,vladir/mezzanine,Cicero-Zhao/mezzanine,jerivas/mezzanine,PegasusWang/mezzanine,Kniyl/mezzanine,nikolas/mezzanine,dekomote/mezzanine-modeltranslation-backport,ZeroXn/mezzanine,ryneeverett/mezzanine,ZeroXn/mezzanine,promil23/mezzanine,jerivas/mezzanine,dekomote/mezzanine-modeltranslation-backport,SoLoHiC/mezzanine,webounty/mezzanine,damnfine/mezzanine,Cajoline/mezzanine,gbosh/mezzanine,biomassives/mezzanine,ryneeverett/mezzanine,batpad/mezzanine,fusionbox/mezzanine,frankier/mezzanine,guibernardino/mezzanine,stephenmcd/mezzanine,adrian-the-git/mezzanine,industrydive/mezzanine,christianwgd/mezzanine,wyzex/mezzanine,stephenmcd/mezzanine,tuxinhang1989/mezzanine,cccs-web/mezzanine,molokov/mezzanine,viaregio/mezzanine,gradel/mezzanine,dustinrb/mezzanine,nikolas/mezzanine,biomassives/mezzanine,wyzex/mezzanine,gradel/mezzanine,tuxinhang1989/mezzanine,spookylukey/mezzanine,batpad/mezzanine,industrydive/mezzanine,orlenko/plei,eino-makitalo/mezzanine,industrydive/mezzanine,viaregio/mezzanine,ryneeverett/mezzanine,sjuxax/mezzanine,stephenmcd/mezzanine,webounty/mezzanine,Skytorn86/mezzanine,wrwrwr/mezzanine,mush42/mezzanine,viaregio/mezzanine,sjuxax/mezzanine,douglaskastle/mezzanine,cccs-web/mezzanine,eino-makitalo/mezzanine,sjuxax/mezzanine,jjz/mezzanine,Cajoline/mezzanine,orlenko/plei,jjz/mezzanine,sjdines/mezzanine,frankchin/mezzanine,geodesign/mezzanine,dsanders11/mezzanine,damnfine/mezzanine,webounty/mezzanine,scarcry/snm-mezzanine,Skytorn86/mezzanine,damnfine/mezzanine,theclanks/mezzanine,readevalprint/mezzanine,mush42/mezzanine,Cajoline/mezzanine,saintbird/mezzanine,wbtuomela/mezzanine,molokov/mezzanine,frankier/mezzanine,douglaskastle/mezzanine,AlexHill/mezzanine,joshcartme/mezzanine,wyzex/mezzanine,PegasusWang/mezzanine,gradel/mezzanine,douglaskastle/mezzanine,readevalprint/mezzanine,wbtuomela/mezzanine,adrian-the-git/mezzanine,stbarnabas/mezzanine,dekomote/mezzanine-modeltranslation-backport,frankchin/mezzanine,molokov/mezzanine,emile2016/mezzanine,adrian-the-git/mezzanine,PegasusWang/mezzanine,jjz/mezzanine,eino-makitalo/mezzanine,biomassives/mezzanine,sjdines/mezzanine,frankchin/mezzanine,dustinrb/mezzanine,scarcry/snm-mezzanine,agepoly/mezzanine,mush42/mezzanine,SoLoHiC/mezzanine,geodesign/mezzanine,theclanks/mezzanine,wbtuomela/mezzanine,joshcartme/mezzanine,dovydas/mezzanine,gbosh/mezzanine,orlenko/plei,guibernardino/mezzanine,emile2016/mezzanine,theclanks/mezzanine,emile2016/mezzanine,Kniyl/mezzanine,vladir/mezzanine,agepoly/mezzanine,AlexHill/mezzanine,christianwgd/mezzanine,saintbird/mezzanine,readevalprint/mezzanine,SoLoHiC/mezzanine,nikolas/mezzanine,Skytorn86/mezzanine,fusionbox/mezzanine,orlenko/sfpirg,spookylukey/mezzanine,vladir/mezzanine,ZeroXn/mezzanine
|
mezzanine/project_template/deploy/gunicorn.conf.py
|
mezzanine/project_template/deploy/gunicorn.conf.py
|
import os
bind = "127.0.0.1:%(gunicorn_port)s"
workers = (os.sysconf("SC_NPROCESSORS_ONLN") * 2) + 1
loglevel = "error"
proc_name = "%(proj_name)s"
|
import os
bind = "127.0.0.1:%(port)s"
workers = (os.sysconf("SC_NPROCESSORS_ONLN") * 2) + 1
loglevel = "error"
|
bsd-2-clause
|
Python
|
56606d3234fbebc504feec201e4a99a3adcd5023
|
Fix code for pyflake8 convention
|
jobiols/management-system,ClearCorp/management-system
|
mgmtsystem_hazard_risk/models/mgmtsystem_hazard.py
|
mgmtsystem_hazard_risk/models/mgmtsystem_hazard.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from .common import _parse_risk_formula
class MgmtsystemHazard(models.Model):
_inherit = "mgmtsystem.hazard"
risk_type_id = fields.Many2one(
'mgmtsystem.hazard.risk.type',
'Risk Type',
required=True,
)
risk = fields.Integer(compute="_compute_risk", string='Risk')
residual_risk_ids = fields.One2many(
'mgmtsystem.hazard.residual_risk',
'hazard_id',
'Residual Risk Evaluations',
)
@api.depends("probability_id", "severity_id", "usage_id")
def _compute_risk(self):
mycompany = self.env['res.users'].browse(self._uid).company_id
for hazard in self:
if hazard.probability_id and\
hazard.severity_id and\
hazard.usage_id:
hazard.risk = _parse_risk_formula(
mycompany.risk_computation_id.name,
hazard.probability_id.value,
hazard.severity_id.value,
hazard.usage_id.value
)
else:
hazard.risk = False
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from .common import _parse_risk_formula
class MgmtsystemHazard(models.Model):
_inherit = "mgmtsystem.hazard"
risk_type_id = fields.Many2one(
'mgmtsystem.hazard.risk.type',
'Risk Type',
required=True,
)
risk = fields.Integer(compute="_compute_risk", string='Risk')
residual_risk_ids = fields.One2many(
'mgmtsystem.hazard.residual_risk',
'hazard_id',
'Residual Risk Evaluations',
)
@api.depends("probability_id", "severity_id", "usage_id")
def _compute_risk(self):
mycompany = self.env['res.users'].browse(self._uid).company_id
for hazard in self:
if hazard.probability_id and hazard.severity_id and hazard.usage_id:
hazard.risk = _parse_risk_formula(
mycompany.risk_computation_id.name,
hazard.probability_id.value,
hazard.severity_id.value,
hazard.usage_id.value
)
else:
hazard.risk = False
|
agpl-3.0
|
Python
|
96113152179ca81f24b85c19420fae7078907035
|
change to ipn ver
|
lucky-/django-amazon-buynow
|
amazon_buttons/views.py
|
amazon_buttons/views.py
|
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from amazon_buttons import models
import datetime
from django.conf import settings
import urllib
from amazon_buttons import buttonconf
from amazon_buttons import _crypt
@csrf_exempt
def ipn_handler(request):
ipn = models.ipn_response()
ipn.datetime = datetime.datetime.fromtimestamp(int(request.POST['transactionDate']))
for key, val in request.POST.iteritems():
attrib = getattr(ipn, key, None)
if attrib:
setattr(ipn, key, val)
if settings.AMAZON_IPN_VERIFY:
if settings.AMAZON_SANDBOX:
ver_url = buttonconf.SANDBOX_VERIFY
else:
ver_url = buttonconf.LIVE_VERIFY
prepd_data = buttonconf.DEFAULT_IPNVER_DATA
prepd_data['UrlEndPoint'] = settings.DOMAIN_FOR_AMAZON_IPN + reverse('amazon_ipn')
prepd_data['target_url'] = ver_url
prepd_data['HttpParameters'] = urllib.urlencode(request.POST)
prepd_data['AWSAccessKeyId'] = settings.AMAZON_ACCESS_KEY
prepd_data['Timestamp'] = datetime.datetime.now().isoformat()
s_key = settings.AMAZON_SECRET_KEY
prepd_data['Signature'] = _crypt.sig_maker(s_key, prepd_data,'GET')
del prepd_data['target_url']
fin_url = urllib.urlencode(prepd_data)
print fin_url
else:
ipn.save()
return HttpResponse('Done')
|
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from amazon_buttons import models
import datetime
from django.conf import settings
import urllib
from amazon_buttons import buttonconf
from amazon_buttons import _crypt
@csrf_exempt
def ipn_handler(request):
ipn = models.ipn_response()
ipn.datetime = datetime.datetime.fromtimestamp(int(request.POST['transactionDate']))
for key, val in request.POST.iteritems():
attrib = getattr(ipn, key, None)
if attrib:
setattr(ipn, key, val)
if settings.AMAZON_IPN_VERIFY:
if settings.AMAZON_SANDBOX:
ver_url = buttonconf.SANDBOX_VERIFY
else:
ver_url = buttonconf.LIVE_VERIFY
prepd_data = buttonconf.DEFAULT_IPNVER_DATA
prepd_data['UrlEndPoint'] = ver_url
prepd_data['target_url'] = ver_url
prepd_data['HttpParameters'] = urllib.urlencode(request.POST)
prepd_data['AWSAccessKeyId'] = settings.AMAZON_ACCESS_KEY
prepd_data['Timestamp'] = datetime.datetime.now().isoformat()
s_key = settings.AMAZON_SECRET_KEY
prepd_data['Signature'] = _crypt.sig_maker(s_key, prepd_data,'GET')
del prepd_data['target_url']
fin_url = urllib.urlencode(prepd_data)
print fin_url
else:
ipn.save()
return HttpResponse('Done')
|
bsd-3-clause
|
Python
|
859f97e2ba209479b0e882946afdf235ccd9e648
|
Fix #1 Busy loop
|
Ernesto-Alvarez/pig
|
pigv2/backends/glue.py
|
pigv2/backends/glue.py
|
import threading
import ipaddr
import time
#Hub: takes one message from the input queue and replicates it across all output queues
class hub(object):
def __init__(self,input,output):
#Input and output functions (usually q1.get and [q2.put,q3.put....])
self.input = input;
self.output = output;
self.x=threading.Thread(target=self.process)
self.x.daemon=True
self.x.start()
def process(self):
while True:
data = self.input()
for i in self.output:
i(data)
#Network range gate: takes an IP packet from the input queue and passes it to the output queue if and only if the IP source is within a list of dymanically changing networks.
#Takes an input function, an output function and an update function (which returns a list of addresses, usually database.ip_network_table.ip_list)
class network_range_gate(object):
def __init__(self,input,output,update,update_frequency=0.1):
self.input = input;
self.output = output;
self.addresses = []
self.db_semaphore = threading.Semaphore()
self.passed = []
self.update_function = update
self.update_frequency = update_frequency
self.x=threading.Thread(target=self.process)
self.x.daemon=True
self.x.start()
self.y=threading.Thread(target=self.update_addresses)
self.y.daemon=True
self.y.start()
# def debug_data(self):
# print "Gating list", self.addresses
# print "Recently passed", self.passed
# self.passed = []
def process(self):
while True:
data = self.input()
self.db_semaphore.acquire()
try:
for i in self.addresses:
if i.Contains(data['source']):
self.output(data)
#self.passed.append(data['source'])
break
except:
pass
self.db_semaphore.release()
def update_addresses(self):
while True:
time.sleep(0.1) # to avoid a busy loop
self.db_semaphore.acquire()
self.addresses = self.update_function()
self.db_semaphore.release()
|
import threading
import ipaddr
#Hub: takes one message from the input queue and replicates it across all output queues
class hub(object):
def __init__(self,input,output):
#Input and output functions (usually q1.get and [q2.put,q3.put....])
self.input = input;
self.output = output;
self.x=threading.Thread(target=self.process)
self.x.daemon=True
self.x.start()
def process(self):
while True:
data = self.input()
for i in self.output:
i(data)
#Network range gate: takes an IP packet from the input queue and passes it to the output queue if and only if the IP source is within a list of dymanically changing networks.
#Takes an input function, an output function and an update function (which returns a list of addresses, usually database.ip_network_table.ip_list)
class network_range_gate(object):
def __init__(self,input,output,update):
self.input = input;
self.output = output;
self.addresses = []
self.db_semaphore = threading.Semaphore()
self.passed = []
self.update_function = update
self.x=threading.Thread(target=self.process)
self.x.daemon=True
self.x.start()
self.y=threading.Thread(target=self.update_addresses)
self.y.daemon=True
self.y.start()
# def debug_data(self):
# print "Gating list", self.addresses
# print "Recently passed", self.passed
# self.passed = []
def process(self):
while True:
data = self.input()
self.db_semaphore.acquire()
try:
for i in self.addresses:
if i.Contains(data['source']):
self.output(data)
#self.passed.append(data['source'])
break
except:
pass
self.db_semaphore.release()
def update_addresses(self):
while True:
self.db_semaphore.acquire()
self.addresses = self.update_function()
self.db_semaphore.release()
|
lgpl-2.1
|
Python
|
ce73fe56375bef32a0997bdbe4ab305f232d605e
|
rename variable
|
rockers7414/xmusic,rockers7414/xmusic-crawler
|
daemon/rpcservice/systemservice.py
|
daemon/rpcservice/systemservice.py
|
import psutil
import json
from rpcservice.rpcservice import RPCService
from decorator.serialize import json_decorate
from decorator.singleton import singleton
@singleton
@json_decorate
class SystemService(RPCService):
def get_server_status(self):
system_status = {
"cpu": psutil.cpu_percent(),
"memory": psutil.virtual_memory().percent,
}
json_obj = []
json_obj.append(system_status)
return json_obj
def get_server_version(self):
pass
|
import psutil
import json
from rpcservice.rpcservice import RPCService
from decorator.serialize import json_decorate
from decorator.singleton import singleton
@singleton
@json_decorate
class SystemService(RPCService):
def get_server_status(self):
cpu_status = {
"cpu": psutil.cpu_percent(),
"memory": psutil.virtual_memory().percent,
}
json_obj = []
json_obj.append(cpu_status)
return json_obj
def get_server_version(self):
pass
|
apache-2.0
|
Python
|
6e8be0bf525d386cfd83ac1c0c3f66475e308234
|
fix id tag
|
skluth/RooUnfold,skluth/RooUnfold,skluth/RooUnfold
|
examples/RooUnfoldExample.py
|
examples/RooUnfoldExample.py
|
# ==============================================================================
# File and Version Information:
# $Id: RooUnfoldExample.py 248 2010-10-04 22:18:19Z T.J.Adye $
#
# Description:
# Simple example usage of the RooUnfold package using toy MC.
#
# Author: Tim Adye <[email protected]>
#
# ==============================================================================
from ROOT import gRandom, TH1, TH1D, cout
from ROOT import RooUnfoldResponse
from ROOT import RooUnfold
from ROOT import RooUnfoldBayes
# from ROOT import RooUnfoldSvd
# from ROOT import RooUnfoldTUnfold
# ==============================================================================
# Gaussian smearing, systematic translation, and variable inefficiency
# ==============================================================================
def smear(xt):
xeff= 0.3 + (1.0-0.3)/20*(xt+10.0); # efficiency
x= gRandom.Rndm();
if x>xeff: return None;
xsmear= gRandom.Gaus(-2.5,0.2); # bias and smear
return xt+xsmear;
# ==============================================================================
# Example Unfolding
# ==============================================================================
print "==================================== TRAIN ===================================="
response= RooUnfoldResponse (40, -10.0, 10.0);
# Train with a Breit-Wigner, mean 0.3 and width 2.5.
for i in xrange(100000):
xt= gRandom.BreitWigner (0.3, 2.5);
x= smear (xt);
if x!=None:
response.Fill (x, xt);
else:
response.Miss (xt);
print "==================================== TEST ====================================="
hTrue= TH1D ("true", "Test Truth", 40, -10.0, 10.0);
hMeas= TH1D ("meas", "Test Measured", 40, -10.0, 10.0);
# Test with a Gaussian, mean 0 and width 2.
for i in xrange(10000):
xt= gRandom.Gaus (0.0, 2.0)
x= smear (xt);
hTrue.Fill(xt);
if x!=None: hMeas.Fill(x);
print "==================================== UNFOLD ==================================="
unfold= RooUnfoldBayes (response, hMeas, 4); # OR
# unfold= RooUnfoldSvd (response, hMeas, 20); # OR
# unfold= RooUnfoldTUnfold (response, hMeas);
hReco= unfold.Hreco();
unfold.PrintTable (cout, hTrue);
hReco.Draw();
hMeas.Draw("SAME");
hTrue.SetLineColor(8);
hTrue.Draw("SAME");
|
# ==============================================================================
# File and Version Information:
# $Id: RooUnfoldExample.py 248 2010-10-04 22:18:19Z T.J.Adye $
#
# Description:
# Simple example usage of the RooUnfold package using toy MC.
#
# Author: Tim Adye <[email protected]>
#
# ==============================================================================
from ROOT import gRandom, TH1, TH1D, cout
from ROOT import RooUnfoldResponse
from ROOT import RooUnfold
from ROOT import RooUnfoldBayes
# from ROOT import RooUnfoldSvd
# from ROOT import RooUnfoldTUnfold
# ==============================================================================
# Gaussian smearing, systematic translation, and variable inefficiency
# ==============================================================================
def smear(xt):
xeff= 0.3 + (1.0-0.3)/20*(xt+10.0); # efficiency
x= gRandom.Rndm();
if x>xeff: return None;
xsmear= gRandom.Gaus(-2.5,0.2); # bias and smear
return xt+xsmear;
# ==============================================================================
# Example Unfolding
# ==============================================================================
print "==================================== TRAIN ===================================="
response= RooUnfoldResponse (40, -10.0, 10.0);
# Train with a Breit-Wigner, mean 0.3 and width 2.5.
for i in xrange(100000):
xt= gRandom.BreitWigner (0.3, 2.5);
x= smear (xt);
if x!=None:
response.Fill (x, xt);
else:
response.Miss (xt);
print "==================================== TEST ====================================="
hTrue= TH1D ("true", "Test Truth", 40, -10.0, 10.0);
hMeas= TH1D ("meas", "Test Measured", 40, -10.0, 10.0);
# Test with a Gaussian, mean 0 and width 2.
for i in xrange(10000):
xt= gRandom.Gaus (0.0, 2.0)
x= smear (xt);
hTrue.Fill(xt);
if x!=None: hMeas.Fill(x);
print "==================================== UNFOLD ==================================="
unfold= RooUnfoldBayes (response, hMeas, 4); # OR
# unfold= RooUnfoldSvd (response, hMeas, 20); # OR
# unfold= RooUnfoldTUnfold (response, hMeas);
hReco= unfold.Hreco();
unfold.PrintTable (cout, hTrue);
hReco.Draw();
hMeas.Draw("SAME");
hTrue.SetLineColor(8);
hTrue.Draw("SAME");
|
apache-2.0
|
Python
|
b193a4035a0a77ba2555c41d977cf31975ac3b47
|
Disable destructive action challenge for codelab. (#1059)
|
spinnaker/spinnaker,jtk54/spinnaker,imosquera/spinnaker,duftler/spinnaker,stitchfix/spinnaker,duftler/spinnaker,spinnaker/spinnaker,skim1420/spinnaker,ewiseblatt/spinnaker,jtk54/spinnaker,ewiseblatt/spinnaker,Roshan2017/spinnaker,ewiseblatt/spinnaker,stitchfix/spinnaker,duftler/spinnaker,imosquera/spinnaker,skim1420/spinnaker,spinnaker/spinnaker,imosquera/spinnaker,spinnaker/spinnaker,jtk54/spinnaker,duftler/spinnaker,skim1420/spinnaker,skim1420/spinnaker,ewiseblatt/spinnaker,Roshan2017/spinnaker,stitchfix/spinnaker,Roshan2017/spinnaker
|
pylib/spinnaker/codelab_config.py
|
pylib/spinnaker/codelab_config.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from spinnaker.yaml_util import YamlBindings
def configure_codelab_igor_jenkins():
"""Configures Igor to be enabled and to point to the codelab jenkins instance.
"""
YamlBindings.update_yml_source(
'/opt/spinnaker/config/spinnaker-local.yml',
{
'jenkins': {
'defaultMaster': {
'name': 'CodelabJenkins',
'baseUrl': 'http://localhost:9090',
'username': 'admin',
'password': 'admin'
}
},
'igor': {
'enabled': 'true'
}
}
)
def disable_destructive_action_challenge():
"""Disables destructive action challenge for codelab.
"""
YamlBindings.update_yml_source(
'/opt/spinnaker/config/clouddriver.yml',
{
'credentials': {
'challengeDestructiveActionsEnvironments': ''
}
}
)
if __name__ == '__main__':
configure_codelab_igor_jenkins()
disable_destructive_action_challenge()
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from spinnaker.yaml_util import YamlBindings
def configure_codelab_igor_jenkins():
"""Configures Igor to be enabled and to point to the codelab jenkins instance.
"""
YamlBindings.update_yml_source(
'/opt/spinnaker/config/spinnaker-local.yml',
{
'jenkins': {
'defaultMaster': {
'name': 'CodelabJenkins',
'baseUrl': 'http://localhost:9090',
'username': 'admin',
'password': 'admin'
}
},
'igor': {
'enabled': 'true'
}
}
)
if __name__ == '__main__':
configure_codelab_igor_jenkins()
|
apache-2.0
|
Python
|
a766bfa315f7c91f672f59bdd1b606d50467c332
|
Bump version.
|
concordusapps/flask-components
|
src/flask_components/_version.py
|
src/flask_components/_version.py
|
# -*- coding: utf-8 -*-
__version_info__ = (0, 1, 1)
__version__ = '.'.join(map(str, __version_info__))
|
# -*- coding: utf-8 -*-
__version_info__ = (0, 1, 0)
__version__ = '.'.join(map(str, __version_info__))
|
mit
|
Python
|
f84aa449780f2645a89c3fb015a2235389937ec5
|
Clean up mongo fixtures a bit
|
LiaoPan/blaze,cowlicks/blaze,ChinaQuants/blaze,caseyclements/blaze,nkhuyu/blaze,cpcloud/blaze,ContinuumIO/blaze,scls19fr/blaze,jdmcbr/blaze,xlhtc007/blaze,scls19fr/blaze,caseyclements/blaze,cpcloud/blaze,cowlicks/blaze,ContinuumIO/blaze,jcrist/blaze,jcrist/blaze,maxalbert/blaze,jdmcbr/blaze,dwillmer/blaze,nkhuyu/blaze,ChinaQuants/blaze,alexmojaki/blaze,mrocklin/blaze,LiaoPan/blaze,alexmojaki/blaze,mrocklin/blaze,maxalbert/blaze,dwillmer/blaze,xlhtc007/blaze
|
blaze/tests/test_mongo.py
|
blaze/tests/test_mongo.py
|
from __future__ import absolute_import, division, print_function
import pytest
pymongo = pytest.importorskip('pymongo')
try:
pymongo.MongoClient()
except pymongo.errors.ConnectionFailure:
pytest.importorskip('fhskjfdskfhsf')
from datashape import discover, dshape
from blaze import drop, into, create_index
conn = pymongo.MongoClient()
db = conn.test_db
from pymongo import ASCENDING, DESCENDING
@pytest.yield_fixture
def empty_collec():
yield db.tmp_collection
db.tmp_collection.drop()
@pytest.yield_fixture
def bank_collec():
coll = into(db.tmp_collection, bank)
yield coll
coll.drop()
bank = [{'name': 'Alice', 'amount': 100},
{'name': 'Alice', 'amount': 200},
{'name': 'Bob', 'amount': 100},
{'name': 'Bob', 'amount': 200},
{'name': 'Bob', 'amount': 300}]
def test_discover(bank_collec):
assert discover(bank_collec) == dshape('5 * {amount: int64, name: string}')
def test_into(empty_collec):
lhs = set(into([], into(empty_collec, bank), columns=['name', 'amount']))
rhs = set([('Alice', 100), ('Alice', 200), ('Bob', 100), ('Bob', 200),
('Bob', 300)])
assert lhs == rhs
@pytest.yield_fixture
def mongo():
pymongo = pytest.importorskip('pymongo')
conn = pymongo.MongoClient()
db = conn.test_db
db.tmp_collection.insert(bank)
yield conn
conn.close()
def test_drop(mongo):
db = mongo.test_db
drop(db.tmp_collection)
assert db.tmp_collection.count() == 0
|
from __future__ import absolute_import, division, print_function
import pytest
pymongo = pytest.importorskip('pymongo')
try:
pymongo.MongoClient()
except pymongo.errors.ConnectionFailure:
pytest.importorskip('fhskjfdskfhsf')
from datashape import discover, dshape
from contextlib import contextmanager
from toolz.curried import get
from blaze import drop, into
conn = pymongo.MongoClient()
db = conn.test_db
@contextmanager
def collection(data=None):
if data is None:
data = []
coll = db.tmp_collection
if data:
coll = into(coll, data)
try:
yield coll
finally:
coll.drop()
bank = [{'name': 'Alice', 'amount': 100},
{'name': 'Alice', 'amount': 200},
{'name': 'Bob', 'amount': 100},
{'name': 'Bob', 'amount': 200},
{'name': 'Bob', 'amount': 300}]
def test_discover():
with collection(bank) as coll:
assert discover(coll) == dshape('5 * {amount: int64, name: string}')
def test_into():
with collection([]) as coll:
key = get(['name', 'amount'])
assert set(into([], into(coll, bank), columns=['name', 'amount'])) ==\
set([('Alice', 100), ('Alice', 200), ('Bob', 100),
('Bob', 200), ('Bob', 300)])
@pytest.yield_fixture
def mongo():
pymongo = pytest.importorskip('pymongo')
conn = pymongo.MongoClient()
db = conn.test_db
db.tmp_collection.insert(bank)
yield conn
conn.close()
def test_drop(mongo):
db = mongo.test_db
drop(db.tmp_collection)
assert db.tmp_collection.count() == 0
|
bsd-3-clause
|
Python
|
36408b92a74b8f9963686d215b26de57b429cd6c
|
Fix test_table.py record syntax.
|
seibert/blaze-core,seibert/blaze-core,seibert/blaze-core,seibert/blaze-core,seibert/blaze-core
|
blaze/tests/test_table.py
|
blaze/tests/test_table.py
|
from blaze import dshape
from blaze import NDTable, Table, NDArray, Array
def test_arrays():
# Assert that the pretty pritner works for all of the
# toplevel structures
expected_ds = dshape('3, int')
a = NDArray([1,2,3])
str(a)
repr(a)
a.datashape._equal(expected_ds)
a = Array([1,2,3])
str(a)
repr(a)
a.datashape._equal(expected_ds)
def test_record():
expected_ds = dshape('1, {x: int32; y: float32}')
t = NDTable([(1, 2.1), (2, 3.1)], dshape='1, {x: int32; y: float32}')
t.datashape._equal(expected_ds)
str(t)
repr(t)
def test_record_consume():
expected_ds = dshape("4, {i: int64; f: float64}")
d = {
'i' : [1, 2, 3, 4],
'f' : [4., 3., 2., 1.]
}
t = NDTable(d)
t.datashape._equal(expected_ds)
def test_record_consume2():
d = {
'a' : ["foo", "bar"],
'b' : [4., 3., 2., 1.]
}
table = NDTable(d)
|
from blaze import dshape
from blaze import NDTable, Table, NDArray, Array
def test_arrays():
# Assert that the pretty pritner works for all of the
# toplevel structures
expected_ds = dshape('3, int')
a = NDArray([1,2,3])
str(a)
repr(a)
a.datashape._equal(expected_ds)
a = Array([1,2,3])
str(a)
repr(a)
a.datashape._equal(expected_ds)
def test_record():
expected_ds = dshape('1, {x: int32; y: float32}')
t = NDTable([(1, 2.1), (2, 3.1)], dshape='1, {x: int32; y: float32}')
t.datashape._equal(expected_ds)
str(t)
repr(t)
def test_record_consume():
expected_ds = dshape("4, {i: int64, f: float64}")
d = {
'i' : [1, 2, 3, 4],
'f' : [4., 3., 2., 1.]
}
t = NDTable(d)
t.datashape._equal(expected_ds)
def test_record_consume2():
d = {
'a' : ["foo", "bar"],
'b' : [4., 3., 2., 1.]
}
table = NDTable(d)
|
bsd-2-clause
|
Python
|
3f90d0ec25491eb64f164180139d4baf9ff238a9
|
Sort the context list in alphabetical order
|
libravatar/libravatar,libravatar/libravatar,libravatar/libravatar,libravatar/libravatar,libravatar/libravatar,libravatar/libravatar,libravatar/libravatar
|
libravatar/context_processors.py
|
libravatar/context_processors.py
|
# Copyright (C) 2010 Jonathan Harker <[email protected]>
#
# This file is part of Libravatar
#
# Libravatar is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Libravatar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar. If not, see <http://www.gnu.org/licenses/>.
import settings
"""
Default useful variables for the base page template.
"""
def basepage(request):
context = {}
context['analytics_propertyid'] = settings.ANALYTICS_PROPERTYID
context['avatar_url'] = settings.AVATAR_URL
context['disable_signup'] = settings.DISABLE_SIGNUP
context['libravatar_version'] = settings.LIBRAVATAR_VERSION
context['media_url'] = settings.MEDIA_URL
context['secure_avatar_url'] = settings.SECURE_AVATAR_URL
context['site_name'] = settings.SITE_NAME
context['site_url'] = settings.SITE_URL
context['support_email'] = settings.SUPPORT_EMAIL
return context
|
# Copyright (C) 2010 Jonathan Harker <[email protected]>
#
# This file is part of Libravatar
#
# Libravatar is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Libravatar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar. If not, see <http://www.gnu.org/licenses/>.
import settings
"""
Default useful variables for the base page template.
"""
def basepage(request):
context = {}
context["site_name"] = settings.SITE_NAME
context["libravatar_version"] = settings.LIBRAVATAR_VERSION
context["avatar_url"] = settings.AVATAR_URL
context["secure_avatar_url"] = settings.SECURE_AVATAR_URL
context["media_url"] = settings.MEDIA_URL
context["site_url"] = settings.SITE_URL
context["disable_signup"] = settings.DISABLE_SIGNUP
context["analytics_propertyid"] = settings.ANALYTICS_PROPERTYID
context['support_email'] = settings.SUPPORT_EMAIL
return context
|
agpl-3.0
|
Python
|
34dc1c775e4808664dcdb5824b8f2ed5f12e94a1
|
add jsonp renderer and route for graph build status
|
MLR-au/esrc-cnex,MLR-au/esrc-cnex,MLR-au/esrc-cnex
|
app/app/__init__.py
|
app/app/__init__.py
|
from pyramid.config import Configurator
from pyramid.renderers import JSONP
from sqlalchemy import engine_from_config
from .models import (
DBSession,
Base,
)
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.bind = engine
config = Configurator(settings=settings)
config.add_renderer('jsonp', JSONP(param_name='callback'))
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('site_graph', '/site/{code}')
config.add_route('entity_graph', '/entity/{id}')
config.add_route('status', '/status')
config.scan()
return config.make_wsgi_app()
|
from pyramid.config import Configurator
from sqlalchemy import engine_from_config
from .models import (
DBSession,
Base,
)
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.bind = engine
config = Configurator(settings=settings)
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('site_graph', '/site/{code}')
config.add_route('entity_graph', '/entity/{id}')
config.scan()
return config.make_wsgi_app()
|
bsd-3-clause
|
Python
|
17eb885097da7b2b2418f909e2f23058245be72c
|
Update spotify example (#276)
|
balloob/pychromecast,balloob/pychromecast,dominikkarall/pychromecast
|
examples/spotify_example.py
|
examples/spotify_example.py
|
"""
Example on how to use the Spotify Controller.
NOTE: You need to install the spotipy and spotify-token dependencies.
This can be done by running the following:
pip install spotify-token
pip install git+https://github.com/plamere/spotipy.git
"""
import logging
import sys
import pychromecast
from pychromecast.controllers.spotify import SpotifyController
import spotify_token as st
import spotipy
CAST_NAME = "My Chromecast"
debug = '--show-debug' in sys.argv
if debug:
logging.basicConfig(level=logging.DEBUG)
chromecasts = pychromecast.get_chromecasts()
cast = None
for _cast in chromecasts:
if _cast.name == CAST_NAME:
cast = _cast
break
if cast:
cast.wait()
device_id = None
data = st.start_session("SPOTIFY_USERNAME", "SPOTIFY_PASSWORD")
access_token = data[0]
client = spotipy.Spotify(auth=access_token)
sp = SpotifyController(access_token)
cast.register_handler(sp)
sp.launch_app()
devices_available = client.devices()
for device in devices_available['devices']:
if device['name'] == CAST_NAME:
device_id = device['id']
break
client.start_playback(device_id=device_id, uris=["spotify:track:3Zwu2K0Qa5sT6teCCHPShP"])
|
"""
Example on how to use the Spotify Controller.
NOTE: You need to install the spotipy and spotify-token dependencies.
This can be done by running the following:
pip install spotify-token
pip install git+https://github.com/plamere/spotipy.git
"""
import pychromecast
from pychromecast.controllers.spotify import SpotifyController
import spotify_token as st
import spotipy
chromecasts = pychromecast.get_chromecasts()
cast = chromecasts[0]
cast.start()
CAST_NAME = "My Chromecast"
device_id = None
if cast.name == CAST_NAME:
data = st.start_session("SPOTIFY_USERNAME", "SPOTIFY_PASSWORD")
access_token = data[0]
client = spotipy.Spotify(auth=access_token)
sp = SpotifyController(access_token)
cast.register_handler(sp)
sp.launch_app()
devices_available = client.devices()
for device in devices_available['devices']:
if device['name'] == CAST_NAME and device['type'] == 'CastVideo':
device_id = device['id']
break
client.start_playback(device_id=device_id, uris=["spotify:track:3Zwu2K0Qa5sT6teCCHPShP"])
|
mit
|
Python
|
fd460c1b987354b01d306e2e96ab5c74f6b0d06f
|
add socket close call.
|
constanthatz/network_tools
|
echo_server.py
|
echo_server.py
|
#!/usr/bin/env python
from __future__ import print_function
import socket
import email.utils
def server_socket_function():
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP)
server_socket.bind(('127.0.0.1', 50000))
server_socket.listen(1)
try:
while True:
conn, addr = server_socket.accept()
message = conn.recv(32)
if message:
conn.sendall("I recieved your message. Stop talking to me. You are annoying.")
except KeyboardInterrupt:
conn.close()
server_socket.close()
def response_ok():
first_line = 'HTTP/1.1 200 OK'
timestamp = email.utils.formatdate(usegmt=True)
content_header = 'Content-Type: text/plain'
crlf = '<CRLF>'
response = ('{}\nDate: {}\n{}\n{}').format(
first_line, timestamp, content_header, crlf)
return response
def response_error():
error_code = '404'
error_text = 'Not Found'
first_line = 'HTTP/1.1 {} {}'.format(error_code, error_text)
timestamp = email.utils.formatdate(usegmt=True)
content_header = 'Content-Type: text/plain'
crlf = '<CRLF>'
response = ('{}\nDate: {}\n{}\n{}').format(
first_line, timestamp, content_header, crlf)
return response
def parse_request():
return
print(response_ok())
# if __name__ == '__main__':
# server_socket_function()
|
#!/usr/bin/env python
from __future__ import print_function
import socket
import email.utils
def server_socket_function():
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP)
server_socket.bind(('127.0.0.1', 50000))
server_socket.listen(1)
try:
while True:
conn, addr = server_socket.accept()
message = conn.recv(32)
if message:
conn.sendall("I recieved your message. Stop talking to me. You are annoying.")
except KeyboardInterrupt:
conn.close()
def response_ok():
first_line = 'HTTP/1.1 200 OK'
timestamp = email.utils.formatdate(usegmt=True)
content_header = 'Content-Type: text/plain'
crlf = '<CRLF>'
response = ('{}\nDate: {}\n{}\n{}').format(
first_line, timestamp, content_header, crlf)
return response
def response_error():
error_code = '404'
error_text = 'Not Found'
first_line = 'HTTP/1.1 {} {}'.format(error_code, error_text)
timestamp = email.utils.formatdate(usegmt=True)
content_header = 'Content-Type: text/plain'
crlf = '<CRLF>'
response = ('{}\nDate: {}\n{}\n{}').format(
first_line, timestamp, content_header, crlf)
return response
def parse_request():
return
print(response_ok())
# if __name__ == '__main__':
# server_socket_function()
|
mit
|
Python
|
add0af524dafa241d7bab64093ed45c857c66c0d
|
Rename cfg to settings
|
luigiberrettini/build-deploy-stats
|
statsSend/teamCity/teamCityStatisticsSender.py
|
statsSend/teamCity/teamCityStatisticsSender.py
|
#!/usr/bin/env python3
from dateutil import parser
from statsSend.teamCity.teamCityConnection import TeamCityConnection
from statsSend.teamCity.teamCityUrlBuilder import TeamCityUrlBuilder
from statsSend.teamCity.teamCityProject import TeamCityProject
class TeamCityStatisticsSender:
def __init__(self, settings, reporter):
self.page_size = int(settings['page_size'])
connection = TeamCityConnection(settings['user'], settings['password'])
url_builder = TeamCityUrlBuilder(settings['server_url'], settings['api_url_prefix'])
self.project = TeamCityProject(settings['project_id'], connection, url_builder, self.page_size)
self.since_timestamp = parser.parse(settings['since_timestamp']).strftime('%Y%m%dT%H%M%S%z')
self.reporter = reporter
async def send(self):
if ("report_categories" in dir(self.reporter)):
categories = [build_configuration.toCategory() async for build_configuration in self.project.retrieve_build_configurations()]
self.reporter.report_categories(categories)
async for build_configuration in self.project.retrieve_build_configurations():
async for build_run in build_configuration.retrieve_build_runs_since_timestamp(self.since_timestamp):
job = build_run.toJob()
self.reporter.report_job(job)
|
#!/usr/bin/env python3
from dateutil import parser
from statsSend.teamCity.teamCityConnection import TeamCityConnection
from statsSend.teamCity.teamCityUrlBuilder import TeamCityUrlBuilder
from statsSend.teamCity.teamCityProject import TeamCityProject
class TeamCityStatisticsSender:
def __init__(self, cfg, reporter):
self.page_size = int(cfg['page_size'])
connection = TeamCityConnection(cfg['user'], cfg['password'])
url_builder = TeamCityUrlBuilder(cfg['server_url'], cfg['api_url_prefix'])
self.project = TeamCityProject(cfg['project_id'], connection, url_builder, self.page_size)
self.since_timestamp = parser.parse(cfg['since_timestamp']).strftime('%Y%m%dT%H%M%S%z')
self.reporter = reporter
async def send(self):
if ("report_categories" in dir(self.reporter)):
categories = [build_configuration.toCategory() async for build_configuration in self.project.retrieve_build_configurations()]
self.reporter.report_categories(categories)
async for build_configuration in self.project.retrieve_build_configurations():
async for build_run in build_configuration.retrieve_build_runs_since_timestamp(self.since_timestamp):
job = build_run.toJob()
self.reporter.report_job(job)
|
mit
|
Python
|
daba0d7eb4b77e40790624e23938b2ebb6d04fca
|
fix notify loop
|
benoitc/pistil,menghan/pistil,menghan/pistil,harrisonfeng/pistil,meebo/pistil
|
examples/multiworker2.py
|
examples/multiworker2.py
|
# -*- coding: utf-8 -
#
# This file is part of pistil released under the MIT license.
# See the NOTICE for more information.
import time
import urllib2
from pistil.arbiter import Arbiter
from pistil.worker import Worker
from pistil.tcp.sync_worker import TcpSyncWorker
from pistil.tcp.arbiter import TcpArbiter
from http_parser.http import HttpStream
from http_parser.reader import SocketReader
class MyTcpWorker(TcpSyncWorker):
def handle(self, sock, addr):
p = HttpStream(SocketReader(sock))
path = p.path()
data = "welcome wold"
sock.send("".join(["HTTP/1.1 200 OK\r\n",
"Content-Type: text/html\r\n",
"Content-Length:" + str(len(data)) + "\r\n",
"Connection: close\r\n\r\n",
data]))
class UrlWorker(Worker):
def run(self):
print "ici"
while self.alive:
time.sleep(0.1)
f = urllib2.urlopen("http://localhost:5000")
print f.read()
self.notify()
class MyPoolArbiter(TcpArbiter):
def on_init(self, conf):
TcpArbiter.on_init(self, conf)
# we return a spec
return (MyTcpWorker, 30, "worker", {}, "http_welcome",)
if __name__ == '__main__':
conf = {"num_workers": 3, "address": ("127.0.0.1", 5000)}
specs = [
(MyPoolArbiter, 30, "supervisor", {}, "tcp_pool"),
(UrlWorker, 30, "worker", {}, "grabber")
]
arbiter = Arbiter(conf, specs)
arbiter.run()
|
# -*- coding: utf-8 -
#
# This file is part of pistil released under the MIT license.
# See the NOTICE for more information.
import time
import urllib2
from pistil.arbiter import Arbiter
from pistil.worker import Worker
from pistil.tcp.sync_worker import TcpSyncWorker
from pistil.tcp.arbiter import TcpArbiter
from http_parser.http import HttpStream
from http_parser.reader import SocketReader
class MyTcpWorker(TcpSyncWorker):
def handle(self, sock, addr):
p = HttpStream(SocketReader(sock))
path = p.path()
data = "welcome wold"
sock.send("".join(["HTTP/1.1 200 OK\r\n",
"Content-Type: text/html\r\n",
"Content-Length:" + str(len(data)) + "\r\n",
"Connection: close\r\n\r\n",
data]))
class UrlWorker(Worker):
def run(self):
print "ici"
while self.alive:
time.sleep(0.1)
f = urllib2.urlopen("http://localhost:5000")
print f.read()
self.notify
class MyPoolArbiter(TcpArbiter):
def on_init(self, conf):
TcpArbiter.on_init(self, conf)
# we return a spec
return (MyTcpWorker, 30, "worker", {}, "http_welcome",)
if __name__ == '__main__':
conf = {"num_workers": 3, "address": ("127.0.0.1", 5000)}
specs = [
(MyPoolArbiter, 30, "supervisor", {}, "tcp_pool"),
(UrlWorker, 30, "worker", {}, "grabber")
]
arbiter = Arbiter(conf, specs)
arbiter.run()
|
mit
|
Python
|
878811a673625f9dbe0f41dd0196887f612ecf2e
|
Set default file extension to empty string
|
johyphenel/sublime-expand-region,johyphenel/sublime-expand-region,aronwoost/sublime-expand-region
|
expand_region_handler.py
|
expand_region_handler.py
|
import re
try:
import javascript
import html
except:
from . import javascript
from . import html
def expand(string, start, end, extension=""):
if(re.compile("html|htm|xml").search(extension)):
return html.expand(string, start, end)
return javascript.expand(string, start, end)
|
import re
try:
import javascript
import html
except:
from . import javascript
from . import html
def expand(string, start, end, extension=None):
if(re.compile("html|htm|xml").search(extension)):
return html.expand(string, start, end)
return javascript.expand(string, start, end)
|
mit
|
Python
|
58d3e0712a35052d0016fa3c3b3ffda1ba56b305
|
Add some locks
|
wbkang/light-control,wbkang/light-control
|
lightcontrol/server.py
|
lightcontrol/server.py
|
#!/usr/bin/env python3
import RPi.GPIO as GPIO
import time
import threading
import logging
from threading import RLock, Lock
from tzlocal import get_localzone
from flask import Flask, render_template, url_for, request, make_response
from lightcontrol.config import lights
from os.path import expanduser
import os.path
import json
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s')
GPIO.setmode(GPIO.BCM)
logger = logging.getLogger(__name__)
app = Flask("lightcontrol")
home_dir = expanduser('~')
class Preferences:
def __init__(self, filename):
self.filename = filename
self.lock = RLock()
def read(self):
with self.lock:
if os.path.exists(self.filename):
try:
with open(self.filename, 'rb') as f:
return json.loads(f.read().decode('utf-8'))
except:
logger.exception("Error reading JSON. Resetting preferences")
return dict()
else:
return dict()
def write(self, d):
with self.lock:
with open(self.filename, 'wb') as f:
return f.write(json.dumps(d).encode('utf-8'))
def update(self, key, value):
with self.lock:
p = self.read()
p[key] = value
self.write(p)
pref = Preferences(filename=home_dir + '/.lightcontrol')
switch_lock = Lock()
def toggle_switch(light_name, onoff):
with switch_lock:
pref.update(light_name, onoff)
line = lights[light_name][0 if onoff else 1]
GPIO.setup(line, GPIO.OUT)
GPIO.output(line, GPIO.HIGH)
time.sleep(0.5)
GPIO.output(line, GPIO.LOW)
@app.route("/")
def index():
return render_template("index.html", config=lights)
@app.route("/lights/<room_name>/<onoff>", methods=["POST"])
def control(room_name, onoff):
onoff = onoff == "on"
toggle_switch(room_name, onoff)
return make_response(str(onoff), 200)
@app.route("/lights/<room_name>/status", methods=["GET"])
def status(room_name):
stat = pref.read().get(room_name, False)
# update
#toggle_switch(room_name, stat)
return "1" if stat else "0"
#for name, val in pref.read().items():
# toggle_switch(name, val)
#import IPython
#IPython.embed()
|
#!/usr/bin/env python3
import RPi.GPIO as GPIO
import time
import threading
import logging
from tzlocal import get_localzone
from flask import Flask, render_template, url_for, request, make_response
from lightcontrol.config import lights
from os.path import expanduser
import os.path
import json
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s')
GPIO.setmode(GPIO.BCM)
logger = logging.getLogger(__name__)
app = Flask("lightcontrol")
home_dir = expanduser('~')
class Preferences:
def __init__(self, filename):
self.filename = filename
def read(self):
if os.path.exists(self.filename):
try:
with open(self.filename, 'rb') as f:
return json.loads(f.read().decode('utf-8'))
except:
logger.exception("Error reading JSON. Resetting preferences")
return dict()
else:
return dict()
def write(self, d):
with open(self.filename, 'wb') as f:
return f.write(json.dumps(d).encode('utf-8'))
def update(self, key, value):
p = self.read()
p[key] = value
self.write(p)
pref = Preferences(filename=home_dir + '/.lightcontrol')
def toggle_switch(light_name, onoff):
line = lights[light_name][0 if onoff else 1]
GPIO.setup(line, GPIO.OUT)
GPIO.output(line, GPIO.HIGH)
time.sleep(0.5)
GPIO.output(line, GPIO.LOW)
pref.update(light_name, onoff)
@app.route("/")
def index():
return render_template("index.html", config=lights)
@app.route("/lights/<room_name>/<onoff>", methods=["POST"])
def control(room_name, onoff):
onoff = onoff == "on"
toggle_switch(room_name, onoff)
return make_response(str(onoff), 200)
@app.route("/lights/<room_name>/status", methods=["GET"])
def status(room_name):
stat = pref.read().get(room_name, False)
# update
#toggle_switch(room_name, stat)
return "1" if stat else "0"
#for name, val in pref.read().items():
# toggle_switch(name, val)
#import IPython
#IPython.embed()
|
mit
|
Python
|
d31adbfd0485579c94e92b9c2950230d00fdf309
|
update flaskapp.wsgi
|
KLachhani/RiotAPIChallenge2.0,KLachhani/RiotAPIChallenge2.0,KLachhani/RiotAPIChallenge2.0,KLachhani/RiotAPIChallenge2.0
|
FlaskApp/flaskapp.wsgi
|
FlaskApp/flaskapp.wsgi
|
#!/usr/bin/python
import sys
import logging
logging.basicConfig(stream=sys.stderr)
sys.path.insert(0,"/var/www/RiotAPIChallenge2.0/FlaskApp/")
from FlaskApp import app as application
application.secret_key = 'secretkeyhere'
|
#!/usr/bin/python
import sys
import logging
logging.basicConfig(stream=sys.stderr)
sys.path.insert(0,"/var/www/FlaskApp/")
from FlaskApp import app as application
application.secret_key = 'secretkeyhere'
|
mit
|
Python
|
2c64c4fd5a81537d891aadafe01a4da96fcb7ab4
|
Update ipc_lista1.6.py
|
any1m1c/ipc20161
|
lista1/ipc_lista1.6.py
|
lista1/ipc_lista1.6.py
|
#ipc_lista1.6
#Professor: Jucimar Junior
#Any Mendes Carvalho -
#
#
#
#
#Faça um programa que peça o raio de um círculo, calcule e mostre sua área
raio = 0
area = 0
raio = input("Entre com o valor do raio: ")
|
#ipc_lista1.6
#Professor: Jucimar Junior
#Any Mendes Carvalho -
#
#
#
#
#Faça um programa que peça o raio de um círculo, calcule e mostre sua área
raio = 0
area = 0
raio = input("Entre com o valor do raio: "
|
apache-2.0
|
Python
|
1cf097d30d5966456c01e4f2e678213c04f8e334
|
Update ipc_lista1.6.py
|
any1m1c/ipc20161
|
lista1/ipc_lista1.6.py
|
lista1/ipc_lista1.6.py
|
#ipc_lista1.6
#Professor: Jucimar Junior
#Any Mendes Carvalho -
#
#
#
#
#Faça um programa que peça o raio de um círculo, calcule e mostre sua área
raio = 0
area = 0
raio = input("Entre com o valor do raio: ")
area = 3.14 * raio*raio
print "Valor da
|
#ipc_lista1.6
#Professor: Jucimar Junior
#Any Mendes Carvalho -
#
#
#
#
#Faça um programa que peça o raio de um círculo, calcule e mostre sua área
raio = 0
area = 0
raio = input("Entre com o valor do raio: ")
area = 3.14 * raio*raio
print "Valor
|
apache-2.0
|
Python
|
8804091fb22ef0a7682ea402ff22750261fc38a7
|
Update ipc_lista1.6.py
|
any1m1c/ipc20161
|
lista1/ipc_lista1.6.py
|
lista1/ipc_lista1.6.py
|
#ipc_lista1.6
#Professor: Jucimar Junior
#Any Mendes Carvalho -
#
#
#
#
#Faça um programa que peça o raio de um círculo, calcule e mostre sua área
raio = 0
|
#ipc_lista1.6
#Professor: Jucimar Junior
#Any Mendes Carvalho -
#
#
#
#
#Faça um programa que peça o raio de um círculo, calcule e mostre sua área
raio =
|
apache-2.0
|
Python
|
c6abbd5b8176943ec02d0a03852e1992f62950a1
|
Update ipc_lista1.9.py
|
any1m1c/ipc20161
|
lista1/ipc_lista1.9.py
|
lista1/ipc_lista1.9.py
|
#ipc_lista1.9
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa que peça a temperatura em graus Fahrenheit, transforme e mostre a temperatura
|
#ipc_lista1.9
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa que peça a temperatura em graus Fahrenheit, transforme e mostre
|
apache-2.0
|
Python
|
89571a6caf877f8ff5ff0b983548b926dec87f8d
|
Update ipc_lista1.9.py
|
any1m1c/ipc20161
|
lista1/ipc_lista1.9.py
|
lista1/ipc_lista1.9.py
|
#ipc_lista1.9
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa que peça a temperatura em graus Fahrenheit
|
#ipc_lista1.9
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa que peça a temperatura em graus
|
apache-2.0
|
Python
|
d6672e2da113e2fdcfec147619ed03d5410ad014
|
Fix cleanup at exit in Escalator. Remove socket.
|
onitu/onitu,onitu/onitu,onitu/onitu
|
onitu/escalator/server/__main__.py
|
onitu/escalator/server/__main__.py
|
import os
import signal
import argparse
import zmq
from logbook import Logger
from logbook import StderrHandler
from logbook.queues import ZeroMQHandler
from .databases import Databases
from .worker import Worker
back_uri = 'inproc://workers'
logger = Logger('Escalator')
def main(logger):
proxy = zmq.devices.ThreadDevice(
device_type=zmq.QUEUE, in_type=zmq.DEALER, out_type=zmq.ROUTER
)
proxy.bind_out(bind_uri)
proxy.bind_in(back_uri)
proxy.start()
logger.info("Starting on '{}'", args.bind)
nb_workers = 8
workers = []
for i in range(nb_workers):
worker = Worker(databases, back_uri, logger)
worker.daemon = True
worker.start()
workers.append(worker)
while proxy.launcher.isAlive():
try:
# If we join the process without a timeout we never
# get the chance to handle the exception
proxy.join(100)
except KeyboardInterrupt:
break
def cleanup(*args, **kwargs):
databases.close()
if bind_uri.startswith("ipc://"):
# With ZMQ < 4.1 (which isn't released yet), we can't
# close the device in a clean way.
# This will be possible with ZMQ 4.1 by using
# zmq_proxy_steerable.
# In the meantime, we must delete the Unix socket by hand.
sock_file = bind_uri[6:]
try:
os.unlink(sock_file)
except OSError:
pass
exit()
parser = argparse.ArgumentParser("escalator")
parser.add_argument(
'--bind', default='tcp://127.0.0.1:4224',
help="Address to bind escalator server"
)
parser.add_argument(
'--log-uri',
help="The URI of the ZMQ handler listening to the logs"
)
args = parser.parse_args()
bind_uri = args.bind
databases = Databases('dbs')
for s in (signal.SIGINT, signal.SIGTERM, signal.SIGQUIT):
signal.signal(s, cleanup)
if args.log_uri:
handler = ZeroMQHandler(args.log_uri, multi=True)
else:
handler = StderrHandler()
with handler.applicationbound():
main(logger)
cleanup()
|
import argparse
import zmq
from logbook import Logger
from logbook import StderrHandler
from logbook.queues import ZeroMQHandler
from .databases import Databases
from .worker import Worker
back_uri = 'inproc://workers'
logger = Logger('Escalator')
def main(logger):
proxy = zmq.devices.ThreadDevice(
device_type=zmq.QUEUE, in_type=zmq.DEALER, out_type=zmq.ROUTER
)
proxy.bind_out(args.bind)
proxy.bind_in(back_uri)
proxy.start()
logger.info("Starting on '{}'", args.bind)
databases = Databases('dbs')
nb_workers = 8
workers = []
for i in range(nb_workers):
worker = Worker(databases, back_uri, logger)
worker.daemon = True
worker.start()
workers.append(worker)
while proxy.launcher.isAlive():
try:
# If we join the process without a timeout we never
# get the chance to handle the exception
proxy.join(100)
except KeyboardInterrupt:
break
logger.info("Exiting")
databases.close()
parser = argparse.ArgumentParser("escalator")
parser.add_argument(
'--bind', default='tcp://127.0.0.1:4224',
help="Address to bind escalator server"
)
parser.add_argument(
'--log-uri',
help="The URI of the ZMQ handler listening to the logs"
)
args = parser.parse_args()
if args.log_uri:
handler = ZeroMQHandler(args.log_uri, multi=True)
else:
handler = StderrHandler()
with handler.applicationbound():
main(logger)
|
mit
|
Python
|
da12486a207e1ade8c7b49379613e4aadec23794
|
add check to see if rename is needed
|
Redball45/RedballMisc-Cogs
|
misc/misc.py
|
misc/misc.py
|
import discord
from discord.ext import commands
from .utils import checks
from __main__ import send_cmd_help
import asyncio
class misc:
"""Misc commands"""
def __init__(self, bot):
self.bot = bot
@commands.command(hidden=True)
async def summon(self):
await self.bot.say("Who dares summon me?")
async def rename_orun(self, ):
while self is self.bot.get_cog("misc"):
serverid = "294578270064869377"
userid = "202429404503212034"
server = self.bot.get_server(serverid)
user = server.get_member(userid)
print(user.nick)
try:
if user.nick != "Orun"
await self.bot.change_nickname(user, nickname)
print("Renamed Orun")
else
print("No rename needed")
except discord.Forbidden:
print("I cannot do that, I lack the "
"\"Manage Nicknames\" permission.")
await asyncio.sleep(30)
def setup(bot):
n = misc(bot)
loop = asyncio.get_event_loop()
loop.create_task(n.rename_orun())
bot.add_cog(n)
|
import discord
from discord.ext import commands
from .utils import checks
from __main__ import send_cmd_help
import asyncio
class misc:
"""Misc commands"""
def __init__(self, bot):
self.bot = bot
@commands.command(hidden=True)
async def summon(self):
await self.bot.say("Who dares summon me?")
async def rename_orun(self, ):
while self is self.bot.get_cog("misc"):
serverid = "294578270064869377"
userid = "202429404503212034"
server = self.bot.get_server(serverid)
user = server.get_member(userid)
print(user.nick)
nickname = "Orun"
try:
await self.bot.change_nickname(user, nickname)
print("Renamed Orun")
except discord.Forbidden:
print("I cannot do that, I lack the "
"\"Manage Nicknames\" permission.")
await asyncio.sleep(30)
def setup(bot):
n = misc(bot)
loop = asyncio.get_event_loop()
loop.create_task(n.rename_orun())
bot.add_cog(n)
|
mit
|
Python
|
0d32f515b7a7cc31f263c61f8605f730259b8fa9
|
Update views.py
|
rochapps/django-pdfy
|
pdf/views.py
|
pdf/views.py
|
"""
RenderPDF helper class
"""
import os
import datetime
import cStringIO as StringIO
import ho.pisa as pisa
from cgi import escape
import logging
from django.conf import settings
from django.http import HttpResponse
from django.template.loader import get_template
from django.template import Context
from django.views.generic.base import TemplateView
logger = logging.getLogger(__name__)
class RenderPDF(object):
"""
class based view to render template in PDF format.
"""
template_name = 'django_pdf/hello_world.html'
assets_url = settings.MEDIA_ROOT
def fetch_resources(self, uri, rel=''):
""""
Method return absolute path to resources.
"""
absolute_path = os.path.join(settings.MEDIA_ROOT,
uri.replace(self.assets_url, ""))
logger.debug(absolute_path)
return absolute_path
def render_to_response(self, context, **response_kwargs):
context.update(response_kwargs)
return self.render_to_pdf(context)
def render_to_pdf(self, context):
"""
renders pdf file
"""
template = get_template(self.template_name)
template_context = Context(context)
html = template.render(template_context)
result = StringIO.StringIO()
pdf = pisa.pisaDocument(StringIO.StringIO(html.encode("UTF-8")),
result, link_callback=self.fetch_resources)
if not pdf.err:
return HttpResponse(result.getvalue(), mimetype='application/pdf')
return HttpResponse('We had some errors<pre>%s</pre>' % escape(html))
|
"""
RenderPDF helper class
"""
import os
import datetime
import cStringIO as StringIO
import ho.pisa as pisa
from cgi import escape
from django.conf import settings
from django.http import HttpResponse
from django.template.loader import get_template
from django.template import Context
from django.views.generic.base import TemplateView
class RenderPDF(object):
"""
class based view to render template in PDF format.
"""
template_name = 'django_pdf/hello_world.html'
assets_url = settings.MEDIA_ROOT
def fetch_resources(self, uri, rel=''):
""""
Method return absolute path to resources.
"""
absolute_path = os.path.join(settings.MEDIA_ROOT,
uri.replace(self.assets_url, ""))
return absolute_path
def render_to_response(self, context, **response_kwargs):
context.update(response_kwargs)
return self.render_to_pdf(context)
def render_to_pdf(self, context):
"""
renders pdf file
"""
template = get_template(self.template_name)
template_context = Context(context)
html = template.render(template_context)
result = StringIO.StringIO()
pdf = pisa.pisaDocument(StringIO.StringIO(html.encode("UTF-8")),
result, link_callback=self.fetch_resources)
if not pdf.err:
return HttpResponse(result.getvalue(), mimetype='application/pdf')
return HttpResponse('We had some errors<pre>%s</pre>' % escape(html))
|
bsd-3-clause
|
Python
|
fc13e6b523a4ba78835ab54d16f7a6caab0fc73b
|
Sort total attendance list by yes rsvps in python script.
|
jimbo00000/meetup-attendance-graph
|
data/calculate_totals.py
|
data/calculate_totals.py
|
# calculate_totals.py
import json
import datetime
from collections import defaultdict
# http://stackoverflow.com/questions/6999726/how-can-i-convert-a-datetime-object-to-milliseconds-since-epoch-unix-time-in-p
def unix_time(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
def unix_time_millis(dt):
return unix_time(dt) * 1000
event_rsvps = defaultdict(int)
event_counts = defaultdict(int)
event_descrs = defaultdict(list)
event_strings = defaultdict(str)
json_objects = []
with open('groups.json') as group_json:
group_data = json.load(group_json)
# Accumulate totals by month
for g in group_data[0]["groups"]:
if g == "totals":
continue
mfile = "meetup_history_" + g +".json"
#print(mfile)
with open(mfile) as mjson:
dat = json.load(mjson)
for d in dat:
msepoch = d['time']
#print(msepoch, d['yes_rsvp_count'])
x = datetime.date.fromtimestamp(msepoch/1000)
monthdate = datetime.datetime(x.year, x.month, 1)
#print(monthdate)
yess = d['yes_rsvp_count']
event_rsvps[monthdate] += yess
event_counts[monthdate] += 1
tup = (yess, d['group']['name'])
event_descrs[monthdate].append(tup)
# Sort list of tuples by attendance numbers and assemble HTML string
for k in event_descrs:
event_descrs[k].sort(key=lambda tup: tup[0])
for y in reversed(event_descrs[k]):
descr = "<strong>" + str(y[0]) + "</strong>"
descr += " "
descr += y[1] + "<br>"
event_strings[k] += descr
# Assemble into a json object
for k,v in sorted(event_rsvps.iteritems()):
#print(k,v, event_counts[k])
millis = unix_time_millis(k)
#print('%f' % millis)
#print('{0:f}'.format(millis))
#print(int(millis))
jo = {}
jo['time'] = int(millis)
jo['yes_rsvp_count'] = v
jo['name'] = k.strftime("%B %Y")
jo['description'] = event_strings[k]
jo['group'] = {}
jo['group']['name'] = "Monthly Totals"
jo['group']['urlname'] = "Monthly Totals"
#print(jo)
json_objects.append(jo)
#print json_objects
with open('meetup_history_totals.json', 'w+') as tots:
json.dump(json_objects, tots)
|
# calculate_totals.py
import json
import datetime
from collections import defaultdict
# http://stackoverflow.com/questions/6999726/how-can-i-convert-a-datetime-object-to-milliseconds-since-epoch-unix-time-in-p
def unix_time(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
def unix_time_millis(dt):
return unix_time(dt) * 1000
event_rsvps = defaultdict(int)
event_counts = defaultdict(int)
event_descrs = defaultdict(str)
json_objects = []
with open('groups.json') as group_json:
group_data = json.load(group_json)
# Accumulate totals by month
for g in group_data[0]["groups"]:
if g == "totals":
continue
mfile = "meetup_history_" + g +".json"
#print(mfile)
with open(mfile) as mjson:
dat = json.load(mjson)
for d in dat:
msepoch = d['time']
#print(msepoch, d['yes_rsvp_count'])
x = datetime.date.fromtimestamp(msepoch/1000)
monthdate = datetime.datetime(x.year, x.month, 1)
#print(monthdate)
event_rsvps[monthdate] += d['yes_rsvp_count']
event_counts[monthdate] += 1
descr = "<strong>" + str(d['yes_rsvp_count']) + "</strong>"
descr += " "
descr += d['group']['name'] + "<br>"
event_descrs[monthdate] += descr
# Assemble into a json object
for k,v in sorted(event_rsvps.iteritems()):
#print(k,v, event_counts[k])
millis = unix_time_millis(k)
#print('%f' % millis)
#print('{0:f}'.format(millis))
#print(int(millis))
jo = {}
jo['time'] = int(millis)
jo['yes_rsvp_count'] = v
jo['name'] = k.strftime("%B %Y")
jo['description'] = event_descrs[k]
jo['group'] = {}
jo['group']['name'] = "Monthly Totals"
jo['group']['urlname'] = "Monthly Totals"
#print(jo)
json_objects.append(jo)
#print json_objects
with open('meetup_history_totals.json', 'w+') as tots:
json.dump(json_objects, tots)
|
mit
|
Python
|
81ced1c9642fa8c364ce9a840adecde633c96b42
|
Remove disable pylint for paginator error and fix syntax
|
itsjeyd/edx-platform,hamzehd/edx-platform,franosincic/edx-platform,inares/edx-platform,gsehub/edx-platform,Stanford-Online/edx-platform,angelapper/edx-platform,ahmedaljazzar/edx-platform,JioEducation/edx-platform,jolyonb/edx-platform,CourseTalk/edx-platform,ahmadiga/min_edx,caesar2164/edx-platform,tanmaykm/edx-platform,proversity-org/edx-platform,amir-qayyum-khan/edx-platform,kxliugang/edx-platform,JioEducation/edx-platform,devs1991/test_edx_docmode,defance/edx-platform,Endika/edx-platform,shabab12/edx-platform,jamesblunt/edx-platform,appsembler/edx-platform,ubc/edx-platform,10clouds/edx-platform,IndonesiaX/edx-platform,synergeticsedx/deployment-wipro,Softmotions/edx-platform,kursitet/edx-platform,doganov/edx-platform,Lektorium-LLC/edx-platform,Lektorium-LLC/edx-platform,cpennington/edx-platform,jamiefolsom/edx-platform,ZLLab-Mooc/edx-platform,lduarte1991/edx-platform,don-github/edx-platform,cognitiveclass/edx-platform,mahendra-r/edx-platform,bigdatauniversity/edx-platform,ovnicraft/edx-platform,alexthered/kienhoc-platform,jbzdak/edx-platform,deepsrijit1105/edx-platform,Semi-global/edx-platform,louyihua/edx-platform,MakeHer/edx-platform,stvstnfrd/edx-platform,mitocw/edx-platform,zhenzhai/edx-platform,jjmiranda/edx-platform,Kalyzee/edx-platform,Edraak/edraak-platform,SivilTaram/edx-platform,ahmedaljazzar/edx-platform,utecuy/edx-platform,simbs/edx-platform,arbrandes/edx-platform,SivilTaram/edx-platform,alu042/edx-platform,pabloborrego93/edx-platform,kmoocdev2/edx-platform,ampax/edx-platform,utecuy/edx-platform,jjmiranda/edx-platform,hamzehd/edx-platform,ferabra/edx-platform,tanmaykm/edx-platform,wwj718/edx-platform,jamesblunt/edx-platform,hastexo/edx-platform,mbareta/edx-platform-ft,itsjeyd/edx-platform,chauhanhardik/populo,jolyonb/edx-platform,don-github/edx-platform,philanthropy-u/edx-platform,adoosii/edx-platform,teltek/edx-platform,miptliot/edx-platform,devs1991/test_edx_docmode,jbzdak/edx-platform,ovnicraft/edx-platform,ahmadiga/min_edx,cognitiveclass/edx-platform,playm2mboy/edx-platform,nanolearningllc/edx-platform-cypress,playm2mboy/edx-platform,etzhou/edx-platform,alexthered/kienhoc-platform,chrisndodge/edx-platform,devs1991/test_edx_docmode,doganov/edx-platform,hastexo/edx-platform,inares/edx-platform,10clouds/edx-platform,UOMx/edx-platform,caesar2164/edx-platform,eduNEXT/edunext-platform,vikas1885/test1,antoviaque/edx-platform,halvertoluke/edx-platform,ferabra/edx-platform,cecep-edu/edx-platform,rismalrv/edx-platform,adoosii/edx-platform,zerobatu/edx-platform,chauhanhardik/populo_2,louyihua/edx-platform,edx/edx-platform,jamiefolsom/edx-platform,mbareta/edx-platform-ft,pepeportela/edx-platform,etzhou/edx-platform,4eek/edx-platform,doismellburning/edx-platform,gymnasium/edx-platform,Edraak/circleci-edx-platform,xingyepei/edx-platform,ESOedX/edx-platform,shurihell/testasia,jamesblunt/edx-platform,adoosii/edx-platform,proversity-org/edx-platform,bitifirefly/edx-platform,JCBarahona/edX,nttks/edx-platform,kmoocdev2/edx-platform,zerobatu/edx-platform,shashank971/edx-platform,jamesblunt/edx-platform,philanthropy-u/edx-platform,nttks/edx-platform,IONISx/edx-platform,mahendra-r/edx-platform,Ayub-Khan/edx-platform,deepsrijit1105/edx-platform,halvertoluke/edx-platform,pepeportela/edx-platform,cecep-edu/edx-platform,procangroup/edx-platform,simbs/edx-platform,alexthered/kienhoc-platform,synergeticsedx/deployment-wipro,mitocw/edx-platform,Semi-global/edx-platform,J861449197/edx-platform,leansoft/edx-platform,appliedx/edx-platform,chauhanhardik/populo,doganov/edx-platform,inares/edx-platform,fintech-circle/edx-platform,J861449197/edx-platform,J861449197/edx-platform,romain-li/edx-platform,zerobatu/edx-platform,adoosii/edx-platform,kxliugang/edx-platform,edx/edx-platform,franosincic/edx-platform,doismellburning/edx-platform,tanmaykm/edx-platform,eduNEXT/edx-platform,appliedx/edx-platform,rismalrv/edx-platform,prarthitm/edxplatform,JCBarahona/edX,pepeportela/edx-platform,arbrandes/edx-platform,msegado/edx-platform,CredoReference/edx-platform,zofuthan/edx-platform,philanthropy-u/edx-platform,zofuthan/edx-platform,chauhanhardik/populo_2,Ayub-Khan/edx-platform,arbrandes/edx-platform,JioEducation/edx-platform,zubair-arbi/edx-platform,angelapper/edx-platform,edx-solutions/edx-platform,pomegranited/edx-platform,MakeHer/edx-platform,msegado/edx-platform,gsehub/edx-platform,antoviaque/edx-platform,Semi-global/edx-platform,shurihell/testasia,TeachAtTUM/edx-platform,cecep-edu/edx-platform,devs1991/test_edx_docmode,amir-qayyum-khan/edx-platform,lduarte1991/edx-platform,zofuthan/edx-platform,deepsrijit1105/edx-platform,wwj718/edx-platform,halvertoluke/edx-platform,devs1991/test_edx_docmode,marcore/edx-platform,devs1991/test_edx_docmode,rismalrv/edx-platform,louyihua/edx-platform,simbs/edx-platform,pabloborrego93/edx-platform,ampax/edx-platform,JCBarahona/edX,nanolearningllc/edx-platform-cypress-2,doismellburning/edx-platform,zofuthan/edx-platform,nanolearningllc/edx-platform-cypress,TeachAtTUM/edx-platform,solashirai/edx-platform,CredoReference/edx-platform,iivic/BoiseStateX,nanolearningllc/edx-platform-cypress,Stanford-Online/edx-platform,jamiefolsom/edx-platform,ampax/edx-platform,ESOedX/edx-platform,UOMx/edx-platform,nttks/edx-platform,zerobatu/edx-platform,Kalyzee/edx-platform,Lektorium-LLC/edx-platform,Softmotions/edx-platform,cecep-edu/edx-platform,kursitet/edx-platform,vikas1885/test1,zhenzhai/edx-platform,leansoft/edx-platform,RPI-OPENEDX/edx-platform,longmen21/edx-platform,ahmedaljazzar/edx-platform,10clouds/edx-platform,playm2mboy/edx-platform,shurihell/testasia,mahendra-r/edx-platform,prarthitm/edxplatform,playm2mboy/edx-platform,don-github/edx-platform,don-github/edx-platform,doismellburning/edx-platform,eduNEXT/edx-platform,cpennington/edx-platform,cecep-edu/edx-platform,CourseTalk/edx-platform,chauhanhardik/populo_2,gymnasium/edx-platform,raccoongang/edx-platform,stvstnfrd/edx-platform,ampax/edx-platform,solashirai/edx-platform,fintech-circle/edx-platform,xingyepei/edx-platform,RPI-OPENEDX/edx-platform,marcore/edx-platform,ovnicraft/edx-platform,ahmadiga/min_edx,Edraak/edx-platform,synergeticsedx/deployment-wipro,pabloborrego93/edx-platform,appsembler/edx-platform,ovnicraft/edx-platform,lduarte1991/edx-platform,xingyepei/edx-platform,CredoReference/edx-platform,IndonesiaX/edx-platform,kxliugang/edx-platform,ahmedaljazzar/edx-platform,RPI-OPENEDX/edx-platform,fly19890211/edx-platform,xingyepei/edx-platform,pomegranited/edx-platform,edx-solutions/edx-platform,franosincic/edx-platform,wwj718/edx-platform,JioEducation/edx-platform,waheedahmed/edx-platform,ferabra/edx-platform,eduNEXT/edx-platform,gymnasium/edx-platform,a-parhom/edx-platform,procangroup/edx-platform,ferabra/edx-platform,bigdatauniversity/edx-platform,eduNEXT/edunext-platform,xinjiguaike/edx-platform,Endika/edx-platform,analyseuc3m/ANALYSE-v1,Edraak/edx-platform,nanolearningllc/edx-platform-cypress-2,mcgachey/edx-platform,zubair-arbi/edx-platform,Softmotions/edx-platform,romain-li/edx-platform,longmen21/edx-platform,edry/edx-platform,cognitiveclass/edx-platform,waheedahmed/edx-platform,RPI-OPENEDX/edx-platform,Livit/Livit.Learn.EdX,xinjiguaike/edx-platform,hamzehd/edx-platform,zhenzhai/edx-platform,doismellburning/edx-platform,simbs/edx-platform,miptliot/edx-platform,Endika/edx-platform,leansoft/edx-platform,jzoldak/edx-platform,vikas1885/test1,appliedx/edx-platform,solashirai/edx-platform,CourseTalk/edx-platform,nanolearningllc/edx-platform-cypress,shurihell/testasia,SivilTaram/edx-platform,kmoocdev2/edx-platform,analyseuc3m/ANALYSE-v1,itsjeyd/edx-platform,eduNEXT/edunext-platform,a-parhom/edx-platform,waheedahmed/edx-platform,mahendra-r/edx-platform,defance/edx-platform,hastexo/edx-platform,alexthered/kienhoc-platform,Softmotions/edx-platform,Edraak/edraak-platform,teltek/edx-platform,gsehub/edx-platform,franosincic/edx-platform,fintech-circle/edx-platform,xingyepei/edx-platform,xinjiguaike/edx-platform,jbzdak/edx-platform,halvertoluke/edx-platform,zhenzhai/edx-platform,philanthropy-u/edx-platform,Edraak/edx-platform,Ayub-Khan/edx-platform,EDUlib/edx-platform,amir-qayyum-khan/edx-platform,miptliot/edx-platform,zhenzhai/edx-platform,Lektorium-LLC/edx-platform,doganov/edx-platform,tanmaykm/edx-platform,ubc/edx-platform,franosincic/edx-platform,wwj718/edx-platform,IONISx/edx-platform,xinjiguaike/edx-platform,4eek/edx-platform,ZLLab-Mooc/edx-platform,synergeticsedx/deployment-wipro,hamzehd/edx-platform,shurihell/testasia,pomegranited/edx-platform,IndonesiaX/edx-platform,solashirai/edx-platform,bitifirefly/edx-platform,deepsrijit1105/edx-platform,alu042/edx-platform,iivic/BoiseStateX,EDUlib/edx-platform,BehavioralInsightsTeam/edx-platform,Ayub-Khan/edx-platform,ahmadiga/min_edx,nttks/edx-platform,ZLLab-Mooc/edx-platform,edx-solutions/edx-platform,fly19890211/edx-platform,chauhanhardik/populo,kmoocdev2/edx-platform,vikas1885/test1,shashank971/edx-platform,doganov/edx-platform,naresh21/synergetics-edx-platform,raccoongang/edx-platform,Edraak/circleci-edx-platform,IONISx/edx-platform,pabloborrego93/edx-platform,edx-solutions/edx-platform,kmoocdev2/edx-platform,IndonesiaX/edx-platform,angelapper/edx-platform,ovnicraft/edx-platform,Semi-global/edx-platform,ferabra/edx-platform,cognitiveclass/edx-platform,Edraak/edx-platform,prarthitm/edxplatform,BehavioralInsightsTeam/edx-platform,edx/edx-platform,iivic/BoiseStateX,JCBarahona/edX,zubair-arbi/edx-platform,Edraak/edx-platform,kursitet/edx-platform,stvstnfrd/edx-platform,vikas1885/test1,devs1991/test_edx_docmode,shabab12/edx-platform,procangroup/edx-platform,alu042/edx-platform,bigdatauniversity/edx-platform,teltek/edx-platform,hastexo/edx-platform,edry/edx-platform,ESOedX/edx-platform,Stanford-Online/edx-platform,pomegranited/edx-platform,antoviaque/edx-platform,TeachAtTUM/edx-platform,chrisndodge/edx-platform,kxliugang/edx-platform,cpennington/edx-platform,appsembler/edx-platform,eduNEXT/edx-platform,UOMx/edx-platform,shashank971/edx-platform,nanolearningllc/edx-platform-cypress-2,waheedahmed/edx-platform,Edraak/circleci-edx-platform,kxliugang/edx-platform,louyihua/edx-platform,marcore/edx-platform,cpennington/edx-platform,IONISx/edx-platform,mcgachey/edx-platform,chrisndodge/edx-platform,caesar2164/edx-platform,proversity-org/edx-platform,chrisndodge/edx-platform,jjmiranda/edx-platform,jolyonb/edx-platform,iivic/BoiseStateX,chauhanhardik/populo,utecuy/edx-platform,Edraak/edraak-platform,bitifirefly/edx-platform,jamesblunt/edx-platform,Stanford-Online/edx-platform,ahmadiga/min_edx,edry/edx-platform,ZLLab-Mooc/edx-platform,MakeHer/edx-platform,4eek/edx-platform,pepeportela/edx-platform,angelapper/edx-platform,Livit/Livit.Learn.EdX,longmen21/edx-platform,etzhou/edx-platform,procangroup/edx-platform,defance/edx-platform,defance/edx-platform,zubair-arbi/edx-platform,amir-qayyum-khan/edx-platform,CredoReference/edx-platform,a-parhom/edx-platform,xinjiguaike/edx-platform,shashank971/edx-platform,leansoft/edx-platform,J861449197/edx-platform,alexthered/kienhoc-platform,halvertoluke/edx-platform,caesar2164/edx-platform,fly19890211/edx-platform,chauhanhardik/populo,BehavioralInsightsTeam/edx-platform,itsjeyd/edx-platform,inares/edx-platform,shashank971/edx-platform,devs1991/test_edx_docmode,romain-li/edx-platform,rismalrv/edx-platform,analyseuc3m/ANALYSE-v1,kursitet/edx-platform,etzhou/edx-platform,kursitet/edx-platform,EDUlib/edx-platform,bitifirefly/edx-platform,stvstnfrd/edx-platform,MakeHer/edx-platform,jamiefolsom/edx-platform,gsehub/edx-platform,alu042/edx-platform,fintech-circle/edx-platform,mcgachey/edx-platform,Livit/Livit.Learn.EdX,marcore/edx-platform,msegado/edx-platform,Endika/edx-platform,waheedahmed/edx-platform,edry/edx-platform,mitocw/edx-platform,utecuy/edx-platform,mbareta/edx-platform-ft,TeachAtTUM/edx-platform,Livit/Livit.Learn.EdX,4eek/edx-platform,IndonesiaX/edx-platform,bigdatauniversity/edx-platform,nanolearningllc/edx-platform-cypress-2,antoviaque/edx-platform,ubc/edx-platform,10clouds/edx-platform,adoosii/edx-platform,simbs/edx-platform,appsembler/edx-platform,shabab12/edx-platform,nanolearningllc/edx-platform-cypress,chauhanhardik/populo_2,leansoft/edx-platform,Softmotions/edx-platform,appliedx/edx-platform,utecuy/edx-platform,CourseTalk/edx-platform,edx/edx-platform,BehavioralInsightsTeam/edx-platform,SivilTaram/edx-platform,fly19890211/edx-platform,raccoongang/edx-platform,hamzehd/edx-platform,mahendra-r/edx-platform,msegado/edx-platform,jzoldak/edx-platform,ubc/edx-platform,mcgachey/edx-platform,naresh21/synergetics-edx-platform,jbzdak/edx-platform,nttks/edx-platform,pomegranited/edx-platform,mcgachey/edx-platform,iivic/BoiseStateX,playm2mboy/edx-platform,jzoldak/edx-platform,don-github/edx-platform,zubair-arbi/edx-platform,Edraak/circleci-edx-platform,wwj718/edx-platform,lduarte1991/edx-platform,shabab12/edx-platform,rismalrv/edx-platform,ESOedX/edx-platform,eduNEXT/edunext-platform,zofuthan/edx-platform,teltek/edx-platform,jbzdak/edx-platform,longmen21/edx-platform,Kalyzee/edx-platform,prarthitm/edxplatform,RPI-OPENEDX/edx-platform,gymnasium/edx-platform,proversity-org/edx-platform,fly19890211/edx-platform,IONISx/edx-platform,Edraak/circleci-edx-platform,Kalyzee/edx-platform,msegado/edx-platform,naresh21/synergetics-edx-platform,analyseuc3m/ANALYSE-v1,a-parhom/edx-platform,Kalyzee/edx-platform,Semi-global/edx-platform,bigdatauniversity/edx-platform,Edraak/edraak-platform,J861449197/edx-platform,longmen21/edx-platform,inares/edx-platform,appliedx/edx-platform,EDUlib/edx-platform,etzhou/edx-platform,mbareta/edx-platform-ft,JCBarahona/edX,UOMx/edx-platform,jjmiranda/edx-platform,ZLLab-Mooc/edx-platform,4eek/edx-platform,jzoldak/edx-platform,jamiefolsom/edx-platform,raccoongang/edx-platform,arbrandes/edx-platform,romain-li/edx-platform,ubc/edx-platform,Ayub-Khan/edx-platform,naresh21/synergetics-edx-platform,solashirai/edx-platform,cognitiveclass/edx-platform,romain-li/edx-platform,miptliot/edx-platform,mitocw/edx-platform,edry/edx-platform,zerobatu/edx-platform,MakeHer/edx-platform,nanolearningllc/edx-platform-cypress-2,SivilTaram/edx-platform,chauhanhardik/populo_2,bitifirefly/edx-platform,jolyonb/edx-platform
|
openedx/core/lib/api/paginators.py
|
openedx/core/lib/api/paginators.py
|
""" Paginatator methods for edX API implementations."""
from django.http import Http404
from django.core.paginator import Paginator, InvalidPage
def paginate_search_results(object_class, search_results, page_size, page):
"""
Takes edx-search results and returns a Page object populated
with db objects for that page.
:param object_class: Model class to use when querying the db for objects.
:param search_results: edX-search results.
:param page_size: Number of results per page.
:param page: Page number.
:return: Paginator object with model objects
"""
paginator = Paginator(search_results['results'], page_size)
# This code is taken from within the GenericAPIView#paginate_queryset method.
# It is common code, but
try:
page_number = paginator.validate_number(page)
except InvalidPage:
if page == 'last':
page_number = paginator.num_pages
else:
raise Http404("Page is not 'last', nor can it be converted to an int.")
try:
paged_results = paginator.page(page_number)
except InvalidPage as exception:
raise Http404(
"Invalid page {page_number}: {message}".format(
page_number=page_number,
message=str(exception)
)
)
search_queryset_pks = [item['data']['pk'] for item in paged_results.object_list]
queryset = object_class.objects.filter(pk__in=search_queryset_pks)
def ordered_objects(primary_key):
""" Returns database object matching the search result object"""
for obj in queryset:
if obj.pk == primary_key:
return obj
# map over the search results and get a list of database objects in the same order
object_results = map(ordered_objects, search_queryset_pks)
paged_results.object_list = object_results
return paged_results
|
""" Paginatator methods for edX API implementations."""
from django.http import Http404
from django.utils.translation import ugettext as _
from django.core.paginator import Paginator, InvalidPage
def paginate_search_results(object_class, search_results, page_size, page):
"""
Takes edx-search results and returns a Page object populated
with db objects for that page.
:param object_class: Model class to use when querying the db for objects.
:param search_results: edX-search results.
:param page_size: Number of results per page.
:param page: Page number.
:return: Paginator object with model objects
"""
paginator = Paginator(search_results['results'], page_size)
# This code is taken from within the GenericAPIView#paginate_queryset method.
# It is common code, but
try:
page_number = paginator.validate_number(page)
except InvalidPage:
if page == 'last':
page_number = paginator.num_pages
else:
raise Http404(_("Page is not 'last', nor can it be converted to an int."))
try:
paged_results = paginator.page(page_number)
except InvalidPage as e: # pylint: disable=invalid-name
raise Http404(_('Invalid page (%(page_number)s): %(message)s') % {
'page_number': page_number,
'message': str(e)
})
search_queryset_pks = [item['data']['pk'] for item in paged_results.object_list]
queryset = object_class.objects.filter(pk__in=search_queryset_pks)
def ordered_objects(primary_key):
""" Returns database object matching the search result object"""
for obj in queryset:
if obj.pk == primary_key:
return obj
# map over the search results and get a list of database objects in the same order
object_results = map(ordered_objects, search_queryset_pks)
paged_results.object_list = object_results
return paged_results
|
agpl-3.0
|
Python
|
c775b159b310afd323945afcb9dba771731a382b
|
use repr for log serialization if json fails
|
dpausp/arguments,dpausp/arguments,dpausp/arguments,dpausp/arguments
|
src/ekklesia_portal/__init__.py
|
src/ekklesia_portal/__init__.py
|
import eliot
import logging
import sys
from eliot.stdlib import EliotHandler
from eliot.json import EliotJSONEncoder
class MyEncoder(EliotJSONEncoder):
def default(self, obj):
try:
return EliotJSONEncoder.default(self, obj)
except TypeError:
return repr(obj)
logging.getLogger().addHandler(EliotHandler())
logging.getLogger().setLevel(logging.DEBUG)
eliot.to_file(sys.stdout, encoder=MyEncoder)
logging.captureWarnings(True)
logg = logging.getLogger(__name__)
logging.getLogger("parso").setLevel(logging.WARN)
|
import eliot
import logging
import sys
from eliot.stdlib import EliotHandler
logging.getLogger().addHandler(EliotHandler())
logging.getLogger().setLevel(logging.DEBUG)
eliot.to_file(sys.stdout)
logging.captureWarnings(True)
logg = logging.getLogger(__name__)
logging.getLogger("parso").setLevel(logging.WARN)
logg.info("init")
|
agpl-3.0
|
Python
|
a3e05e67b4907f6f97462aa958e4f344f92d1e72
|
add a new compare method
|
F5Networks/f5-ansible-modules
|
library/module_utils/network/f5/compare.py
|
library/module_utils/network/f5/compare.py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
def cmp_simple_list(want, have):
if want is None:
return None
if have is None and want in ['', 'none']:
return None
if have is not None and want in ['', 'none']:
return []
if have is None:
return want
if set(want) != set(have):
return want
return None
def cmp_str_with_none(want, have):
if want is None:
return None
if have is None and want == '':
return None
if want != have:
return want
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
def cmp_simple_list(want, have):
if want is None:
return None
if have is None and want in ['', 'none']:
return None
if have is not None and want in ['', 'none']:
return []
if have is None:
return want
if set(want) != set(have):
return want
return None
|
mit
|
Python
|
795e9734cc802caa8847a9a2b22f3f16297462bc
|
use combined provider in nsi2 provider setup
|
NORDUnet/opennsa,jab1982/opennsa,jab1982/opennsa,NORDUnet/opennsa,NORDUnet/opennsa
|
opennsa/protocols/nsi2/__init__.py
|
opennsa/protocols/nsi2/__init__.py
|
"""
Various protocol initialization.
Author: Henrik Thostrup Jensen <[email protected]>
Copyright: NORDUnet (2011-2012)
"""
from twisted.web import resource, server
from opennsa.protocols.shared import resource as soapresource
from opennsa.protocols.nsi2 import providerservice, providerclient, provider, \
requesterservice, requesterclient, requester
def setupProvider(nsi_service, top_resource, service_provider, host, port, tls=False, ctx_factory=None):
soap_resource = soapresource.setupSOAPResource(top_resource, 'CS2')
provider_client = providerclient.ProviderClient(ctx_factory)
nsi2_provider = provider.Provider(service_provider, provider_client)
providerservice.ProviderService(soap_resource, nsi2_provider)
return nsi2_provider
def setupRequester(top_resource, host, port, tls=False, ctx_factory=None, callback_timeout=None):
resource_name = 'RequesterService2'
# copied from nsi1.__init__
def _createServiceURL(host, port, tls=False):
proto_scheme = 'https://' if tls else 'http://'
service_url = proto_scheme + '%s:%i/NSI/services/%s' % (host,port, resource_name)
return service_url
service_url = _createServiceURL(host, port, tls)
soap_resource = soapresource.setupSOAPResource(top_resource, resource_name)
requester_client = requesterclient.RequesterClient(service_url)
nsi_requester = requester.Requester(requester_client, callback_timeout=callback_timeout)
requester_service = requesterservice.RequesterService(soap_resource, nsi_requester)
return nsi_requester
# copied from nsi1.__init__
def createRequesterClient(host, port, tls=False, ctx_factory=None, callback_timeout=None):
top_resource = resource.Resource()
nsi_requester = setupRequester(top_resource, host, port, tls, ctx_factory, callback_timeout)
site = server.Site(top_resource, logPath='/dev/null')
return nsi_requester, site
|
"""
Various protocol initialization.
Author: Henrik Thostrup Jensen <[email protected]>
Copyright: NORDUnet (2011-2012)
"""
from twisted.web import resource, server
from opennsa.protocols.shared import resource as soapresource
from opennsa.protocols.nsi2 import providerservice, providerclient, provider, \
requesterservice, requesterclient, requester
def setupProvider(nsi_service, top_resource, service_provider, host, port, tls=False, ctx_factory=None):
soap_resource = soapresource.setupSOAPResource(top_resource, 'CS2')
provider_client = providerclient.ProviderClient(ctx_factory)
nsi2_provider = provider.Provider(service_provider)
providerservice.ProviderService(soap_resource, nsi2_provider)
return provider_client
def setupRequester(top_resource, host, port, tls=False, ctx_factory=None, callback_timeout=None):
resource_name = 'RequesterService2'
# copied from nsi1.__init__
def _createServiceURL(host, port, tls=False):
proto_scheme = 'https://' if tls else 'http://'
service_url = proto_scheme + '%s:%i/NSI/services/%s' % (host,port, resource_name)
return service_url
service_url = _createServiceURL(host, port, tls)
soap_resource = soapresource.setupSOAPResource(top_resource, resource_name)
requester_client = requesterclient.RequesterClient(service_url)
nsi_requester = requester.Requester(requester_client, callback_timeout=callback_timeout)
requester_service = requesterservice.RequesterService(soap_resource, nsi_requester)
return nsi_requester
# copied from nsi1.__init__
def createRequesterClient(host, port, tls=False, ctx_factory=None, callback_timeout=None):
top_resource = resource.Resource()
nsi_requester = setupRequester(top_resource, host, port, tls, ctx_factory, callback_timeout)
site = server.Site(top_resource, logPath='/dev/null')
return nsi_requester, site
|
bsd-3-clause
|
Python
|
582964f9da6029cd089117496babf9267c41ecd5
|
Reduce queries used to lookup config
|
evewspace/eve-wspace,nyrocron/eve-wspace,hybrid1969/eve-wspace,hybrid1969/eve-wspace,acdervis/eve-wspace,marbindrakon/eve-wspace,Unsettled/eve-wspace,proycon/eve-wspace,mmalyska/eve-wspace,evewspace/eve-wspace,gpapaz/eve-wspace,acdervis/eve-wspace,proycon/eve-wspace,marbindrakon/eve-wspace,Zumochi/eve-wspace,marbindrakon/eve-wspace,gpapaz/eve-wspace,Unsettled/eve-wspace,gpapaz/eve-wspace,hybrid1969/eve-wspace,Unsettled/eve-wspace,nyrocron/eve-wspace,proycon/eve-wspace,Zumochi/eve-wspace,nyrocron/eve-wspace,Maarten28/eve-wspace,Maarten28/eve-wspace,mmalyska/eve-wspace,acdervis/eve-wspace,marbindrakon/eve-wspace,proycon/eve-wspace,mmalyska/eve-wspace,Maarten28/eve-wspace,mmalyska/eve-wspace,Unsettled/eve-wspace,Zumochi/eve-wspace,evewspace/eve-wspace,Maarten28/eve-wspace,acdervis/eve-wspace,Zumochi/eve-wspace,gpapaz/eve-wspace,evewspace/eve-wspace,hybrid1969/eve-wspace,nyrocron/eve-wspace
|
evewspace/core/utils.py
|
evewspace/core/utils.py
|
# Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from core.models import ConfigEntry
def get_config(name, user):
"""
Gets the correct config value for the given key name.
Value with the given user has priority over any default value.
"""
try:
return ConfigEntry.objects.get(name=name, user=user)
except ConfigEntry.DoesNotExist:
return ConfigEntry.objects.get(name=name, user=None)
|
# Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from core.models import ConfigEntry
def get_config(name, user):
"""
Gets the correct config value for the given key name.
Value with the given user has priority over any default value.
"""
if ConfigEntry.objects.filter(name=name, user=user).count() != 0:
return ConfigEntry.objects.get(name=name, user=user)
# No user value, look for global / default
if ConfigEntry.objects.filter(name=name, user=None).count() != 0:
return ConfigEntry.objects.get(name=name, user=None)
else:
raise KeyError("No configuration entry with key %s was found." % name)
|
apache-2.0
|
Python
|
fa9bc00d09cfd173b99eaba3eb17bdfc49100a5b
|
Add explicit name export
|
fnielsen/dasem,fnielsen/dasem
|
dasem/__init__.py
|
dasem/__init__.py
|
"""dasem."""
from __future__ import absolute_import
from .fullmonty import Word2Vec
__all__ = ['Word2Vec']
|
"""dasem."""
from __future__ import absolute_import
from .fullmonty import Word2Vec
|
apache-2.0
|
Python
|
c1a3d40295a7c4b5f178ae78b49a90c317844371
|
Replace double quotes with single quotes for flake8 check
|
adfinis-sygroup/timed-backend,adfinis-sygroup/timed-backend,adfinis-sygroup/timed-backend
|
timed/reports/tests/test_notify_reviewers_unverified.py
|
timed/reports/tests/test_notify_reviewers_unverified.py
|
from datetime import date
import pytest
from django.core.management import call_command
from timed.employment.factories import UserFactory
from timed.projects.factories import ProjectFactory, TaskFactory
from timed.tracking.factories import ReportFactory
@pytest.mark.freeze_time('2017-8-4')
def test_notify_reviewers(db, mailoutbox):
"""Test time range 2017-7-1 till 2017-7-31."""
# a reviewer which will be notified
reviewer_work = UserFactory.create()
project_work = ProjectFactory.create()
project_work.reviewers.add(reviewer_work)
task_work = TaskFactory.create(project=project_work)
ReportFactory.create(date=date(2017, 7, 1), task=task_work,
verified_by=None)
# a reviewer which doesn't have any unverfied reports
reviewer_no_work = UserFactory.create()
project_no_work = ProjectFactory.create()
project_no_work.reviewers.add(reviewer_no_work)
task_no_work = TaskFactory.create(project=project_no_work)
ReportFactory.create(date=date(2017, 7, 1), task=task_no_work,
verified_by=reviewer_no_work)
call_command(
'notify_reviewers_unverified',
'[email protected]',
'--message=This is a test'
)
# checks
mail = mailoutbox[0]
cc = mail.to[-1]
mail.to.pop()
for item in mail.body.split('\n'):
if 'test' in item:
msg = item.strip()
assert len(mailoutbox) == 1
assert mail.to == [reviewer_work.email]
url = (
'http://localhost:4200/analysis?fromDate=2017-07-01&'
'toDate=2017-07-31&reviewer=%d&editable=1'
) % reviewer_work.id
assert url in mail.body
assert msg == 'This is a test'
assert cc == '[email protected]'
|
from datetime import date
import pytest
from django.core.management import call_command
from timed.employment.factories import UserFactory
from timed.projects.factories import ProjectFactory, TaskFactory
from timed.tracking.factories import ReportFactory
@pytest.mark.freeze_time('2017-8-4')
def test_notify_reviewers(db, mailoutbox):
"""Test time range 2017-7-1 till 2017-7-31."""
# a reviewer which will be notified
reviewer_work = UserFactory.create()
project_work = ProjectFactory.create()
project_work.reviewers.add(reviewer_work)
task_work = TaskFactory.create(project=project_work)
ReportFactory.create(date=date(2017, 7, 1), task=task_work,
verified_by=None)
# a reviewer which doesn't have any unverfied reports
reviewer_no_work = UserFactory.create()
project_no_work = ProjectFactory.create()
project_no_work.reviewers.add(reviewer_no_work)
task_no_work = TaskFactory.create(project=project_no_work)
ReportFactory.create(date=date(2017, 7, 1), task=task_no_work,
verified_by=reviewer_no_work)
call_command(
'notify_reviewers_unverified',
'[email protected]',
'--message=This is a test'
)
# checks
mail = mailoutbox[0]
cc = mail.to[-1]
mail.to.pop()
for item in mail.body.split("\n"):
if "test" in item:
msg = item.strip()
assert len(mailoutbox) == 1
assert mail.to == [reviewer_work.email]
url = (
'http://localhost:4200/analysis?fromDate=2017-07-01&'
'toDate=2017-07-31&reviewer=%d&editable=1'
) % reviewer_work.id
assert url in mail.body
assert msg == 'This is a test'
assert cc == '[email protected]'
|
agpl-3.0
|
Python
|
32e00001ec29b0fe13f8c9b2c4dbd61232ba348a
|
Update starting offset
|
berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud
|
tools/db/copy_nonpartitioned_sentences_to_partitions.py
|
tools/db/copy_nonpartitioned_sentences_to_partitions.py
|
#!/usr/bin/env python3
import time
from mediawords.db import connect_to_db
from mediawords.util.log import create_logger
from mediawords.util.process import run_alone
log = create_logger(__name__)
def copy_nonpartitioned_sentences_to_partitions():
"""Gradually copy sentences from "story_sentences_nonpartitioned" to "story_sentences_partitioned"."""
# How many stories the sentences of which to copy at the same time
stories_chunk_size = 50 * 1000
db = connect_to_db()
# With 512 MB, database can deduplicate (sort) sentences in memory instead of disk
db.query("SET work_mem TO '512MB'")
max_stories_id = db.query("SELECT MAX(stories_id) FROM stories").flat()[0]
if max_stories_id is None:
raise Exception("Max. stories ID is None.")
log.info("Max. stories ID: {}".format(max_stories_id))
for start_stories_id in range(99900000, max_stories_id + 1, stories_chunk_size):
end_stories_id = start_stories_id + stories_chunk_size - 1
log.info("Copying sentences of stories_id BETWEEN {} AND {} to the partitioned table...".format(
start_stories_id,
end_stories_id,
))
copied_sentences = db.query(
'SELECT copy_chunk_of_nonpartitioned_sentences_to_partitions(%(start_stories_id)s, %(end_stories_id)s)',
{'start_stories_id': start_stories_id, 'end_stories_id': end_stories_id}
).flat()[0]
log.info("Copied {} sentences of stories_id BETWEEN {} AND {} to the partitioned table.".format(
copied_sentences,
start_stories_id,
end_stories_id,
))
log.info("All done!")
# Weird, but otherwise Ansible deployments to mctest don't work due to this script exit(0)ing right away
while True:
time.sleep(1)
if __name__ == '__main__':
run_alone(copy_nonpartitioned_sentences_to_partitions)
|
#!/usr/bin/env python3
import time
from mediawords.db import connect_to_db
from mediawords.util.log import create_logger
from mediawords.util.process import run_alone
log = create_logger(__name__)
def copy_nonpartitioned_sentences_to_partitions():
"""Gradually copy sentences from "story_sentences_nonpartitioned" to "story_sentences_partitioned"."""
# How many stories the sentences of which to copy at the same time
stories_chunk_size = 50 * 1000
db = connect_to_db()
# With 512 MB, database can deduplicate (sort) sentences in memory instead of disk
db.query("SET work_mem TO '512MB'")
max_stories_id = db.query("SELECT MAX(stories_id) FROM stories").flat()[0]
if max_stories_id is None:
raise Exception("Max. stories ID is None.")
log.info("Max. stories ID: {}".format(max_stories_id))
for start_stories_id in range(44000000, max_stories_id + 1, stories_chunk_size):
end_stories_id = start_stories_id + stories_chunk_size - 1
log.info("Copying sentences of stories_id BETWEEN {} AND {} to the partitioned table...".format(
start_stories_id,
end_stories_id,
))
copied_sentences = db.query(
'SELECT copy_chunk_of_nonpartitioned_sentences_to_partitions(%(start_stories_id)s, %(end_stories_id)s)',
{'start_stories_id': start_stories_id, 'end_stories_id': end_stories_id}
).flat()[0]
log.info("Copied {} sentences of stories_id BETWEEN {} AND {} to the partitioned table.".format(
copied_sentences,
start_stories_id,
end_stories_id,
))
log.info("All done!")
# Weird, but otherwise Ansible deployments to mctest don't work due to this script exit(0)ing right away
while True:
time.sleep(1)
if __name__ == '__main__':
run_alone(copy_nonpartitioned_sentences_to_partitions)
|
agpl-3.0
|
Python
|
608a0c75cba735e7d4a59fb941cd6e6135f3e7cf
|
Update reverse URL.
|
ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website
|
src/epiweb/apps/survey/views.py
|
src/epiweb/apps/survey/views.py
|
# -*- coding: utf-8 -*-
from django import forms
from django.template import Context, loader
from django.http import HttpResponse, HttpResponseRedirect
from django.db import transaction
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from epiweb.apps.survey import utils
from epiweb.apps.survey import models
from epiweb.apps.survey import example
from epiweb.apps.survey import profile_data
from epidb_client import EpiDBClient
from django.conf import settings
sfh = None
@login_required
def thanks(request):
return render_to_response('survey/thanks.html')
@login_required
def index(request):
global sfh
if sfh is None:
survey = example.survey()
sfh = utils.SurveyFormHelper(survey, request.user)
if request.method == 'POST':
form = sfh.create_form(request.POST)
if form.is_valid():
id = utils.send_survey_response(request.user, form._survey, form.cleaned_data)
utils.save_survey_response(request.user, form._survey, id)
return HttpResponseRedirect(reverse('epiweb.apps.survey.views.thanks'))
else:
form = sfh.create_form()
#js = utils.generate_js_helper(example.survey
jsh = utils.JavascriptHelper(example.survey(), request.user)
js = jsh.get_javascript()
return render_to_response('survey/index.html', {
'form': form,
'js': js
})
@login_required
def profile_index(request):
global sfh
if sfh is None:
survey = profile_data.UserProfile()
sfh = utils.SurveyFormHelper(survey, request.user)
if request.method == 'POST':
form = sfh.create_form(request.POST)
if form.is_valid():
utils.send_profile(request.user, form._survey, form.cleaned_data)
utils.save_profile(request.user, form.cleaned_data)
return HttpResponseRedirect(reverse('epiweb.apps.survey.views.profile_index'))
else:
form = sfh.create_form(utils.get_profile(request.user))
jsh = utils.JavascriptHelper(profile_data.UserProfile(), request.user)
js = jsh.get_javascript()
return render_to_response('profile/index.html', {
'form': form,
'js': js
})
|
# -*- coding: utf-8 -*-
from django import forms
from django.template import Context, loader
from django.http import HttpResponse, HttpResponseRedirect
from django.db import transaction
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from epiweb.apps.survey import utils
from epiweb.apps.survey import models
from epiweb.apps.survey import example
from epiweb.apps.survey import profile_data
from epidb_client import EpiDBClient
from django.conf import settings
sfh = None
@login_required
def thanks(request):
return render_to_response('survey/thanks.html')
@login_required
def index(request):
global sfh
if sfh is None:
survey = example.survey()
sfh = utils.SurveyFormHelper(survey, request.user)
if request.method == 'POST':
form = sfh.create_form(request.POST)
if form.is_valid():
id = utils.send_survey_response(request.user, form._survey, form.cleaned_data)
utils.save_survey_response(request.user, form._survey, id)
return HttpResponseRedirect(reverse('epiweb.apps.survey.survey_views.thanks'))
else:
form = sfh.create_form()
#js = utils.generate_js_helper(example.survey
jsh = utils.JavascriptHelper(example.survey(), request.user)
js = jsh.get_javascript()
return render_to_response('survey/index.html', {
'form': form,
'js': js
})
@login_required
def profile_index(request):
global sfh
if sfh is None:
survey = profile_data.UserProfile()
sfh = utils.SurveyFormHelper(survey, request.user)
if request.method == 'POST':
form = sfh.create_form(request.POST)
if form.is_valid():
utils.send_profile(request.user, form._survey, form.cleaned_data)
utils.save_profile(request.user, form.cleaned_data)
return HttpResponseRedirect(reverse('epiweb.apps.survey.profile_views.index'))
else:
form = sfh.create_form(utils.get_profile(request.user))
jsh = utils.JavascriptHelper(profile_data.UserProfile(), request.user)
js = jsh.get_javascript()
return render_to_response('profile/index.html', {
'form': form,
'js': js
})
|
agpl-3.0
|
Python
|
99e910f58fa54e9bce2518c6f9752ba1e8dbd6af
|
Stop tracking call size in bm diff
|
firebase/grpc,dgquintas/grpc,jtattermusch/grpc,firebase/grpc,ctiller/grpc,pszemus/grpc,grpc/grpc,stanley-cheung/grpc,sreecha/grpc,simonkuang/grpc,firebase/grpc,mehrdada/grpc,chrisdunelm/grpc,mehrdada/grpc,firebase/grpc,jtattermusch/grpc,mehrdada/grpc,chrisdunelm/grpc,donnadionne/grpc,sreecha/grpc,simonkuang/grpc,ncteisen/grpc,pszemus/grpc,sreecha/grpc,muxi/grpc,nicolasnoble/grpc,ncteisen/grpc,vjpai/grpc,donnadionne/grpc,grpc/grpc,firebase/grpc,jboeuf/grpc,simonkuang/grpc,pszemus/grpc,carl-mastrangelo/grpc,nicolasnoble/grpc,pszemus/grpc,stanley-cheung/grpc,mehrdada/grpc,dgquintas/grpc,stanley-cheung/grpc,simonkuang/grpc,donnadionne/grpc,nicolasnoble/grpc,jboeuf/grpc,dgquintas/grpc,Vizerai/grpc,simonkuang/grpc,mehrdada/grpc,vjpai/grpc,Vizerai/grpc,ctiller/grpc,pszemus/grpc,pszemus/grpc,ctiller/grpc,mehrdada/grpc,carl-mastrangelo/grpc,grpc/grpc,jboeuf/grpc,jtattermusch/grpc,pszemus/grpc,muxi/grpc,jtattermusch/grpc,dgquintas/grpc,ctiller/grpc,ctiller/grpc,thinkerou/grpc,jboeuf/grpc,ctiller/grpc,carl-mastrangelo/grpc,vjpai/grpc,vjpai/grpc,mehrdada/grpc,jtattermusch/grpc,grpc/grpc,ncteisen/grpc,muxi/grpc,nicolasnoble/grpc,jboeuf/grpc,sreecha/grpc,ejona86/grpc,carl-mastrangelo/grpc,simonkuang/grpc,vjpai/grpc,dgquintas/grpc,ejona86/grpc,thinkerou/grpc,firebase/grpc,nicolasnoble/grpc,Vizerai/grpc,grpc/grpc,vjpai/grpc,ctiller/grpc,ejona86/grpc,carl-mastrangelo/grpc,thinkerou/grpc,nicolasnoble/grpc,Vizerai/grpc,muxi/grpc,sreecha/grpc,jtattermusch/grpc,stanley-cheung/grpc,ncteisen/grpc,ncteisen/grpc,firebase/grpc,jboeuf/grpc,ncteisen/grpc,donnadionne/grpc,pszemus/grpc,vjpai/grpc,sreecha/grpc,stanley-cheung/grpc,ctiller/grpc,ncteisen/grpc,simonkuang/grpc,thinkerou/grpc,simonkuang/grpc,ncteisen/grpc,pszemus/grpc,carl-mastrangelo/grpc,thinkerou/grpc,donnadionne/grpc,mehrdada/grpc,vjpai/grpc,ejona86/grpc,Vizerai/grpc,muxi/grpc,ejona86/grpc,nicolasnoble/grpc,muxi/grpc,muxi/grpc,grpc/grpc,thinkerou/grpc,carl-mastrangelo/grpc,chrisdunelm/grpc,Vizerai/grpc,carl-mastrangelo/grpc,firebase/grpc,jtattermusch/grpc,thinkerou/grpc,dgquintas/grpc,muxi/grpc,firebase/grpc,grpc/grpc,donnadionne/grpc,ctiller/grpc,ejona86/grpc,ncteisen/grpc,vjpai/grpc,ncteisen/grpc,chrisdunelm/grpc,ejona86/grpc,jboeuf/grpc,firebase/grpc,donnadionne/grpc,jboeuf/grpc,Vizerai/grpc,chrisdunelm/grpc,donnadionne/grpc,stanley-cheung/grpc,pszemus/grpc,carl-mastrangelo/grpc,dgquintas/grpc,chrisdunelm/grpc,muxi/grpc,grpc/grpc,muxi/grpc,jtattermusch/grpc,sreecha/grpc,jtattermusch/grpc,simonkuang/grpc,mehrdada/grpc,ejona86/grpc,thinkerou/grpc,jboeuf/grpc,nicolasnoble/grpc,grpc/grpc,sreecha/grpc,stanley-cheung/grpc,dgquintas/grpc,ctiller/grpc,donnadionne/grpc,vjpai/grpc,stanley-cheung/grpc,stanley-cheung/grpc,sreecha/grpc,firebase/grpc,grpc/grpc,ncteisen/grpc,jboeuf/grpc,carl-mastrangelo/grpc,ejona86/grpc,jboeuf/grpc,carl-mastrangelo/grpc,chrisdunelm/grpc,nicolasnoble/grpc,thinkerou/grpc,ctiller/grpc,nicolasnoble/grpc,jtattermusch/grpc,firebase/grpc,ejona86/grpc,nicolasnoble/grpc,ejona86/grpc,mehrdada/grpc,Vizerai/grpc,grpc/grpc,nicolasnoble/grpc,carl-mastrangelo/grpc,pszemus/grpc,stanley-cheung/grpc,pszemus/grpc,ejona86/grpc,Vizerai/grpc,thinkerou/grpc,grpc/grpc,jtattermusch/grpc,chrisdunelm/grpc,jboeuf/grpc,jtattermusch/grpc,dgquintas/grpc,donnadionne/grpc,stanley-cheung/grpc,sreecha/grpc,donnadionne/grpc,chrisdunelm/grpc,muxi/grpc,vjpai/grpc,donnadionne/grpc,thinkerou/grpc,muxi/grpc,Vizerai/grpc,ctiller/grpc,chrisdunelm/grpc,sreecha/grpc,dgquintas/grpc,dgquintas/grpc,mehrdada/grpc,mehrdada/grpc,Vizerai/grpc,stanley-cheung/grpc,vjpai/grpc,thinkerou/grpc,sreecha/grpc,ncteisen/grpc,chrisdunelm/grpc
|
tools/profiling/microbenchmarks/bm_diff/bm_constants.py
|
tools/profiling/microbenchmarks/bm_diff/bm_constants.py
|
#!/usr/bin/env python2.7
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configurable constants for the bm_*.py family """
_AVAILABLE_BENCHMARK_TESTS = [
'bm_fullstack_unary_ping_pong', 'bm_fullstack_streaming_ping_pong',
'bm_fullstack_streaming_pump', 'bm_closure', 'bm_cq', 'bm_call_create',
'bm_error', 'bm_chttp2_hpack', 'bm_chttp2_transport', 'bm_pollset',
'bm_metadata', 'bm_fullstack_trickle'
]
_INTERESTING = ('cpu_time', 'real_time', 'locks_per_iteration',
'allocs_per_iteration', 'writes_per_iteration',
'atm_cas_per_iteration', 'atm_add_per_iteration',
'nows_per_iteration', 'cli_transport_stalls_per_iteration',
'cli_stream_stalls_per_iteration',
'svr_transport_stalls_per_iteration',
'svr_stream_stalls_per_iteration',
'http2_pings_sent_per_iteration')
|
#!/usr/bin/env python2.7
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configurable constants for the bm_*.py family """
_AVAILABLE_BENCHMARK_TESTS = [
'bm_fullstack_unary_ping_pong', 'bm_fullstack_streaming_ping_pong',
'bm_fullstack_streaming_pump', 'bm_closure', 'bm_cq', 'bm_call_create',
'bm_error', 'bm_chttp2_hpack', 'bm_chttp2_transport', 'bm_pollset',
'bm_metadata', 'bm_fullstack_trickle'
]
_INTERESTING = ('cpu_time', 'real_time', 'call_initial_size-median',
'locks_per_iteration', 'allocs_per_iteration',
'writes_per_iteration', 'atm_cas_per_iteration',
'atm_add_per_iteration', 'nows_per_iteration',
'cli_transport_stalls_per_iteration',
'cli_stream_stalls_per_iteration',
'svr_transport_stalls_per_iteration',
'svr_stream_stalls_per_iteration',
'http2_pings_sent_per_iteration')
|
apache-2.0
|
Python
|
dcc2821cac0619fc2ca5f486ad30416f3c3cfda9
|
Replace parsing with Python's ast
|
admk/soap
|
ce/expr/parser.py
|
ce/expr/parser.py
|
#!/usr/bin/env python
# vim: set fileencoding=UTF-8 :
import ast
from ..semantics import mpq
from .common import OPERATORS, ADD_OP, MULTIPLY_OP
def try_to_number(s):
try:
return mpq(s)
except (ValueError, TypeError):
return s
OPERATOR_MAP = {
ast.Add: ADD_OP,
ast.Mult: MULTIPLY_OP,
}
def parse(s):
from .biop import Expr
def _parse_r(t):
try:
return t.n
except AttributeError:
pass
try:
return t.id
except AttributeError:
op = OPERATOR_MAP[t.op.__class__]
a1 = _parse_r(t.left)
a2 = _parse_r(t.right)
return Expr(op, a1, a2)
return _parse_r(ast.parse(s, mode='eval').body)
|
#!/usr/bin/env python
# vim: set fileencoding=UTF-8 :
from ..semantics import mpq
from .common import OPERATORS, ADD_OP, MULTIPLY_OP
def try_to_number(s):
try:
return mpq(s)
except (ValueError, TypeError):
return s
def _parse_r(s):
s = s.strip()
bracket_level = 0
operator_pos = -1
for i, v in enumerate(s):
if v == '(':
bracket_level += 1
if v == ')':
bracket_level -= 1
if bracket_level == 1 and v in OPERATORS:
operator_pos = i
break
if operator_pos == -1:
return s
a1 = _parse_r(s[1:operator_pos].strip())
a2 = _parse_r(s[operator_pos + 1:-1].strip())
return Expr(s[operator_pos], a1, a2)
|
mit
|
Python
|
38fe4ef9df7e09709611bb3aca1aea4f2d42316a
|
add encode_url_path method to util
|
cydrobolt/pifx
|
pifx/util.py
|
pifx/util.py
|
# -*- coding: utf-8 -*-
#
# Copyright © 2015 Chaoyi Zha <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import urllib
from .constants import A_OK_HTTP_CODES, A_ERROR_HTTP_CODES
def generate_auth_header(api_key):
headers = {
"Authorization": "Bearer {}".format(api_key),
}
return headers
def arg_tup_to_dict(argument_tuples):
"""Given a set of argument tuples, set their value in a data dictionary if not blank"""
data = dict()
for arg_name, arg_val in argument_tuples:
if arg_val != None:
data[arg_name] = arg_val
return data
def parse_data(parsed_data):
"""Given parsed response, return correct return values"""
return parsed_data['results']
def parse_response(response):
"""Parse JSON API response, return object."""
parsed_response = json.loads(response.text)
return parsed_response
def handle_error(response):
"""Raise appropriate exceptions if necessary."""
status_code = response.status_code
if status_code not in A_OK_HTTP_CODES:
error_explanation = A_ERROR_HTTP_CODES.get(status_code)
raise_error = "{}: {}".format(status_code, error_explanation)
raise Exception(raise_error)
else:
return True
def encode_url_path(url):
"""Encodes the path url string replacing special characters with properly escaped sequences.
Not intended for use with query string parameters. """
return urllib.quote(url)
|
# -*- coding: utf-8 -*-
#
# Copyright © 2015 Chaoyi Zha <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from .constants import A_OK_HTTP_CODES, A_ERROR_HTTP_CODES
def generate_auth_header(api_key):
headers = {
"Authorization": "Bearer {}".format(api_key),
}
return headers
def arg_tup_to_dict(argument_tuples):
"""Given a set of argument tuples, set their value in a data dictionary if not blank"""
data = dict()
for arg_name, arg_val in argument_tuples:
if arg_val != None:
data[arg_name] = arg_val
return data
def parse_data(parsed_data):
"""Given parsed response, return correct return values"""
return parsed_data['results']
def parse_response(response):
"""Parse JSON API response, return object."""
parsed_response = json.loads(response.text)
return parsed_response
def handle_error(response):
"""Raise appropriate exceptions if necessary."""
status_code = response.status_code
if status_code not in A_OK_HTTP_CODES:
error_explanation = A_ERROR_HTTP_CODES.get(status_code)
raise_error = "{}: {}".format(status_code, error_explanation)
raise Exception(raise_error)
else:
return True
|
apache-2.0
|
Python
|
180805e919c73d0540a29ae57b653574e1ff704c
|
Clean up local.py for OSX notifications
|
kfdm/gntp-regrowl
|
local.py
|
local.py
|
from gntp import *
import Growl
def register_send(self):
'''
Resend a GNTP Register message to Growl running on a local OSX Machine
'''
print 'Sending Local Registration'
#Local growls only need a list of strings
notifications=[]
defaultNotifications = []
for notice in self.notifications:
notifications.append(notice['Notification-Name'])
if notice.get('Notification-Enabled',True):
defaultNotifications.append(notice['Notification-Name'])
growl = Growl.GrowlNotifier(
applicationName = self.headers['Application-Name'],
notifications = notifications,
defaultNotifications = defaultNotifications,
)
growl.register()
return self.encode()
def notice_send(self):
'''
Resend a GNTP Notify message to Growl running on a local OSX Machine
'''
print 'Sending Local Notification'
growl = Growl.GrowlNotifier(
applicationName = self.headers['Application-Name'],
notifications = [self.headers['Notification-Name']]
)
noticeIcon = None
if self.headers.get('Notification-Icon',False):
resource = self.headers['Notification-Icon'].split('://')
#print resource
resource = self.resources.get(resource[1],False)
#print resource
if resource:
noticeIcon = resource['Data']
growl.notify(
noteType = self.headers['Notification-Name'],
title = self.headers['Notification-Title'],
description=self.headers.get('Notification-Text',''),
)
return self.encode()
GNTPRegister.send = register_send
GNTPNotice.send = notice_send
|
import gntp
import Growl
GNTPParseError = gntp.GNTPParseError
GNTPOK = gntp.GNTPOK
GNTPError = gntp.GNTPError
parse_gntp = gntp.parse_gntp
class GNTPRegister(gntp.GNTPRegister):
def send(self):
print 'Sending Local Registration'
#Local growls only need a list of strings
notifications=[]
defaultNotifications = []
for notice in self.notifications:
notifications.append(notice['Notification-Name'])
if notice.get('Notification-Enabled',True):
defaultNotifications.append(notice['Notification-Name'])
growl = Growl.GrowlNotifier(
applicationName = self.headers['Application-Name'],
notifications = notifications,
defaultNotifications = defaultNotifications,
)
growl.register()
class GNTPNotice(gntp.GNTPNotice):
def send(self):
print 'Sending Local Notification'
growl = Growl.GrowlNotifier(
applicationName = self.headers['Application-Name'],
notifications = [self.headers['Notification-Name']]
)
noticeIcon = None
if self.headers.get('Notification-Icon',False):
resource = self.headers['Notification-Icon'].split('://')
#print resource
resource = self.resources.get(resource[1],False)
#print resource
if resource:
noticeIcon = resource['Data']
growl.notify(
noteType = self.headers['Notification-Name'],
title = self.headers['Notification-Title'],
description=self.headers['Notification-Text'],
)
|
mit
|
Python
|
9a73dcfbc77236d11387909c3f97d3712b56fb2a
|
clean out commented code from old version
|
oaao/pomegranate-esi
|
juice.py
|
juice.py
|
# read from db data and provide analysis
|
# replace all functionality of the disgusting mess below
'''
# read and process raw data, store processed data
# using config(?) and io_db, calculate the following and store its end-result data to db:
# - raw profitability ("hypothetical")
# - competitiveness of orders per typeID per hub
# - END-RESULT [DB]: competition-factored profitability ("actual")
# - END-RESULT [DB]: per-orderID update behaviours
import config
import io_http
import arrow
# remove specific dependency:
import requests
def group_dictvalue(raw_list, dictvalue):
new_dict = {}
for i in raw_list:
new_key = i.pop(dictvalue)
if new_key in new_dict:
new_dict[new_key].append(i)
else:
new_dict[new_key] = [i]
return new_dict
def market_import(hub_spec):
data_configs = config.market_configs(hub_spec)
hub_regionid = str(data_configs[0])
url_base = io_http.url_format(hub_regionid, 'orders')
url_market = url_base + '?page='
data_pages = requests.get(url_base).json()['pageCount']
url_set = [url_market + str(x) for x in range(1, data_pages+1)]
data_res = io_http.url_async(url_set, data_pages)
data_items = [x for i in data_res for x in i['items']]
return data_items, data_configs
def market_distill(raw_list, configs):
data_total = raw_list
hub_stationid = configs[1]
data_hubonly = [x for x in data_total if hub_stationid == x['stationID']]
data_timestamp = data_hubonly
for i in range(0, len(data_hubonly)):
order_time = arrow.get(data_hubonly[i]['issued'])
data_timestamp[i]['issued'] = order_time.timestamp
sort_choice = 'type'
data_grouped_type = group_dictvalue(data_timestamp, sort_choice)
data_grouped_buysell = {}
sort_choice = 'buy'
for k,v in data_grouped_type.items():
buysell_grouped = group_dictvalue(v, sort_choice)
data_grouped_buysell[k] = [buysell_grouped]
sort_choice = 'stationID'
data_grouped_station = {}
for k,v in data_grouped_buysell.items():
for order_pair in v:
for order_type, order in order_pair.items():
for attribute in order:
id_subgroup = attribute.pop(sort_choice)
data_grouped_station[k] = {id_subgroup: v}
return data_grouped_station
def market_context(raw_list, configs):
data_distilled = raw_list
hub_regionid = str(configs[0])
type_ids = data_distilled.keys()
url_context = io_http.url_format(hub_regionid, 'context')
url_set = [url_context + str(x) + '/history/' for x in type_ids]
data_res = io_http.url_async(url_set, len(type_ids))
# need to restructure this so the typeID is preserved
# data_context = [x for i in data_res for x in i['items']]
# make a new list that contains:
# - pricing/volume info
# add to dict such that {'typeid': [[orders],[context]]
data_contextualised = {}
return data_contextualised'''
|
mit
|
Python
|
e112a2651357f586bd1b5a6a4378ac46e3407d58
|
add v1.1.2 (#22081)
|
LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack
|
var/spack/repos/builtin/packages/py-awkward1/package.py
|
var/spack/repos/builtin/packages/py-awkward1/package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyAwkward1(PythonPackage):
"""ROOT I/O in pure Python and NumPy."""
git = "https://github.com/scikit-hep/awkward-1.0.git"
url = "https://github.com/scikit-hep/awkward-1.0/archive/0.3.1.tar.gz"
homepage = "https://awkward-array.org"
maintainers = ['vvolkl']
version('1.1.2', sha256='626e3a6a2a92dd67abc8692b1ebfa1b447b9594352d6ce8c86c37d7299dc4602')
version('0.3.1', sha256='7126d9feab8828b0b4f4c6dbc9e28c269a91e28eef4a6033d7ebb5db21f1dab3')
patch('pybind11.patch')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:')
depends_on('py-pybind11')
depends_on('rapidjson')
depends_on('cmake', type='build')
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyAwkward1(PythonPackage):
"""ROOT I/O in pure Python and NumPy."""
git = "https://github.com/scikit-hep/awkward-1.0.git"
url = "https://github.com/scikit-hep/awkward-1.0/archive/0.3.1.tar.gz"
homepage = "https://awkward-array.org"
maintainers = ['vvolkl']
version('0.3.1', sha256='7126d9feab8828b0b4f4c6dbc9e28c269a91e28eef4a6033d7ebb5db21f1dab3')
patch('pybind11.patch')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:')
depends_on('py-pybind11')
depends_on('rapidjson')
depends_on('cmake', type='build')
|
lgpl-2.1
|
Python
|
50411f83942ff033b4b55ef72d595e6d3ab9949f
|
add version 2.9.1 (#25646)
|
LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack
|
var/spack/repos/builtin/packages/py-psycopg2/package.py
|
var/spack/repos/builtin/packages/py-psycopg2/package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPsycopg2(PythonPackage):
"""Python interface to PostgreSQL databases"""
homepage = "https://psycopg.org/"
pypi = "psycopg2/psycopg2-2.8.6.tar.gz"
version('2.9.1', sha256='de5303a6f1d0a7a34b9d40e4d3bef684ccc44a49bbe3eb85e3c0bffb4a131b7c')
version('2.8.6', sha256='fb23f6c71107c37fd667cb4ea363ddeb936b348bbd6449278eb92c189699f543')
version('2.7.5', sha256='eccf962d41ca46e6326b97c8fe0a6687b58dfc1a5f6540ed071ff1474cea749e')
# https://www.psycopg.org/docs/install.html#prerequisites
depends_on('[email protected]:3.9', type=('build', 'link', 'run'), when='@2.9:')
depends_on('[email protected]:2.8,3.4:3.8', type=('build', 'link', 'run'), when='@2.8')
depends_on('[email protected]:2.8,3.2:3.7', type=('build', 'link', 'run'), when='@:2.7')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:13', type=('build', 'link', 'run'), when='@2.9:')
depends_on('[email protected]:12', type=('build', 'link', 'run'), when='@:2.8')
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPsycopg2(PythonPackage):
"""Python interface to PostgreSQL databases"""
homepage = "https://psycopg.org/"
pypi = "psycopg2/psycopg2-2.8.6.tar.gz"
version('2.8.6', sha256='fb23f6c71107c37fd667cb4ea363ddeb936b348bbd6449278eb92c189699f543')
version('2.7.5', sha256='eccf962d41ca46e6326b97c8fe0a6687b58dfc1a5f6540ed071ff1474cea749e')
# https://www.psycopg.org/docs/install.html#prerequisites
depends_on('[email protected]:2.8,3.4:3.8', type=('build', 'link', 'run'), when='@2.8:')
depends_on('[email protected]:2.8,3.2:3.7', type=('build', 'link', 'run'), when='@2.7')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:12', type=('build', 'link', 'run'))
|
lgpl-2.1
|
Python
|
5d5e6a505bf7282fdef4ca3b3555ecf6f3efa137
|
Update __copyright__
|
thombashi/typepy
|
typepy/__version__.py
|
typepy/__version__.py
|
# encoding: utf-8
__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2017, {}".format(__author__)
__license__ = "MIT License"
__version__ = "0.6.5"
__maintainer__ = __author__
__email__ = "[email protected]"
|
# encoding: utf-8
from datetime import datetime
__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2017-{}, {}".format(datetime.now().year, __author__)
__license__ = "MIT License"
__version__ = "0.6.5"
__maintainer__ = __author__
__email__ = "[email protected]"
|
mit
|
Python
|
39fbd9fabaa9945fe33475afe23c109711679192
|
Make output more compact
|
googlefonts/fonttools,googlei18n/cu2qu,fonttools/fonttools,googlefonts/cu2qu
|
Lib/cu2qu/benchmark.py
|
Lib/cu2qu/benchmark.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import random
import timeit
MAX_ERR = 5
SETUP_CODE = '''
from %(module)s import %(function)s
from %(benchmark_module)s import %(setup_function)s
args = %(setup_function)s()
'''
def generate_curve():
return [
tuple(float(random.randint(0, 2048)) for coord in range(2))
for point in range(4)]
def setup_curve_to_quadratic():
return generate_curve(), MAX_ERR
def setup_curves_to_quadratic():
num_curves = 3
return (
[generate_curve() for curve in range(num_curves)],
[MAX_ERR] * num_curves)
def run_benchmark(
benchmark_module, module, function, setup_suffix='', repeat=1000):
setup_func = 'setup_' + function
if setup_suffix:
print('%s with %s:' % (function, setup_suffix))
setup_func += '_' + setup_suffix
else:
print('%s:' % function, end='')
results = timeit.repeat(
'%s(*args)' % function,
setup=(SETUP_CODE % {
'benchmark_module': benchmark_module, 'setup_function': setup_func,
'module': module, 'function': function}),
repeat=repeat, number=1)
print('\tavg=%dus' % (sum(results) / len(results) * 1000000.),
'\tmin=%dus' % (min(results) * 1000000.))
#print()
def main():
run_benchmark('cu2qu.benchmark', 'cu2qu', 'curve_to_quadratic')
run_benchmark('cu2qu.benchmark', 'cu2qu', 'curves_to_quadratic')
if __name__ == '__main__':
random.seed(1)
main()
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import random
import timeit
MAX_ERR = 5
SETUP_CODE = '''
from %(module)s import %(function)s
from %(benchmark_module)s import %(setup_function)s
args = %(setup_function)s()
'''
def generate_curve():
return [
tuple(float(random.randint(0, 2048)) for coord in range(2))
for point in range(4)]
def setup_curve_to_quadratic():
return generate_curve(), MAX_ERR
def setup_curves_to_quadratic():
num_curves = 3
return (
[generate_curve() for curve in range(num_curves)],
[MAX_ERR] * num_curves)
def run_benchmark(
benchmark_module, module, function, setup_suffix='', repeat=1000):
setup_func = 'setup_' + function
if setup_suffix:
print('%s with %s:' % (function, setup_suffix))
setup_func += '_' + setup_suffix
else:
print('%s:' % function)
results = timeit.repeat(
'%s(*args)' % function,
setup=(SETUP_CODE % {
'benchmark_module': benchmark_module, 'setup_function': setup_func,
'module': module, 'function': function}),
repeat=repeat, number=1)
print('min: %dus' % (min(results) * 1000000.))
print('avg: %dus' % (sum(results) / len(results) * 1000000.))
print()
def main():
run_benchmark('cu2qu.benchmark', 'cu2qu', 'curve_to_quadratic')
run_benchmark('cu2qu.benchmark', 'cu2qu', 'curves_to_quadratic')
if __name__ == '__main__':
random.seed(1)
main()
|
mit
|
Python
|
d3e673069977c392eb292ce8b313f6dba4da4d9f
|
Fix these to use non-deprecated APIs, i.e. get_content_maintype() and get_content_subtype().
|
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
|
Lib/email/_compat21.py
|
Lib/email/_compat21.py
|
# Copyright (C) 2002 Python Software Foundation
# Author: [email protected]
"""Module containing compatibility functions for Python 2.1.
"""
from cStringIO import StringIO
from types import StringType, UnicodeType
False = 0
True = 1
# This function will become a method of the Message class
def walk(self):
"""Walk over the message tree, yielding each subpart.
The walk is performed in depth-first order. This method is a
generator.
"""
parts = []
parts.append(self)
if self.is_multipart():
for subpart in self.get_payload():
parts.extend(subpart.walk())
return parts
# Python 2.2 spells floor division //
def _floordiv(i, j):
"""Do a floor division, i/j."""
return i / j
def _isstring(obj):
return isinstance(obj, StringType) or isinstance(obj, UnicodeType)
# These two functions are imported into the Iterators.py interface module.
# The Python 2.2 version uses generators for efficiency.
def body_line_iterator(msg, decode=False):
"""Iterate over the parts, returning string payloads line-by-line.
Optional decode (default False) is passed through to .get_payload().
"""
lines = []
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if _isstring(payload):
for line in StringIO(payload).readlines():
lines.append(line)
return lines
def typed_subpart_iterator(msg, maintype='text', subtype=None):
"""Iterate over the subparts with a given MIME type.
Use `maintype' as the main MIME type to match against; this defaults to
"text". Optional `subtype' is the MIME subtype to match against; if
omitted, only the main type is matched.
"""
parts = []
for subpart in msg.walk():
if subpart.get_content_maintype() == maintype:
if subtype is None or subpart.get_content_subtype() == subtype:
parts.append(subpart)
return parts
|
# Copyright (C) 2002 Python Software Foundation
# Author: [email protected]
"""Module containing compatibility functions for Python 2.1.
"""
from cStringIO import StringIO
from types import StringType, UnicodeType
False = 0
True = 1
# This function will become a method of the Message class
def walk(self):
"""Walk over the message tree, yielding each subpart.
The walk is performed in depth-first order. This method is a
generator.
"""
parts = []
parts.append(self)
if self.is_multipart():
for subpart in self.get_payload():
parts.extend(subpart.walk())
return parts
# Python 2.2 spells floor division //
def _floordiv(i, j):
"""Do a floor division, i/j."""
return i / j
def _isstring(obj):
return isinstance(obj, StringType) or isinstance(obj, UnicodeType)
# These two functions are imported into the Iterators.py interface module.
# The Python 2.2 version uses generators for efficiency.
def body_line_iterator(msg, decode=False):
"""Iterate over the parts, returning string payloads line-by-line.
Optional decode (default False) is passed through to .get_payload().
"""
lines = []
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if _isstring(payload):
for line in StringIO(payload).readlines():
lines.append(line)
return lines
def typed_subpart_iterator(msg, maintype='text', subtype=None):
"""Iterate over the subparts with a given MIME type.
Use `maintype' as the main MIME type to match against; this defaults to
"text". Optional `subtype' is the MIME subtype to match against; if
omitted, only the main type is matched.
"""
parts = []
for subpart in msg.walk():
if subpart.get_main_type('text') == maintype:
if subtype is None or subpart.get_subtype('plain') == subtype:
parts.append(subpart)
return parts
|
mit
|
Python
|
364d0a78725539a58862f672f718e4bf966da2f5
|
add stats-per-day route
|
pedesen/plogx,pedesen/plogx
|
plogx/app.py
|
plogx/app.py
|
from flask import Flask
from flask import render_template
from flask.ext.pymongo import PyMongo
import database
from bson.json_util import dumps
from datetime import datetime
app = Flask("log_db")
mongo = PyMongo(app)
app.debug = True
@app.route("/")
def overview():
return render_template("index.html")
@app.route("/all_items")
def all_items():
return dumps(database.all_log_items(mongo.db))
@app.route("/stats_per_day/<int:date>")
def stats_per_day(date):
d = str(date)
day = datetime(int(d[:4]), int(d[4:6]), int(d[6:]))
log_items = database.get_stats_per_day(mongo.db, day)
return dumps(log_items)
if __name__ == "__main__":
app.run()
|
from flask import Flask
from flask import render_template
from flask.ext.pymongo import PyMongo
import database
from bson.json_util import dumps
from datetime import datetime
app = Flask("log_db")
mongo = PyMongo(app)
app.debug = True
@app.route('/')
def overview():
return render_template('index.html')
@app.route('/all_items')
def all_items():
return dumps(database.all_log_items(mongo.db))
if __name__ == "__main__":
app.run()
|
mit
|
Python
|
602876c2b132664cc1802d467eaf8109a745d613
|
Add option for margin and strand selection
|
konrad/kufpybio
|
kufpybiotools/generate_igr_gff.py
|
kufpybiotools/generate_igr_gff.py
|
#!/usr/bin/env python
__description__ = ""
__author__ = "Konrad Foerstner <[email protected]>"
__copyright__ = "2013 by Konrad Foerstner <[email protected]>"
__license__ = "ISC license"
__email__ = "[email protected]"
__version__ = ""
import argparse
import csv
import sys
sys.path.append(".")
from kufpybio.gff3 import Gff3Parser, Gff3Entry
from kufpybio.gene import Gene
from kufpybio.igrfinder import IGRFinder
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument("gff_file", type=argparse.FileType("r"))
parser.add_argument("output_file", type=argparse.FileType("w"))
parser.add_argument("--margin", type=int, default=0)
parser.add_argument("--plus_only", default=False, action="store_true")
args = parser.parse_args()
# Build gene list
gene_list = []
gff_parser = Gff3Parser()
region_entry = None
for entry in gff_parser.entries(args.gff_file):
if entry.feature == "region":
region_entry = entry
continue
gene_list.append(Gene(
entry.seq_id, "", "", entry.start, entry.end,
entry.strand))
# Find IGRs and generate GFF file
igr_finder = IGRFinder()
args.output_file.write("##gff-version 3\n")
strands = ["+", "-"]
if args.plus_only is True:
strands = ["+"]
for start, end in igr_finder.find_igrs(gene_list, region_entry.end):
start = start + args.margin
end = end - args.margin
if end <= start:
continue
for strand in strands:
gff3_entry = Gff3Entry({
"seq_id" : region_entry.seq_id,
"source" : "IGR",
"feature" : "IGR",
"start" : start,
"end" : end,
"score" : ".",
"strand" : strand,
"phase" : ".",
"attributes" : "ID=IGR_%s_%s_to_%s" % (
region_entry.seq_id, start, end)})
args.output_file.write(str(gff3_entry) + "\n")
|
#!/usr/bin/env python
__description__ = ""
__author__ = "Konrad Foerstner <[email protected]>"
__copyright__ = "2013 by Konrad Foerstner <[email protected]>"
__license__ = "ISC license"
__email__ = "[email protected]"
__version__ = ""
import argparse
import csv
import sys
sys.path.append(".")
from kufpybio.gff3 import Gff3Parser, Gff3Entry
from kufpybio.gene import Gene
from kufpybio.igrfinder import IGRFinder
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument("gff_file", type=argparse.FileType("r"))
parser.add_argument("output_file", type=argparse.FileType("w"))
args = parser.parse_args()
# Build gene list
gene_list = []
gff_parser = Gff3Parser()
region_entry = None
for entry in gff_parser.entries(args.gff_file):
if entry.feature == "region":
region_entry = entry
continue
gene_list.append(Gene(
entry.seq_id, "", "", entry.start, entry.end,
entry.strand))
# Find IGRs and generate GFF file
igr_finder = IGRFinder()
args.output_file.write("##gff-version 3\n")
for start, end in igr_finder.find_igrs(gene_list, region_entry.end):
for strand in ["+", "-"]:
gff3_entry = Gff3Entry({
"seq_id" : region_entry.seq_id,
"source" : "IGR",
"feature" : "IGR",
"start" : start,
"end" : end,
"score" : ".",
"strand" : strand,
"phase" : ".",
"attributes" : "ID=IGR_%s_%s_to_%s" % (
region_entry.seq_id, start, end)})
args.output_file.write(str(gff3_entry) + "\n")
|
isc
|
Python
|
4bad79872547f90159e75b34b46e99e54f78b736
|
Fix error in big comment header.
|
cgaspoz/l10n-switzerland,open-net-sarl/l10n-switzerland,cyp-opennet/ons_cyp_github,michl/l10n-switzerland,ndtran/l10n-switzerland,BT-csanchez/l10n-switzerland,BT-aestebanez/l10n-switzerland,open-net-sarl/l10n-switzerland,CompassionCH/l10n-switzerland,cyp-opennet/ons_cyp_github,CompassionCH/l10n-switzerland,BT-ojossen/l10n-switzerland,BT-ojossen/l10n-switzerland,BT-fgarbely/l10n-switzerland,BT-fgarbely/l10n-switzerland
|
l10n_ch_hr_payroll/__openerp__.py
|
l10n_ch_hr_payroll/__openerp__.py
|
# -*- coding: utf-8 -*-
#
# File: __openerp__.py
# Module: l10n_ch_hr_payroll
#
# Created by [email protected]
#
# Copyright (c) 2014-TODAY Open-Net Ltd. <http://www.open-net.ch>
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Switzerland - Payroll',
'summary': 'Swizerland Payroll Rules',
'category': 'Localization',
'author': 'Open-Net Sàrl',
'depends': ['decimal_precision', 'hr_payroll', 'hr_payroll_account'],
'version': '1.0.1',
'description': """
Swizerland Payroll Rules.
=========================
**Features list :**
* Add Swiss salary rule categories
* Add Swiss salary rules
* Add children in school to employee
* Add LPP range to contract
**For functionnal information:**
http://ur1.ca/ir5ou
**Author :**
Open Net Sàrl -Industrie 59 1030 Bussigny Suisse - http://www.open-net.ch
**Contact :**
[email protected]
**History :**
V1.0.0: 2014-11-07/Sge
* Add Salary rule categories
* Add Salary rules
* Add Employee children in school
* Add Contract LPP rate
V1.0.1: 2014-11-11/Sge
* Set the 'LPP rate'' digits to 'Payroll Rate' decimal accuracy
V1.0.2:
* Add some minor changes, based on pull request #66 comments.
""",
'auto_install': False,
'demo': [],
'website': 'http://open-net.ch',
'data': [
'hr_contract_view.xml',
'hr_employee_view.xml',
'l10n_ch_hr_payroll_data.xml',
'data/hr.salary.rule-change.csv',
'data/hr.salary.rule-new.csv',
],
'installable': True
}
|
# -*- coding: utf-8 -*-
#
# File: __init__.py
# Module: l10n_ch_hr_payroll
#
# Created by [email protected]
#
# Copyright (c) 2014-TODAY Open-Net Ltd. <http://www.open-net.ch>
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Switzerland - Payroll',
'summary': 'Swizerland Payroll Rules',
'category': 'Localization',
'author': 'Open-Net Sàrl',
'depends': ['decimal_precision', 'hr_payroll', 'hr_payroll_account'],
'version': '1.0.1',
'description': """
Swizerland Payroll Rules.
=========================
**Features list :**
* Add Swiss salary rule categories
* Add Swiss salary rules
* Add children in school to employee
* Add LPP range to contract
**For functionnal information:**
http://ur1.ca/ir5ou
**Author :**
Open Net Sàrl -Industrie 59 1030 Bussigny Suisse - http://www.open-net.ch
**Contact :**
[email protected]
**History :**
V1.0.0: 2014-11-07/Sge
* Add Salary rule categories
* Add Salary rules
* Add Employee children in school
* Add Contract LPP rate
V1.0.1: 2014-11-11/Sge
* Set the 'LPP rate'' digits to 'Payroll Rate' decimal accuracy
V1.0.2:
* Add some minor changes, based on pull request #66 comments.
""",
'auto_install': False,
'demo': [],
'website': 'http://open-net.ch',
'data': [
'hr_contract_view.xml',
'hr_employee_view.xml',
'l10n_ch_hr_payroll_data.xml',
'data/hr.salary.rule-change.csv',
'data/hr.salary.rule-new.csv',
],
'installable': True
}
|
agpl-3.0
|
Python
|
5edf75129189fc37ce24ff338821b726b0a7c28a
|
Revert 2c1ee5f..1620020
|
santoshkumarsingh/Data-Wrangling-with-MongoDB,santoshkumarsingh/Data-Wrangling-with-MongoDB,sk-rai/Data-Wrangling-with-MongoDB,sk-rai/Data-Wrangling-with-MongoDB
|
Lesson_3_Problem_Set/05-Fixing_Name/name.py
|
Lesson_3_Problem_Set/05-Fixing_Name/name.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In this problem set you work with cities infobox data, audit it, come up with a cleaning idea and then clean it up.
In the previous quiz you recognized that the "name" value can be an array (or list in Python terms).
It would make it easier to process and query the data later, if all values for the name
would be in a Python list, instead of being just a string separated with special characters, like now.
Finish the function fix_name(). It will recieve a string as an input, and it has to return a list
of all the names. If there is only one name, the list with have only one item in it, if the name is "NONE",
the list should be empty.
The rest of the code is just an example on how this function can be used
"""
import codecs
import csv
import pprint
CITIES = 'cities.csv'
def fix_name(name):
# YOUR CODE HERE
return name
def process_file(filename):
data = []
with open(filename, "r") as f:
reader = csv.DictReader(f)
#skipping the extra matadata
for i in range(3):
l = reader.next()
# processing file
for line in reader:
# calling your function to fix the area value
if "name" in line:
line["name"] = fix_name(line["name"])
data.append(line)
return data
def test():
data = process_file(CITIES)
print "Printing 20 results:"
for n in range(20):
pprint.pprint(data[n]["name"])
assert data[14]["name"] == ['Negtemiut', 'Nightmute']
assert data[3]["name"] == ['Kumhari']
if __name__ == "__main__":
test()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In this problem set you work with cities infobox data, audit it, come up with a cleaning idea and then clean it up.
In the previous quiz you recognized that the "name" value can be an array (or list in Python terms).
It would make it easier to process and query the data later, if all values for the name
would be in a Python list, instead of being just a string separated with special characters, like now.
Finish the function fix_name(). It will recieve a string as an input, and it has to return a list
of all the names. If there is only one name, the list with have only one item in it, if the name is "NONE",
the list should be empty.
The rest of the code is just an example on how this function can be used
"""
import codecs
import csv
import pprint
CITIES = 'cities.csv'
def fix_name(name):
if name == "NULL" or name == "":
return []
else:
return name.replace('{', '').replace('}','').split('|')
return name
def process_file(filename):
data = []
with open(filename, "r") as f:
reader = csv.DictReader(f)
#skipping the extra matadata
for i in range(3):
l = reader.next()
# processing file
for line in reader:
# calling your function to fix the area value
if "name" in line:
line["name"] = fix_name(line["name"])
data.append(line)
return data
def test():
data = process_file(CITIES)
print "Printing 20 results:"
for n in range(20):
pprint.pprint(data[n]["name"])
assert data[14]["name"] == ['Negtemiut', 'Nightmute']
assert data[3]["name"] == ['Kumhari']
if __name__ == "__main__":
test()
|
agpl-3.0
|
Python
|
c91beca414a5216a3fd5f8f5ef1c1643f0aea2f9
|
Tag the rc release
|
blink1073/oct2py,blink1073/oct2py
|
oct2py/__init__.py
|
oct2py/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright (c) oct2py developers.
# Distributed under the terms of the MIT License.
"""
Oct2Py is a means to seamlessly call M-files and GNU Octave functions from
Python.
It manages the Octave session for you, sharing data behind the scenes using
MAT files. Usage is as simple as:
.. code-block:: python
>>> import oct2py
>>> oc = oct2py.Oct2Py()
>>> x = oc.zeros(3,3)
>>> print(x, x.dtype.str) # doctest: +SKIP
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]] <f8
If you want to run legacy m-files, do not have MATLAB(TM), and do not fully
trust a code translator, this is your library.
"""
from __future__ import absolute_import, print_function, division
from .core import Oct2Py
from .io import Struct, Cell, StructArray
from .utils import get_log, Oct2PyError
from .demo import demo
from .speed_check import speed_check
from .thread_check import thread_check
__version__ = '4.0rc1'
__all__ = ['Oct2Py', 'Oct2PyError', 'octave', 'Struct', 'Cell', 'StructArray',
'demo', 'speed_check', 'thread_check', '__version__', 'get_log']
try:
octave = Oct2Py()
except Oct2PyError as e:
print(e)
def kill_octave():
"""Kill all octave instances (cross-platform).
This will restart the "octave" instance. If you have instantiated
Any other Oct2Py objects, you must restart them.
"""
import os
if os.name == 'nt':
os.system('taskkill /im octave /f')
else:
os.system('killall -9 octave')
os.system('killall -9 octave-cli')
octave.restart()
|
# -*- coding: utf-8 -*-
# Copyright (c) oct2py developers.
# Distributed under the terms of the MIT License.
"""
Oct2Py is a means to seamlessly call M-files and GNU Octave functions from
Python.
It manages the Octave session for you, sharing data behind the scenes using
MAT files. Usage is as simple as:
.. code-block:: python
>>> import oct2py
>>> oc = oct2py.Oct2Py()
>>> x = oc.zeros(3,3)
>>> print(x, x.dtype.str) # doctest: +SKIP
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]] <f8
If you want to run legacy m-files, do not have MATLAB(TM), and do not fully
trust a code translator, this is your library.
"""
from __future__ import absolute_import, print_function, division
from .core import Oct2Py
from .io import Struct, Cell, StructArray
from .utils import get_log, Oct2PyError
from .demo import demo
from .speed_check import speed_check
from .thread_check import thread_check
__version__ = '4.0-dev'
__all__ = ['Oct2Py', 'Oct2PyError', 'octave', 'Struct', 'Cell', 'StructArray',
'demo', 'speed_check', 'thread_check', '__version__', 'get_log']
try:
octave = Oct2Py()
except Oct2PyError as e:
print(e)
def kill_octave():
"""Kill all octave instances (cross-platform).
This will restart the "octave" instance. If you have instantiated
Any other Oct2Py objects, you must restart them.
"""
import os
if os.name == 'nt':
os.system('taskkill /im octave /f')
else:
os.system('killall -9 octave')
os.system('killall -9 octave-cli')
octave.restart()
|
mit
|
Python
|
35801fdc81c4f5f93862b900bba656cfbbf1652c
|
Prepare version 0.1.18
|
dreipol/djangocms-spa,dreipol/djangocms-spa
|
djangocms_spa/__init__.py
|
djangocms_spa/__init__.py
|
__version__ = '0.1.18'
|
__version__ = '0.1.17'
|
mit
|
Python
|
104e05f326b7138e524296c049a1860c8c8a8cea
|
document col_to_number
|
raymondnoonan/Mpropulator
|
MPropulator/helpers.py
|
MPropulator/helpers.py
|
import string
def column_range(start, stop, skip_columns=None):
"""0-indexed function that returns a list of Excel column names, except
for skip_columns
:param start: column index at which you begin iterating
:param stop: column index at which you want to stop iterating
:param skip_columns: column NAMES you'd like to skip
:return: list of Excel column names
"""
if skip_columns is None:
skip_columns = []
if start < 0:
raise ValueError("Start must be >= 0")
if stop < 0:
raise ValueError("Stop must be >= 0")
return [column_name(i + 1) for i in range(start, stop)
if column_name(i + 1) not in skip_columns]
def column_name(col):
""" 1-indexed function that, given a column number, returns
the Excel column name.
:rtype : string
:param col: the column you want to return
:return: name of the col-th Excel column
"""
assert isinstance(col, int), 'Column must be int'
assert col >= 1, 'Column must be >= 1'
excel_col = str()
div = col
while div:
(div, mod) = divmod(div - 1, 26)
excel_col = chr(mod + ord('A')) + excel_col
return excel_col
def col_to_number(col):
"""0-indexed function that, given a column name, returns
the number of that column in excel.
col: string consisting of Excel column name
return: int that corresponds to excel column name
col_to_number("A") => 0
col_to_number("AB") => 28
"""
num = 0
for c in col:
if c in string.ascii_letters:
num = num * 26 + (ord(c.upper()) - ord('A')) + 1
else:
raise ValueError("Input had characters other than ASCII letters")
return num
def cell_name(row, col):
""" 0-indexed function that, given a row and column number,
returns the Excel cell name.
:param row: row index
:param col: column index
:return: string
"""
assert isinstance(row, int), 'Row must be int'
assert row >= 0, 'Row index must be >= 0'
assert col >= 0, 'Column index must be >= 0'
return column_name(col + 1) + str(row + 1)
|
import string
def column_range(start, stop, skip_columns=None):
"""0-indexed function that returns a list of Excel column names, except
for skip_columns
:param start: column index at which you begin iterating
:param stop: column index at which you want to stop iterating
:param skip_columns: column NAMES you'd like to skip
:return: list of Excel column names
"""
if skip_columns is None:
skip_columns = []
if start < 0:
raise ValueError("Start must be >= 0")
if stop < 0:
raise ValueError("Stop must be >= 0")
return [column_name(i + 1) for i in range(start, stop) \
if column_name(i + 1) not in skip_columns]
def column_name(col):
""" 1-indexed function that, given a column number, returns
the Excel column name.
:rtype : string
:param col: the column you want to return
:return: name of the col-th Excel column
"""
assert isinstance(col, int), 'Column must be int'
assert col >= 1, 'Column must be >= 1'
excel_col = str()
div = col
while div:
(div, mod) = divmod(div - 1, 26)
excel_col = chr(mod + ord('A')) + excel_col
return excel_col
def col_to_number(col):
num = 0
for c in col:
if c in string.ascii_letters:
num = num * 26 + (ord(c.upper()) - ord('A')) + 1
else:
raise ValueError("Input had characters other than ASCII letters")
return num
def cell_name(row, col):
""" 0-indexed function that, given a row and column number,
returns the Excel cell name.
:param row: row index
:param col: column index
:return: string
"""
assert isinstance(row, int), 'Row must be int'
assert row >= 0, 'Row index must be >= 0'
assert col >= 0, 'Column index must be >= 0'
return column_name(col + 1) + str(row + 1)
|
mit
|
Python
|
c00a12fb593e329cc1fc2d3710cd9e89d0abfa16
|
set production api url
|
cloud4rpi/cloud4rpi
|
c4r/config.py
|
c4r/config.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Server parameters
baseApiUrl = 'https://cloud4rpi.io:3000/api'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Server parameters
baseApiUrl = 'http://stage.cloud4rpi.io:3000/api'
|
mit
|
Python
|
920e75491f3aaa74980e11086cfebe911c2def4b
|
Remove yield from datasets tests
|
josef-pkt/statsmodels,ChadFulton/statsmodels,jseabold/statsmodels,josef-pkt/statsmodels,statsmodels/statsmodels,bashtage/statsmodels,jseabold/statsmodels,bashtage/statsmodels,ChadFulton/statsmodels,statsmodels/statsmodels,jseabold/statsmodels,bashtage/statsmodels,statsmodels/statsmodels,statsmodels/statsmodels,statsmodels/statsmodels,ChadFulton/statsmodels,josef-pkt/statsmodels,josef-pkt/statsmodels,statsmodels/statsmodels,ChadFulton/statsmodels,bashtage/statsmodels,jseabold/statsmodels,bashtage/statsmodels,ChadFulton/statsmodels,ChadFulton/statsmodels,josef-pkt/statsmodels,josef-pkt/statsmodels,bashtage/statsmodels,jseabold/statsmodels
|
statsmodels/datasets/tests/test_data.py
|
statsmodels/datasets/tests/test_data.py
|
import importlib
import numpy as np
import pandas as pd
import nose
import pytest
import statsmodels.datasets
from statsmodels.datasets.utils import Dataset
exclude = ['check_internet', 'clear_data_home', 'get_data_home',
'get_rdataset', 'tests', 'utils', 'webuse']
datasets = []
for dataset_name in dir(statsmodels.datasets):
if not dataset_name.startswith('_') and dataset_name not in exclude:
datasets.append(dataset_name)
# TODO: Remove nottest when nose support is dropped
@nose.tools.nottest
@pytest.mark.parametrize('dataset_name', datasets)
def test_dataset(dataset_name):
dataset = importlib.import_module('statsmodels.datasets.' + dataset_name)
data = dataset.load()
assert isinstance(data, Dataset)
assert isinstance(data.data, np.recarray)
df_data = dataset.load_pandas()
assert isinstance(df_data, Dataset)
assert isinstance(df_data.data, pd.DataFrame)
# TODO: Remove when nose support is dropped
def test_all_datasets():
for dataset in datasets:
test_dataset(dataset)
|
import numpy as np
import pandas as pd
import statsmodels.datasets as datasets
from statsmodels.datasets import co2
from statsmodels.datasets.utils import Dataset
def test_co2_python3():
# this failed in pd.to_datetime on Python 3 with pandas <= 0.12.0
dta = co2.load_pandas()
class TestDatasets(object):
@classmethod
def setup_class(cls):
exclude = ['check_internet', 'clear_data_home', 'get_data_home',
'get_rdataset', 'tests', 'utils', 'webuse']
cls.sets = []
for dataset_name in dir(datasets):
if not dataset_name.startswith('_') and dataset_name not in exclude:
cls.sets.append(dataset_name)
def check(self, dataset_name):
dataset = __import__('statsmodels.datasets.' + dataset_name, fromlist=[''])
data = dataset.load()
assert isinstance(data, Dataset)
assert isinstance(data.data, np.recarray)
df_data = dataset.load_pandas()
assert isinstance(data, Dataset)
assert isinstance(df_data.data, pd.DataFrame)
def test_all_datasets(self):
for dataset_name in self.sets:
yield (self.check, dataset_name)
|
bsd-3-clause
|
Python
|
97badc176f4a8ac30eb3932359e2e132e36170c4
|
Increase the number of workers
|
sheagcraig/sal,chasetb/sal,sheagcraig/sal,erikng/sal,salopensource/sal,salopensource/sal,chasetb/sal,erikng/sal,chasetb/sal,erikng/sal,chasetb/sal,erikng/sal,sheagcraig/sal,salopensource/sal,salopensource/sal,sheagcraig/sal
|
docker/gunicorn_config.py
|
docker/gunicorn_config.py
|
import multiprocessing
from os import getenv
bind = '127.0.0.1:8001'
workers = multiprocessing.cpu_count() * 3
timeout = 60
threads = multiprocessing.cpu_count() * 3
max_requests = 500
max_requests_jitter = 5
# Read the DEBUG setting from env var
try:
if getenv('DOCKER_SAL_DEBUG').lower() == 'true':
errorlog = '/var/log/gunicorn/gunicorn-error.log'
accesslog = '/var/log/gunicorn/gunicorn-access.log'
loglevel = 'info'
except:
pass
|
import multiprocessing
from os import getenv
bind = '127.0.0.1:8001'
workers = multiprocessing.cpu_count() * 2
timeout = 60
threads = multiprocessing.cpu_count() * 2
max_requests = 1000
max_requests_jitter = 5
# Read the DEBUG setting from env var
try:
if getenv('DOCKER_SAL_DEBUG').lower() == 'true':
errorlog = '/var/log/gunicorn/gunicorn-error.log'
accesslog = '/var/log/gunicorn/gunicorn-access.log'
loglevel = 'info'
except:
pass
|
apache-2.0
|
Python
|
15090b84e1c7359c49cb45aec4d9b4d492f855ac
|
Update smb test to include port parameter
|
pwnbus/scoring_engine,pwnbus/scoring_engine,pwnbus/scoring_engine,pwnbus/scoring_engine
|
tests/scoring_engine/engine/checks/test_smb.py
|
tests/scoring_engine/engine/checks/test_smb.py
|
from scoring_engine.engine.basic_check import CHECKS_BIN_PATH
from tests.scoring_engine.engine.checks.check_test import CheckTest
class TestSMBCheck(CheckTest):
check_name = 'SMBCheck'
required_properties = ['share', 'file', 'hash']
properties = {
'share': 'ScoringShare',
'file': 'flag.txt',
'hash': '123456789'
}
accounts = {
'pwnbus': 'pwnbuspass'
}
cmd = CHECKS_BIN_PATH + "/smb_check --host '127.0.0.1' --port 100 --user 'pwnbus' --pass 'pwnbuspass' --share 'ScoringShare' --file 'flag.txt' --hash '123456789'"
|
from scoring_engine.engine.basic_check import CHECKS_BIN_PATH
from tests.scoring_engine.engine.checks.check_test import CheckTest
class TestSMBCheck(CheckTest):
check_name = 'SMBCheck'
required_properties = ['share', 'file', 'hash']
properties = {
'share': 'ScoringShare',
'file': 'flag.txt',
'hash': '123456789'
}
accounts = {
'pwnbus': 'pwnbuspass'
}
cmd = CHECKS_BIN_PATH + "/smb_check --host '127.0.0.1' --user 'pwnbus' --pass 'pwnbuspass' --share 'ScoringShare' --file 'flag.txt' --hash '123456789'"
|
mit
|
Python
|
b3889bbdab80fb502c74b99b61cf36bae112ce2c
|
Add property decorator to getters
|
PressLabs/cobalt,PressLabs/cobalt
|
node/node.py
|
node/node.py
|
from configparser import ConfigParser
from driver import BTRFSDriver
class Node:
"""
# Dummy config example
[bk1-z3.presslabs.net]
ssd = True
"""
def __init__(self, context):
self._conf_path = context['node']['conf_path']
self._driver = BTRFSDriver(context['volume_path'])
self._name, self._labels = '', {}
config = ConfigParser()
config.read(self._conf_path)
try:
self._name = config.sections()[0]
for label, value in config[self._name].iteritems():
self._labels[label] = value
except IndexError:
pass
def get_subvolumes(self):
return self._driver.get_all()
@property
def name(self):
return self._name
@property
def labels(self):
return self._labels
|
from configparser import ConfigParser
from driver import BTRFSDriver
class Node:
"""
# Dummy config example
[bk1-z3.presslabs.net]
ssd = True
"""
def __init__(self, context):
self._conf_path = context['node']['conf_path']
self._driver = BTRFSDriver(context['volume_path'])
self._name, self._labels = '', {}
config = ConfigParser()
config.read(self._conf_path)
try:
self._name = config.sections()[0]
for label, value in config[self._name].iteritems():
self._labels[label] = value
except IndexError:
pass
def get_subvolumes(self):
return self._driver.get_all()
def name(self):
return self._name
def labels(self):
return self._labels
|
apache-2.0
|
Python
|
af7a2e59b76a5c404e393a6fc1aeca9517018185
|
Fix peacock crash when filename with unicode exists
|
SudiptaBiswas/moose,harterj/moose,andrsd/moose,sapitts/moose,andrsd/moose,permcody/moose,sapitts/moose,dschwen/moose,bwspenc/moose,harterj/moose,bwspenc/moose,dschwen/moose,dschwen/moose,YaqiWang/moose,sapitts/moose,nuclear-wizard/moose,harterj/moose,SudiptaBiswas/moose,nuclear-wizard/moose,lindsayad/moose,lindsayad/moose,jessecarterMOOSE/moose,sapitts/moose,dschwen/moose,idaholab/moose,SudiptaBiswas/moose,laagesen/moose,idaholab/moose,laagesen/moose,harterj/moose,andrsd/moose,jessecarterMOOSE/moose,milljm/moose,YaqiWang/moose,jessecarterMOOSE/moose,permcody/moose,milljm/moose,idaholab/moose,nuclear-wizard/moose,lindsayad/moose,sapitts/moose,nuclear-wizard/moose,idaholab/moose,laagesen/moose,YaqiWang/moose,milljm/moose,YaqiWang/moose,idaholab/moose,dschwen/moose,harterj/moose,bwspenc/moose,bwspenc/moose,SudiptaBiswas/moose,bwspenc/moose,milljm/moose,SudiptaBiswas/moose,laagesen/moose,lindsayad/moose,andrsd/moose,andrsd/moose,jessecarterMOOSE/moose,laagesen/moose,milljm/moose,jessecarterMOOSE/moose,permcody/moose,permcody/moose,lindsayad/moose
|
python/peacock/ExodusViewer/plugins/ExodusFilterProxyModel.py
|
python/peacock/ExodusViewer/plugins/ExodusFilterProxyModel.py
|
#!/usr/bin/env python2
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from PyQt5 import QtWidgets, QtCore
import sys
import os
import re
class ExodusFilterProxyModel(QtCore.QSortFilterProxyModel):
"""
A filename filter for Exodus *.efiles.
"""
def filterAcceptsRow(self, row, parent):
index0 = self.sourceModel().index(row, 0, parent)
filename = self.sourceModel().filePath(index0)
if os.path.isdir(filename):
return True
match = re.search(r'(.*?)\.e(-s[0-9]+)', filename)
if not match or filename.endswith('.e'):
return True
else:
return False
if __name__ == "__main__":
qapp = QtWidgets.QApplication(sys.argv)
fd = QtWidgets.QFileDialog()
fd.setFileMode(QtWidgets.QFileDialog.ExistingFiles)
fd.setWindowTitle('Select ExodusII File(s)')
fd.setDirectory('/Users/slauae/projects/gui/tests/chigger/input')
fd.setNameFilter('ExodusII Files (*.e)')
fd.setOption(QtWidgets.QFileDialog.DontUseNativeDialog)
proxy = ExodusFilterProxyModel(fd)
fd.setProxyModel(proxy)
fd.raise_()
fd.exec_()
|
#!/usr/bin/env python2
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from PyQt5 import QtWidgets, QtCore
import sys
import os
import re
class ExodusFilterProxyModel(QtCore.QSortFilterProxyModel):
"""
A filename filter for Exodus *.efiles.
"""
def filterAcceptsRow(self, row, parent):
index0 = self.sourceModel().index(row, 0, parent)
filename = str(self.sourceModel().filePath(index0))
if os.path.isdir(filename):
return True
match = re.search(r'(.*?)\.e(-s[0-9]+)', filename)
if not match or filename.endswith('.e'):
return True
else:
return False
if __name__ == "__main__":
qapp = QtWidgets.QApplication(sys.argv)
fd = QtWidgets.QFileDialog()
fd.setFileMode(QtWidgets.QFileDialog.ExistingFiles)
fd.setWindowTitle('Select ExodusII File(s)')
fd.setDirectory('/Users/slauae/projects/gui/tests/chigger/input')
fd.setNameFilter('ExodusII Files (*.e)')
fd.setOption(QtWidgets.QFileDialog.DontUseNativeDialog)
proxy = ExodusFilterProxyModel(fd)
fd.setProxyModel(proxy)
fd.raise_()
fd.exec_()
|
lgpl-2.1
|
Python
|
814b344082fbce471509c54c683470467dd8f814
|
use env to find python binary
|
tchapi/pianette,tchapi/pianette,tchapi/pianette,tchapi/pianette
|
main.pyw
|
main.pyw
|
#!/usr/bin/env python3
from tkinter import *
from models import *
import logging
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
appWindow = Tk()
appWindow.title("Virtual Controller")
# Set fullscreen [Not necessary when debugging]
# appWindow.geometry("{0}x{1}+0+0".format(appWindow.winfo_screenwidth(), appWindow.winfo_screenheight()))
appWindow.focus_set() # <-- move focus to this widget
# Binds <Escape> key to quit the program
appWindow.bind("<Escape>", lambda e: e.widget.destroy())
# Removes the title bar and menu bar
appWindow.overrideredirect(True)
# This holds the controller state at any moment
ctrlState = ControllerState()
# The virtual controller can set state flags via the UI
app = VirtualControllerDisplay(appWindow, ctrlState)
# Instantiate the console controller that will send out the state to the console when needed
consoleCtrl = ConsoleController(ctrlState)
# Now loads the GPIO Controller that will set state flags depending on the GPIO inputs
# It needs the app to flash the buttons
gpioCtrl = GPIOController(ctrlState, app)
# Run main loop
appWindow.mainloop()
# Cleanup GPIOs
GPIO.cleanup()
|
#!/usr/bin/python3
from tkinter import *
from models import *
import logging
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
appWindow = Tk()
appWindow.title("Virtual Controller")
# Set fullscreen [Not necessary when debugging]
# appWindow.geometry("{0}x{1}+0+0".format(appWindow.winfo_screenwidth(), appWindow.winfo_screenheight()))
appWindow.focus_set() # <-- move focus to this widget
# Binds <Escape> key to quit the program
appWindow.bind("<Escape>", lambda e: e.widget.destroy())
# Removes the title bar and menu bar
appWindow.overrideredirect(True)
# This holds the controller state at any moment
ctrlState = ControllerState()
# The virtual controller can set state flags via the UI
app = VirtualControllerDisplay(appWindow, ctrlState)
# Instantiate the console controller that will send out the state to the console when needed
consoleCtrl = ConsoleController(ctrlState)
# Now loads the GPIO Controller that will set state flags depending on the GPIO inputs
# It needs the app to flash the buttons
gpioCtrl = GPIOController(ctrlState, app)
# Run main loop
appWindow.mainloop()
# Cleanup GPIOs
GPIO.cleanup()
|
mit
|
Python
|
59c14e1c0d69309c554ddafa5e168115ba05ddfd
|
Update the win/lose determination code
|
Jacobinski/SaltBot
|
match.py
|
match.py
|
'''
The match monitoring module for SaltBot
'''
from bs4 import BeautifulSoup
import requests
import time
from bet import bet_player1
from website import website
class Match:
def __init__(self):
self.id = 0 # TODO: Use SQL to determine this w/ MAX()
self.player1 = None
self.player2 = None
self.duration = None
self.winner = None
self.p1bets = None
self.p2bets = None
self.mybet = None
def start_round(self, mybet, website):
self.player1 = website.get_player1_name()
self.player2 = website.get_player2_name()
self.p1bets = website.get_player1_wagers()
self.p2bets = website.get_player2_wagers()
self.mybet = mybet
def end_round(self, duration, winner):
self.duration = duration
self.winner = winner
def save_round(self):
# TODO: Save to SQL
return
def record_match(session, request):
# Initialize a match
site = website(session, request)
balance_start = None
balance_end = None
while(True):
# Add a delay to avoid overloading the server
time.sleep(10)
# Update status
prev_status = site.get_betting_status()
site.update()
status = site.get_betting_status()
if (prev_status == 'locked' and status == 'open'):
balance_end = site.get_balance()
if (balance_end > balance_start):
print('Our bet wins')
elif (balance_end < balance_start):
print('Our bet loses')
else:
print('Money remained the same')
print(site.get_json())
print('\nBetting is now open!')
print('Balance: ' + str(balance_end))
# Place the bet
bet_player1(session, 500)
elif (prev_status == 'open' and status == 'locked'):
print('The match begins!')
balance_start = site.get_balance()
|
'''
The match monitoring module for SaltBot
'''
from bs4 import BeautifulSoup
import requests
import time
from bet import bet_player1
from website import website
class Match:
def __init__(self):
self.id = 0 # TODO: Use SQL to determine this w/ MAX()
self.player1 = None
self.player2 = None
self.duration = None
self.winner = None
self.p1bets = None
self.p2bets = None
self.mybet = None
def start_round(self, mybet, website):
self.player1 = website.get_player1_name()
self.player2 = website.get_player2_name()
self.p1bets = website.get_player1_wagers()
self.p2bets = website.get_player2_wagers()
self.mybet = mybet
def end_round(self, duration, winner):
self.duration = duration
self.winner = winner
def save_round(self):
# TODO: Save to SQL
return
def record_match(session, request):
# Initialize a match
site = website(session, request)
while(True):
# Add a delay to avoid overloading the server
time.sleep(10)
# Update status
prev_status = site.get_betting_status()
prev_balance = site.get_balance()
site.update()
status = site.get_betting_status()
balance = site.get_balance()
if (prev_status == 'locked' and status == 'open'):
if (balance > prev_balance):
print('Our bet wins')
elif (balance < prev_balance):
print('Our bet loses')
else:
print('Money remained the same')
print(site.get_json())
print('\nBetting is now open!')
print('Balance: ' + str(balance))
# Place the bet
bet_player1(session, 500)
elif (prev_status == 'open' and status == 'locked'):
print('The match begins!')
|
mit
|
Python
|
67e620716b494f74c9b913b6514463eb4689c590
|
add OMPL_DEBUG and other convenience function to python bindings
|
davetcoleman/ompl,sonny-tarbouriech/ompl,utiasASRL/batch-informed-trees,sonny-tarbouriech/ompl,davetcoleman/ompl,utiasASRL/batch-informed-trees,davetcoleman/ompl,utiasASRL/batch-informed-trees,florianhauer/ompl,utiasASRL/batch-informed-trees,jvgomez/ompl,jvgomez/ompl,florianhauer/ompl,davetcoleman/ompl,florianhauer/ompl,florianhauer/ompl,jvgomez/ompl,sonny-tarbouriech/ompl,sonny-tarbouriech/ompl,jvgomez/ompl,sonny-tarbouriech/ompl,davetcoleman/ompl,florianhauer/ompl,utiasASRL/batch-informed-trees,florianhauer/ompl,jvgomez/ompl,davetcoleman/ompl,sonny-tarbouriech/ompl,utiasASRL/batch-informed-trees,jvgomez/ompl
|
py-bindings/ompl/util/__init__.py
|
py-bindings/ompl/util/__init__.py
|
from os.path import abspath, dirname
from ompl import dll_loader
dll_loader('ompl', dirname(abspath(__file__)))
from ompl.util._util import *
import inspect
def OMPL_DEBUG(text):
c = inspect.currentframe().f_back
getOutputHandler().log(text, LogLevel.LOG_DEBUG, c.f_code.co_filename, c.f_lineno)
def OMPL_INFORM(text):
c = inspect.currentframe().f_back
getOutputHandler().log(text, LogLevel.LOG_INFO, c.f_code.co_filename, c.f_lineno)
def OMPL_WARN(text):
c = inspect.currentframe().f_back
getOutputHandler().log(text, LogLevel.LOG_WARN, c.f_code.co_filename, c.f_lineno)
def OMPL_ERROR(text):
c = inspect.currentframe().f_back
getOutputHandler().log(text, LogLevel.LOG_ERROR, c.f_code.co_filename, c.f_lineno)
|
from os.path import abspath, dirname
from ompl import dll_loader
dll_loader('ompl', dirname(abspath(__file__)))
from ompl.util._util import *
|
bsd-3-clause
|
Python
|
66b6d3648c0a4229048c0f8a63ec410c407f1ba1
|
Fix unittest
|
cpaulik/pyscaffold,cpaulik/pyscaffold,blue-yonder/pyscaffold,blue-yonder/pyscaffold
|
src/pyscaffold/extensions/no_skeleton.py
|
src/pyscaffold/extensions/no_skeleton.py
|
# -*- coding: utf-8 -*-
"""
Extension that omits the creation of file `skeleton.py`
"""
from ..api import Extension
from ..api import helpers
class NoSkeleton(Extension):
"""Omit creation of skeleton.py"""
def activate(self, actions):
"""Activate extension
Args:
actions (list): list of actions to perform
Returns:
list: updated list of actions
"""
return self.register(
actions,
self.remove_files,
after='define_structure')
def remove_files(self, struct, opts):
"""Remove all skeleton files from structure
Args:
struct (dict): project representation as (possibly) nested
:obj:`dict`.
opts (dict): given options, see :obj:`create_project` for
an extensive list.
Returns:
struct, opts: updated project representation and options
"""
# Namespace is not yet applied so deleting from package is enough
file = [opts['project'], 'src', opts['package'], 'skeleton.py']
struct = helpers.reject(struct, file)
file = [opts['project'], 'tests', 'test_skeleton.py']
struct = helpers.reject(struct, file)
return struct, opts
|
# -*- coding: utf-8 -*-
"""
Extension that omits the creation of file `skeleton.py`
"""
from ..api import Extension
from ..api import helpers
class NoSkeleton(Extension):
"""Omit creation of skeleton.py"""
def activate(self, actions):
"""Activate extension
Args:
actions (list): list of actions to perform
Returns:
list: updated list of actions
"""
return self.register(
actions,
self.remove_files,
after='define_structure')
def remove_files(self, struct, opts):
"""Remove all skeleton files from structure
Args:
struct (dict): project representation as (possibly) nested
:obj:`dict`.
opts (dict): given options, see :obj:`create_project` for
an extensive list.
Returns:
struct, opts: updated project representation and options
"""
pkgs = opts['qual_pkg'].split('.')
file = [opts['project'], 'src'] + pkgs + ['skeleton.py']
struct = helpers.reject(struct, file)
file = [opts['project'], 'tests', 'test_skeleton.py']
struct = helpers.reject(struct, file)
return struct, opts
|
mit
|
Python
|
0062eeaf558a0eb9e8a736baf16932e56546001f
|
Fix silly Python indentation issues.
|
amkahn/question-answering,amkahn/question-answering
|
src/query_processing/query_processing.py
|
src/query_processing/query_processing.py
|
# LING 573 Question Answering System
# Code last updated 4/15/14 by Andrea Kahn
#
# This code implements a QueryProcessor for the question answering system.
import sys
import general_classes
import nltk
# TODO: A QueryProcessor should be initialized with the Question object, but should it
# have this question as an attribute, or should it have attributes id, type, q, target that
# match those of the question?
# Advantage of first approach: allows us to change the Question and have the QueryProcessor
# update automatically (not sure we'd ever do this).
# Advantage of second approach: saves typing (access the id with self.id, versus self.question.id).
# A QueryProcessor object has the attribute "question", a Question object.
class QueryProcessor(object):
def __init__(self, question):
self.question = question
# This method returns a set of SearchQuery objects.
def generate_queries(self):
tokenized_q = nltk.word_tokenize(self.question.q)
tokenized_target = nltk.word_tokenize(self.question.target)
# FIXME: Strip out punctuation tokens
# note from Claire: here is a temporary fix
punctuation = ['?','.',',']
search_query = [x for x in tokenized_q if x not in punctuation] + [x for x in tokenized_target if x not in punctuation]
# FIXME: Issue with leading escape character in some questions
return [search_query]
# This method returns an AnswerTemplate object.
def generate_ans_template(self):
# NB: The following if statement should always evaluate as True in our system, but
# its inclusion enables the system to more easily be extended to handle other types
# of questions, for which the text-processing and AnswerTemplate-generation steps
# might be slightlydifferent.
if self.question.type=="FACTOID":
# do some sort of text-processing on the natural-language question and context
# to determine NE type
# generate a corresponding AnswerTemplate object
# return it
return None
else:
sys.stderr.write("Warning: System can only handle \"factoid\" questions\n")
|
# LING 573 Question Answering System
# Code last updated 4/15/14 by Andrea Kahn
#
# This code implements a QueryProcessor for the question answering system.
import sys
import general_classes
import nltk
# TODO: A QueryProcessor should be initialized with the Question object, but should it
# have this question as an attribute, or should it have attributes id, type, q, target that
# match those of the question?
# Advantage of first approach: allows us to change the Question and have the QueryProcessor
# update automatically (not sure we'd ever do this).
# Advantage of second approach: saves typing (access the id with self.id, versus self.question.id).
# A QueryProcessor object has the attribute "question", a Question object.
class QueryProcessor(object):
def __init__(self, question):
self.question = question
# This method returns a set of SearchQuery objects.
def generate_queries(self):
tokenized_q = nltk.word_tokenize(self.question.q)
tokenized_target = nltk.word_tokenize(self.question.target)
# FIXME: Strip out punctuation tokens
# note from Claire: here is a temporary fix
punctuation = ['?','.',',']
search_query = [x for x in tokenized_q if x not in punctuation] + [x for x in tokenized_target if x not in punctuation]
# FIXME: Issue with leading escape character in some questions
return [search_query]
# This method returns an AnswerTemplate object.
def generate_ans_template(self):
# NB: The following if statement should always evaluate as True in our system, but
# its inclusion enables the system to more easily be extended to handle other types
# of questions, for which the text-processing and AnswerTemplate-generation steps
# might be slightlydifferent.
if self.question.type=="FACTOID":
# do some sort of text-processing on the natural-language question and context
# to determine NE type
# generate a corresponding AnswerTemplate object
# return it
return None
else:
sys.stderr.write("Warning: System can only handle \"factoid\" questions\n")
|
mit
|
Python
|
c6b47431f75675547d54c3b68c07aad76721e513
|
fix srfit test
|
CJ-Wright/pyIID
|
pyiid/tests/test_against_srfit.py
|
pyiid/tests/test_against_srfit.py
|
__author__ = 'christopher'
from pyiid.tests import *
from pyiid.experiments.elasticscatter import ElasticScatter
local_test_atoms = setup_atomic_square()[0] * 3
test_data = tuple(product([local_test_atoms], [None]))
@known_fail_if(not srfit)
def check_fq_against_srfit():
# unpack the atoms and experiment
atoms = local_test_atoms
exp = None
# get the pyIID F(Q)
s = ElasticScatter(exp)
# s.set_processor('CPU', 'nxn')
ans1 = s.get_fq(atoms)
# get the SrFit F(Q)
stru = convert_atoms_to_stru(atoms)
srfit_calc = DebyePDFCalculator()
srfit_calc.qmin = s.exp['qmin']
srfit_calc.qmax = s.exp['qmax']
srfit_calc.qstep = s.exp['qbin']
r1, g1 = srfit_calc(stru)
assert_allclose(s.get_scatter_vector(), srfit_calc.qgrid)
ans2 = srfit_calc.fq
stats_check(ans1, ans2, rtol=1e-1, atol=5e-6)
del srfit_calc
assert_allclose(ans1, ans2, rtol=1e-1, atol=5e-6)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[
# '-s',
'--with-doctest',
# '--nocapture',
# '-v'
],
# env={"NOSE_PROCESSES": 1, "NOSE_PROCESS_TIMEOUT": 599},
exit=False)
|
__author__ = 'christopher'
from pyiid.tests import *
from pyiid.experiments.elasticscatter import ElasticScatter
local_test_atoms = setup_atomic_square()[0] * 3
test_data = tuple(product([local_test_atoms], [None]))
def test_fq_against_srfit():
for value in test_data:
yield check_fq_against_srfit, value
def check_fq_against_srfit(value):
if not srfit:
KnownFailureTest()
# unpack the atoms and experiment
atoms = value[0]
exp = value[1]
# get the pyIID F(Q)
s = ElasticScatter(exp)
# s.set_processor('CPU', 'nxn')
ans1 = s.get_fq(atoms)
# get the SrFit F(Q)
stru = convert_atoms_to_stru(atoms)
srfit_calc = DebyePDFCalculator()
srfit_calc.qmin = s.exp['qmin']
srfit_calc.qmax = s.exp['qmax']
srfit_calc.qstep = s.exp['qbin']
r1, g1 = srfit_calc(stru)
assert_allclose(s.get_scatter_vector(), srfit_calc.qgrid)
ans2 = srfit_calc.fq
stats_check(ans1, ans2, rtol=1e-1, atol=5e-6)
del srfit_calc
assert_allclose(ans1, ans2, rtol=1e-1, atol=5e-6)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[
# '-s',
'--with-doctest',
# '--nocapture',
# '-v'
],
# env={"NOSE_PROCESSES": 1, "NOSE_PROCESS_TIMEOUT": 599},
exit=False)
|
bsd-3-clause
|
Python
|
8b34cf20b9cc010d321912433d772ccad8dbdb6f
|
Update MediaWiki version for i18n_family.py from trunk r8823
|
legoktm/pywikipedia-rewrite
|
pywikibot/families/i18n_family.py
|
pywikibot/families/i18n_family.py
|
# -*- coding: utf-8 -*-
__version__ = '$Id$'
from pywikibot import family
# The Wikimedia i18n family
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = 'i18n'
self.langs = {
'i18n': 'translatewiki.net',
}
def version(self, code):
return "1.18alpha"
|
# -*- coding: utf-8 -*-
__version__ = '$Id$'
from pywikibot import family
# The Wikimedia i18n family
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = 'i18n'
self.langs = {
'i18n': 'translatewiki.net',
}
def version(self, code):
return "1.17alpha"
|
mit
|
Python
|
aeeed413830f58c60ab4b05beddf44ab4dba5e36
|
Update views.py
|
cloud-fire/signalnews,cloud-fire/signalnews,cloud-fire/signalnews
|
chat/views.py
|
chat/views.py
|
import random
import string
from django.db import transaction
from django.shortcuts import render, redirect
import haikunator
from .models import Room
def about(request):
return render(request, "chat/about.html")
def new_room(request):
"""
Randomly create a new room, and redirect to it.
"""
new_room = None
while not new_room:
with transaction.atomic():
label = haikunator.haikunate()
if Room.objects.filter(label=label).exists():
continue
new_room = Room.objects.create(label=label)
return redirect(chat_room, label=label)
def chat_room(request, label):
"""
Room view - show the room, with latest messages.
The template for this view has the WebSocket business to send and stream
messages, so see the template for where the magic happens.
"""
# If the room with the given label doesn't exist, automatically create it
# upon first visit (a la etherpad).
room, created = Room.objects.get_or_create(label=label)
# We want to show the last 50 messages, ordered most-recent-last
messages = (room.messages.order_by('-timestamp')[:10])
return render(request, "chat/room.html", {
'room': room,
'messages': messages,
})
|
import random
import string
from django.db import transaction
from django.shortcuts import render, redirect
import haikunator
from .models import Room
def about(request):
return render(request, "chat/about.html")
def new_room(request):
"""
Randomly create a new room, and redirect to it.
"""
new_room = None
while not new_room:
with transaction.atomic():
label = haikunator.haikunate()
if Room.objects.filter(label=label).exists():
continue
new_room = Room.objects.create(label=label)
return redirect(chat_room, label=label)
def chat_room(request, label):
"""
Room view - show the room, with latest messages.
The template for this view has the WebSocket business to send and stream
messages, so see the template for where the magic happens.
"""
# If the room with the given label doesn't exist, automatically create it
# upon first visit (a la etherpad).
room, created = Room.objects.get_or_create(label=label)
# We want to show the last 50 messages, ordered most-recent-last
messages = (room.messages.order_by('-timestamp')[:1])
return render(request, "chat/room.html", {
'room': room,
'messages': messages,
})
|
bsd-3-clause
|
Python
|
9e6e2fd1903b8e4deb3b6737d86aadc2627cb4eb
|
add the `__all__` to `_compat.py`.
|
lepture/flask-storage,menghan/flask-storage,LiuDeng/flask-storage,fengluo/flask-storage
|
flask_storage/_compat.py
|
flask_storage/_compat.py
|
import sys
try:
from urlparse import urljoin
import urllib2 as http
except ImportError:
from urllib.parse import urljoin
from urllib import request as http
if sys.version_info[0] == 3:
string_type = str
else:
string_type = unicode
__all__ = ['urljoin', 'http', 'string_type', 'to_bytes']
def to_bytes(text):
if isinstance(text, string_type):
text = text.encode('utf-8')
return text
|
import sys
try:
from urlparse import urljoin
import urllib2 as http
except ImportError:
from urllib.parse import urljoin
from urllib import request as http
if sys.version_info[0] == 3:
string_type = str
else:
string_type = unicode
def to_bytes(text):
if isinstance(text, string_type):
text = text.encode('utf-8')
return text
|
bsd-3-clause
|
Python
|
901b3e88704f938dcc090bb93b9818ac7ac994dd
|
Update ipc_lista1.8.py
|
any1m1c/ipc20161
|
lista1/ipc_lista1.8.py
|
lista1/ipc_lista1.8.py
|
#ipc_lista1.8
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa que pergunte quanto você ganha por hora e o número de horas trabalhadas no mês.
#Calcule e mostre o total do seu salário no referido mês.
QntHora = input("Entre com o valor de seu rendimento por hora: ")
hT = input("Entre com a quantidade de horas trabalhadas no mês: )
Salario = round(QntHora*
|
#ipc_lista1.8
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa que pergunte quanto você ganha por hora e o número de horas trabalhadas no mês.
#Calcule e mostre o total do seu salário no referido mês.
QntHora = input("Entre com o valor de seu rendimento por hora: ")
hT = input("Entre com a quantidade de horas trabalhadas no mês: )
Salario = round(QntHora
|
apache-2.0
|
Python
|
84f9f45f984cbf0b4192cae49e51333767bb5576
|
fix runtox.py failure when 'tox' is not available on the current system path
|
pfctdayelise/pytest,Bachmann1234/pytest,takluyver/pytest,JonathonSonesen/pytest,abusalimov/pytest,mbirtwell/pytest,Carreau/pytest,pytest-dev/pytest,MengJueM/pytest,inirudebwoy/pytest,Carreau/pytest,bukzor/pytest,Akasurde/pytest,davidszotten/pytest,omarkohl/pytest,doordash/pytest,Haibo-Wang-ORG/pytest,nicoddemus/pytest,wfxiang08/pytest,codewarrior0/pytest,mdboom/pytest,userzimmermann/pytest,mbirtwell/pytest,hackebrot/pytest,doordash/pytest,ropez/pytest,vmalloc/dessert,vodik/pytest,ionelmc/pytest,bukzor/pytest,gabrielcnr/pytest,mhils/pytest,inirudebwoy/pytest,pelme/pytest,eli-b/pytest,icemac/pytest,The-Compiler/pytest,untitaker/pytest,bubenkoff/pytest,jb098/pytest,chillbear/pytest,ericdill/pytest,abusalimov/pytest,chillbear/pytest,oleg-alexandrov/pytest,userzimmermann/pytest,hpk42/pytest,ojake/pytest,vodik/pytest,omarkohl/pytest,hunse/pytest,RonnyPfannschmidt/pytest,rmfitzpatrick/pytest,mhils/pytest,malinoff/pytest,txomon/pytest,chiller/pytest,mdboom/pytest,untitaker/pytest,oleg-alexandrov/pytest,nicoddemus/repo-test,JonathonSonesen/pytest,tomviner/pytest,chiller/pytest,hpk42/pytest,codewarrior0/pytest,markshao/pytest,ericdill/pytest,Bachmann1234/pytest,tgoodlet/pytest,ghostsquad/pytest,bubenkoff/pytest,wfxiang08/pytest,Bjwebb/pytest,nicoddemus/pytest,ropez/pytest,MichaelAquilina/pytest,jb098/pytest,ghostsquad/pytest,lukas-bednar/pytest,ionelmc/pytest,etataurov/pytest,MengJueM/pytest,jaraco/pytest,tomviner/pytest,alfredodeza/pytest,tareqalayan/pytest,takluyver/pytest,pelme/pytest,hunse/pytest,The-Compiler/pytest,ddboline/pytest,ojake/pytest,icemac/pytest,flub/pytest,skylarjhdownes/pytest,rouge8/pytest,lukas-bednar/pytest,rouge8/pytest,Bjwebb/pytest,gabrielcnr/pytest,nicoddemus/repo-test,Haibo-Wang-ORG/pytest
|
runtox.py
|
runtox.py
|
#!/usr/bin/env python
import subprocess
import sys
if __name__ == "__main__":
subprocess.call([sys.executable, "-m", "tox",
"-i", "ALL=https://devpi.net/hpk/dev/",
"--develop",] + sys.argv[1:])
|
#!/usr/bin/env python
import subprocess
import sys
if __name__ == "__main__":
subprocess.call(["tox",
"-i", "ALL=https://devpi.net/hpk/dev/",
"--develop",] + sys.argv[1:])
|
mit
|
Python
|
bc9c43160a58508e412592b0ab9a0d7f3a35c48c
|
fix regression in folde tests
|
xs2maverick/adhocracy3.mercator,liqd/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,liqd/adhocracy3.mercator,fhartwig/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,liqd/adhocracy3.mercator,fhartwig/adhocracy3.mercator,liqd/adhocracy3.mercator,liqd/adhocracy3.mercator,fhartwig/adhocracy3.mercator,fhartwig/adhocracy3.mercator,liqd/adhocracy3.mercator,fhartwig/adhocracy3.mercator,fhartwig/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,liqd/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,fhartwig/adhocracy3.mercator
|
src/adhocracy/adhocracy/folder/test_init.py
|
src/adhocracy/adhocracy/folder/test_init.py
|
import unittest
from pyramid import testing
class ResourcesAutolNamingFolderUnitTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def _makeOne(self, d=None):
from . import ResourcesAutolNamingFolder
return ResourcesAutolNamingFolder(d)
def test_create(self):
from adhocracy.interfaces import IPool
from zope.interface.verify import verifyObject
inst = self._makeOne()
assert verifyObject(IPool, inst)
def test_next_name_empty(self):
ob = testing.DummyResource()
inst = self._makeOne()
assert inst.next_name(ob) == '0'.zfill(7)
assert inst.next_name(ob) == '1'.zfill(7)
def test_next_name_nonempty(self):
ob = testing.DummyResource()
inst = self._makeOne({'nonintifiable': ob})
assert inst.next_name(ob) == '0'.zfill(7)
def test_next_name_nonempty_intifiable(self):
ob = testing.DummyResource()
inst = self._makeOne({'0000000': ob})
assert inst.next_name(ob).startswith('0'.zfill(7) + '_20')
def test_next_name_empty_prefix(self):
ob = testing.DummyResource()
inst = self._makeOne()
assert inst.next_name(ob, prefix='prefix') == 'prefix' + '0'.zfill(7)
assert inst.next_name(ob,) == '1'.zfill(7)
def test_add(self):
ob = testing.DummyResource()
inst = self._makeOne()
inst.add('name', ob)
assert 'name' in inst
def test_add_next(self):
ob = testing.DummyResource()
inst = self._makeOne()
inst.add_next(ob)
assert '0'.zfill(7) in inst
def test_add_next_prefix(self):
ob = testing.DummyResource()
inst = self._makeOne()
inst.add_next(ob, prefix='prefix')
assert 'prefix' + '0'.zfill(7) in inst
|
import unittest
from pyramid import testing
class ResourcesAutolNamingFolderUnitTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def _makeOne(self, d=None):
from . import ResourcesAutolNamingFolder
return ResourcesAutolNamingFolder(d)
def test_create(self):
from adhocracy.interfaces import IAutoNamingManualFolder
from zope.interface.verify import verifyObject
inst = self._makeOne()
assert verifyObject(IAutoNamingManualFolder, inst)
def test_next_name_empty(self):
ob = testing.DummyResource()
inst = self._makeOne()
assert inst.next_name(ob) == '0'.zfill(7)
assert inst.next_name(ob) == '1'.zfill(7)
def test_next_name_nonempty(self):
ob = testing.DummyResource()
inst = self._makeOne({'nonintifiable': ob})
assert inst.next_name(ob) == '0'.zfill(7)
def test_next_name_nonempty_intifiable(self):
ob = testing.DummyResource()
inst = self._makeOne({'0000000': ob})
assert inst.next_name(ob).startswith('0'.zfill(7) + '_20')
def test_next_name_empty_prefix(self):
ob = testing.DummyResource()
inst = self._makeOne()
assert inst.next_name(ob, prefix='prefix') == 'prefix' + '0'.zfill(7)
assert inst.next_name(ob,) == '1'.zfill(7)
def test_add(self):
ob = testing.DummyResource()
inst = self._makeOne()
inst.add('name', ob)
assert 'name' in inst
def test_add_next(self):
ob = testing.DummyResource()
inst = self._makeOne()
inst.add_next(ob)
assert '0'.zfill(7) in inst
def test_add_next_prefix(self):
ob = testing.DummyResource()
inst = self._makeOne()
inst.add_next(ob, prefix='prefix')
assert 'prefix' + '0'.zfill(7) in inst
|
agpl-3.0
|
Python
|
d6e06d4be5c483bdf4aff8032ff22bee5a49be02
|
Fix broken test.
|
crr0004/lime,crr0004/lime,farhaanbukhsh/lime,farhaanbukhsh/lime,farhaanbukhsh/lime,crr0004/lime,crr0004/lime,farhaanbukhsh/lime,farhaanbukhsh/lime,crr0004/lime
|
backend/sublime/testdata/view_test.py
|
backend/sublime/testdata/view_test.py
|
# coding=utf-8
import sys
import traceback
try:
import sublime
v = sublime.test_window.new_file()
assert v.id() != sublime.test_window.id()
assert sublime.test_window.id() == v.window().id()
assert v.size() == 0
e = v.begin_edit()
v.insert(e, 0, "hellå world")
v.end_edit(e)
assert v.substr(sublime.Region(0, v.size())) == "hellå world"
e = v.begin_edit()
v.insert(e, 0, """abrakadabra
simsalabim
hocus pocus
""")
v.end_edit(e)
assert v.rowcol(20) == (1, 8)
assert v.rowcol(25) == (2, 2)
assert len(v.sel()) == 1
assert len(list(v.sel())) == 1
assert v.settings().get("test", "hello") == "hello"
v.settings().set("test", 10)
assert v.settings().get("test") == 10
assert v.sel()[0] == (46, 46)
v.run_command("move", {"by": "characters", "forward": False})
assert v.sel()[0] == (45, 45)
v.run_command("move", {"by": "characters", "forward": True})
assert v.sel()[0] == (46, 46)
except:
print(sys.exc_info()[1])
traceback.print_exc()
raise
|
# coding=utf-8
import sys
import traceback
try:
import sublime
v = sublime.test_window.new_file()
assert v.id() != sublime.test_window.id()
assert sublime.test_window.id() == v.window().id()
assert v.size() == 0
e = v.begin_edit()
v.insert(e, 0, "hellå world")
v.end_edit(e)
assert v.substr(sublime.Region(0, v.size())) == "hellå world"
e = v.begin_edit()
v.insert(e, 0, """abrakadabra
simsalabim
hocus pocus
""")
v.end_edit(e)
assert v.rowcol(20) == (1, 8)
assert v.rowcol(25) == (2, 2)
assert len(v.sel()) == 1
assert len(list(v.sel())) == 1
assert v.settings().get("test", "hello") == "hello"
v.settings().set("test", 10)
assert v.settings().get("test") == 10
assert v.sel()[0] == (46, 46)
v.run_command("move", {"by": "characters", "forward": True})
assert v.sel()[0] == (47, 47)
v.run_command("move", {"by": "characters", "forward": False})
assert v.sel()[0] == (46, 46)
except:
print(sys.exc_info()[1])
traceback.print_exc()
raise
|
bsd-2-clause
|
Python
|
5e4c9f5a82f9a4f505cbb5c11e411ef70bc78db9
|
Bump version
|
Calysto/metakernel
|
metakernel/__init__.py
|
metakernel/__init__.py
|
from ._metakernel import (
MetaKernel, IPythonKernel, register_ipython_magics, get_metakernel)
from . import pexpect
from .replwrap import REPLWrapper, u
from .process_metakernel import ProcessMetaKernel
from .magic import Magic, option
from .parser import Parser
__all__ = ['Magic', 'MetaKernel', 'option']
__version__ = '0.13.1'
del magic, _metakernel, parser, process_metakernel
|
from ._metakernel import (
MetaKernel, IPythonKernel, register_ipython_magics, get_metakernel)
from . import pexpect
from .replwrap import REPLWrapper, u
from .process_metakernel import ProcessMetaKernel
from .magic import Magic, option
from .parser import Parser
__all__ = ['Magic', 'MetaKernel', 'option']
__version__ = '0.13.0'
del magic, _metakernel, parser, process_metakernel
|
bsd-3-clause
|
Python
|
d5ec09fe4ad4209c387b1b0da82a412ea83f7658
|
Change module name
|
amuehlem/misp-modules,MISP/misp-modules,VirusTotal/misp-modules,VirusTotal/misp-modules,amuehlem/misp-modules,VirusTotal/misp-modules,amuehlem/misp-modules,MISP/misp-modules,MISP/misp-modules
|
misp_modules/modules/expansion/__init__.py
|
misp_modules/modules/expansion/__init__.py
|
from . import _vmray # noqa
__all__ = ['vmray_submit', 'bgpranking', 'circl_passivedns', 'circl_passivessl',
'countrycode', 'cve', 'dns', 'btc_steroids', 'domaintools', 'eupi',
'farsight_passivedns', 'ipasn', 'passivetotal', 'sourcecache', 'virustotal',
'whois', 'shodan', 'reversedns', 'geoip_country', 'wiki', 'iprep',
'threatminer', 'otx', 'threatcrowd', 'vulndb', 'crowdstrike_falcon',
'yara_syntax_validator', 'hashdd', 'onyphe', 'onyphe_full', 'rbl',
'xforceexchange', 'sigma_syntax_validator', 'stix2_pattern_syntax_validator',
'sigma_queries', 'dbl_spamhaus', 'vulners', 'yara_query', 'macaddress_io',
'intel471']
|
from . import _vmray # noqa
__all__ = ['vmray_submit', 'asn_history', 'circl_passivedns', 'circl_passivessl',
'countrycode', 'cve', 'dns', 'btc_steroids', 'domaintools', 'eupi',
'farsight_passivedns', 'ipasn', 'passivetotal', 'sourcecache', 'virustotal',
'whois', 'shodan', 'reversedns', 'geoip_country', 'wiki', 'iprep',
'threatminer', 'otx', 'threatcrowd', 'vulndb', 'crowdstrike_falcon',
'yara_syntax_validator', 'hashdd', 'onyphe', 'onyphe_full', 'rbl',
'xforceexchange', 'sigma_syntax_validator', 'stix2_pattern_syntax_validator',
'sigma_queries', 'dbl_spamhaus', 'vulners', 'yara_query', 'macaddress_io',
'intel471']
|
agpl-3.0
|
Python
|
48894b2200d3324525ce3f1056fbd4d3420765e2
|
Make date string sent to Guardian API dynamic.
|
mattpatey/news-to-epub
|
scrape.py
|
scrape.py
|
#!/usr/bin/env python
import argparse
from datetime import datetime
from json import loads
from bs4 import BeautifulSoup
from ebooklib import epub
import requests
def get_todays_news(section, api_key):
now = datetime.now()
api_date = now.strftime('%Y-%m-%d')
payload = {'api-key': api_key,
'section': section,
'from-date': api_date}
r = requests.get('http://content.guardianapis.com/search', params=payload)
json = loads(r.text)
articles = [(x['webTitle'], x['webUrl']) for x in json['response']['results']]
return articles
def scrape(uri):
response = requests.get(uri)
soup = BeautifulSoup(response.text)
content = soup.find('div', class_='content__article-body')
filtered_content = content.find_all('p')
processed_content = u''.join([unicode(x) for x in filtered_content])
return processed_content
def make_chapter(title, content):
safe_title = u''.join([x for x in title if x.isalpha() or x.isspace()]).replace(u' ', u'-')
file_name = u'chapter-{}.xhtml'.format(safe_title)
chapter = epub.EpubHtml(title=title, file_name=file_name, lang='en')
chapter.content = u'<h1>{}</h1>{}'.format(title, content)
return chapter
def make_ebook(title, chapters):
book = epub.EpubBook()
book.set_title(title)
book.set_language('en')
date = datetime.now().strftime(u'%A %d %B %Y')
section_name = u'Headlines for {}'.format(date)
book.toc = ((epub.Link(c.file_name, c.title, c.title) for c in chapters),
(epub.Section(section_name), chapters))
for c in chapters:
book.add_item(c)
book.spine = ['nav'] + chapters
book.add_item(epub.EpubNcx())
safe_filename = u''.join([x for x in title if x.isalpha() or x.isspace() or x.isdigit()]).replace(u' ', u'-')
filename = u'{}.epub'.format(safe_filename.lower())
epub.write_epub(filename, book, {})
def main():
parser = argparse.ArgumentParser("Transform news from The Guardian's website into an epub file.")
parser.add_argument('api_key', type=str)
args = parser.parse_args()
uris = get_todays_news('world', args.api_key)
chapters = []
for title, raw_content in uris:
processed_content = scrape(raw_content)
chapter = make_chapter(title, processed_content)
chapters.append(chapter)
date = datetime.now().strftime(u'%A %d %B %Y')
book_title = u'News for {}'.format(date)
make_ebook(book_title, chapters)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import argparse
from datetime import datetime
from json import loads
from bs4 import BeautifulSoup
from ebooklib import epub
import requests
def get_todays_news(api_key):
payload = {'api-key': api_key,
'section': 'world',
'from-date': '2015-03-22'}
r = requests.get('http://content.guardianapis.com/search', params=payload)
json = loads(r.text)
articles = [(x['webTitle'], x['webUrl']) for x in json['response']['results']]
return articles
def scrape(uri):
response = requests.get(uri)
soup = BeautifulSoup(response.text)
content = soup.find('div', class_='content__article-body')
filtered_content = content.find_all('p')
processed_content = u''.join([unicode(x) for x in filtered_content])
return processed_content
def make_chapter(title, content):
safe_title = u''.join([x for x in title if x.isalpha() or x.isspace()]).replace(u' ', u'-')
file_name = u'chapter-{}.xhtml'.format(safe_title)
chapter = epub.EpubHtml(title=title, file_name=file_name, lang='en')
chapter.content = u'<h1>{}</h1>{}'.format(title, content)
return chapter
def make_ebook(title, chapters):
book = epub.EpubBook()
book.set_title(title)
book.set_language('en')
date = datetime.now().strftime(u'%A %d %B %Y')
section_name = u'Headlines for {}'.format(date)
book.toc = ((epub.Link(c.file_name, c.title, c.title) for c in chapters),
(epub.Section(section_name), chapters))
for c in chapters:
book.add_item(c)
book.spine = ['nav'] + chapters
book.add_item(epub.EpubNcx())
safe_filename = u''.join([x for x in title if x.isalpha() or x.isspace() or x.isdigit()]).replace(u' ', u'-')
filename = u'{}.epub'.format(safe_filename.lower())
epub.write_epub(filename, book, {})
def main():
parser = argparse.ArgumentParser("Transform news from The Guardian's website into an epub file.")
parser.add_argument('api_key', type=str)
args = parser.parse_args()
uris = get_todays_news(args.api_key)
chapters = []
for title, raw_content in uris:
processed_content = scrape(raw_content)
chapter = make_chapter(title, processed_content)
chapters.append(chapter)
date = datetime.now().strftime(u'%A %d %B %Y')
book_title = u'News for {}'.format(date)
make_ebook(book_title, chapters)
if __name__ == '__main__':
main()
|
mit
|
Python
|
29134a36b3b1d5db12fe4891d1f15191f7f1fa31
|
make collection paths unique to avoid all sorts of mayhem
|
compas-dev/compas
|
src/compas_blender/utilities/collections.py
|
src/compas_blender/utilities/collections.py
|
import bpy
from typing import List, Text
from compas_blender.utilities import delete_objects
__all__ = [
"create_collection",
"create_collections",
"create_collections_from_path",
"clear_collection",
"clear_collections"
]
def collection_path(collection, names=[]):
for parent in bpy.data.collections:
if collection.name in parent.children:
names.append(parent.name)
collection_path(parent, names)
return names
def create_collection(name: Text, parent: bpy.types.Collection = None) -> bpy.types.Collection:
"""Create a collection with the given name.
Parameters
----------
name : str
parent : :class:`bpy.types.Collection`, optional
Returns
-------
:class:`bpy.types.Collection`
"""
if not name:
return
if not parent:
if name in bpy.data.collections:
count = 1
newname = f'{name}.{count:04}'
while newname in bpy.data.collections:
count += 1
newname = f'{name}.{count:04}'
name = newname
collection = bpy.data.collections.new(name)
bpy.context.scene.collection.children.link(collection)
else:
path = collection_path(parent)[::-1] + [parent.name]
name = "::".join(path) + "::" + name
if name not in parent.children:
collection = bpy.data.collections.new(name)
parent.children.link(collection)
else:
collection = bpy.data.collections.get(name)
return collection
def create_collections(names: List[Text]) -> List[bpy.types.Collection]:
"""Create multiple collections at once.
Parameters
----------
names : list of str
Returns
-------
list of :class:`bpy.types.Collection`
"""
collections = [create_collection(name) for name in names]
return collections
def create_collections_from_path(path: Text, separator: Text = '::') -> List[bpy.types.Collection]:
"""Create nested collections from a collection path string.
Parameters
----------
path : str
The collection path with collection names separated by the specified separator.
separator : str, optional
Returns
-------
list of :class:`bpy.types.Collection`
"""
names = path.split(separator)
collections = []
parent = None
for name in names:
collection = create_collection(name, parent=parent)
parent = collection
collections.append(collection)
return collections
def clear_collection(name: Text):
"""Clear the objects from a collection."""
objects = list(bpy.data.collections[name].objects)
if objects:
delete_objects(objects)
def clear_collections(collections: List[bpy.types.Collection]):
"""Clear the objects from multiple collections."""
for name in collections:
clear_collection(name)
|
import bpy
from typing import List, Text
from compas_blender.utilities import delete_objects
__all__ = [
"create_collection",
"create_collections",
"create_collections_from_path",
"clear_collection",
"clear_collections"
]
def create_collection(name: Text, parent: bpy.types.Collection = None) -> bpy.types.Collection:
"""Create a collection with the given name.
Parameters
----------
name : str
parent : :class:`bpy.types.Collection`, optional
Returns
-------
:class:`bpy.types.Collection`
"""
if not name:
return
collection = bpy.data.collections.get(name) or bpy.data.collections.new(name)
if not parent:
if collection.name not in bpy.context.scene.collection.children:
bpy.context.scene.collection.children.link(collection)
else:
if collection.name not in parent.children:
parent.children.link(collection)
return collection
def create_collections(names: List[Text]) -> List[bpy.types.Collection]:
"""Create multiple collections at once.
Parameters
----------
names : list of str
Returns
-------
list of :class:`bpy.types.Collection`
"""
collections = [create_collection(name) for name in names]
return collections
def create_collections_from_path(path: Text, separator: Text = '::') -> List[bpy.types.Collection]:
"""Create nested collections from a collection path string.
Parameters
----------
path : str
The collection path with collection names separated by the specified separator.
separator : str, optional
Returns
-------
list of :class:`bpy.types.Collection`
"""
names = path.split(separator)
collections = []
parent = None
for name in names:
collection = create_collection(name, parent=parent)
parent = collection
collections.append(collection)
return collections
def clear_collection(name: Text):
"""Clear the objects from a collection."""
objects = list(bpy.data.collections[name].objects)
if objects:
delete_objects(objects)
def clear_collections(collections: List[bpy.types.Collection]):
"""Clear the objects from multiple collections."""
for name in collections:
clear_collection(name)
|
mit
|
Python
|
54fab9c1cb9e2888f7050392d38a94b4f6546741
|
fix branch reference
|
missionpinball/mpf,missionpinball/mpf
|
get_version.py
|
get_version.py
|
"""Return the short version string."""
from mpf._version import __short_version__
print("{}.x".format(__short_version__))
|
"""Return the short version string."""
from mpf._version import __short_version__
print(__short_version__)
|
mit
|
Python
|
b14c6446ac16798f797f279818ae53adc549323e
|
Clean up wrap.py a bit
|
orezpraw/unnaturalcode,orezpraw/unnaturalcode,orezpraw/estimate-charm,orezpraw/unnaturalcode,naturalness/unnaturalcode,naturalness/unnaturalcode,naturalness/unnaturalcode,orezpraw/unnaturalcode,orezpraw/unnaturalcode,orezpraw/unnaturalcode,naturalness/unnaturalcode,orezpraw/unnaturalcode,naturalness/unnaturalcode,naturalness/unnaturalcode,naturalness/unnaturalcode
|
unnaturalcode/wrap.py
|
unnaturalcode/wrap.py
|
#!/usr/bin/env python
# Copyright 2013 Joshua Charles Campbell, Alex Wilson
#
# This file is part of UnnaturalCode.
#
# UnnaturalCode is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UnnaturalCode is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with UnnaturalCode. If not, see <http://www.gnu.org/licenses/>.
import re
import runpy
import sys
import traceback
from logging import debug, info, warning, error
def main():
print sys.path
name_err_extract = re.compile(r"^name\s+'([^']+)'")
def get_file_line(filename, line):
try:
with open(filename) as f:
return filename.readlines()[line - 1]
except:
return None
try:
runpy.run_path(sys.argv[1])
except SyntaxError as se:
print 'syntax error: {} {}:{}'.format(se.filename, se.lineno - 1,
se.offset)
except NameError as ne:
exctype, _, tb = sys.exc_info()
filename, line, func, text = traceback.extract_tb(tb)[-1]
name = name_err_extract.match(ne.message).group(1)
# note: text has all leading whitespace stripped, so the column
# we find for name will not be quite right.
column = (get_file_line(filename, line) or text).index(name)
print 'name error: {} {}:{}'.format(filename, line, column)
print [m.__file__ for m in sys.modules.values() if hasattr(m, '__file__')] + [sys.argv[1]]
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# Copyright 2013 Joshua Charles Campbell, Alex Wilson
#
# This file is part of UnnaturalCode.
#
# UnnaturalCode is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UnnaturalCode is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with UnnaturalCode. If not, see <http://www.gnu.org/licenses/>.
import re, runpy, sys, traceback
from logging import debug, info, warning, error
print sys.path
name_err_extract = re.compile(r"^name\s+'([^']+)'")
def get_file_line(filename, line):
try:
with open(filename) as f:
return filename.readlines()[line - 1]
except:
return None
try:
runpy.run_path(sys.argv[1])
except SyntaxError as se:
print 'syntax error: {} {}:{}'.format(se.filename, se.lineno - 1,
se.offset)
except NameError as ne:
exctype, _, tb = sys.exc_info()
filename, line, func, text = traceback.extract_tb(tb)[-1]
name = name_err_extract.match(ne.message).group(1)
# note: text has all leading whitespace stripped, so the column
# we find for name will not be quite right.
column = (get_file_line(filename, line) or text).index(name)
print 'name error: {} {}:{}'.format(filename, line, column)
print [m.__file__ for m in sys.modules.values() if hasattr(m, '__file__')] + [sys.argv[1]]
|
agpl-3.0
|
Python
|
914a7ae8480875942f6273cf70249f9f9fdf482a
|
Remove unused and unimplemented `retry_on_decode_error` option from modelzoo.util's `load_graphdef`. The option is no longer needed as loading itself will autodetect Exceptions during loading and retry
|
tensorflow/lucid,tensorflow/lucid,tensorflow/lucid,tensorflow/lucid
|
lucid/modelzoo/util.py
|
lucid/modelzoo/util.py
|
# Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for modelzoo models."""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from google.protobuf.message import DecodeError
import logging
# create logger with module name, e.g. lucid.misc.io.reading
log = logging.getLogger(__name__)
from lucid.misc.io import load
def load_text_labels(labels_path):
return load(labels_path).splitlines()
def load_graphdef(model_url, reset_device=True):
"""Load GraphDef from a binary proto file."""
graph_def = load(model_url)
if reset_device:
for n in graph_def.node:
n.device = ""
return graph_def
def forget_xy(t):
"""Ignore sizes of dimensions (1, 2) of a 4d tensor in shape inference.
This allows using smaller input sizes, which create an invalid graph at higher
layers (for example because a spatial dimension becomes smaller than a conv
filter) when we only use early parts of it.
"""
shape = (t.shape[0], None, None, t.shape[3])
return tf.placeholder_with_default(t, shape)
|
# Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for modelzoo models."""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from google.protobuf.message import DecodeError
import logging
# create logger with module name, e.g. lucid.misc.io.reading
log = logging.getLogger(__name__)
from lucid.misc.io import load
def load_text_labels(labels_path):
return load(labels_path).splitlines()
def load_graphdef(model_url, reset_device=True, retry_on_decode_error=True):
"""Load GraphDef from a binary proto file."""
graph_def = load(model_url)
if reset_device:
for n in graph_def.node:
n.device = ""
return graph_def
def forget_xy(t):
"""Ignore sizes of dimensions (1, 2) of a 4d tensor in shape inference.
This allows using smaller input sizes, which create an invalid graph at higher
layers (for example because a spatial dimension becomes smaller than a conv
filter) when we only use early parts of it.
"""
shape = (t.shape[0], None, None, t.shape[3])
return tf.placeholder_with_default(t, shape)
|
apache-2.0
|
Python
|
75be8ed7040cd43aa0a41cba56da48942972ca42
|
Add Testts For Models
|
rockwyc992/monkey-pdns,rockwyc992/monkey-pdns
|
monkey_pdns/app/tests.py
|
monkey_pdns/app/tests.py
|
from django.test import TestCase
from django.contrib.auth.models import User
from .views import hello
from .models import Zone, Sub_Zone, Record, Record_Type
class View_hello_tests(TestCase):
def test_hello(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Hello World!')
class Record_Type_Test(TestCase):
def setUp(self):
Record_Type.objects.create(name="A")
def test_record_type(self):
created_a = Record_Type.objects.get(name="A")
self.assertEqual(str(created_a), "A")
class Zone_Test(TestCase):
def setUp(self):
Zone.objects.create(name="test.domain.com.")
def test_zone(self):
created_zone = Zone.objects.get(name="test.domain.com.")
self.assertEqual(str(created_zone), "test.domain.com.")
class Sub_Zone_Test(TestCase):
def setUp(self):
self.user = User.objects.create_user(username="test_user", password="test")
self.zone = Zone.objects.create(name="test.domain.com.")
Sub_Zone.objects.create(owner=self.user, prefix="new", super_zone=self.zone)
def test_sub_zone(self):
created_sub_zone = Sub_Zone.objects.get(owner=self.user, prefix="new", super_zone=self.zone)
self.assertEqual(str(created_sub_zone), "new.test.domain.com.")
class Record_Test(TestCase):
def setUp(self):
self.record = Record_Type.objects.create(name="A")
self.user = User.objects.create_user(username="test_user", password="test")
self.zone = Zone.objects.create(name="test.domain.com.")
self.sub_zone = Sub_Zone.objects.create(owner=self.user, prefix="new", super_zone=self.zone)
Record.objects.create(prefix="www", type=self.record, zone=self.sub_zone, context="140.115.50.58")
def test_record(self):
created_record = Record.objects.get(zone=self.sub_zone)
self.assertEqual(str(created_record), "www.new.test.domain.com. A 140.115.50.58")
|
from django.test import TestCase
from .views import hello
class View_hello_tests(TestCase):
def test_hello(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Hello World!')
|
mit
|
Python
|
ab819232e0e036709ff6098b2d9f259fc8956ca2
|
add in sentinel support
|
ortoo/schooldata
|
update_school_data.py
|
update_school_data.py
|
import governorhub
import logging
import redis
from redis.sentinel import Sentinel
import os
import loggly.handlers
from datetime import datetime
from similar_schools import update_similar_schools
from dfe_data import update_dfe_data
logging.basicConfig(level=logging.INFO)
# Turn off requests INFO level logging
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
REDIS_HOST = os.environ.get('REDIS_HOST', '127.0.0.1')
REDIS_PORT = os.environ.get('REDIS_PORT', 6379)
REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD', None)
SENTINEL_HOST = os.environ.get('SENTINEL_HOST', None)
SENTINEL_PORT = os.environ.get('SENTINEL_PORT', 26379)
SENTINEL_MASTER = os.environ.get('SENTINEL_MASTER', 'base')
LOGGLY_TOKEN = os.environ.get('LOGGLY_TOKEN', None)
UPDATE_CHAN = 'or2:school:updatedata:channel'
UPDATE_Q = 'or2:school:updatedataq'
if LOGGLY_TOKEN is not None:
handler = loggly.handlers.HTTPSHandler('https://logs-01.loggly.com/inputs/%s/tag/school-data' % LOGGLY_TOKEN)
logging.getLogger('').addHandler(handler)
governorhub.connect()
School = governorhub.ModelType('school')
def update_school(school):
if getattr(school, 'manualData', False):
logging.warning('School requested that has manual data: %s. Not processing' % school._id)
return
update_similar_schools(school)
update_dfe_data(school)
setattr(school, 'lastRefreshed', datetime.now())
school.save()
def clear_queue(client):
while True:
try:
schoolId = client.lpop(UPDATE_Q)
if schoolId is None:
break
schoolId = schoolId.decode('utf-8')
try:
logging.info('Updating ' + schoolId)
school = School.get(schoolId)
update_school(school)
logging.info('Updated ' + schoolId)
except Exception as ex:
logging.error('Error updating data for school: ' + schoolId)
logging.exception(ex)
except Exception as ex:
logging.exception(ex)
def listen_for_requests():
if SENTINEL_HOST is None:
client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD)
else:
sentinel = Sentinel([(SENTINEL_HOST, SENTINEL_PORT)])
client = sentinel.master_for(SENTINEL_MASTER)
clear_queue(client)
ps = client.pubsub()
ps.subscribe(UPDATE_CHAN)
# Hang until we get a message
try:
for message in ps.listen():
try:
if message['type'] == 'message':
data = message['data'].decode('utf-8')
if data == 'update':
clear_queue(client)
except Exception as ex:
logging.exception(ex)
finally:
ps.close()
if __name__ == '__main__':
listen_for_requests()
|
import governorhub
import logging
import redis
import os
import loggly.handlers
from datetime import datetime
from similar_schools import update_similar_schools
from dfe_data import update_dfe_data
logging.basicConfig(level=logging.INFO)
# Turn off requests INFO level logging
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
REDIS_HOST = os.environ.get('REDIS_HOST', '127.0.0.1')
REDIS_PORT = os.environ.get('REDIS_PORT', 6379)
REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD', None)
LOGGLY_TOKEN = os.environ.get('LOGGLY_TOKEN', None)
UPDATE_CHAN = 'or2:school:updatedata:channel'
UPDATE_Q = 'or2:school:updatedataq'
if LOGGLY_TOKEN is not None:
handler = loggly.handlers.HTTPSHandler('https://logs-01.loggly.com/inputs/%s/tag/school-data' % LOGGLY_TOKEN)
logging.getLogger('').addHandler(handler)
governorhub.connect()
School = governorhub.ModelType('school')
def update_school(school):
if getattr(school, 'manualData', False):
logging.warning('School requested that has manual data: %s. Not processing' % school._id)
return
update_similar_schools(school)
update_dfe_data(school)
setattr(school, 'lastRefreshed', datetime.now())
school.save()
def clear_queue(client):
while True:
try:
schoolId = client.lpop(UPDATE_Q)
if schoolId is None:
break
schoolId = schoolId.decode('utf-8')
try:
logging.info('Updating ' + schoolId)
school = School.get(schoolId)
update_school(school)
logging.info('Updated ' + schoolId)
except Exception as ex:
logging.error('Error updating data for school: ' + schoolId)
logging.exception(ex)
except Exception as ex:
logging.exception(ex)
def listen_for_requests():
client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD)
clear_queue(client)
ps = client.pubsub()
ps.subscribe(UPDATE_CHAN)
# Hang until we get a message
try:
for message in ps.listen():
try:
if message['type'] == 'message':
data = message['data'].decode('utf-8')
if data == 'update':
clear_queue(client)
except Exception as ex:
logging.exception(ex)
finally:
ps.close()
if __name__ == '__main__':
listen_for_requests()
|
mit
|
Python
|
493637ace6881defedee22971f3bc39fe9a5bd0a
|
Make it compatible with Bob
|
kif/freesas,kif/freesas,kif/freesas
|
freesas/test/__init__.py
|
freesas/test/__init__.py
|
#!usr/bin/env python
# coding: utf-8
__author__ = "Jérôme Kieffer"
__license__ = "MIT"
__date__ = "15/01/2021"
__copyright__ = "2015-2021, ESRF"
import sys
import unittest
from .test_all import suite
def run_tests():
"""Run test complete test_suite"""
mysuite = suite()
runner = unittest.TextTestRunner()
if not runner.run(mysuite).wasSuccessful():
print("Test suite failed")
return 1
else:
print("Test suite succeeded")
return 0
run = run_tests
if __name__ == '__main__':
sys.exit(run_tests())
|
#!usr/bin/env python
# coding: utf-8
__author__ = "Jérôme Kieffer"
__license__ = "MIT"
__date__ = "05/09/2017"
__copyright__ = "2015, ESRF"
import unittest
from .test_all import suite
def run():
runner = unittest.TextTestRunner()
return runner.run(suite())
if __name__ == '__main__':
run()
|
mit
|
Python
|
1697e0a20b14c89cf2db209ef03cb1dc551b14a1
|
Bump version
|
msanders/cider
|
cider/__init__.py
|
cider/__init__.py
|
from .core import Cider
__author__ = "Michael Sanders"
__version__ = "1.1"
__all__ = ["Cider"]
|
from .core import Cider
__author__ = "Michael Sanders"
__version__ = "1.0"
__all__ = ['Cider']
|
mit
|
Python
|
e0a8f8f6765a071ba71191b6e047b861812ec2f9
|
Update settings.py
|
DenfeldRobotics4009/2016_Freckles
|
utilities/settings.py
|
utilities/settings.py
|
import math
#Tilt pot setpoints .158
kMaxDown = .800
kMaxUp = kMaxDown - .590
kTop = kMaxUp + .050
kTopShot = .292
kTopShotAtBase = .281
kBottom = kMaxDown - .050
kShootLevel = .646
kShootAtBase = .528
kShootRamp = .400
kLongShot = .600
class Settings():
"""Robot mapping. Values that are changed often go here."""
#Numbers to be changed through drive station
num_precision_one = 0.80
num_precision_two = 0.50
num_scaling = 1.25
num_macro_timeout = 15
|
import math
#Tilt pot setpoints .158
kMaxDown = .79
kMaxUp = kMaxDown - .590
kTop = kMaxUp + .050
kTopShot = .292
kTopShotAtBase = .281
kBottom = kMaxDown - .050
kShootLevel = .646
kShootAtBase = .528
kShootRamp = .400
kLongShot = .600
class Settings():
"""Robot mapping. Values that are changed often go here."""
#Numbers to be changed through drive station
num_precision_one = 0.80
num_precision_two = 0.50
num_scaling = 1.25
num_macro_timeout = 15
|
bsd-3-clause
|
Python
|
555e76a62f0ec955932f95bec444e7c360f23241
|
use environment variable to set port
|
WeKeyPedia/metrics,WeKeyPedia/metrics,WeKeyPedia/metrics
|
server.py
|
server.py
|
import os
from flask import Flask
from flask import jsonify
from flask import render_template
from flask.ext.cors import CORS
from api import api
app = Flask(__name__)
cors = CORS(app)
if __name__ == "__main__":
port = os.environ.setdefault("PORT", "5000")
app.register_blueprint(api)
app.run(debug=True, port=int(port))
|
from flask import Flask
from flask import jsonify
from flask import render_template
from flask.ext.cors import CORS
from api import api
app = Flask(__name__)
cors = CORS(app)
if __name__ == "__main__":
app.register_blueprint(api)
app.run(debug=True, port=5100)
|
mit
|
Python
|
828c78566879412c6e2cc6981af9fa1adb5bdcf4
|
return result files, not just names
|
iclab/centinel-server,iclab/centinel-server,rpanah/centinel-server,ben-jones/centinel-server,ben-jones/centinel-server,rpanah/centinel-server,ben-jones/centinel-server,rpanah/centinel-server,lianke123321/centinel-server,gsathya/centinel-server,lianke123321/centinel-server,lianke123321/centinel-server,iclab/centinel-server
|
server.py
|
server.py
|
import config
import glob
import flask
import os
import json
app = flask.Flask(__name__)
@app.route("/versions/")
def get_recommended_versions():
return flask.jsonify({"versions" : config.recommended_versions})
@app.route("/results", methods=['GET', 'POST'])
def submit_result():
if flask.request.method == "POST":
pass
else:
# look in results directory
results = {}
for path in glob.glob(os.path.join(config.results_dir,'[!_]*.json')):
# get name of file and path
file_name, ext = os.path.splitext(os.path.basename(path))
with open(path) as result_file:
results[file_name] = json.load(result_file)
return flask.jsonify({"results":results})
@app.route("/experiments/")
@app.route("/experiments/<name>")
def get_experiment_list(name=None):
experiments = {}
# look for experiments in experiments directory
for path in glob.glob(os.path.join(config.experiments_dir,'[!_]*.py')):
# get name of file and path
file_name, ext = os.path.splitext(os.path.basename(path))
experiments[file_name] = path
if name == None:
return flask.jsonify({"experiments" : experiments.keys()})
if name in experiments:
return "Experiment found"
else:
return "Experiment not found"
@app.route("/clients/")
@app.route("/clients/<name>")
def get_clients(name=None):
clients = {}
with open(config.clients_file) as clients_fh:
clients = json.load(clients_fh)
if name == None:
return flask.jsonify(clients)
if name in clients:
return flask.jsonify(client[name])
else:
return "Client not found"
if __name__ == "__main__":
app.run(debug=True)
|
import config
import glob
import flask
import os
import json
app = flask.Flask(__name__)
@app.route("/versions/")
def get_recommended_versions():
return flask.jsonify({"versions" : config.recommended_versions})
@app.route("/results", methods=['GET', 'POST'])
def submit_result():
if flask.request.method == "POST":
pass
else:
# look in results directory
results = []
for path in glob.glob(os.path.join(config.results_dir,'[!_]*.json')):
results.append(path)
return flask.jsonify({"result_files":results})
@app.route("/experiments/")
@app.route("/experiments/<name>")
def get_experiment_list(name=None):
experiments = {}
for path in glob.glob(os.path.join(config.experiments_dir,'[!_]*.py')):
# get name of file and path
file_name, ext = os.path.splitext(os.path.basename(path))
experiments[file_name] = path
if name in experiments:
return "Experiment found"
else:
return flask.jsonify({"experiments" : experiments.keys()})
@app.route("/clients/")
@app.route("/clients/<name>")
def get_clients(name=None):
clients = {}
with open(config.clients_file) as clients_fh:
clients = json.load(clients_fh)
if name not in clients:
return flask.jsonify(clients)
else:
return flask.jsonify(client[name])
if __name__ == "__main__":
app.run(debug=True)
|
mit
|
Python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.