commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---
96d17640a1aef57f35f22620fe45028bf1c0f6fb
|
Fix error with Django admin urls
|
gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine
|
openquake/server/urls.py
|
openquake/server/urls.py
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.conf.urls import url, include
from django.views.generic.base import RedirectView
from openquake.server import views
urlpatterns = [
    url(r'^$', RedirectView.as_view(url='/engine/', permanent=True)),
    url(r'^v1/engine_version$', views.get_engine_version),
    url(r'^v1/engine_latest_version$', views.get_engine_latest_version),
    url(r'^v1/calc/', include('openquake.server.v1.calc_urls')),
    url(r'^v1/valid/', views.validate_nrml),
    url(r'^v1/available_gsims$', views.get_available_gsims),
    url(r'^v1/on_same_fs$', views.on_same_fs, name="on_same_fs"),
    url(r'^engine/?$', views.web_engine, name="index"),
    url(r'^engine/(\d+)/outputs$',
        views.web_engine_get_outputs, name="outputs"),
    url(r'^engine/license$', views.license,
        name="license"),
]
for app in settings.STANDALONE_APPS:
    app_name = app.split('_')[1]
    urlpatterns.append(url(r'^%s/' % app_name, include('%s.urls' % app,
                                                       namespace='%s' % app_name)))
if settings.LOCKDOWN:
    from django.contrib import admin
    from django.contrib.auth.views import login, logout
    admin.autodiscover()
    urlpatterns += [
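        # Django >= 1.10: admin.site.urls is passed to url() directly;
        # wrapping it in include() (as in old_contents below) raises an error.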
        url(r'^admin/', admin.site.urls),
        url(r'^accounts/login/$', login,
            {'template_name': 'account/login.html'}, name="login"),
        url(r'^accounts/logout/$', logout,
            {'template_name': 'account/logout.html'}, name="logout"),
        url(r'^accounts/ajax_login/$', views.ajax_login),
        url(r'^accounts/ajax_logout/$', views.ajax_logout),
    ]
# To enable gunicorn debug without Nginx (to serve static files)
# uncomment the following lines
# from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# urlpatterns += staticfiles_urlpatterns()
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.conf.urls import url, include
from django.views.generic.base import RedirectView
from openquake.server import views
urlpatterns = [
    url(r'^$', RedirectView.as_view(url='/engine/', permanent=True)),
    url(r'^v1/engine_version$', views.get_engine_version),
    url(r'^v1/engine_latest_version$', views.get_engine_latest_version),
    url(r'^v1/calc/', include('openquake.server.v1.calc_urls')),
    url(r'^v1/valid/', views.validate_nrml),
    url(r'^v1/available_gsims$', views.get_available_gsims),
    url(r'^v1/on_same_fs$', views.on_same_fs, name="on_same_fs"),
    url(r'^engine/?$', views.web_engine, name="index"),
    url(r'^engine/(\d+)/outputs$',
        views.web_engine_get_outputs, name="outputs"),
    url(r'^engine/license$', views.license,
        name="license"),
]
for app in settings.STANDALONE_APPS:
    app_name = app.split('_')[1]
    urlpatterns.append(url(r'^%s/' % app_name, include('%s.urls' % app,
                                                       namespace='%s' % app_name)))
if settings.LOCKDOWN:
    from django.contrib import admin
    from django.contrib.auth.views import login, logout
    admin.autodiscover()
    urlpatterns += [
        url(r'^admin/', include(admin.site.urls)),
        url(r'^accounts/login/$', login,
            {'template_name': 'account/login.html'}, name="login"),
        url(r'^accounts/logout/$', logout,
            {'template_name': 'account/logout.html'}, name="logout"),
        url(r'^accounts/ajax_login/$', views.ajax_login),
        url(r'^accounts/ajax_logout/$', views.ajax_logout),
    ]
# To enable gunicorn debug without Nginx (to serve static files)
# uncomment the following lines
# from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# urlpatterns += staticfiles_urlpatterns()
|
agpl-3.0
|
Python
|
b5416d4e68e273ce7164fc177d99f7c5b29e8ca4
|
Handle OSError on sendall, re-connect socket when error occurred.
|
scarchik/opentsdb-py
|
opentsdb/tsdb_connect.py
|
opentsdb/tsdb_connect.py
|
import threading
import logging
import socket
import time
logger = logging.getLogger('opentsdb-py')
class TSDBConnect:
    def __init__(self, host: str, port: int, check_tsdb_alive: bool=False):
        self.tsdb_host = host
        self.tsdb_port = int(port)
        if check_tsdb_alive:
            self.is_alive(raise_error=True)
        self._connect = None
        self.stopped = threading.Event()

    def is_alive(self, timeout=3, raise_error=False) -> bool:
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(timeout)
            sock.connect((self.tsdb_host, self.tsdb_port))
            sock.close()
        except (ConnectionRefusedError, socket.timeout):
            if raise_error is True:
                raise
            return False
        else:
            return True

    @property
    def connect(self) -> socket.socket:
        if not self._connect or getattr(self._connect, '_closed', False) is True:
            logger.debug("Connect to OpenTSDB: %s:%s", self.tsdb_host, self.tsdb_port)
            self.stopped.clear()
            self._make_connection()
        return self._connect

    def _make_connection(self, timeout=2):
        self._connect = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._connect.settimeout(timeout)
        attempt = 0
        while not self.stopped.is_set():
            try:
                self._connect.connect((self.tsdb_host, self.tsdb_port))
                return
            except (ConnectionRefusedError, socket.timeout):
                time.sleep(min(15, 2 ** attempt))
                attempt += 1

    def disconnect(self):
        logger.debug("Disconnecting from %s:%s", self.tsdb_host, self.tsdb_port)
        self.stopped.set()
        if self._connect:
            self._connect.close()
        self._connect = None

    def sendall(self, line: bytes):
        try:
            self.connect.sendall(line)
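        # Note: on Python 3, IOError is an alias of OSError and BrokenPipeError
        # subclasses it, so catching OSError alone would cover all three.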
        except (BrokenPipeError, IOError, OSError) as error:
            logger.error("Close connection to handle exception: %s", error)
            self._connect.close()
|
import threading
import logging
import socket
import time
logger = logging.getLogger('opentsdb-py')
class TSDBConnect:
    def __init__(self, host: str, port: int, check_tsdb_alive: bool=False):
        self.tsdb_host = host
        self.tsdb_port = int(port)
        if check_tsdb_alive:
            self.is_alive(raise_error=True)
        self._connect = None
        self.stopped = threading.Event()

    def is_alive(self, timeout=3, raise_error=False) -> bool:
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(timeout)
            sock.connect((self.tsdb_host, self.tsdb_port))
            sock.close()
        except (ConnectionRefusedError, socket.timeout):
            if raise_error is True:
                raise
            return False
        else:
            return True

    @property
    def connect(self) -> socket.socket:
        if not self._connect or getattr(self._connect, '_closed', False) is True:
            logger.debug("Connect to OpenTSDB: %s:%s", self.tsdb_host, self.tsdb_port)
            self.stopped.clear()
            self._make_connection()
        return self._connect

    def _make_connection(self, timeout=2):
        self._connect = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._connect.settimeout(timeout)
        attempt = 0
        while not self.stopped.is_set():
            try:
                self._connect.connect((self.tsdb_host, self.tsdb_port))
                return
            except (ConnectionRefusedError, socket.timeout):
                time.sleep(min(15, 2 ** attempt))
                attempt += 1

    def disconnect(self):
        logger.debug("Disconnecting from %s:%s", self.tsdb_host, self.tsdb_port)
        self.stopped.set()
        if self._connect:
            self._connect.close()
        self._connect = None

    def sendall(self, line: bytes):
        try:
            self.connect.sendall(line)
        except (BrokenPipeError, IOError) as error:
            logger.error("Close connection to handle exception: %s", error)
            self._connect.close()
|
mit
|
Python
|
207fbc64fc9001abc62d0e687beefb3e3a25ef73
|
fix exception error
|
Hanaasagi/sorator
|
orator/exceptions/orm.py
|
orator/exceptions/orm.py
|
# -*- coding: utf-8 -*-
class ModelNotFound(RuntimeError):

    def __init__(self, model):
        self._model = model
        self.message = 'No query results found for model [%s]' % self._model.__name__

    def __str__(self):
        return self.message


class MassAssignmentError(RuntimeError):
    pass


class RelatedClassNotFound(RuntimeError):

    def __init__(self, related):
        self._related = related
        self.message = 'The related class for "%s" does not exists' % related

    def __str__(self):
        return self.message


class ValidationError(ValueError):

    detail = 'Invalid input.'

    def __init__(self, detail=None):
        if detail is not None:
            self.detail = detail

    def __str__(self):
        return self.detail
|
# -*- coding: utf-8 -*-
class ModelNotFound(RuntimeError):

    def __init__(self, model):
        self._model = model
        self.message = 'No query results found for model [%s]' % self._model.__name__

    def __str__(self):
        return self.message


class MassAssignmentError(RuntimeError):
    pass


class RelatedClassNotFound(RuntimeError):

    def __init__(self, related):
        self._related = related
        self.message = 'The related class for "%s" does not exists' % related

    def __str__(self):
        return self.message


class ValidationError(ValueError):

    default_detail = 'Invalid input.'

    def __init__(self, detail=None):
        if detail is None:
            self.detail = self.default_detail if detail is None else detail
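        # Bug fixed by this commit: when a caller passes a non-None detail,
        # self.detail is never assigned, so __str__ raises AttributeError.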
    def __str__(self):
        return self.detail
|
mit
|
Python
|
a26ad106dba7ce2f00a0b9438629abf32a15a061
|
Improve reliability of test by clearing cache
|
tkf/orgviz
|
orgviz/tests/test_web.py
|
orgviz/tests/test_web.py
|
import os
import tempfile
import shutil
import unittest
import textwrap
import json
import datetime
from .. import web
TMP_PREFIX = 'orgviz-test-'
def totimestamp(dt):
    zero = datetime.datetime.fromtimestamp(0)
    return (dt - zero).total_seconds()


class TestWebEventsData(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp(prefix=TMP_PREFIX)
        cls.org_file = os.path.join(cls.tmpdir, 'test.org')
        web.app.config['ORG_FILE_COMMON'] = [cls.org_file]
        cls.app = web.app.test_client()

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    def setUp(self):
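        # Clearing the cache before each test keeps a response cached by one
        # test from leaking into the next (the point of this commit).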
        web.cache.clear()

    def write_org_file(self, text):
        with open(self.org_file, 'w') as f:
            f.write(textwrap.dedent(text))

    def test_single_event(self):
        self.write_org_file("""
        * Node title
          SCHEDULED: <2012-10-23 Tue>
        """)
        rv = self.app.get('/events_data?start=1349042400&end=1352674800')
        events_data = json.loads(rv.data)
        self.assertEqual(len(events_data), 1)
        self.assertEqual(events_data[0]['title'], 'Node title')

    def get_events_data(self, start, end):
        start = totimestamp(datetime.datetime(*start))
        end = totimestamp(datetime.datetime(*end))
        return self.app.get(
            '/events_data?start={0:.0f}&end={1:.0f}'.format(start, end))

    def test_start_end(self):
        self.write_org_file("""
        * Node 1
          SCHEDULED: <2012-10-21 Tue>
        * Node 2
          SCHEDULED: <2012-10-22 Wed>
        * Node 3
          SCHEDULED: <2012-10-24 Fri>
        """)
        # FIXME: clarify boundary condition 2012-10-23 in Node 3 does not work!
        rv = self.get_events_data(start=(2012, 10, 20), end=(2012, 10, 23))
        events_data = json.loads(rv.data)
        self.assertEqual(len(events_data), 2)
        self.assertEqual(events_data[0]['title'], 'Node 1')
        self.assertEqual(events_data[1]['title'], 'Node 2')
|
import os
import tempfile
import shutil
import unittest
import textwrap
import json
import datetime
from .. import web
TMP_PREFIX = 'orgviz-test-'
def totimestamp(dt):
    zero = datetime.datetime.fromtimestamp(0)
    return (dt - zero).total_seconds()


class TestWebEventsData(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp(prefix=TMP_PREFIX)
        cls.org_file = os.path.join(cls.tmpdir, 'test.org')
        web.app.config['ORG_FILE_COMMON'] = [cls.org_file]
        cls.app = web.app.test_client()

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    def write_org_file(self, text):
        with open(self.org_file, 'w') as f:
            f.write(textwrap.dedent(text))

    def test_single_event(self):
        self.write_org_file("""
        * Node title
          SCHEDULED: <2012-10-23 Tue>
        """)
        rv = self.app.get('/events_data?start=1349042400&end=1352674800')
        events_data = json.loads(rv.data)
        self.assertEqual(len(events_data), 1)
        self.assertEqual(events_data[0]['title'], 'Node title')

    def get_events_data(self, start, end):
        start = totimestamp(datetime.datetime(*start))
        end = totimestamp(datetime.datetime(*end))
        return self.app.get(
            '/events_data?start={0:.0f}&end={1:.0f}'.format(start, end))

    def test_start_end(self):
        self.write_org_file("""
        * Node 1
          SCHEDULED: <2012-10-21 Tue>
        * Node 2
          SCHEDULED: <2012-10-22 Wed>
        * Node 3
          SCHEDULED: <2012-10-24 Fri>
        """)
        # FIXME: clarify boundary condition 2012-10-23 in Node 3 does not work!
        rv = self.get_events_data(start=(2012, 10, 20), end=(2012, 10, 23))
        events_data = json.loads(rv.data)
        self.assertEqual(len(events_data), 2)
        self.assertEqual(events_data[0]['title'], 'Node 1')
        self.assertEqual(events_data[1]['title'], 'Node 2')
|
mit
|
Python
|
6147a5229f67874179f371ded2e835c318a2bd56
|
correct prob formulation for secondary user beamforming
|
cvxgrp/qcqp
|
examples/secondary_user_beamforming.py
|
examples/secondary_user_beamforming.py
|
#!/usr/bin/python
# Secondary user multicast beamforming
# minimize ||w||^2
# subject to |h_i^H w|^2 >= tau
# |g_i^H w|^2 <= eta
# with variable w in complex^n.
# Data vectors h_i and g_i are also in complex^n.
# The script below expands out the complex part and
# works with real numbers only.
import numpy as np
import cvxpy as cvx
import qcqp
# n, m, l: 100, 30, 10
n = 10
m = 3
l = 2
tau = 10
eta = 1
np.random.seed(1)
HR = np.random.randn(m, n)/np.sqrt(2);
HI = np.random.randn(m, n)/np.sqrt(2);
H1 = np.hstack((HR, HI))
H2 = np.hstack((-HI, HR))
GR = np.random.randn(l, n)/np.sqrt(2);
GI = np.random.randn(l, n)/np.sqrt(2);
G1 = np.hstack((GR, GI))
G2 = np.hstack((-GI, GR))
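# With w = wR + j*wI stacked as [wR; wI], H1*w and H2*w give the real and
# imaginary parts of h_i^H w, so |h_i^H w|^2 = (H1*w)^2 + (H2*w)^2 row-wise
# (likewise for G1, G2); this is how the complex constraints become real ones.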
w = cvx.Variable(2*n)
obj = cvx.Minimize(cvx.sum_squares(w))
cons = [
    cvx.square(H1*w) + cvx.square(H2*w) >= tau,
    cvx.square(G1*w) + cvx.square(G2*w) <= eta
]
prob = cvx.Problem(obj, cons)
# SDP-based lower bound
lb = prob.solve(method='sdp-relax', solver=cvx.MOSEK)
print ('Lower bound: %.3f' % lb)
# Upper bounds
print ('Upper bounds:')
ub_admm = prob.solve(method='qcqp-admm', use_sdp=False, solver=cvx.MOSEK, num_samples=10, rho=np.sqrt(m+l))
print (' Nonconvex ADMM: %.3f' % ub_admm)
ub_dccp = prob.solve(method='qcqp-dccp', use_sdp=False, solver=cvx.MOSEK, num_samples=10, tau=1)
print (' Convex-concave programming: %.3f' % ub_dccp)
ub_cd = prob.solve(method='coord-descent', use_sdp=False, solver=cvx.MOSEK, num_samples=10)
print (' Coordinate descent: %.3f' % ub_cd)
|
#!/usr/bin/python
# Secondary user multicast beamforming
# minimize ||w||^2
# subject to |h_i^H w|^2 >= tau
# |g_i^H w|^2 <= eta
# with variable w in complex^n
import numpy as np
import cvxpy as cvx
import qcqp
n = 10
m = 8
l = 2
tau = 10
eta = 1
np.random.seed(1)
H = np.random.randn(m, n)
G = np.random.randn(l, n)
w = cvx.Variable(n)
obj = cvx.Minimize(cvx.sum_squares(w))
cons = [cvx.square(H*w) >= tau, cvx.square(G*w) <= eta]
prob = cvx.Problem(obj, cons)
# SDP-based lower bound
lb = prob.solve(method='sdp-relax', solver=cvx.MOSEK)
print ('Lower bound: %.3f' % lb)
# Upper bounds
ub_cd = prob.solve(method='coord-descent', solver=cvx.MOSEK, num_samples=10)
ub_admm = prob.solve(method='qcqp-admm', solver=cvx.MOSEK, num_samples=10)
ub_dccp = prob.solve(method='qcqp-dccp', solver=cvx.MOSEK, num_samples=10, tau=1)
print ('Lower bound: %.3f' % lb)
print ('Upper bounds:')
print (' Coordinate descent: %.3f' % ub_cd)
print (' Nonconvex ADMM: %.3f' % ub_admm)
print (' Convex-concave programming: %.3f' % ub_dccp)
|
mit
|
Python
|
183e08be99fae2ba521c5fb60b7205d3c3c5b520
|
Add grappelli styles to make inlines collapsible
|
Princeton-CDH/winthrop-django,Princeton-CDH/winthrop-django,Princeton-CDH/winthrop-django
|
winthrop/books/admin.py
|
winthrop/books/admin.py
|
from django.contrib import admin
from winthrop.common.admin import NamedNotableAdmin
from .models import Subject, Language, Publisher, OwningInstitution, \
    Book, Catalogue, BookSubject, BookLanguage, CreatorType, Creator, \
    PersonBook, PersonBookRelationshipType


class NamedNotableBookCount(NamedNotableAdmin):
    list_display = NamedNotableAdmin.list_display + ('book_count', )


class OwningInstitutionAdmin(admin.ModelAdmin):
    list_display = ('short_name', 'name', 'place', 'has_notes', 'book_count')
    fields = ('name', 'short_name', 'contact_info', 'place', 'notes')
    search_fields = ('name', 'short_name', 'contact_info', 'notes')


class CollapsibleTabularInline(admin.TabularInline):
    'Django admin tabular inline with grappelli collapsible classes added'
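    # grappelli reads these CSS classes: grp-collapse makes the inline
    # collapsible and grp-open renders it expanded by default.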
    classes = ('grp-collapse grp-open',)


class CatalogueInline(CollapsibleTabularInline):
    model = Catalogue
    fields = ('institution', 'call_number', 'start_year', 'end_year',
              'is_current', 'is_sammelband', 'bound_order', 'notes')


class SubjectInline(CollapsibleTabularInline):
    model = BookSubject
    fields = ('subject', 'is_primary', 'notes')


class LanguageInline(CollapsibleTabularInline):
    model = BookLanguage
    fields = ('language', 'is_primary', 'notes')


class CreatorInline(CollapsibleTabularInline):
    model = Creator
    fields = ('creator_type', 'person', 'notes')


class BookAdmin(admin.ModelAdmin):
    list_display = ('short_title', 'author_names', 'pub_year',
                    'catalogue_call_numbers', 'is_extant', 'is_annotated',
                    'is_digitized', 'has_notes')
    # NOTE: fields are specified here so that notes input will be displayed last
    fields = ('title', 'short_title', 'original_pub_info', 'publisher',
              'pub_place', 'pub_year', 'is_extant', 'is_annotated', 'is_digitized',
              'red_catalog_number', 'ink_catalog_number', 'pencil_catalog_number',
              'dimensions', 'notes')
    inlines = [CreatorInline, LanguageInline, SubjectInline, CatalogueInline]
    list_filter = ('subjects', 'languages')
admin.site.register(Subject, NamedNotableBookCount)
admin.site.register(Language, NamedNotableBookCount)
admin.site.register(Publisher, NamedNotableBookCount)
admin.site.register(OwningInstitution, OwningInstitutionAdmin)
admin.site.register(Book, BookAdmin)
admin.site.register(CreatorType, NamedNotableAdmin)
# NOTE: these will probably be inlines, but register for testing for now
admin.site.register(PersonBook)
admin.site.register(PersonBookRelationshipType)
|
from django.contrib import admin
from winthrop.common.admin import NamedNotableAdmin
from .models import Subject, Language, Publisher, OwningInstitution, \
    Book, Catalogue, BookSubject, BookLanguage, CreatorType, Creator, \
    PersonBook, PersonBookRelationshipType


class NamedNotableBookCount(NamedNotableAdmin):
    list_display = NamedNotableAdmin.list_display + ('book_count', )


class OwningInstitutionAdmin(admin.ModelAdmin):
    list_display = ('short_name', 'name', 'place', 'has_notes', 'book_count')
    fields = ('name', 'short_name', 'contact_info', 'place', 'notes')
    search_fields = ('name', 'short_name', 'contact_info', 'notes')


class CatalogueInline(admin.TabularInline):
    model = Catalogue
    fields = ('institution', 'call_number', 'start_year', 'end_year',
              'is_current', 'is_sammelband', 'bound_order', 'notes')


class SubjectInline(admin.TabularInline):
    model = BookSubject
    fields = ('subject', 'is_primary', 'notes')


class LanguageInline(admin.TabularInline):
    model = BookLanguage
    fields = ('language', 'is_primary', 'notes')


class CreatorInline(admin.TabularInline):
    model = Creator
    fields = ('creator_type', 'person', 'notes')


class BookAdmin(admin.ModelAdmin):
    list_display = ('short_title', 'author_names', 'pub_year',
                    'catalogue_call_numbers', 'is_extant', 'is_annotated',
                    'is_digitized')
    # NOTE: fields are specified here so that notes input will be displayed last
    fields = ('title', 'short_title', 'original_pub_info', 'publisher',
              'pub_place', 'pub_year', 'is_extant', 'is_annotated', 'is_digitized',
              'red_catalog_number', 'ink_catalog_number', 'pencil_catalog_number',
              'dimensions', 'notes')
    inlines = [CreatorInline, LanguageInline, SubjectInline, CatalogueInline]
    list_filter = ('subjects', 'languages')
admin.site.register(Subject, NamedNotableBookCount)
admin.site.register(Language, NamedNotableBookCount)
admin.site.register(Publisher, NamedNotableBookCount)
admin.site.register(OwningInstitution, OwningInstitutionAdmin)
admin.site.register(Book, BookAdmin)
admin.site.register(Catalogue)
admin.site.register(CreatorType)
# NOTE: these will probably be inlines, but register for testing for now
admin.site.register(Creator)
admin.site.register(PersonBook)
admin.site.register(PersonBookRelationshipType)
|
apache-2.0
|
Python
|
a40c80eea715626616ef280b87de6bbcc7b73b7f
|
use relative imports
|
mdietrichc2c/vertical-ngo,yvaucher/vertical-ngo,jorsea/vertical-ngo,gurneyalex/vertical-ngo,jorsea/vertical-ngo,jgrandguillaume/vertical-ngo
|
transport_information/model/__init__.py
|
transport_information/model/__init__.py
|
# -*- coding: utf-8 -*-
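# Explicit relative imports work on Python 3; the bare intra-package imports
# in old_contents below are implicit relative imports, which Python 3 removed.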
from . import transport_mode
from . import transport_vehicle
|
# -*- coding: utf-8 -*-
import transport_mode
import transport_vehicle
|
agpl-3.0
|
Python
|
08cf82852ab19417f9521af45f2fb296d9e223d6
|
Update batch processing example
|
takluyver/nbparameterise
|
batch_eg.py
|
batch_eg.py
|
"""Example of using nbparameterise API to substitute variables in 'batch mode'
"""
from nbparameterise import code
import nbformat
from nbconvert.preprocessors.execute import ExecutePreprocessor
from nbconvert.exporters.notebook import NotebookExporter
from nbconvert.writers import FilesWriter
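# nbformat and nbconvert have been standalone packages since the IPython 4.0
# split into Jupyter; the IPython.* import paths in old_contents no longer exist.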
stock_names = ['YHOO', 'MSFT', 'GOOG']
with open("Stock display.ipynb") as f:
    nb = nbformat.read(f, as_version=4)
definitions = code.extract_parameters(nb)
for name in stock_names:
print("Rendering for stock", name)
defined = []
for inp in definitions:
if inp.name =='stock':
# Fill in the current value
defined.append(inp.with_value(name))
else:
defined.append(inp)
code.replace_definitions(nb, defined)
# Run
resources = {}
nb, resources = ExecutePreprocessor().preprocess(nb, resources)
# Save
output, resources = NotebookExporter().from_notebook_node(nb, resources)
nbname = "Stock display %s" % name
FilesWriter().write(output, resources, notebook_name=nbname)
|
"""Example of using nbparameterise API to substitute variables in 'batch mode'
"""
from nbparameterise import code
from IPython.nbformat import current as nbformat
from IPython.nbconvert.preprocessors.execute import ExecutePreprocessor
from IPython.nbconvert.exporters.notebook import NotebookExporter
from IPython.nbconvert.writers import FilesWriter
stock_names = ['YHOO', 'MSFT', 'GOOG']
with open("Stock display.ipynb") as f:
    nb = nbformat.read(f, 'ipynb')
definitions = code.extract_parameters(nb)
for name in stock_names:
print("Rendering for stock", name)
defined = []
for inp in definitions:
if inp.name =='stock':
# Fill in the current value
defined.append(inp.with_value(name))
else:
defined.append(inp)
code.replace_definitions(nb, defined)
# Run
resources = {}
nb, resources = ExecutePreprocessor().preprocess(nb, resources)
# Save
output, resources = NotebookExporter().from_notebook_node(nb, resources)
nbname = "Stock display %s" % name
FilesWriter().write(output, resources, notebook_name=nbname)
|
mit
|
Python
|
97939c334543d9ca4d717a7bc75ae30e848c8a09
|
Replace native.git_repository with skylark rule
|
GerritCodeReview/plugins_javamelody,GerritCodeReview/plugins_javamelody,GerritCodeReview/plugins_javamelody
|
bazlets.bzl
|
bazlets.bzl
|
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
NAME = "com_googlesource_gerrit_bazlets"
def load_bazlets(
        commit,
        local_path = None):
    if not local_path:
        git_repository(
            name = NAME,
            remote = "https://gerrit.googlesource.com/bazlets",
            commit = commit,
        )
    else:
        native.local_repository(
            name = NAME,
            path = local_path,
        )
|
NAME = "com_googlesource_gerrit_bazlets"
def load_bazlets(
        commit,
        local_path = None):
    if not local_path:
        native.git_repository(
            name = NAME,
            remote = "https://gerrit.googlesource.com/bazlets",
            commit = commit,
        )
    else:
        native.local_repository(
            name = NAME,
            path = local_path,
        )
|
apache-2.0
|
Python
|
36cc738308b8ae4435d6becac38fa3c4e96dc491
|
Remove useless code
|
patchboard/patchboard-py
|
patchboard/patchboard.py
|
patchboard/patchboard.py
|
# patchboard.py
#
# Copyright 2014 BitVault.
from __future__ import print_function
import json
from resource import Resource
from api import API
from schema_manager import SchemaManager
from client import Client
from util import to_camel_case
def discover(url):
"""
Retrieve the API definition from the given URL and construct
a Patchboard to interface with it.
"""
# Retrieve JSON data from server
# Treat url like a file and read mock JSON for now
with open(url, u"r") as file:
api_spec = json.load(file)
return Patchboard(api_spec)
class Patchboard(object):
"""
The primary client interface to a patchboard server.
"""
def __init__(self, api_spec):
self.api = API(api_spec)
self.schema_manager = SchemaManager(self.api.schemas)
self.endpoint_classes = self.create_endpoint_classes()
client = self.spawn()
# Appears to be unused
#self.resources = client.resources
self.context = client.context
def create_endpoint_classes(self):
classes = {}
for resource_name, mapping in self.api.mappings.iteritems():
if resource_name not in classes:
schema = self.schema_manager.find_name(resource_name)
resource_def = mapping.resource
cls = self.create_class(
resource_name,
resource_def,
schema,
mapping)
classes[resource_name] = cls
return classes
def create_class(self, resource_name, definition, schema, mapping):
# Cannot use unicode for class names
class_name = to_camel_case(str(resource_name))
class_parents = (Resource,)
class_dict = {}
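        # type() builds the class directly from name/bases/dict; the old
        # version (below) exec'd a stub class body for no benefit.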
        cls = type(class_name, class_parents, class_dict)
        return cls

    def spawn(self, context={}):
        return Client(context, self.api, self.endpoint_classes)
|
# patchboard.py
#
# Copyright 2014 BitVault.
from __future__ import print_function
import json
from api import API
from schema_manager import SchemaManager
from client import Client
from util import to_camel_case
def discover(url):
"""
Retrieve the API definition from the given URL and construct
a Patchboard to interface with it.
"""
# Retrieve JSON data from server
# Treat url like a file and read mock JSON for now
with open(url, u"r") as file:
api_spec = json.load(file)
return Patchboard(api_spec)
class Patchboard(object):
"""
The primary client interface to a patchboard server.
"""
def __init__(self, api_spec):
self.api = API(api_spec)
self.schema_manager = SchemaManager(self.api.schemas)
self.endpoint_classes = self.create_endpoint_classes()
client = self.spawn()
# Appears to be unused
#self.resources = client.resources
self.context = client.context
def create_endpoint_classes(self):
classes = {}
for resource_name, mapping in self.api.mappings.iteritems():
if resource_name not in classes:
schema = self.schema_manager.find_name(resource_name)
resource_def = mapping.resource
cls = self.create_class(
resource_name,
resource_def,
schema,
mapping)
classes[resource_name] = cls
return classes
def create_class(self, resource_name, definition, schema, mapping):
# Cannot use unicode for class names
class_name = to_camel_case(str(resource_name))
class_parents = (object,)
# TODO: fill in stub class definition
class_body = """
def __init__(self):
pass
"""
class_dict = {}
exec(class_body, globals(), class_dict)
cls = type(class_name, class_parents, class_dict)
return cls
def spawn(self, context={}):
return Client(context, self.api, self.endpoint_classes)
|
mit
|
Python
|
382a715ec78d9bcc53e949e9536bdb1077d3ed98
|
Update docstring
|
thombashi/pathvalidate
|
pathvalidate/__init__.py
|
pathvalidate/__init__.py
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""
import re
import dataproperty
__INVALID_PATH_CHARS = '\:*?"<>|'
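# Essentially the characters Windows disallows in filenames; the leading
# '\:' is a literal backslash plus colon ('\:' is not a recognized escape).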
def validate_filename(filename):
"""
:param str filename: Filename to validate.
:raises ValueError:
If the ``filename`` is empty or includes invalid char(s):
(``\``, ``:``, ``*``, ``?``, ``"``, ``<``, ``>``, ``|``).
"""
if dataproperty.is_empty_string(filename):
raise ValueError("null path")
match = re.search("[%s]" % (
re.escape(__INVALID_PATH_CHARS)), filename)
if match is not None:
raise ValueError(
"invalid char found in the file path: '%s'" % (
re.escape(match.group())))
def sanitize_filename(filename, replacement_text=""):
"""
Replace invalid chars within the ``filename`` with
the ``replacement_text``.
:param str filename: Filename to validate.
:param str replacement_text: Replacement text.
:return: A replacement string.
:rtype: str
"""
filename = filename.strip()
re_replace = re.compile("[%s]" % re.escape(__INVALID_PATH_CHARS))
return re_replace.sub(replacement_text, filename)
def replace_symbol(filename, replacement_text=""):
fname = sanitize_filename(filename, replacement_text)
if fname is None:
return None
re_replace = re.compile("[%s]" % re.escape(" ,.%()/"))
return re_replace.sub(replacement_text, fname)
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""
import re
import dataproperty
__INVALID_PATH_CHARS = '\:*?"<>|'
def validate_filename(filename):
"""
:param str filename: Filename to validate.
:raises ValueError:
If ``filename`` is empty or include invalid char
(``\``, ``:``, ``*``, ``?``, ``"``, ``<``, ``>``, ``|``).
"""
if dataproperty.is_empty_string(filename):
raise ValueError("null path")
match = re.search("[%s]" % (
re.escape(__INVALID_PATH_CHARS)), filename)
if match is not None:
raise ValueError(
"invalid char found in the file path: '%s'" % (
re.escape(match.group())))
def sanitize_filename(filename, replacement_text=""):
filename = filename.strip()
re_replace = re.compile("[%s]" % re.escape(__INVALID_PATH_CHARS))
return re_replace.sub(replacement_text, filename)
def replace_symbol(filename, replacement_text=""):
fname = sanitize_filename(filename, replacement_text)
if fname is None:
return None
re_replace = re.compile("[%s]" % re.escape(" ,.%()/"))
return re_replace.sub(replacement_text, fname)
|
mit
|
Python
|
798639f4d22bec341667a4067db7a18095d36beb
|
Add missing doc string values.
|
christabor/flask_jsondash,christabor/flask_jsondash,christabor/flask_jsondash
|
flask_jsondash/data_utils/wordcloud.py
|
flask_jsondash/data_utils/wordcloud.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
flask_jsondash.data_utils.wordcloud
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Utilities for working with wordcloud formatted data.
:copyright: (c) 2016 by Chris Tabor.
:license: MIT, see LICENSE for more details.
"""
from collections import Counter
# Py2/3 compat.
try:
    _unicode = unicode
except NameError:
    _unicode = str
# NLTK stopwords
stopwords = [
    'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your',
    'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she',
    'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their',
    'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that',
    'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
    'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an',
    'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of',
    'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into',
    'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from',
    'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again',
    'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why',
    'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other',
    'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than',
    'too', 'very', 's', 't', 'can', 'will', 'just', 'don', 'should', 'now',
]
def get_word_freq_distribution(words):
"""Get the counted word frequency distribution of all words.
Arg:
words (list): A list of strings indicating words.
Returns:
collections.Counter: The Counter object with word frequencies.
"""
return Counter([w for w in words if w not in stopwords])
def format_4_wordcloud(words, size_multiplier=2):
"""Format words in a way suitable for wordcloud plugin.
Args:
words (list): A list of strings indicating words.
size_multiplier (int, optional): The size multiplier to scale
word sizing. Can improve visual display of word cloud.
Returns:
list: A list of dicts w/ appropriate keys.
"""
return [
{'text': word, 'size': size * size_multiplier}
for (word, size) in words if word
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
flask_jsondash.data_utils.wordcloud
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Utilities for working with wordcloud formatted data.
:copyright: (c) 2016 by Chris Tabor.
:license: MIT, see LICENSE for more details.
"""
from collections import Counter
# Py2/3 compat.
try:
    _unicode = unicode
except NameError:
    _unicode = str
# NLTK stopwords
stopwords = [
    'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your',
    'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she',
    'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their',
    'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that',
    'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
    'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an',
    'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of',
    'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into',
    'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from',
    'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again',
    'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why',
    'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other',
    'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than',
    'too', 'very', 's', 't', 'can', 'will', 'just', 'don', 'should', 'now',
]
def get_word_freq_distribution(words):
"""Get the counted word frequency distribution of all words.
Arg:
words (list): A list of strings indicating words.
Returns:
collections.Counter: The Counter object with word frequencies.
"""
return Counter([w for w in words if w not in stopwords])
def format_4_wordcloud(words, size_multiplier=2):
"""Format words in a way suitable for wordcloud plugin.
Args:
words (list): A list of strings indicating words.
size_multiplier (int, optional): The size multiplier to scale
word sizing. Can improve visual display of word cloud.
"""
return [
{'text': word, 'size': size * size_multiplier}
for (word, size) in words if word
]
|
mit
|
Python
|
253361e56ad2b1e331691f0bf3c9010c22c0c9aa
|
Fix tests
|
Charcoal-SE/SmokeDetector,Charcoal-SE/SmokeDetector
|
test/test_blacklists.py
|
test/test_blacklists.py
|
#!/usr/bin/env python3
from glob import glob
from helpers import only_blacklists_changed
def test_blacklist_integrity():
    for bl_file in glob('bad_*.txt') + glob('blacklisted_*.txt'):
        with open(bl_file, 'r') as lines:
            seen = dict()
            for lineno, line in enumerate(lines, 1):
                if line.endswith('\r\n'):
                    raise(ValueError('{0}:{1}:DOS line ending'.format(bl_file, lineno)))
                if not line.endswith('\n'):
                    raise(ValueError('{0}:{1}:No newline'.format(bl_file, lineno)))
                if line == '\n':
                    raise(ValueError('{0}:{1}:Empty line'.format(bl_file, lineno)))
                if line in seen:
                    raise(ValueError('{0}:{1}:Duplicate entry {2} (also on line {3})'.format(
                        bl_file, lineno, line.rstrip('\n'), seen[line])))
                seen[line] = lineno
def test_blacklist_pull_diff():
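    # The fixtures were swapped by this commit so each matches its assertion:
    # a diff touching only blacklist *.txt files should pass, while one that
    # touches Python files should not.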
    only_blacklists_diff = """watched_keywords.txt
bad_keywords.txt
blacklisted_websites.txt"""
    assert only_blacklists_changed(only_blacklists_diff)

    mixed_files_diff = """helpers.py
test/test_blacklists.py
blacklisted_usernames.txt"""
    assert not only_blacklists_changed(mixed_files_diff)
|
#!/usr/bin/env python3
from glob import glob
from helpers import only_blacklists_changed
def test_blacklist_integrity():
    for bl_file in glob('bad_*.txt') + glob('blacklisted_*.txt'):
        with open(bl_file, 'r') as lines:
            seen = dict()
            for lineno, line in enumerate(lines, 1):
                if line.endswith('\r\n'):
                    raise(ValueError('{0}:{1}:DOS line ending'.format(bl_file, lineno)))
                if not line.endswith('\n'):
                    raise(ValueError('{0}:{1}:No newline'.format(bl_file, lineno)))
                if line == '\n':
                    raise(ValueError('{0}:{1}:Empty line'.format(bl_file, lineno)))
                if line in seen:
                    raise(ValueError('{0}:{1}:Duplicate entry {2} (also on line {3})'.format(
                        bl_file, lineno, line.rstrip('\n'), seen[line])))
                seen[line] = lineno
def test_blacklist_pull_diff():
    only_blacklists_diff = """helpers.py
test/test_blacklists.py
blacklisted_usernames.txt"""
    assert only_blacklists_changed(only_blacklists_diff)

    mixed_files_diff = """blacklisted_websites.txt
watched_keywords.txt
bad_keywords.txt"""
    assert not only_blacklists_changed(mixed_files_diff)
|
apache-2.0
|
Python
|
e13cfe7a7e215f43e8210fb6d116ccafe80c8756
|
fix names of functions
|
adrn/gary,adrn/gary,adrn/gary,adrn/gala,adrn/gala,adrn/gala
|
gary/observation/tests/test_rrlyrae.py
|
gary/observation/tests/test_rrlyrae.py
|
# coding: utf-8
"""
Test the RR Lyrae helper functions.
"""
from __future__ import absolute_import, unicode_literals, division, print_function
__author__ = "adrn <[email protected]>"
# Standard library
import os, sys
# Third-party
import astropy.units as u
import numpy as np
import pytest
from ..core import *
from ..rrlyrae import *
def test_gaia_rv_error():
d = np.linspace(1.,50.,100)*u.kpc
rv_errs = gaia_radial_velocity_error(d)
def test_gaia_pm_error():
d = np.linspace(1.,50.,100)*u.kpc
pm_errs = gaia_proper_motion_error(d)
vtan_errs = pm_errs.to(u.rad/u.yr).value/u.yr*d
vtan_errs = vtan_errs.to(u.km/u.s)
|
# coding: utf-8
"""
Test the RR Lyrae helper functions.
"""
from __future__ import absolute_import, unicode_literals, division, print_function
__author__ = "adrn <[email protected]>"
# Standard library
import os, sys
# Third-party
import astropy.units as u
import numpy as np
import pytest
from ..core import *
from ..rrlyrae import *
def test_gaia_rv_error():
d = np.linspace(1.,50.,100)*u.kpc
rv_errs = gaia_rv_error(d)
def test_gaia_pm_error():
d = np.linspace(1.,50.,100)*u.kpc
pm_errs = gaia_pm_error(d)
vtan_errs = pm_errs.to(u.rad/u.yr).value/u.yr*d
vtan_errs = vtan_errs.to(u.km/u.s)
|
mit
|
Python
|
ff2cfb51b1fa30d0103bc782843f69fea08e0d51
|
Fix formatting for table declaration
|
manthey/girder,kotfic/girder,Xarthisius/girder,RafaelPalomar/girder,jbeezley/girder,girder/girder,sutartmelson/girder,adsorensen/girder,RafaelPalomar/girder,adsorensen/girder,manthey/girder,RafaelPalomar/girder,data-exp-lab/girder,kotfic/girder,Xarthisius/girder,Kitware/girder,sutartmelson/girder,jbeezley/girder,data-exp-lab/girder,Xarthisius/girder,adsorensen/girder,kotfic/girder,Xarthisius/girder,Kitware/girder,Kitware/girder,RafaelPalomar/girder,jbeezley/girder,manthey/girder,data-exp-lab/girder,Kitware/girder,data-exp-lab/girder,manthey/girder,jbeezley/girder,kotfic/girder,girder/girder,Xarthisius/girder,adsorensen/girder,sutartmelson/girder,sutartmelson/girder,girder/girder,data-exp-lab/girder,kotfic/girder,RafaelPalomar/girder,girder/girder,adsorensen/girder,sutartmelson/girder
|
girder/utility/assetstore_utilities.py
|
girder/utility/assetstore_utilities.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from .filesystem_assetstore_adapter import FilesystemAssetstoreAdapter
from .gridfs_assetstore_adapter import GridFsAssetstoreAdapter
from .s3_assetstore_adapter import S3AssetstoreAdapter
from girder.constants import AssetstoreType
from girder import events
assetstoreTable = {
    AssetstoreType.FILESYSTEM: FilesystemAssetstoreAdapter,
    AssetstoreType.GRIDFS: GridFsAssetstoreAdapter,
    AssetstoreType.S3: S3AssetstoreAdapter
}
def getAssetstoreAdapter(assetstore, instance=True):
"""
This is a factory method that will return the appropriate assetstore adapter
for the specified assetstore. The returned object will conform to
the interface of the AbstractAssetstoreAdapter.
:param assetstore: The assetstore document used to instantiate the adapter.
:type assetstore: dict
:param instance: Whether to return an instance of the adapter or the class.
If you are performing validation, set this to False to avoid throwing
unwanted exceptions during instantiation.
:type instance: bool
:returns: An adapter descending from AbstractAssetstoreAdapter
"""
storeType = assetstore['type']
cls = assetstoreTable.get(storeType)
if cls is None:
e = events.trigger('assetstore.adapter.get', assetstore)
if len(e.responses) > 0:
cls = e.responses[-1]
else:
raise Exception('No AssetstoreAdapter for type: %s.' % storeType)
if instance:
return cls(assetstore)
else:
return cls
def setAssetstoreAdapter(storeType, cls):
if storeType not in assetstoreTable:
raise Exception('Illegal assetstore type code: "%s"' % (storeType))
assetstoreTable[storeType] = cls
def fileIndexFields():
"""
This will return a set of all required index fields from all of the
different assetstore types.
"""
return list(set(
FilesystemAssetstoreAdapter.fileIndexFields() +
GridFsAssetstoreAdapter.fileIndexFields() +
S3AssetstoreAdapter.fileIndexFields()
))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from .filesystem_assetstore_adapter import FilesystemAssetstoreAdapter
from .gridfs_assetstore_adapter import GridFsAssetstoreAdapter
from .s3_assetstore_adapter import S3AssetstoreAdapter
from girder.constants import AssetstoreType
from girder import events
assetstoreTable = {AssetstoreType.FILESYSTEM: FilesystemAssetstoreAdapter,
                   AssetstoreType.GRIDFS: GridFsAssetstoreAdapter,
                   AssetstoreType.S3: S3AssetstoreAdapter}
def getAssetstoreAdapter(assetstore, instance=True):
"""
This is a factory method that will return the appropriate assetstore adapter
for the specified assetstore. The returned object will conform to
the interface of the AbstractAssetstoreAdapter.
:param assetstore: The assetstore document used to instantiate the adapter.
:type assetstore: dict
:param instance: Whether to return an instance of the adapter or the class.
If you are performing validation, set this to False to avoid throwing
unwanted exceptions during instantiation.
:type instance: bool
:returns: An adapter descending from AbstractAssetstoreAdapter
"""
storeType = assetstore['type']
cls = assetstoreTable.get(storeType)
if cls is None:
e = events.trigger('assetstore.adapter.get', assetstore)
if len(e.responses) > 0:
cls = e.responses[-1]
else:
raise Exception('No AssetstoreAdapter for type: %s.' % storeType)
if instance:
return cls(assetstore)
else:
return cls
def setAssetstoreAdapter(storeType, cls):
if storeType not in assetstoreTable:
raise Exception('Illegal assetstore type code: "%s"' % (storeType))
assetstoreTable[storeType] = cls
def fileIndexFields():
"""
This will return a set of all required index fields from all of the
different assetstore types.
"""
return list(set(
FilesystemAssetstoreAdapter.fileIndexFields() +
GridFsAssetstoreAdapter.fileIndexFields() +
S3AssetstoreAdapter.fileIndexFields()
))
|
apache-2.0
|
Python
|
38e224c282c62ad358c753eb707cf71ad1f00aff
|
fix os x gyp settings
|
angelmic/node-usb-detection,MadLittleMods/node-usb-detection,MadLittleMods/node-usb-detection,MadLittleMods/node-usb-detection,angelmic/node-usb-detection,angelmic/node-usb-detection,AMI-NTBU/AndyLau,AMI-NTBU/AndyLau,AMI-NTBU/AndyLau
|
binding.gyp
|
binding.gyp
|
{
"targets": [
{
"target_name": "detection",
"sources": [
"src/detection.cpp",
"src/detection.h",
"src/deviceList.cpp"
],
"include_dirs" : [
"<!(node -e \"require('nan')\")"
],
'conditions': [
['OS=="win"',
{
'sources': [
"src/detection_win.cpp"
],
'include_dirs+':
[
# Not needed now
]
}
],
['OS=="mac"',
{
'sources': [
"src/detection_mac.cpp"
],
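            # The macOS implementation uses IOKit's device-notification APIs,
            # so the framework has to be linked explicitly; that missing link
            # flag is what this commit fixes.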
"libraries": [
"-framework",
"IOKit"
]
}
],
['OS=="linux"',
{
'sources': [
"src/detection_linux.cpp"
],
'link_settings': {
'libraries': [
'-ludev'
]
}
}
]
]
}
]
}
|
{
"targets": [
{
"target_name": "detection",
"sources": [
"src/detection.cpp",
"src/detection.h",
"src/deviceList.cpp"
],
"include_dirs" : [
"<!(node -e \"require('nan')\")"
],
'conditions': [
['OS=="win"',
{
'sources': [
"src/detection_win.cpp"
],
'include_dirs+':
[
# Not needed now
]
}
],
['OS=="mac"',
{
'sources': [
"src/detection_mac.cpp"
]
}
],
['OS=="linux"',
{
'sources': [
"src/detection_linux.cpp"
],
'link_settings': {
'libraries': [
'-ludev'
]
}
}
]
]
}
]
}
|
mit
|
Python
|
de0180ccccdd3e2b83c6a7188fd2688c64c70580
|
add rpath linker option in binding.gyp when --sqlite option is used with a custom sqlite
|
mapbox/node-sqlite3,briangreenery/node-sqlite3,damonpetta/node-sqlite3,march1993/node-sqlite3,alejandronunez/sqlitenw,FeodorFitsner/node-sqlite3,tomhughes/node-sqlite3,ashikcse20/node-sqlite3,mapbox/node-sqlite3,Finkes/node-sqlite3,ashikcse20/node-sqlite3,kevinsawicki/node-sqlite3,kevinsawicki/node-sqlite3,t3mulligan/node-sqlite3,bengotow/node-sqlite3,alejandronunez/sqlitenw,mapbox/node-sqlite3,kkoopa/node-sqlite3,dragonfly-science/node-sqlite3,strongloop-forks/node-sqlite3,mapbox/node-sqlite3,dragonfly-science/node-sqlite3,t3mulligan/node-sqlite3,Finkes/node-sqlite3,briangreenery/node-sqlite3,t3mulligan/node-sqlite3,kkoopa/node-sqlite3,Finkes/node-sqlite3,march1993/node-sqlite3,damonpetta/node-sqlite3,javiergarmon/node-sqlite3,briangreenery/node-sqlite3,kenr/node-sqlite3,liubiggun/node-sqlite3,jBarz/node-sqlite3,kevinsawicki/node-sqlite3,kkoopa/node-sqlite3,tsufeki/node-sqlite3,tomhughes/node-sqlite3,briangreenery/node-sqlite3,javiergarmon/node-sqlite3,kevinsawicki/node-sqlite3,march1993/node-sqlite3,liubiggun/node-sqlite3,javiergarmon/node-sqlite3,jBarz/node-sqlite3,kenr/node-sqlite3,kenr/node-sqlite3,alejandronunez/sqlitenw,Finkes/node-sqlite3,dragonfly-science/node-sqlite3,damonpetta/node-sqlite3,damonpetta/node-sqlite3,tomhughes/node-sqlite3,javiergarmon/node-sqlite3,FeodorFitsner/node-sqlite3,march1993/node-sqlite3,dragonfly-science/node-sqlite3,FeodorFitsner/node-sqlite3,liubiggun/node-sqlite3,ashikcse20/node-sqlite3,kevinsawicki/node-sqlite3,javiergarmon/node-sqlite3,t3mulligan/node-sqlite3,t3mulligan/node-sqlite3,tsufeki/node-sqlite3,jBarz/node-sqlite3,kkoopa/node-sqlite3,march1993/node-sqlite3,kenr/node-sqlite3,dragonfly-science/node-sqlite3,t3mulligan/node-sqlite3,ashikcse20/node-sqlite3,bengotow/node-sqlite3,damonpetta/node-sqlite3,FeodorFitsner/node-sqlite3,ashikcse20/node-sqlite3,mapbox/node-sqlite3,bengotow/node-sqlite3,javiergarmon/node-sqlite3,bengotow/node-sqlite3,Finkes/node-sqlite3,bengotow/node-sqlite3,jBarz/node-sqlite3,ashikcse20/node-sqlite3,jBarz/node-sqlite3,alejandronunez/sqlitenw,FeodorFitsner/node-sqlite3,tomhughes/node-sqlite3,FeodorFitsner/node-sqlite3,kkoopa/node-sqlite3,strongloop-forks/node-sqlite3,bengotow/node-sqlite3,strongloop-forks/node-sqlite3,alejandronunez/sqlitenw,Finkes/node-sqlite3,dragonfly-science/node-sqlite3,briangreenery/node-sqlite3,mapbox/node-sqlite3,tomhughes/node-sqlite3,strongloop-forks/node-sqlite3,kenr/node-sqlite3,damonpetta/node-sqlite3,kenr/node-sqlite3,march1993/node-sqlite3,jBarz/node-sqlite3,liubiggun/node-sqlite3,liubiggun/node-sqlite3,strongloop-forks/node-sqlite3,tsufeki/node-sqlite3,tsufeki/node-sqlite3,kkoopa/node-sqlite3,strongloop-forks/node-sqlite3,tsufeki/node-sqlite3,briangreenery/node-sqlite3,tsufeki/node-sqlite3,alejandronunez/sqlitenw,tomhughes/node-sqlite3,kevinsawicki/node-sqlite3,liubiggun/node-sqlite3
|
binding.gyp
|
binding.gyp
|
{
    'includes': [ 'deps/common-sqlite.gypi' ],
    'variables': {
        'sqlite%':'internal',
    },
    'targets': [
        {
            'target_name': 'node_sqlite3',
            'conditions': [
                ['sqlite != "internal"', {
                    'libraries': [
                        '-L<@(sqlite)/lib',
                        '-lsqlite3'
                    ],
                    'include_dirs': [ '<@(sqlite)/include' ],
'conditions': [ [ 'OS=="linux"', {'libraries+':['-Wl,-rpath=<@(sqlite)/lib']} ] ]
},
{
'dependencies': [
'deps/sqlite3.gyp:sqlite3'
]
}
]
],
'sources': [
'src/database.cc',
'src/node_sqlite3.cc',
'src/statement.cc'
],
}
]
}
|
{
    'includes': [ 'deps/common-sqlite.gypi' ],
    'variables': {
        'sqlite%':'internal',
    },
    'targets': [
        {
            'target_name': 'node_sqlite3',
            'conditions': [
                ['sqlite != "internal"', {
                    'libraries': [
                        '-L<@(sqlite)/lib',
                        '-lsqlite3'
                    ],
                    'include_dirs': [ '<@(sqlite)/include' ]
                },
                {
                    'dependencies': [
                        'deps/sqlite3.gyp:sqlite3'
                    ]
                }
                ]
            ],
            'sources': [
                'src/database.cc',
                'src/node_sqlite3.cc',
                'src/statement.cc'
            ],
        }
    ]
}
|
bsd-3-clause
|
Python
|
3fe8dd01906fe2d8e3e52b537acd34f65073ca01
|
remove the cflags
|
lukaskollmer/objc
|
binding.gyp
|
binding.gyp
|
{
"targets": [
{
"target_name": "objc",
"sources": [
"src/binding/objc.cc",
"src/binding/Proxy.cc",
"src/binding/utils.cc",
"src/binding/Invocation.cc",
"src/binding/constants.cpp"
],
"include_dirs": [
"<!(node -e \"require('nan')\")",
],
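      # On macOS builds the flags come from xcode_settings, so the duplicate
      # generic "cflags" block (kept in old_contents below) was redundant for
      # this macOS-only addon and removed.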
"xcode_settings": {
"OTHER_CFLAGS": [
"-std=c++14",
"-stdlib=libc++"
]
}
}
]
}
|
{
"targets": [
{
"target_name": "objc",
"sources": [
"src/binding/objc.cc",
"src/binding/Proxy.cc",
"src/binding/utils.cc",
"src/binding/Invocation.cc",
"src/binding/constants.cpp"
],
"include_dirs": [
"<!(node -e \"require('nan')\")",
],
"cflags": [
"-std=c++14",
"-stdlib=libc++"
],
"xcode_settings": {
"OTHER_CFLAGS": [
"-std=c++14",
"-stdlib=libc++"
]
}
}
]
}
|
mit
|
Python
|
0a90376144ee5568e6e140cbf657d7e85070f1f1
|
remove the matplotlib agg setting
|
simpeg/discretize,simpeg/discretize,simpeg/discretize
|
tests/base/test_view.py
|
tests/base/test_view.py
|
from __future__ import print_function
import matplotlib
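# Backend selection is now left to the environment; the module-level
# matplotlib.use('Agg') kept in old_contents overrode user and CI settings.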
import unittest
import numpy as np
import matplotlib.pyplot as plt
import discretize
from discretize import Tests, utils
import warnings
import pytest
np.random.seed(16)
TOL = 1e-1
class Cyl3DView(unittest.TestCase):
    def setUp(self):
        self.mesh = discretize.CylMesh([10, 4, 12])

    def test_incorrectAxesWarnings(self):
        # axes aren't polar
        fig, ax = plt.subplots(1, 1)

        # test z-slice
        with pytest.warns(UserWarning):
            self.mesh.plotGrid(slice='z', ax=ax)

        # axes aren't right shape
        with pytest.warns(UserWarning):
            self.mesh.plotGrid(slice='both', ax=ax)
            self.mesh.plotGrid(ax=ax)

        # this should be fine
        self.mesh.plotGrid(slice='theta', ax=ax)

        fig, ax = plt.subplots(2, 1)
        # axes are right shape, but not polar
        with pytest.warns(UserWarning):
            self.mesh.plotGrid(slice='both', ax=ax)
            self.mesh.plotGrid(ax=ax)

        # these should be fine
        self.mesh.plotGrid()
        ax0 = plt.subplot(121, projection='polar')
        ax1 = plt.subplot(122)
        self.mesh.plotGrid(slice='z', ax=ax0)  # plot z only
        self.mesh.plotGrid(slice='theta', ax=ax1)  # plot theta only
        self.mesh.plotGrid(slice='both', ax=[ax0, ax1])  # plot both
        self.mesh.plotGrid(slice='both', ax=[ax1, ax0])  # plot both
        self.mesh.plotGrid(ax=[ax1, ax0])  # plot both

    def test_plotImage(self):
        with self.assertRaises(Exception):
            self.mesh.plotImage(np.random.rand(self.mesh.nC))


if __name__ == '__main__':
    unittest.main()
|
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import unittest
import numpy as np
import matplotlib.pyplot as plt
import discretize
from discretize import Tests, utils
import warnings
import pytest
np.random.seed(16)
TOL = 1e-1
class Cyl3DView(unittest.TestCase):
    def setUp(self):
        self.mesh = discretize.CylMesh([10, 4, 12])

    def test_incorrectAxesWarnings(self):
        # axes aren't polar
        fig, ax = plt.subplots(1, 1)

        # test z-slice
        with pytest.warns(UserWarning):
            self.mesh.plotGrid(slice='z', ax=ax)

        # axes aren't right shape
        with pytest.warns(UserWarning):
            self.mesh.plotGrid(slice='both', ax=ax)
            self.mesh.plotGrid(ax=ax)

        # this should be fine
        self.mesh.plotGrid(slice='theta', ax=ax)

        fig, ax = plt.subplots(2, 1)
        # axes are right shape, but not polar
        with pytest.warns(UserWarning):
            self.mesh.plotGrid(slice='both', ax=ax)
            self.mesh.plotGrid(ax=ax)

        # these should be fine
        self.mesh.plotGrid()
        ax0 = plt.subplot(121, projection='polar')
        ax1 = plt.subplot(122)
        self.mesh.plotGrid(slice='z', ax=ax0)  # plot z only
        self.mesh.plotGrid(slice='theta', ax=ax1)  # plot theta only
        self.mesh.plotGrid(slice='both', ax=[ax0, ax1])  # plot both
        self.mesh.plotGrid(slice='both', ax=[ax1, ax0])  # plot both
        self.mesh.plotGrid(ax=[ax1, ax0])  # plot both

    def test_plotImage(self):
        with self.assertRaises(Exception):
            self.mesh.plotImage(np.random.rand(self.mesh.nC))


if __name__ == '__main__':
    unittest.main()
|
mit
|
Python
|
a114325be4f81cd34438f96fbc134a7881d5fe7a
|
Add link to get_input_var_names
|
mperignon/bmi-delta,mperignon/bmi-STM,mperignon/bmi-delta,mperignon/bmi-STM
|
bmi/vars.py
|
bmi/vars.py
|
#! /usr/bin/env python
class BmiVars(object):
"""Defines an interface for converting a standalone model into an
integrated modeling framework component.
"""
def get_var_type(self, long_var_name):
"""Returns the type of the given variable.
Parameters
----------
long_var_name : str
An input or output variable name, a CSDMS Standard Name.
Returns
-------
str
The Python variable type; e.g., `str`, `int`, `float`.
"""
pass
def get_var_units(self, long_var_name):
"""Returns the units of the given variable.
Standard unit names, in lower case, should be used, such as
"meters" or "seconds". Standard abbreviations, like "m" for
meters, are also supported. For variables with compound units,
each unit name is separated by a single space, with exponents
other than 1 placed immediately after the name, as in "m s-1"
for velocity, "W m-2" for an energy flux, or "km2" for an
area.
Parameters
----------
long_var_name : str
An input or output variable name, a CSDMS Standard Name.
Returns
-------
str
The variable units.
Notes
-----
CSDMS uses the UDUNITS standard from Unidata.
"""
pass
def get_var_nbytes(self, long_var_name):
"""Returns the size, in bytes, of the given variable.
Parameters
----------
long_var_name : str
An input or output variable name, a CSDMS Standard Name.
Returns
-------
int
The size of the variable, counted in bytes.
"""
pass
def get_var_grid(self, long_var_name):
"""Returns the identifier of the grid associated with a given
variable.
Parameters
----------
long_var_name : str
An input or output variable name, a CSDMS Standard Name.
Returns
-------
int
The grid identifier.
See Also
--------
bmi.info.BmiInfo.get_input_var_names : Get `long_var_name` from this method or from **get_output_var_names**.
"""
pass
|
#! /usr/bin/env python
class BmiVars(object):
"""Defines an interface for converting a standalone model into an
integrated modeling framework component.
"""
def get_var_type(self, long_var_name):
"""Returns the type of the given variable.
Parameters
----------
long_var_name : str
An input or output variable name, a CSDMS Standard Name.
Returns
-------
str
The Python variable type; e.g., `str`, `int`, `float`.
"""
pass
def get_var_units(self, long_var_name):
"""Returns the units of the given variable.
Standard unit names, in lower case, should be used, such as
"meters" or "seconds". Standard abbreviations, like "m" for
meters, are also supported. For variables with compound units,
each unit name is separated by a single space, with exponents
other than 1 placed immediately after the name, as in "m s-1"
for velocity, "W m-2" for an energy flux, or "km2" for an
area.
Parameters
----------
long_var_name : str
An input or output variable name, a CSDMS Standard Name.
Returns
-------
str
The variable units.
Notes
-----
CSDMS uses the UDUNITS standard from Unidata.
"""
pass
def get_var_nbytes(self, long_var_name):
"""Returns the size, in bytes, of the given variable.
Parameters
----------
long_var_name : str
An input or output variable name, a CSDMS Standard Name.
Returns
-------
int
The size of the variable, counted in bytes.
"""
pass
def get_var_grid(self, long_var_name):
"""Returns the identifier of the grid associated with a given
variable.
Parameters
----------
long_var_name : str
An input or output variable name, a CSDMS Standard Name.
Returns
-------
int
The grid identifier.
"""
pass
|
mit
|
Python
|
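A minimal sketch of how the four BmiVars methods above might be implemented, assuming a hypothetical model that holds a single variable, 'plate_surface__temperature', in a NumPy array; the class name, shape, and units are illustrative, not part of the BMI spec.
import numpy as np
class HeatBmiVars(object):
    """Toy implementation of the BmiVars query methods for one variable."""
    def __init__(self):
        self._temperature = np.zeros((10, 20), dtype=np.float64)
        self._grids = {'plate_surface__temperature': 0}
    def get_var_type(self, long_var_name):
        # Report the NumPy dtype name as the variable type, e.g. 'float64'.
        return str(self._temperature.dtype)
    def get_var_units(self, long_var_name):
        # UDUNITS-style unit string, as the docstrings above require.
        return 'K'
    def get_var_nbytes(self, long_var_name):
        # Total size of the backing array, in bytes.
        return self._temperature.nbytes
    def get_var_grid(self, long_var_name):
        # Look up the integer grid identifier for this variable.
        return self._grids[long_var_name]
bmi = HeatBmiVars()
assert bmi.get_var_type('plate_surface__temperature') == 'float64'
assert bmi.get_var_nbytes('plate_surface__temperature') == 10 * 20 * 8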
390059007169b964649b3ec8af84503b38e41e97
|
refactor date formatting
|
ods94065/opost,ods94065/opost
|
postweb/utils.py
|
postweb/utils.py
|
import bleach
from django.conf import settings
import dateutil.parser
import markdown
markdown = markdown.Markdown()
# Tags suitable for rendering markdown
# From https://github.com/yourcelf/bleach-allowlist/blob/main/bleach_allowlist/bleach_allowlist.py
MARKDOWN_TAGS = [
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"b",
"i",
"strong",
"em",
"tt",
"p",
"br",
"span",
"div",
"blockquote",
"code",
"pre",
"hr",
"ul",
"ol",
"li",
"dd",
"dt",
"img",
"a",
"sub",
"sup",
]
MARKDOWN_ATTRS = {
"*": ["id"],
"img": ["src", "alt", "title"],
"a": ["href", "alt", "title"],
}
PREFERRED_DATE_FORMAT = "%a %b %d %Y %I:%M %p"
def service_url(service, path=""):
"""Construct a URL for accessing the named service."""
if path.startswith("/"):
path = path[1:]
service_def = settings.SERVICES.get(service, None)
if service_def is None:
raise ValueError(f"No service named {service} configured in settings.SERVICES")
endpoint = service_def.get("endpoint", None)
if endpoint is None:
raise ValueError(f"No endpoint configured in settings.SERVICES for {service}")
if not endpoint.endswith("/"):
endpoint = endpoint + "/"
return endpoint + path
def represent_date(iso_datetime_str):
"""Converts ISO-8601 datetime representation to something more readable.
TODO: This should be internationalized.
"""
dt = dateutil.parser.parse(iso_datetime_str)
return dt.strftime(PREFERRED_DATE_FORMAT)
def markdown_to_html(markdown_text):
"""Converts Markdown to HTML.
To avoid XSS vulnerabilities, only certain HTML tags will be allowed
in the Markdown text.
"""
return bleach.clean(
markdown.convert(markdown_text),
tags=MARKDOWN_TAGS,
attributes=MARKDOWN_ATTRS,
)
|
import bleach
from django.conf import settings
import dateutil.parser
import markdown
markdown = markdown.Markdown()
# Tags suitable for rendering markdown
# From https://github.com/yourcelf/bleach-allowlist/blob/main/bleach_allowlist/bleach_allowlist.py
MARKDOWN_TAGS = [
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"b",
"i",
"strong",
"em",
"tt",
"p",
"br",
"span",
"div",
"blockquote",
"code",
"pre",
"hr",
"ul",
"ol",
"li",
"dd",
"dt",
"img",
"a",
"sub",
"sup",
]
MARKDOWN_ATTRS = {
"*": ["id"],
"img": ["src", "alt", "title"],
"a": ["href", "alt", "title"],
}
def service_url(service, path=""):
"""Construct a URL for accessing the named service."""
if path.startswith("/"):
path = path[1:]
service_def = settings.SERVICES.get(service, None)
if service_def is None:
raise ValueError(f"No service named {service} configured in settings.SERVICES")
endpoint = service_def.get("endpoint", None)
if endpoint is None:
raise ValueError(f"No endpoint configured in settings.SERVICES for {service}")
if not endpoint.endswith("/"):
endpoint = endpoint + "/"
return endpoint + path
def represent_date(iso_datetime_str):
"""Converts ISO-8601 datetime representation to something more readable.
TODO: This should be internationalized.
"""
dt = dateutil.parser.parse(iso_datetime_str)
return dt.strftime("%a %b %d %Y %I:%M %p")
def markdown_to_html(markdown_text):
"""Converts Markdown to HTML.
To avoid XSS vulnerabilities, only certain HTML tags will be allowed
in the Markdown text.
"""
return bleach.clean(
markdown.convert(markdown_text),
tags=MARKDOWN_TAGS,
attributes=MARKDOWN_ATTRS,
)
|
mit
|
Python
|
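The refactor above only lifts the format string into the PREFERRED_DATE_FORMAT constant; behaviour is unchanged. A quick usage sketch of the two helpers, assuming the module above is importable as postweb.utils:
from postweb.utils import represent_date, markdown_to_html
print(represent_date("2024-03-05T14:30:00"))  # -> "Tue Mar 05 2024 02:30 PM"
# Tags outside MARKDOWN_TAGS are escaped by bleach, so the script payload is neutralised.
print(markdown_to_html("**bold** <script>alert(1)</script>"))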
d0ac7153cd9a88c5a9c6edef4f6d415b4d88143b
|
make admin classes overridable
|
pinax/pinax-referrals,pinax/pinax-referrals
|
pinax/referrals/admin.py
|
pinax/referrals/admin.py
|
from django.contrib import admin
from .models import Referral, ReferralResponse
@admin.register(Referral)
class ReferralAdmin(admin.ModelAdmin):
list_display = [
"user",
"code",
"label",
"redirect_to",
"target_content_type",
"target_object_id"
]
readonly_fields = ["code", "created_at"]
list_filter = ["target_content_type", "created_at"]
search_fields = ["user__first_name", "user__last_name", "user__email", "user__username", "code"]
@admin.register(ReferralResponse)
class ReferralResponseAdmin(admin.ModelAdmin):
list_display = [
"referral",
"session_key",
"user",
"ip_address",
"action"
]
readonly_fields = ["referral", "session_key", "user", "ip_address", "action"]
list_filter = ["action", "created_at"]
search_fields = ["referral__code", "referral__user__username", "ip_address"]
|
from django.contrib import admin
from .models import Referral, ReferralResponse
admin.site.register(
Referral,
list_display=[
"user",
"code",
"label",
"redirect_to",
"target_content_type",
"target_object_id"
],
readonly_fields=["code", "created_at"],
list_filter=["target_content_type", "created_at"],
search_fields=["user__first_name", "user__last_name", "user__email", "user__username", "code"]
)
admin.site.register(
ReferralResponse,
list_display=[
"referral",
"session_key",
"user",
"ip_address",
"action"
],
readonly_fields=["referral", "session_key", "user", "ip_address", "action"],
list_filter=["action", "created_at"],
search_fields=["referral__code", "referral__user__username", "ip_address"]
)
|
mit
|
Python
|
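The payoff of moving from admin.site.register(..., **options) to named ModelAdmin subclasses is that downstream projects can now import, subclass, and re-register them. A hedged sketch of such an override in a project's own admin.py (the extra column relies on the created_at field implied by readonly_fields above):
from django.contrib import admin
from pinax.referrals.admin import ReferralAdmin
from pinax.referrals.models import Referral
# Swap in a customised admin: drop the stock registration, register a subclass.
admin.site.unregister(Referral)
@admin.register(Referral)
class MyReferralAdmin(ReferralAdmin):
    list_display = ReferralAdmin.list_display + ["created_at"]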
51cf3706504adb6b1772b491c6d9d612a64e49ab
|
fix check mention
|
everpcpc/yubari,everpcpc/yubari
|
yubari/bots/qq_watch.py
|
yubari/bots/qq_watch.py
|
#!/usr/bin/env python
# coding: utf-8
import time
import logging
from yubari.config import QQ_GROUP, MENTION_NAME, QQ_ME
from yubari.lib.qq import qqbot
logger = logging.getLogger(__name__)
def check_mention_self(content):
for word in MENTION_NAME:
if word in content:
return True
return False
def run():
continue_count = 0
last_msg = ""
last_call = 0
for msg in qqbot.poll():
logger.info(msg)
content = msg.get('msg').strip()
if check_mention_self(content):
now = int(time.time())
if now - last_call < 1800:
logger.info("called in last 30min")
continue
call_msg = "呀呀呀,召唤一号机[CQ:at,qq=%s]" % QQ_ME
qqbot.sendGroupMsg(call_msg)
last_call = now
continue
if msg.get('event') == 'GroupMsg':
if msg.get('group') == QQ_GROUP:
if content != last_msg:
last_msg = content
continue_count = 0
continue
if continue_count < 2:
continue_count += 1
else:
logger.info("repeat: %s", content)
qqbot.sendGroupMsg(content)
continue_count = 0
if __name__ == "__main__":
run()
|
#!/usr/bin/env python
# coding: utf-8
import time
import logging
from yubari.config import QQ_GROUP, MENTION_NAME
from yubari.lib.qq import qqbot
logger = logging.getLogger(__name__)
def run():
continue_count = 0
last_msg = ""
last_call = 0
for msg in qqbot.poll():
logger.info(msg)
content = msg.get('msg').strip()
for word in MENTION_NAME:
if word in content:
now = int(time.time())
if now - last_call < 1800:
logger.info("called in last 30min")
return
call_msg = "呀呀呀,召唤一号机[CQ:at,qq=%s]" % QQ_ME
qqbot.sendGroupMsg(call_msg)
last_call = now
return
if msg.get('event') == 'GroupMsg':
if msg.get('group') == QQ_GROUP:
if content != last_msg:
last_msg = content
continue_count = 0
continue
if continue_count < 2:
continue_count += 1
else:
logger.info("repeat: %s", content)
qqbot.sendGroupMsg(content)
continue_count = 0
if __name__ == "__main__":
run()
|
mit
|
Python
|
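The bug fixed above is easy to miss: inside the for msg in qqbot.poll() loop, return tears down the whole polling loop after the first mention, while continue merely skips to the next message. A minimal reproduction with a plain generator standing in for qqbot.poll():
def poll():
    yield "hello"
    yield "@bot ping"  # triggers the mention branch
    yield "world"
def broken():
    seen = []
    for msg in poll():
        if "@bot" in msg:
            return seen  # old behaviour: stops consuming messages entirely
        seen.append(msg)
    return seen
def fixed():
    seen = []
    for msg in poll():
        if "@bot" in msg:
            continue  # new behaviour: keep polling
        seen.append(msg)
    return seen
assert broken() == ["hello"]
assert fixed() == ["hello", "world"]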
eda12e10ae41dce8a34903709afb7a0c73fcd3e2
|
Add dict merging and fix wrapped
|
1064CBread/1064Chat,1064CBread/1064Chat,1064CBread/1064Chat,1064CBread/1064Chat
|
src/server/blueprints/rest/restutil.py
|
src/server/blueprints/rest/restutil.py
|
"""
Utilities specific to REST blueprints.
"""
from enum import Enum
from collections.abc import MutableMapping
from util import get_current_app
from flask import Response
from functools import wraps
import re
class ClientType(str, Enum):
BROWSER = "browser" # most useful in debug
CURL = "cURL" # also useful in debug
OTHER = "other" # usually production apps
browsers = re.compile("|".join(("chrome", "firefox", "safari", "opera")), re.IGNORECASE)
def get_implied_client_type(useragent: str) -> ClientType:
"""
    Attempts to get the client type based on user-agent. This is by no means exhaustive for browser checking,
and may be incorrect if the client lies.
:param useragent: The user-agent that the client provided
:return: The ClientType the user-agent implies
"""
if browsers.search(useragent):
return ClientType.BROWSER
if "curl/" in useragent:
return ClientType.CURL
return ClientType.OTHER
_shared_decorator_key = __name__ + "_shared_decorator"
def _shared_decorator_logic(**response_kwargs):
"""
Shared deco logic, merges decorators that are used together
"""
def make_wrapper(f):
merged_kwargs = response_kwargs.copy()
fn = f
if hasattr(f, _shared_decorator_key):
data = getattr(f, _shared_decorator_key)
kwtomerge = data['kwargs']
merge_dict = dict()
for k, v in kwtomerge.items():
if k in merged_kwargs and isinstance(merged_kwargs[k], MutableMapping):
merged_kwargs[k].update(v)
else:
merge_dict[k] = v
merged_kwargs.update(merge_dict)
fn = data['wrapped']
@wraps(fn)
def wrapper(*args, **kwargs):
ret = fn(*args, **kwargs)
if isinstance(ret, Response):
# ahhhhhh
raise ValueError("No support for returning response and merging")
return get_current_app().response_class(ret, **merged_kwargs)
setattr(wrapper, _shared_decorator_key, {'kwargs': merged_kwargs, 'wrapped': fn})
return wrapper
return make_wrapper
def content_type(ctype):
return _shared_decorator_logic(content_type=ctype)
def status_code(code):
return _shared_decorator_logic(status=code)
def headers(direct_dict=None, **kwargs):
funneled = direct_dict or dict()
funneled.update(kwargs)
funneled = {k.replace('_', '-').upper(): v for k, v in funneled.items()}
return _shared_decorator_logic(headers=funneled)
|
"""
Utilities specific to REST blueprints.
"""
from enum import Enum
from util import get_current_app
from functools import wraps
import re
class ClientType(str, Enum):
BROWSER = "browser" # most useful in debug
CURL = "cURL" # also useful in debug
OTHER = "other" # usually production apps
browsers = re.compile("|".join(("chrome", "firefox", "safari", "opera")), re.IGNORECASE)
def get_implied_client_type(useragent: str) -> ClientType:
"""
    Attempts to get the client type based on user-agent. This is by no means exhaustive for browser checking,
and may be incorrect if the client lies.
:param useragent: The user-agent that the client provided
:return: The ClientType the user-agent implies
"""
if browsers.search(useragent):
return ClientType.BROWSER
if "curl/" in useragent:
return ClientType.CURL
return ClientType.OTHER
_shared_decorator_key = __name__ + "_shared_decorator"
def _shared_decorator_logic(**response_kwargs):
"""
Shared deco logic, merges decorators that are used together
"""
def make_wrapper(f):
merged_kwargs = response_kwargs.copy()
fn = f
if hasattr(f, _shared_decorator_key):
data = getattr(f, _shared_decorator_key)
merged_kwargs.update(data['kwargs'])
fn = data['wrapped']
@wraps(fn)
def wrapper(*args, **kwargs):
return get_current_app().response_class(fn(*args, **kwargs), **merged_kwargs)
setattr(wrapper, _shared_decorator_key, {'kwargs': merged_kwargs, 'wrapped': f})
return wrapper
return make_wrapper
def content_type(ctype):
return _shared_decorator_logic(content_type=ctype)
def status_code(code):
return _shared_decorator_logic(status=code)
def headers(direct_dict=None, **kwargs):
funneled = direct_dict or dict()
funneled.update(kwargs)
funneled = {k.replace('_', '-').upper(): v for k, v in funneled.items()}
return _shared_decorator_logic(headers=funneled)
|
mit
|
Python
|
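To see why the MutableMapping branch added above matters, consider stacking headers() twice: without a deep merge, the outer decorator's headers dict would simply replace the inner one's. The merge rule in isolation, as a standalone sketch:
from collections.abc import MutableMapping
def merge_kwargs(outer, inner):
    """Merge inner kwargs into outer, combining dict-valued entries."""
    merged = dict(outer)
    for k, v in inner.items():
        if k in merged and isinstance(merged[k], MutableMapping):
            merged[k].update(v)  # e.g. two headers= dicts are combined
        else:
            merged[k] = v
    return merged
a = {"headers": {"X-ONE": "1"}, "status": 200}
b = {"headers": {"X-TWO": "2"}}
assert merge_kwargs(a, b) == {"headers": {"X-ONE": "1", "X-TWO": "2"}, "status": 200}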
74ad5c935abd69b7408a0c1ba2d7cc4ed57e3bd9
|
test solely running linkcheck as it requires the html to be built
|
simpeg/discretize,simpeg/discretize,simpeg/discretize
|
tests/docs/test_docs.py
|
tests/docs/test_docs.py
|
import subprocess
import unittest
import os
import platform
class Doc_Test(unittest.TestCase):
@property
def path_to_docs(self):
dirname, file_name = os.path.split(os.path.abspath(__file__))
return dirname.split(os.path.sep)[:-2] + ["docs"]
# def test_html(self):
# wd = os.getcwd()
# os.chdir(os.path.sep.join(self.path_to_docs))
#
# if platform.system() != "Windows":
# response = subprocess.run(["make", "html"])
# self.assertTrue(response.returncode == 0)
# else:
# response = subprocess.call(["make", "html"], shell=True)
# self.assertTrue(response == 0)
#
# os.chdir(wd)
def test_linkcheck(self):
wd = os.getcwd()
os.chdir(os.path.sep.join(self.path_to_docs))
if platform.system() != "Windows":
response = subprocess.run(["make", "linkcheck"])
self.assertTrue(response.returncode == 0)
else:
response = subprocess.call(["make", "linkcheck"], shell=True)
self.assertTrue(response == 0)
os.chdir(wd)
if __name__ == "__main__":
unittest.main()
|
import subprocess
import unittest
import os
import platform
class Doc_Test(unittest.TestCase):
@property
def path_to_docs(self):
dirname, file_name = os.path.split(os.path.abspath(__file__))
return dirname.split(os.path.sep)[:-2] + ["docs"]
def test_html(self):
wd = os.getcwd()
os.chdir(os.path.sep.join(self.path_to_docs))
if platform.system() != "Windows":
response = subprocess.run(["make", "html"])
self.assertTrue(response.returncode == 0)
else:
response = subprocess.call(["make", "html"], shell=True)
self.assertTrue(response == 0)
os.chdir(wd)
def test_linkcheck(self):
wd = os.getcwd()
os.chdir(os.path.sep.join(self.path_to_docs))
if platform.system() != "Windows":
response = subprocess.run(["make", "linkcheck"])
self.assertTrue(response.returncode == 0)
else:
response = subprocess.call(["make", "linkcheck"], shell=True)
self.assertTrue(response == 0)
os.chdir(wd)
if __name__ == "__main__":
unittest.main()
|
mit
|
Python
|
a80ac141b7341e867f1395858e0bdccaa9a83b37
|
Fix for Py2 test.
|
jeffrimko/Auxly
|
tests/filesys_test_7.py
|
tests/filesys_test_7.py
|
# -*- coding: utf-8 -*-
##==============================================================#
## SECTION: Imports #
##==============================================================#
from testlib import *
from auxly.filesys import File
##==============================================================#
## SECTION: Global Definitions #
##==============================================================#
UTF8_STR = u"ÁÍÓÚÀÈÌÒÙAEIOU"
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class TestCase(BaseTest):
def test_file_1(test):
"""Basic File usage."""
p = FNAME[0]
f = File(p)
test.assertFalse(f.exists())
test.assertTrue(f.write(UTF8_STR))
test.assertTrue(f.exists())
test.assertEqual(UTF8_STR, f.read())
test.assertEqual(None, f.read(encoding="ascii"))
def test_file_2(test):
"""Basic File usage."""
p = FNAME[0]
f = File(p)
test.assertFalse(f.exists())
test.assertFalse(f.write(UTF8_STR, encoding="ascii"))
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
##==============================================================#
## SECTION: Imports #
##==============================================================#
from testlib import *
from auxly.filesys import File
##==============================================================#
## SECTION: Global Definitions #
##==============================================================#
UTF8_STR = "ÁÍÓÚÀÈÌÒÙAEIOU"
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class TestCase(BaseTest):
def test_file_1(test):
"""Basic File usage."""
p = FNAME[0]
f = File(p)
test.assertFalse(f.exists())
test.assertTrue(f.write(UTF8_STR))
test.assertTrue(f.exists())
test.assertEqual(UTF8_STR, f.read())
test.assertEqual(None, f.read(encoding="ascii"))
def test_file_2(test):
"""Basic File usage."""
p = FNAME[0]
f = File(p)
test.assertFalse(f.exists())
test.assertFalse(f.write(UTF8_STR, encoding="ascii"))
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
unittest.main()
|
mit
|
Python
|
e883d625bf78c52d4f1206f13ef64e53df23c3dd
|
Add a tool for getting the current schema. Not sure if this could break things. Concurrency might be a bitch.
|
luzfcb/django-boardinghouse,luzfcb/django-boardinghouse,luzfcb/django-boardinghouse
|
multi_schema/schema.py
|
multi_schema/schema.py
|
from django.db import connection
from .models import Schema
def get_schema():
    cursor = connection.cursor()
cursor.execute('SHOW search_path')
search_path = cursor.fetchone()[0]
return Schema.objects.get(schema=search_path.split(',')[0])
|
bsd-3-clause
|
Python
|
|
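For context, PostgreSQL reports the connection's active schema through its search_path setting, and the helper above takes the first element; since each call issues a fresh SHOW, the result reflects whatever the connection's search_path is at that moment, which is where the concurrency worry in the commit message comes from. The parsing step on a typical value:
search_path = "tenant_1, public"  # as returned by SHOW search_path
active_schema = search_path.split(',')[0]
assert active_schema == "tenant_1"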
6306288b7b65481a7e0706d3515d673b2344d2f0
|
Bump version
|
beerfactory/hbmqtt
|
hbmqtt/__init__.py
|
hbmqtt/__init__.py
|
# Copyright (c) 2015 Nicolas JOUANIN
#
# See the file license.txt for copying permission.
VERSION = (0, 3, 0, 'alpha', 0)
|
# Copyright (c) 2015 Nicolas JOUANIN
#
# See the file license.txt for copying permission.
VERSION = (0, 2, 0, 'final', 0)
|
mit
|
Python
|
eebafcf8a7a34108fbae12645e469979496403ab
|
Add documentation and clean up daemon code.
|
myDevicesIoT/Cayenne-Agent,myDevicesIoT/Cayenne-Agent
|
myDevices/os/daemon.py
|
myDevices/os/daemon.py
|
"""
This module provides a class for restarting the agent if errors occur and exiting on critical failures.
"""
from sys import exit
from datetime import datetime
from myDevices.utils.logger import exception, info, warn, error, debug
from myDevices.os.services import ServiceManager
#defining reset timeout in seconds
RESET_TIMEOUT = 30
FAILURE_COUNT = 1000
failureCount = {}
startFailure = {}
errorList = (-3, -2, 12, 9, 24)
class Daemon:
"""class for restarting the agent if errors occur and exiting on critical failures."""
@staticmethod
def OnFailure(component, error=0):
"""Handle error in component and restart the agent if necessary"""
#-3=Temporary failure in name resolution
info('Daemon failure handling ' + str(error))
if error in errorList:
Daemon.Restart()
if component not in failureCount:
Daemon.Reset(component)
failureCount[component] += 1
now = datetime.now()
if startFailure[component] == 0:
startFailure[component] = now
elapsedTime = now - startFailure[component]
if (elapsedTime.total_seconds() >= RESET_TIMEOUT) or (failureCount[component] > FAILURE_COUNT):
warn('Daemon::OnFailure myDevices is going to restart after ' + str(component) + ' failed: ' + str(elapsedTime.total_seconds()) + ' seconds and ' + str(failureCount) + ' times')
Daemon.Restart()
@staticmethod
def Reset(component):
"""Reset failure count for component"""
startFailure[component] = 0
failureCount[component] = 0
@staticmethod
def Restart():
"""Restart the agent daemon"""
try:
info('Daemon restarting myDevices' )
(output, returncode) = ServiceManager.ExecuteCommand('sudo service myDevices restart')
debug(str(output) + ' ' + str(returncode))
del output
except:
exception("Daemon::Restart enexpected error")
Daemon.Exit()
@staticmethod
def Exit():
"""Stop the agent daemon"""
info('Critical failure. Closing myDevices process...')
exit('Daemon::Exit closing agent. Critical failure.')
|
#!/usr/bin/env python
from sys import exit
from datetime import datetime
from os.path import getmtime
from myDevices.utils.logger import exception, info, warn, error, debug
from myDevices.os.services import ServiceManager
#defining reset timeout in seconds
RESET_TIMEOUT=30
FAILURE_COUNT=1000
PYTHON_BIN='/usr/bin/python3'
failureCount={}
startFailure={}
errorList= (-3, -2, 12, 9, 24)
class Daemon:
def OnFailure(component, error=0):
#-3=Temporary failure in name resolution
info('Daemon failure handling ' + str(error))
if error in errorList:
Daemon.Restart()
if component not in failureCount:
Daemon.Reset(component)
failureCount[component]+=1
now = datetime.now()
if startFailure[component]==0:
startFailure[component]=now
elapsedTime=now-startFailure[component]
if (elapsedTime.total_seconds() >= RESET_TIMEOUT) or ( failureCount[component] > FAILURE_COUNT):
warn('Daemon::OnFailure myDevices is going to restart after ' +str(component) + ' failed: ' + str(elapsedTime.total_seconds()) + ' seconds and ' + str(failureCount) + ' times')
Daemon.Restart()
def Reset(component):
startFailure[component]=0
failureCount[component]=0
def Restart():
try:
info('Daemon Restarting myDevices' )
(output, returncode) = ServiceManager.ExecuteCommand('sudo service myDevices restart')
debug(str(output) + ' ' + str(returncode))
del output
except:
exception ("Daemon::Restart Unexpected error")
Daemon.Exit()
def Exit():
info('Critical failure. Closing myDevices process...')
exit('Daemon::Exit Closing agent. Critical failure.')
|
mit
|
Python
|
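The pattern in Daemon.OnFailure is a per-component failure budget: errors are tolerated until either RESET_TIMEOUT seconds have elapsed since the first failure or more than FAILURE_COUNT errors accumulate, at which point the agent restarts. The same bookkeeping, stripped down to a self-contained sketch:
from datetime import datetime, timedelta
RESET_TIMEOUT = 30  # seconds of sustained failure before restarting
FAILURE_COUNT = 1000  # or this many failures, whichever comes first
failures = {}  # component -> (first_failure_time, count)
def on_failure(component, now=None):
    now = now or datetime.now()
    first, count = failures.get(component, (now, 0))
    count += 1
    failures[component] = (first, count)
    elapsed = (now - first).total_seconds()
    return elapsed >= RESET_TIMEOUT or count > FAILURE_COUNT  # True => restart
start = datetime(2024, 1, 1, 12, 0, 0)
assert on_failure('cloud', now=start) is False
assert on_failure('cloud', now=start + timedelta(seconds=31)) is True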
6076b6a7824072b97936aaa3da3ba1acf2bc87d6
|
Bump version
|
b-mueller/mythril,b-mueller/mythril,b-mueller/mythril,b-mueller/mythril
|
mythril/__version__.py
|
mythril/__version__.py
|
"""This file contains the current Mythril version.
This file is suitable for sourcing inside POSIX shell, e.g. bash as well
as for importing into Python.
"""
__version__ = "v0.21.7"
|
"""This file contains the current Mythril version.
This file is suitable for sourcing inside POSIX shell, e.g. bash as well
as for importing into Python.
"""
__version__ = "v0.21.6"
|
mit
|
Python
|
e906e108ab5118ec1c8856a54b8ebe1fd69484ac
|
Add shebang to and update permissions of servefiles.py
|
Traiver/FBI,Jerry-Shaw/FBI,Traiver/FBI,Jerry-Shaw/FBI,Jerry-Shaw/FBI,Jerry-Shaw/FBI,Traiver/FBI
|
servefiles/servefiles.py
|
servefiles/servefiles.py
|
#!/bin/python
import os
import socket
import struct
import sys
import threading
import time
import urllib
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import TCPServer
from urlparse import urljoin
from urllib import pathname2url, quote
except ImportError:
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
from urllib.parse import urljoin, quote
from urllib.request import pathname2url
if len(sys.argv) < 3:
print("Usage: " + sys.argv[0] + " <ip> <file/directory>")
sys.exit(1)
ip = sys.argv[1]
directory = sys.argv[2]
if not os.path.exists(directory):
print(directory + ": No such file or directory.")
sys.exit(1)
print("Preparing data...")
baseUrl = [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1] + ":8080/"
payload = ""
if os.path.isfile(directory):
if directory.endswith(('.cia', '.tik')):
payload += baseUrl + quote(os.path.basename(directory))
directory = os.path.dirname(directory)
else:
for file in [ file for file in next(os.walk(directory))[2] if file.endswith(('.cia', '.tik')) ]:
payload += baseUrl + quote(file) + "\n"
if len(payload) == 0:
print("No files to serve.")
sys.exit(1)
if not directory == "":
os.chdir(directory)
print("")
print("URLS:")
print(payload)
print("")
print("Opening HTTP server on port 8080...")
server = TCPServer(("", 8080), SimpleHTTPRequestHandler)
thread = threading.Thread(target=server.serve_forever)
thread.start()
try:
print("Sending URL(s) to " + ip + ":5000...")
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, 5000))
try:
payloadBytes = bytes(payload, "ascii")
except:
payloadBytes = payload.encode("ascii")
networkPayload = struct.pack('!L', len(payloadBytes)) + payloadBytes
sentLength = 0
while sentLength < len(networkPayload):
sent = sock.send(networkPayload[sentLength:])
if sent == 0:
raise RuntimeError("Socket connection broken.")
sentLength += sent
while len(sock.recv(1)) < 1:
time.sleep(0.05)
sock.close()
except Exception as e:
print("Error: " + str(e))
print("Shutting down HTTP server...")
server.shutdown()
|
import os
import socket
import struct
import sys
import threading
import time
import urllib
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import TCPServer
from urlparse import urljoin
from urllib import pathname2url, quote
except ImportError:
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
from urllib.parse import urljoin, quote
from urllib.request import pathname2url
if len(sys.argv) < 3:
print("Usage: " + sys.argv[0] + " <ip> <file/directory>")
sys.exit(1)
ip = sys.argv[1]
directory = sys.argv[2]
if not os.path.exists(directory):
print(directory + ": No such file or directory.")
sys.exit(1)
print("Preparing data...")
baseUrl = [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1] + ":8080/"
payload = ""
if os.path.isfile(directory):
if directory.endswith(('.cia', '.tik')):
payload += baseUrl + quote(os.path.basename(directory))
directory = os.path.dirname(directory)
else:
for file in [ file for file in next(os.walk(directory))[2] if file.endswith(('.cia', '.tik')) ]:
payload += baseUrl + quote(file) + "\n"
if len(payload) == 0:
print("No files to serve.")
sys.exit(1)
if not directory == "":
os.chdir(directory)
print("")
print("URLS:")
print(payload)
print("")
print("Opening HTTP server on port 8080...")
server = TCPServer(("", 8080), SimpleHTTPRequestHandler)
thread = threading.Thread(target=server.serve_forever)
thread.start()
try:
print("Sending URL(s) to " + ip + ":5000...")
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, 5000))
try:
payloadBytes = bytes(payload, "ascii")
except:
payloadBytes = payload.encode("ascii")
networkPayload = struct.pack('!L', len(payloadBytes)) + payloadBytes
sentLength = 0
while sentLength < len(networkPayload):
sent = sock.send(networkPayload[sentLength:])
if sent == 0:
raise RuntimeError("Socket connection broken.")
sentLength += sent
while len(sock.recv(1)) < 1:
time.sleep(0.05)
sock.close()
except Exception as e:
print("Error: " + str(e))
print("Shutting down HTTP server...")
server.shutdown()
|
mit
|
Python
|
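The baseUrl one-liner in servefiles.py compresses a common trick: connecting a UDP socket to a public address makes the OS choose the outbound interface, and getsockname() then reveals the local IP without any packet being sent (connect() on a UDP socket only sets the default destination). Unrolled for readability:
import socket
def local_ip():
    """Return the LAN address the OS would use to reach the internet."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 53))  # no traffic is actually sent
        return s.getsockname()[0]
    finally:
        s.close()
base_url = local_ip() + ":8080/"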
d45e2237cccf9a29db93fd485de34b9cc4dc3cfe
|
Update gpio.py
|
souravsingh/beaglebone-codes,souravsingh/beaglebone-codes,souravsingh/beaglebone-codes
|
02traffic_python/gpio.py
|
02traffic_python/gpio.py
|
##########################################################
# * Python GPIO Functions for Traffic Signal Simulation
# * using BeagleBone Black running Debian 7 Linux distribution
##########################################################
import sys
import os
SYSFS_GPIO_DIR = "/sys/class/gpio"
def gpioUnexport (gpio):
try:
fo = open(SYSFS_GPIO_DIR + "/unexport","w")
fo.write(gpio)
fo.close()
return
except IOError:
return
def gpioExport (gpio):
try:
fo = open(SYSFS_GPIO_DIR + "/export","w")
fo.write(gpio)
fo.close()
return
except IOError:
return
def gpioSetDir (gpio, flag):
try:
fo = open(SYSFS_GPIO_DIR + "/gpio" + gpio + "/direction" ,"w")
fo.write(flag)
fo.close()
return
except IOError:
return
def gpioSetVal (gpio, val):
try:
fo = open(SYSFS_GPIO_DIR + "/gpio" + gpio + "/value" ,"w")
fo.write(val)
fo.close()
return
except IOError:
return
|
##########################################################
# * Python GPIO Functions for Traffic Signal Simulation
# * using BeagleBone Black running Debian 7 Linux distribution
##########################################################
# * Developed by MicroEmbedded Technologies
##########################################################
import sys
import os
SYSFS_GPIO_DIR = "/sys/class/gpio"
def gpioUnexport (gpio):
try:
fo = open(SYSFS_GPIO_DIR + "/unexport","w")
fo.write(gpio)
fo.close()
return
except IOError:
return
def gpioExport (gpio):
try:
fo = open(SYSFS_GPIO_DIR + "/export","w")
fo.write(gpio)
fo.close()
return
except IOError:
return
def gpioSetDir (gpio, flag):
try:
fo = open(SYSFS_GPIO_DIR + "/gpio" + gpio + "/direction" ,"w")
fo.write(flag)
fo.close()
return
except IOError:
return
def gpioSetVal (gpio, val):
try:
fo = open(SYSFS_GPIO_DIR + "/gpio" + gpio + "/value" ,"w")
fo.write(val)
fo.close()
return
except IOError:
return
|
apache-2.0
|
Python
|
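Typical use of these sysfs helpers, assuming the module above is imported, the target board exposes the legacy /sys/class/gpio interface, and the process has permission to write there; the pin number is purely illustrative:
import time
LED = "49"  # hypothetical GPIO number
gpioExport(LED)  # creates /sys/class/gpio/gpio49
gpioSetDir(LED, "out")  # configure the pin as an output
for _ in range(3):  # blink three times
    gpioSetVal(LED, "1")
    time.sleep(0.5)
    gpioSetVal(LED, "0")
    time.sleep(0.5)
gpioUnexport(LED)  # release the pin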
22ab27f9966c19c1f3496e445e460f9ac6400de7
|
Fix doubling order admin when custom order model used
|
khchine5/django-shop,chriscauley/django-shop,jrief/django-shop,airtonix/django-shop,nimbis/django-shop,khchine5/django-shop,awesto/django-shop,dwx9/test,chriscauley/django-shop,katomaso/django-shop,ojii/django-shop,airtonix/django-shop,fusionbox/django-shop,katomaso/django-shop,divio/django-shop,creimers/django-shop,schacki/django-shop,schacki/django-shop,jrief/django-shop,DavideyLee/django-shop,ojii/django-shop,schacki/django-shop,dwx9/test,ojii/django-shop,febsn/django-shop,schacki/django-shop,fusionbox/django-shop,nimbis/django-shop,nimbis/django-shop,rfleschenberg/django-shop,bmihelac/django-shop,creimers/django-shop,rfleschenberg/django-shop,atheiste/django-shop,airtonix/django-shop,pjdelport/django-shop,creimers/django-shop,jrief/django-shop,jrief/django-shop,atheiste/django-shop,rfleschenberg/django-shop,katomaso/django-shop,pjdelport/django-shop,jrutila/django-shop,pjdelport/django-shop,febsn/django-shop,chriscauley/django-shop,khchine5/django-shop,divio/django-shop,thenewguy/django-shop,atheiste/django-shop,jrutila/django-shop,divio/django-shop,bmihelac/django-shop,thenewguy/django-shop,awesto/django-shop,jrutila/django-shop,dwx9/test,khchine5/django-shop,awesto/django-shop,rfleschenberg/django-shop,febsn/django-shop,DavideyLee/django-shop,nimbis/django-shop
|
shop/admin/orderadmin.py
|
shop/admin/orderadmin.py
|
#-*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.admin.options import ModelAdmin
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from shop.models.ordermodel import (Order, OrderItem,
OrderExtraInfo, ExtraOrderPriceField, OrderPayment)
class OrderExtraInfoInline(admin.TabularInline):
model = OrderExtraInfo
extra = 0
class OrderPaymentInline(admin.TabularInline):
model = OrderPayment
extra = 0
class ExtraOrderPriceFieldInline(admin.TabularInline):
model = ExtraOrderPriceField
extra = 0
class OrderItemInline(admin.TabularInline):
model = OrderItem
extra = 0
#TODO: add ExtraOrderItemPriceField inline, ideas?
class OrderAdmin(ModelAdmin):
list_display = ('id', 'user', 'shipping_name', 'status','order_total',
'payment_method', 'created')
list_filter = ('status', 'payment_method', )
search_fields = ('id', 'shipping_name', 'user__username')
date_hierarchy = 'created'
inlines = (OrderItemInline, OrderExtraInfoInline,
ExtraOrderPriceFieldInline, OrderPaymentInline)
readonly_fields = ('created', 'modified',)
fieldsets = (
(None, {'fields': ('user', 'status', 'order_total',
'order_subtotal', 'payment_method', 'created', 'modified')}),
(_('Shipping'), {
'fields': ('shipping_name', 'shipping_address',
'shipping_address2', 'shipping_city', 'shipping_zip_code',
'shipping_state', 'shipping_country',)
}),
(_('Billing'), {
'fields': ('billing_name', 'billing_address',
'billing_address2', 'billing_city', 'billing_zip_code',
'billing_state', 'billing_country',)
}),
)
ORDER_MODEL = getattr(settings, 'SHOP_ORDER_MODEL', None)
if not ORDER_MODEL:
admin.site.register(Order, OrderAdmin)
|
#-*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.admin.options import ModelAdmin
from django.utils.translation import ugettext_lazy as _
from shop.models.ordermodel import (Order, OrderItem,
OrderExtraInfo, ExtraOrderPriceField, OrderPayment)
class OrderExtraInfoInline(admin.TabularInline):
model = OrderExtraInfo
extra = 0
class OrderPaymentInline(admin.TabularInline):
model = OrderPayment
extra = 0
class ExtraOrderPriceFieldInline(admin.TabularInline):
model = ExtraOrderPriceField
extra = 0
class OrderItemInline(admin.TabularInline):
model = OrderItem
extra = 0
#TODO: add ExtraOrderItemPriceField inline, ideas?
class OrderAdmin(ModelAdmin):
list_display = ('id', 'user', 'shipping_name', 'status','order_total',
'payment_method', 'created')
list_filter = ('status', 'payment_method', )
search_fields = ('id', 'shipping_name', 'user__username')
date_hierarchy = 'created'
inlines = (OrderItemInline, OrderExtraInfoInline,
ExtraOrderPriceFieldInline, OrderPaymentInline)
readonly_fields = ('created', 'modified',)
fieldsets = (
(None, {'fields': ('user', 'status', 'order_total',
'order_subtotal', 'payment_method', 'created', 'modified')}),
(_('Shipping'), {
'fields': ('shipping_name', 'shipping_address',
'shipping_address2', 'shipping_city', 'shipping_zip_code',
'shipping_state', 'shipping_country',)
}),
(_('Billing'), {
'fields': ('billing_name', 'billing_address',
'billing_address2', 'billing_city', 'billing_zip_code',
'billing_state', 'billing_country',)
}),
)
admin.site.register(Order, OrderAdmin)
|
bsd-3-clause
|
Python
|
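With the guard above, a project that sets SHOP_ORDER_MODEL is expected to register an admin for its own order model; a hedged sketch of what that might look like (the model, module path, and extra field are hypothetical):
# myproject/admin.py
from django.contrib import admin
from shop.admin.orderadmin import OrderAdmin
from myproject.models import MyOrder  # the model named in SHOP_ORDER_MODEL
class MyOrderAdmin(OrderAdmin):
    list_display = OrderAdmin.list_display + ('tracking_number',)  # hypothetical field
admin.site.register(MyOrder, MyOrderAdmin)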
1b3e5d52911f3c623b8f320adadea2d8f3ee226a
|
Implement web service based Pathway Commons client
|
pvtodorov/indra,jmuhlich/indra,johnbachman/belpy,sorgerlab/belpy,jmuhlich/indra,johnbachman/indra,pvtodorov/indra,pvtodorov/indra,johnbachman/indra,sorgerlab/belpy,sorgerlab/indra,johnbachman/belpy,sorgerlab/belpy,pvtodorov/indra,johnbachman/belpy,sorgerlab/indra,jmuhlich/indra,johnbachman/indra,bgyori/indra,bgyori/indra,bgyori/indra,sorgerlab/indra
|
indra/biopax/pathway_commons_client.py
|
indra/biopax/pathway_commons_client.py
|
import urllib, urllib2
from indra.java_vm import autoclass, JavaException
pc2_url = 'http://www.pathwaycommons.org/pc2/'
def send_request(kind, source, target=None):
kind_str = kind.lower()
    if kind_str not in ['neighborhood', 'pathsbetween', 'pathsfromto']:
print 'Invalid query type %s' % kind_str
return None
organism = '9606'
if isinstance(source, basestring):
source_str = source
else:
source_str = ','.join(source)
params = {'kind': kind_str,
'organism': organism,
              'source': source_str,
'format': 'BIOPAX'}
if target is not None:
if isinstance(target, basestring):
target_str = target
else:
target_str = ','.join(target)
params['target'] = target_str
print 'Sending Pathway Commons query...'
res = urllib2.urlopen(pc2_url + 'graph', data=urllib.urlencode(params))
owl_str = res.read()
model = owl_str_to_model(owl_str)
if model is not None:
print 'Pathway Commons query returned a model...'
return model
def owl_str_to_model(owl_str):
io_class = autoclass('org.biopax.paxtools.io.SimpleIOHandler')
io = io_class(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3)
bais = autoclass('java.io.ByteArrayInputStream')
scs = autoclass('java.nio.charset.StandardCharsets')
jstr = autoclass('java.lang.String')
istream = bais(jstr(owl_str).getBytes(scs.UTF_8));
biopax_model = io.convertFromOWL(istream)
return biopax_model
def owl_to_model(fname):
io_class = autoclass('org.biopax.paxtools.io.SimpleIOHandler')
io = io_class(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3)
try:
file_is = autoclass('java.io.FileInputStream')(fname)
except JavaException:
print 'Could not open data file %s' % fname
return
try:
biopax_model = io.convertFromOWL(file_is)
except JavaException:
        print 'Could not convert data file %s to BioPax model' % fname
return
file_is.close()
return biopax_model
def model_to_owl(model, fname):
io_class = autoclass('org.biopax.paxtools.io.SimpleIOHandler')
io = io_class(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3)
try:
fileOS = autoclass('java.io.FileOutputStream')(fname)
except JavaException:
print 'Could not open data file %s' % fname
return
l3_factory = autoclass('org.biopax.paxtools.model.BioPAXLevel').L3.getDefaultFactory()
model_out = l3_factory.createModel()
for r in model.getObjects().toArray():
model_out.add(r)
io.convertToOWL(model_out, fileOS)
fileOS.close()
|
from indra.java_vm import autoclass, JavaException
def run_pc_query(query_type, source_genes, target_genes=None, neighbor_limit=1):
cpath_client = autoclass('cpath.client.CPathClient').\
newInstance('http://www.pathwaycommons.org/pc2/')
query = cpath_client.createGraphQuery()
query.kind(query_type)
query.sources(source_genes)
query.targets(target_genes)
query.organismFilter(['homo sapiens'])
query.mergeEquivalentInteractions(True)
query.limit(autoclass('java.lang.Integer')(neighbor_limit))
# Execute query
print 'Sending Pathway Commons query...'
model = query.result()
if model is not None:
print 'Pathway Commons query returned model...'
else:
print 'Pathway Commons query returned blank model...'
return model
def owl_to_model(fname):
io_class = autoclass('org.biopax.paxtools.io.SimpleIOHandler')
io = io_class(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3)
try:
file_is = autoclass('java.io.FileInputStream')(fname)
except JavaException:
print 'Could not open data file %s' % fname
return
try:
biopax_model = io.convertFromOWL(file_is)
except JavaException:
        print 'Could not convert data file %s to BioPax model' % fname
return
file_is.close()
return biopax_model
def model_to_owl(model, fname):
io_class = autoclass('org.biopax.paxtools.io.SimpleIOHandler')
io = io_class(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3)
try:
fileOS = autoclass('java.io.FileOutputStream')(fname)
except JavaException:
print 'Could not open data file %s' % fname
return
l3_factory = autoclass('org.biopax.paxtools.model.BioPAXLevel').L3.getDefaultFactory()
model_out = l3_factory.createModel()
for r in model.getObjects().toArray():
model_out.add(r)
io.convertToOWL(model_out, fileOS)
fileOS.close()
|
bsd-2-clause
|
Python
|
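Example use of the web-service client above; the calls hit the live Pathway Commons endpoint, so network access is required and the gene symbols are illustrative (Python 2, matching the module):
from indra.biopax import pathway_commons_client as pcc
# Neighborhood of a single gene:
model = pcc.send_request('neighborhood', ['EGFR'])
# Directed paths between a source set and a target set:
model = pcc.send_request('pathsfromto', ['EGFR'], target=['MAPK1'])
if model is not None:
    pcc.model_to_owl(model, 'egfr_mapk1.owl')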
a7062bb3d87954478f4be23a8ac2cc3d125804e7
|
resolve #13: consecutive blank row are preserved
|
chfw/pyexcel-ods,chfw/pyexcel-ods
|
tests/test_bug_fixes.py
|
tests/test_bug_fixes.py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import os
from pyexcel_ods import get_data, save_data
from nose.tools import raises, eq_
def test_bug_fix_for_issue_1():
data = get_data(os.path.join("tests", "fixtures", "repeated.ods"))
assert data["Sheet1"] == [['repeated', 'repeated', 'repeated', 'repeated']]
def test_bug_fix_for_issue_2():
data = {}
data.update({"Sheet 1": [[1, 2, 3], [4, 5, 6]]})
data.update({"Sheet 2": [[u"row 1", u"Héllô!", u"HolÁ!"]]})
save_data("your_file.ods", data)
new_data = get_data("your_file.ods")
assert new_data["Sheet 2"] == [[u'row 1', u'H\xe9ll\xf4!', u'Hol\xc1!']]
def test_date_util_parse():
from pyexcel_ods.ods import date_value
value = "2015-08-17T19:20:00"
d = date_value(value)
assert d.strftime("%Y-%m-%dT%H:%M:%S") == "2015-08-17T19:20:00"
value = "2015-08-17"
d = date_value(value)
assert d.strftime("%Y-%m-%d") == "2015-08-17"
value = "2015-08-17T19:20:59.999999"
d = date_value(value)
assert d.strftime("%Y-%m-%dT%H:%M:%S") == "2015-08-17T19:20:59"
value = "2015-08-17T19:20:59.99999"
d = date_value(value)
assert d.strftime("%Y-%m-%dT%H:%M:%S") == "2015-08-17T19:20:59"
value = "2015-08-17T19:20:59.999999999999999"
d = date_value(value)
assert d.strftime("%Y-%m-%dT%H:%M:%S") == "2015-08-17T19:20:59"
@raises(Exception)
def test_invalid_date():
from pyexcel_ods.ods import date_value
value = "2015-08-"
date_value(value)
@raises(Exception)
def test_fake_date_time_10():
from pyexcel_ods.ods import date_value
date_value("1234567890")
@raises(Exception)
def test_fake_date_time_19():
from pyexcel_ods.ods import date_value
date_value("1234567890123456789")
@raises(Exception)
def test_fake_date_time_20():
from pyexcel_ods.ods import date_value
date_value("12345678901234567890")
def test_issue_13():
test_file = "test_issue_13.ods"
data = [
[1,2],
[],
[],
[],
[3,4]
]
save_data(test_file, {test_file: data})
written_data = get_data(test_file)
eq_(data, written_data[test_file])
os.unlink(test_file)
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import os
from pyexcel_ods import get_data, save_data
from nose.tools import raises
def test_bug_fix_for_issue_1():
data = get_data(os.path.join("tests", "fixtures", "repeated.ods"))
assert data["Sheet1"] == [['repeated', 'repeated', 'repeated', 'repeated']]
def test_bug_fix_for_issue_2():
data = {}
data.update({"Sheet 1": [[1, 2, 3], [4, 5, 6]]})
data.update({"Sheet 2": [[u"row 1", u"Héllô!", u"HolÁ!"]]})
save_data("your_file.ods", data)
new_data = get_data("your_file.ods")
assert new_data["Sheet 2"] == [[u'row 1', u'H\xe9ll\xf4!', u'Hol\xc1!']]
def test_date_util_parse():
from pyexcel_ods.ods import date_value
value = "2015-08-17T19:20:00"
d = date_value(value)
assert d.strftime("%Y-%m-%dT%H:%M:%S") == "2015-08-17T19:20:00"
value = "2015-08-17"
d = date_value(value)
assert d.strftime("%Y-%m-%d") == "2015-08-17"
value = "2015-08-17T19:20:59.999999"
d = date_value(value)
assert d.strftime("%Y-%m-%dT%H:%M:%S") == "2015-08-17T19:20:59"
value = "2015-08-17T19:20:59.99999"
d = date_value(value)
assert d.strftime("%Y-%m-%dT%H:%M:%S") == "2015-08-17T19:20:59"
value = "2015-08-17T19:20:59.999999999999999"
d = date_value(value)
assert d.strftime("%Y-%m-%dT%H:%M:%S") == "2015-08-17T19:20:59"
@raises(Exception)
def test_invalid_date():
from pyexcel_ods.ods import date_value
value = "2015-08-"
date_value(value)
@raises(Exception)
def test_fake_date_time_10():
from pyexcel_ods.ods import date_value
date_value("1234567890")
@raises(Exception)
def test_fake_date_time_19():
from pyexcel_ods.ods import date_value
date_value("1234567890123456789")
@raises(Exception)
def test_fake_date_time_20():
from pyexcel_ods.ods import date_value
date_value("12345678901234567890")
|
bsd-3-clause
|
Python
|
12d244ce9bd15d95817d4c4d774a1ab1758db894
|
fix broken build
|
lordakshaya/pyexcel,chfw/pyexcel,lordakshaya/pyexcel,lordakshaya/pyexcel,chfw/pyexcel
|
tests/test_extension.py
|
tests/test_extension.py
|
from nose.tools import raises
class TestExt:
def test_test(self):
"""test test"""
from pyexcel.ext import test
from pyexcel.io import READERS
from pyexcel.io import WRITERS
assert READERS['test'] == 'test'
assert WRITERS['test'] == 'test'
@raises(ImportError)
def test_unknown(self):
"""test unknown"""
from pyexcel.ext import unknown
def test_tabulate(self):
import pyexcel as pe
from pyexcel.ext import presentation
a = [[1,1]]
m = pe.sheets.Matrix(a)
print(str(m))
assert str(m) == "pyexcel.sheets.matrix.Matrix"
def tearDown(self):
from pyexcel.presentation import STRINGIFICATION
STRINGIFICATION = {}
|
from nose.tools import raises
class TestExt:
def test_test(self):
"""test test"""
from pyexcel.ext import test
from pyexcel.io import READERS
from pyexcel.io import WRITERS
assert READERS['test'] == 'test'
assert WRITERS['test'] == 'test'
@raises(ImportError)
def test_unknown(self):
"""test unknown"""
from pyexcel.ext import unknown
def test_tabulate(self):
import pyexcel as pe
from pyexcel.ext import presentation
a = [[1,1]]
m = pe.sheets.Matrix(a)
print(str(m))
assert str(m) == "pyexcel.sheets.matrix.Matrix"
|
bsd-3-clause
|
Python
|
47b32b1b2d5fe81dcf86c78d61690c1f0572b8ea
|
Add failing name and docstring test for things exported
|
Suor/funcy
|
tests/test_interface.py
|
tests/test_interface.py
|
import pkgutil
import pytest
import funcy
from funcy.cross import PY2, PY3
from funcy.py2 import cat
from funcy import py2, py3
py = py2 if PY2 else py3
# Introspect all modules
exclude = ('cross', '_inspect', 'py2', 'py3', 'simple_funcs', 'funcmakers')
module_names = list(name for _, name, _ in pkgutil.iter_modules(funcy.__path__)
if name not in exclude)
modules = [getattr(funcy, name) for name in module_names]
def test_match():
assert funcy.__all__ == py.__all__
@pytest.mark.skipif(PY3, reason="modules use python 2 internally")
def test_full_py2():
assert sorted(funcy.__all__) == sorted(cat(m.__all__ for m in modules))
def test_full():
assert len(py2.__all__) == len(py3.__all__)
def test_name_clashes():
counts = py2.count_reps(py2.icat(m.__all__ for m in modules))
clashes = [name for name, c in counts.items() if c > 1]
assert not clashes, 'names clash for ' + ', '.join(clashes)
def test_renames():
inames = [n for n in py2.__all__ if n.startswith('i')]
ipairs = [n[1:] for n in inames if n[1:] in py2.__all__]
for name in inames:
if name != 'izip':
assert name in py3.__all__ or name[1:] in py3.__all__
for name in ipairs:
assert name in py3.__all__
assert 'l' + name in py3.__all__
lnames = [n for n in py3.__all__ if n.startswith('l')]
lpairs = [n[1:] for n in lnames if n[1:] in py3.__all__]
for name in lnames:
if name != 'lzip':
assert name in py2.__all__ or name[1:] in py2.__all__
for name in lpairs:
assert name in py2.__all__
assert 'i' + name in py2.__all__
    # Only inames are renamed
assert set(py2.__all__) - set(py3.__all__) <= set(inames)
    # Only lnames are new, and zip_values/zip_dicts
assert set(py3.__all__) - set(py2.__all__) <= set(lnames) | set(['zip_values', 'zip_dicts'])
def test_docs():
exports = [(name, getattr(funcy, name)) for name in funcy.__all__
if name not in ('print_errors', 'print_durations') and
getattr(funcy, name).__module__ != 'funcy.types']
# NOTE: we are testing this way and not with all() to immediately get a list of offenders
assert [name for name, f in exports if f.__name__ == '<lambda>'] == []
assert [name for name, f in exports if f.__doc__ is None] == []
|
import pkgutil
import pytest
import funcy
from funcy.cross import PY2, PY3
from funcy.py2 import cat
from funcy import py2, py3
py = py2 if PY2 else py3
# Introspect all modules
exclude = ('cross', '_inspect', 'py2', 'py3', 'simple_funcs', 'funcmakers')
module_names = list(name for _, name, _ in pkgutil.iter_modules(funcy.__path__)
if name not in exclude)
modules = [getattr(funcy, name) for name in module_names]
def test_match():
assert funcy.__all__ == py.__all__
@pytest.mark.skipif(PY3, reason="modules use python 2 internally")
def test_full_py2():
assert sorted(funcy.__all__) == sorted(cat(m.__all__ for m in modules))
def test_full():
assert len(py2.__all__) == len(py3.__all__)
def test_name_clashes():
counts = py2.count_reps(py2.icat(m.__all__ for m in modules))
clashes = [name for name, c in counts.items() if c > 1]
assert not clashes, 'names clash for ' + ', '.join(clashes)
def test_renames():
inames = [n for n in py2.__all__ if n.startswith('i')]
ipairs = [n[1:] for n in inames if n[1:] in py2.__all__]
for name in inames:
if name != 'izip':
assert name in py3.__all__ or name[1:] in py3.__all__
for name in ipairs:
assert name in py3.__all__
assert 'l' + name in py3.__all__
lnames = [n for n in py3.__all__ if n.startswith('l')]
lpairs = [n[1:] for n in lnames if n[1:] in py3.__all__]
for name in lnames:
if name != 'lzip':
assert name in py2.__all__ or name[1:] in py2.__all__
for name in lpairs:
assert name in py2.__all__
assert 'i' + name in py2.__all__
    # Only inames are renamed
assert set(py2.__all__) - set(py3.__all__) <= set(inames)
    # Only lnames are new, and zip_values/zip_dicts
assert set(py3.__all__) - set(py2.__all__) <= set(lnames) | set(['zip_values', 'zip_dicts'])
|
bsd-3-clause
|
Python
|
5baa216b615b39fe5d9bf5bb71e9ae8048ef4dc0
|
delete samples on metric delete
|
shaunsephton/holodeck,euan/django-holodeck,euan/django-holodeck,shaunsephton/holodeck
|
holodeck/models.py
|
holodeck/models.py
|
import uuid
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_delete
from django.dispatch import receiver
from holodeck.utils import get_widget_type_choices, load_class_by_string, \
metric_to_shard_mapper, sample_to_shard_mapper
class Dashboard(models.Model):
name = models.CharField(max_length=255)
owner = models.ForeignKey(User, null=True)
def __unicode__(self):
return self.name
class Metric(models.Model):
name = models.CharField(max_length=255)
dashboard = models.ForeignKey('holodeck.Dashboard')
widget_type = models.CharField(
max_length=64,
choices=get_widget_type_choices()
)
api_key = models.CharField(
max_length=32,
unique=True,
blank=True,
null=True
)
def __unicode__(self):
return self.name
@classmethod
def generate_api_key(cls):
return uuid.uuid4().hex
def render(self):
return load_class_by_string(self.widget_type)().render(self)
@property
def sample_set(self):
return Sample.objects.filter(metric_id=self.id).using(
'shard_%s' % metric_to_shard_mapper(self))
def save(self, *args, **kwargs):
if not self.api_key:
self.api_key = Metric.generate_api_key()
super(Metric, self).save(*args, **kwargs)
class Sample(models.Model):
metric_id = models.IntegerField(max_length=64)
integer_value = models.IntegerField()
string_value = models.CharField(max_length=64)
timestamp = models.DateTimeField()
def save(self, *args, **kwargs):
self.full_clean()
kwargs.update({'using': 'shard_%s' % sample_to_shard_mapper(self)})
super(Sample, self).save(*args, **kwargs)
@receiver(post_delete, sender=Metric)
def metric_post_delete_handler(sender, instance, **kwargs):
"""
    Because the relation between Sample and Metric is handled at the
    application level, ensure samples are deleted when a metric is deleted.
"""
instance.sample_set.all().delete()
|
import uuid
from django.db import models
from holodeck.utils import get_widget_type_choices, load_class_by_string, \
metric_to_shard_mapper, sample_to_shard_mapper
from django.contrib.auth.models import User
class Dashboard(models.Model):
name = models.CharField(max_length=255)
owner = models.ForeignKey(User, null=True)
def __unicode__(self):
return self.name
class Metric(models.Model):
name = models.CharField(max_length=255)
dashboard = models.ForeignKey('holodeck.Dashboard')
widget_type = models.CharField(
max_length=64,
choices=get_widget_type_choices()
)
api_key = models.CharField(
max_length=32,
unique=True,
blank=True,
null=True
)
def __unicode__(self):
return self.name
@classmethod
def generate_api_key(cls):
return uuid.uuid4().hex
def render(self):
return load_class_by_string(self.widget_type)().render(self)
@property
def sample_set(self):
return Sample.objects.filter(metric_id=self.id).using(
'shard_%s' % metric_to_shard_mapper(self))
def save(self, *args, **kwargs):
if not self.api_key:
self.api_key = Metric.generate_api_key()
super(Metric, self).save(*args, **kwargs)
class Sample(models.Model):
metric_id = models.IntegerField(max_length=64)
integer_value = models.IntegerField()
string_value = models.CharField(max_length=64)
timestamp = models.DateTimeField()
def save(self, *args, **kwargs):
self.full_clean()
kwargs.update({'using': 'shard_%s' % sample_to_shard_mapper(self)})
super(Sample, self).save(*args, **kwargs)
|
bsd-3-clause
|
Python
|
62150ec45c9c062397f0ac0270466b4497d459de
|
Fix world time plugin
|
skoczen/will,skoczen/will,skoczen/will
|
will/plugins/productivity/world_time.py
|
will/plugins/productivity/world_time.py
|
import datetime
import pytz
import requests
import time
from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template, require_settings
from will import settings
def get_location(place):
payload = {'address': place, 'sensor': False}
r = requests.get('http://maps.googleapis.com/maps/api/geocode/json', params=payload)
resp = r.json()
location = resp["results"][0]["geometry"]["location"]
return location
def get_timezone(lat, lng):
payload = {'location': "%s,%s" % (lat, lng), 'timestamp': int(time.time()), 'sensor': False}
r = requests.get('https://maps.googleapis.com/maps/api/timezone/json', params=payload)
resp = r.json()
tz = resp['timeZoneId']
return tz
class TimePlugin(WillPlugin):
@respond_to("what time is it in (?P<place>.*)")
def what_time_is_it_in(self, message, place):
"""what time is it in ___: Say the time in almost any city on earth."""
location = get_location(place)
tz = get_timezone(location['lat'], location['lng'])
ct = datetime.datetime.now(tz=pytz.timezone(tz))
self.say("It's %s in %s." % (self.to_natural_day_and_time(ct), place), message=message)
@respond_to("what time is it(\?)?$", multiline=False)
def what_time_is_it(self, message):
"""what time is it: Say the time where I am."""
now = datetime.datetime.now()
self.say("It's %s." % self.to_natural_day_and_time(now, with_timezone=True), message=message)
|
import datetime
import requests
from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template, require_settings
from will import settings
class TimePlugin(WillPlugin):
@respond_to("what time is it in (?P<place>.*)")
def what_time_is_it_in(self, message, place):
"""what time is it in ___: Say the time in almost any city on earth."""
if (
not hasattr(settings, "WORLD_WEATHER_ONLINE_KEY") and
not hasattr(settings, "WORLD_WEATHER_ONLINE_V2_KEY")
):
self.say(
"I need a world weather online key to do that.\n"
"You can get one at http://developer.worldweatheronline.com, "
"and then set the key as WORLD_WEATHER_ONLINE_V2_KEY",
message=message
)
else:
if hasattr(settings, "WORLD_WEATHER_ONLINE_V2_KEY"):
r = requests.get(
"http://api2.worldweatheronline.com/free/v2/tz.ashx?q=%s&format=json&key=%s" %
(place, settings.WORLD_WEATHER_ONLINE_V2_KEY)
)
elif hasattr(settings, "WORLD_WEATHER_ONLINE_KEY"):
r = requests.get(
"http://api2.worldweatheronline.com/free/v1/tz.ashx?q=%s&format=json&key=%s" %
(place, settings.WORLD_WEATHER_ONLINE_KEY)
)
resp = r.json()
if "request" in resp["data"] and len(resp["data"]["request"]) > 0:
place = resp["data"]["request"][0]["query"]
current_time = self.parse_natural_time(resp["data"]["time_zone"][0]["localtime"])
self.say("It's %s in %s." % (self.to_natural_day_and_time(current_time), place), message=message)
else:
self.say("I couldn't find anywhere named %s." % (place, ), message=message)
@respond_to("what time is it(\?)?$", multiline=False)
def what_time_is_it(self, message):
"""what time is it: Say the time where I am."""
now = datetime.datetime.now()
self.say("It's %s." % self.to_natural_day_and_time(now, with_timezone=True), message=message)
|
mit
|
Python
|
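The rewrite swaps World Weather Online for two Google Maps endpoints: geocode the place name, then resolve the coordinates to an IANA timezone. The same two calls sketched outside the plugin (network access required; note the geocoding API may nowadays insist on an API key, which the code above does not pass):
import datetime
import pytz
# Reusing get_location and get_timezone from the plugin module above.
location = get_location("Cape Town")  # {'lat': ..., 'lng': ...}
tz_name = get_timezone(location['lat'], location['lng'])  # e.g. 'Africa/Johannesburg'
now = datetime.datetime.now(tz=pytz.timezone(tz_name))
print(now.isoformat())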
47bb5b64dfec5ea4718d8eac4c204f8e61dd60f8
|
Add test that checks relative variable initialisation
|
OceanPARCELS/parcels,OceanPARCELS/parcels
|
tests/test_particles.py
|
tests/test_particles.py
|
from parcels import Grid, ScipyParticle, JITParticle, Variable
import numpy as np
import pytest
from operator import attrgetter
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
@pytest.fixture
def grid(xdim=100, ydim=100):
U = np.zeros((xdim, ydim), dtype=np.float32)
V = np.zeros((xdim, ydim), dtype=np.float32)
lon = np.linspace(0, 1, xdim, dtype=np.float32)
lat = np.linspace(0, 1, ydim, dtype=np.float32)
return Grid.from_data(U, lon, lat, V, lon, lat, mesh='flat')
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_variable_init(grid, mode, npart=10):
"""Test that checks correct initialisation of custom variables"""
class TestParticle(ptype[mode]):
p_float = Variable('p_float', dtype=np.float32, default=10.)
p_double = Variable('p_double', dtype=np.float64, default=11.)
p_int = Variable('p_int', dtype=np.int32, default=12.)
pset = grid.ParticleSet(npart, pclass=TestParticle,
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
assert np.array([isinstance(p.p_float, np.float32) for p in pset]).all()
assert np.allclose([p.p_float for p in pset], 10., rtol=1e-12)
assert np.array([isinstance(p.p_double, np.float64) for p in pset]).all()
assert np.allclose([p.p_double for p in pset], 11., rtol=1e-12)
assert np.array([isinstance(p.p_int, np.int32) for p in pset]).all()
assert np.allclose([p.p_int for p in pset], 12, rtol=1e-12)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_variable_init_relative(grid, mode, npart=10):
"""Test that checks relative initialisation of custom variables"""
class TestParticle(ptype[mode]):
p_base = Variable('p_base', dtype=np.float32, default=10.)
p_relative = Variable('p_relative', dtype=np.float32,
default=attrgetter('p_base'))
pset = grid.ParticleSet(npart, pclass=TestParticle,
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
assert np.allclose([p.p_base for p in pset], 10., rtol=1e-12)
assert np.allclose([p.p_relative for p in pset], 10., rtol=1e-12)
|
from parcels import Grid, ScipyParticle, JITParticle, Variable
import numpy as np
import pytest
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
@pytest.fixture
def grid(xdim=100, ydim=100):
U = np.zeros((xdim, ydim), dtype=np.float32)
V = np.zeros((xdim, ydim), dtype=np.float32)
lon = np.linspace(0, 1, xdim, dtype=np.float32)
lat = np.linspace(0, 1, ydim, dtype=np.float32)
return Grid.from_data(U, lon, lat, V, lon, lat, mesh='flat')
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_variable_init(grid, mode, npart=10):
class TestParticle(ptype[mode]):
p_float = Variable('p_float', dtype=np.float32, default=10.)
p_double = Variable('p_double', dtype=np.float64, default=11.)
p_int = Variable('p_int', dtype=np.int32, default=12)
pset = grid.ParticleSet(npart, pclass=TestParticle,
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
assert np.array([isinstance(p.p_float, np.float32) for p in pset]).all()
assert np.allclose([p.p_float for p in pset], 10., rtol=1e-12)
assert np.array([isinstance(p.p_double, np.float64) for p in pset]).all()
assert np.allclose([p.p_double for p in pset], 11., rtol=1e-12)
assert np.array([isinstance(p.p_int, np.int32) for p in pset]).all()
assert np.allclose([p.p_int for p in pset], 12., rtol=1e-12)
|
mit
|
Python
|
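Aside: the relative-default pattern exercised by test_variable_init_relative above can be sketched in plain Python. This is a minimal illustration, not the parcels implementation; it assumes dict insertion order (Python 3.7+) so that p_base is set before p_relative.
from operator import attrgetter

# Callable defaults are resolved against the instance; plain defaults are
# copied as-is. attrgetter('p_base')(self) returns self.p_base.
DEFAULTS = {'p_base': 10.0, 'p_relative': attrgetter('p_base')}

class Particle:
    def __init__(self):
        for name, default in DEFAULTS.items():
            setattr(self, name, default(self) if callable(default) else default)

p = Particle()
assert p.p_base == p.p_relative == 10.0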
def480fd6b44e85cb85bcb3ed8cc0b98d771ee97
|
Rework test.
|
babble/babble,babble/babble,babble/babble,babble/babble,babble/babble,babble/babble
|
src/test/ed/lang/python/import4_test.py
|
src/test/ed/lang/python/import4_test.py
|
'''
Copyright (C) 2008 10gen Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License, version 3,
as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
try:
import google.atom.service
except ImportError, e:
# This is OK -- can't import from outside a site with a _config packages map
pass
else:
raise AssertionError
import import3_help
for globals in [{}, None]:
for locals in [{}, None]:
for fromlist in [[]]: # could try None here too
m = __import__('import3_help', globals, locals, fromlist)
assert m == import3_help
|
'''
Copyright (C) 2008 10gen Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License, version 3,
as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
try:
import google.atom.service
except ImportError, e:
# This is OK -- can't import from outside a site with a _config packages map
pass
else:
raise AssertionError
import import3_help
m = __import__('import3_help', {}, {}, [])
assert m == import3_help
m = __import__('import3_help', {}, {}, ['foo'])
assert m == import3_help
m = __import__('import3_help', None, None, [])
assert m == import3_help
m = __import__('import3_help', None, None, ['foo'])
assert m == import3_help
|
apache-2.0
|
Python
|
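Aside: the rework collapses four hand-written __import__ calls into nested loops. The behaviour under test is that, for a top-level module, the globals/locals arguments and the fromlist do not change the returned module. A standalone illustration using the stdlib json module rather than the site-local import3_help:
import json

for g in ({}, None):
    for l in ({}, None):
        for fromlist in ([], ['foo']):
            # For a non-package module, __import__ returns the module itself
            # regardless of globals/locals and of the fromlist contents.
            assert __import__('json', g, l, fromlist) is json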
519ad83f47ead62549c2e0a533ffd3ff5488e384
|
Add lint test and format generated code (#4114)
|
googleapis/google-cloud-java,googleapis/google-cloud-java,googleapis/google-cloud-java
|
java-asset/google-cloud-asset/synth.py
|
java-asset/google-cloud-asset/synth.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.java as java
gapic = gcp.GAPICGenerator()
service = 'asset'
versions = ['v1beta1']
config_pattern = '/google/cloud/asset/artman_cloudasset_{version}.yaml'
for version in versions:
library = gapic.java_library(
service=service,
version=version,
config_path=config_pattern.format(version=version),
artman_output_name='')
s.copy(library / f'gapic-google-cloud-{service}-{version}/src', 'src')
s.copy(library / f'grpc-google-cloud-{service}-{version}/src', f'../../google-api-grpc/grpc-google-cloud-{service}-{version}/src')
s.copy(library / f'proto-google-cloud-{service}-{version}/src', f'../../google-api-grpc/proto-google-cloud-{service}-{version}/src')
java.format_code('./src')
java.format_code(f'../../google-api-grpc/grpc-google-cloud-{service}-{version}/src')
java.format_code(f'../../google-api-grpc/proto-google-cloud-{service}-{version}/src')
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
gapic = gcp.GAPICGenerator()
common_templates = gcp.CommonTemplates()
# tasks has two product names, and a poorly named artman yaml
v1beta1_library = gapic.java_library(
service='asset',
version='v1beta1',
config_path='artman_cloudasset_v1beta1.yaml',
artman_output_name='')
s.copy(v1beta1_library / 'gapic-google-cloud-asset-v1beta1/src', 'src')
s.copy(v1beta1_library / 'grpc-google-cloud-asset-v1beta1/src', '../../google-api-grpc/grpc-google-cloud-asset-v1beta1/src')
s.copy(v1beta1_library / 'proto-google-cloud-asset-v1beta1/src', '../../google-api-grpc/proto-google-cloud-asset-v1beta1/src')
|
apache-2.0
|
Python
|
c68e6c52b9576f149ef34aa9593bd5e46c2deb9f
|
reduce training size
|
maxpumperla/elephas,maxpumperla/elephas
|
tests/integration/test_custom_models.py
|
tests/integration/test_custom_models.py
|
import random
import numpy as np
import pytest
from tensorflow.keras.backend import sigmoid
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from elephas.spark_model import SparkModel
from elephas.utils import to_simple_rdd
@pytest.mark.parametrize('mode', ['synchronous', 'asynchronous', 'hogwild'])
def test_training_custom_activation(mode, spark_context):
def custom_activation(x):
return sigmoid(x) + 1
model = Sequential()
model.add(Dense(1, input_dim=1, activation=custom_activation))
model.add(Dense(1, activation='sigmoid'))
sgd = SGD(lr=0.1)
model.compile(sgd, 'binary_crossentropy', ['acc'])
x_train = np.random.rand(100)
y_train = np.zeros(100)
x_test = np.random.rand(10)
y_test = np.zeros(10)
y_train[:50] = 1
rdd = to_simple_rdd(spark_context, x_train, y_train)
spark_model = SparkModel(model, frequency='epoch', mode=mode,
custom_objects={'custom_activation': custom_activation},
port=4000 + random.randint(0, 800))
spark_model.fit(rdd, epochs=1, batch_size=16, verbose=0, validation_split=0.1)
assert spark_model.predict(x_test)
assert spark_model.evaluate(x_test, y_test)
|
import random
import numpy as np
import pytest
from tensorflow.keras.backend import sigmoid
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from elephas.spark_model import SparkModel
from elephas.utils import to_simple_rdd
@pytest.mark.parametrize('mode', ['synchronous', 'asynchronous', 'hogwild'])
def test_training_custom_activation(mode, spark_context):
def custom_activation(x):
return sigmoid(x) + 1
model = Sequential()
model.add(Dense(1, input_dim=1, activation=custom_activation))
model.add(Dense(1, activation='sigmoid'))
sgd = SGD(lr=0.1)
model.compile(sgd, 'binary_crossentropy', ['acc'])
x_train = np.random.rand(1000)
y_train = np.zeros(1000)
x_test = np.random.rand(100)
y_test = np.zeros(100)
y_train[:500] = 1
rdd = to_simple_rdd(spark_context, x_train, y_train)
spark_model = SparkModel(model, frequency='epoch', mode=mode,
custom_objects={'custom_activation': custom_activation},
port=4000 + random.randint(0, 800))
spark_model.fit(rdd, epochs=1, batch_size=16, verbose=0, validation_split=0.1)
assert spark_model.predict(x_test)
assert spark_model.evaluate(x_test, y_test)
|
mit
|
Python
|
286d0d577921126512263e8d16a01a75878ee453
|
Add missing import
|
JohnLZeller/jenkinsapi,mistermocha/jenkinsapi,jduan/jenkinsapi,JohnLZeller/jenkinsapi,mistermocha/jenkinsapi,imsardine/jenkinsapi,mistermocha/jenkinsapi,JohnLZeller/jenkinsapi,imsardine/jenkinsapi,salimfadhley/jenkinsapi,domenkozar/jenkinsapi,imsardine/jenkinsapi,salimfadhley/jenkinsapi,aerickson/jenkinsapi,zaro0508/jenkinsapi,zaro0508/jenkinsapi,aerickson/jenkinsapi,jduan/jenkinsapi,domenkozar/jenkinsapi,zaro0508/jenkinsapi
|
jenkinsapi/utils/urlopener_kerberos.py
|
jenkinsapi/utils/urlopener_kerberos.py
|
import urllib2
import kerberos as krb
from urlparse import urlparse
class KerberosAuthHandler(urllib2.BaseHandler):
"""
A BaseHandler class that will add Kerberos Auth headers to a request
"""
def __init__(self,tgt):
self.tgt = tgt
def http_request(self,req):
req.add_unredirected_header('Authorization', 'Negotiate %s' % self.tgt)
return req
def https_request(self,req):
return self.http_request(req)
def mkkrbopener( jenkinsurl ):
"""
Creates an url opener that works with kerberos auth
:param jenkinsurl: jenkins url, str
:return: urllib2.opener configured for kerberos auth
"""
handlers = []
for handler in get_kerberos_auth_handler(jenkinsurl=jenkinsurl):
handlers.append(handler)
opener = urllib2.build_opener(*handlers)
return opener.open
def get_kerberos_auth_handler(jenkinsurl):
"""
Get a handler which enabled authentication over GSSAPI
:param jenkinsurl: jenkins base url, str
:return: a list of handlers
"""
jenkinsnetloc = urlparse(jenkinsurl).netloc
assert type( jenkinsnetloc ) == str, "Jenkins network location should be a string, got %s" % repr( jenkinsnetloc )
_ignore, ctx = krb.authGSSClientInit('HTTP@%s' % jenkinsnetloc, gssflags=krb.GSS_C_DELEG_FLAG|krb.GSS_C_MUTUAL_FLAG|krb.GSS_C_SEQUENCE_FLAG)
rc = krb.authGSSClientStep(ctx,'')
if rc != krb.AUTH_GSS_CONTINUE:
return []
tgt = krb.authGSSClientResponse(ctx)
if not tgt:
return []
krb_handler = KerberosAuthHandler(tgt)
return [ krb_handler ]
|
import urllib2
import kerberos as krb
class KerberosAuthHandler(urllib2.BaseHandler):
"""
A BaseHandler class that will add Kerberos Auth headers to a request
"""
def __init__(self,tgt):
self.tgt = tgt
def http_request(self,req):
req.add_unredirected_header('Authorization', 'Negotiate %s' % self.tgt)
return req
def https_request(self,req):
return self.http_request(req)
def mkkrbopener( jenkinsurl ):
"""
Creates an url opener that works with kerberos auth
:param jenkinsurl: jenkins url, str
:return: urllib2.opener configured for kerberos auth
"""
handlers = []
for handler in get_kerberos_auth_handler(jenkinsurl=jenkinsurl):
handlers.append(handler)
opener = urllib2.build_opener(*handlers)
return opener.open
def get_kerberos_auth_handler(jenkinsurl):
"""
Get a handler which enabled authentication over GSSAPI
:param jenkinsurl: jenkins base url, str
:return: a list of handlers
"""
jenkinsnetloc = urlparse(jenkinsurl).netloc
assert type( jenkinsnetloc ) == str, "Jenkins network location should be a string, got %s" % repr( jenkinsnetloc )
_ignore, ctx = krb.authGSSClientInit('HTTP@%s' % jenkinsnetloc, gssflags=krb.GSS_C_DELEG_FLAG|krb.GSS_C_MUTUAL_FLAG|krb.GSS_C_SEQUENCE_FLAG)
rc = krb.authGSSClientStep(ctx,'')
if rc != krb.AUTH_GSS_CONTINUE:
return []
tgt = krb.authGSSClientResponse(ctx)
if not tgt:
return []
krb_handler = KerberosAuthHandler(tgt)
return [ krb_handler ]
|
mit
|
Python
|
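Aside: the added line is `from urlparse import urlparse`, which only exists under Python 2. For reference, a version-agnostic spelling of the same import (an annotation, not part of jenkinsapi):
try:
    from urlparse import urlparse       # Python 2, as in the fix above
except ImportError:
    from urllib.parse import urlparse   # Python 3 location of the same API

# netloc is the piece the Kerberos handler needs for 'HTTP@<host>':
assert urlparse('https://jenkins.example.com:8080/job/x').netloc == 'jenkins.example.com:8080'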
f95555ee63323d4046444f14395813a415aa0683
|
implement just enough of mtrand to make tests start passing
|
NextThought/pypy-numpy,NextThought/pypy-numpy,NextThought/pypy-numpy,NextThought/pypy-numpy
|
numpy/random/mtrand.py
|
numpy/random/mtrand.py
|
import random
from numpy import zeros
def random_sample(length=0):
if length == 0:
return random.random()
ret = zeros((length,))
for x in xrange(length):
ret[x] = random.random()
return ret
def randn(length=0):
if length == 0:
return random.gauss(0., 1.)
ret = zeros((length,))
for x in xrange(length):
ret[x] = random.gauss(0., 1.)
return ret
|
random_sample = None
|
bsd-3-clause
|
Python
|
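Aside: the shim above is Python 2 code (xrange). A sketch of the same contract in Python 3, for comparison only:
import random
import numpy as np

def random_sample(length=0):
    # Same contract as the shim: a scalar when length == 0, otherwise a
    # 1-D array of uniform draws (Python 3 uses range, not xrange).
    if length == 0:
        return random.random()
    return np.array([random.random() for _ in range(length)])

assert 0.0 <= random_sample() < 1.0
assert random_sample(3).shape == (3,)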
0b048cef1f0efd190d8bf8f50c69df35c59b91a3
|
Add verbosity on JSON compare fail
|
SymbiFlow/yosys-symbiflow-plugins,SymbiFlow/yosys-symbiflow-plugins,SymbiFlow/yosys-f4pga-plugins,SymbiFlow/yosys-symbiflow-plugins,chipsalliance/yosys-f4pga-plugins,antmicro/yosys-symbiflow-plugins,chipsalliance/yosys-f4pga-plugins,antmicro/yosys-symbiflow-plugins,antmicro/yosys-symbiflow-plugins,SymbiFlow/yosys-f4pga-plugins,SymbiFlow/yosys-f4pga-plugins
|
xdc-plugin/tests/compare_output_json.py
|
xdc-plugin/tests/compare_output_json.py
|
#!/usr/bin/env python3
"""
This script extracts the top module cells and their corresponding parameters
from json files produced by Yosys.
The return code of this script is used to check if the output is equivalent.
"""
import sys
import json
parameters = ["IOSTANDARD", "DRIVE", "SLEW", "IN_TERM"]
def read_cells(json_file):
with open(json_file) as f:
data = json.load(f)
f.close()
cells = data['modules']['top']['cells']
cells_parameters = dict()
for cell, opts in cells.items():
attributes = opts['parameters']
if len(attributes.keys()):
if any([x in parameters for x in attributes.keys()]):
cells_parameters[cell] = attributes
return cells_parameters
def main():
if len(sys.argv) < 3:
print("Incorrect number of arguments")
exit(1)
cells1 = read_cells(sys.argv[1])
cells2 = read_cells(sys.argv[2])
if cells1 == cells2:
exit(0)
else:
print(json.dumps(cells1, indent=4))
print("VS")
print(json.dumps(cells2, indent=4))
exit(1)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
"""
This script extracts the top module cells and their corresponding parameters
from json files produced by Yosys.
The return code of this script is used to check if the output is equivalent.
"""
import sys
import json
def read_cells(json_file):
with open(json_file) as f:
data = json.load(f)
f.close()
cells = data['modules']['top']['cells']
cells_parameters = dict()
for cell, opts in cells.items():
cells_parameters[cell] = opts['parameters']
return cells_parameters
def main():
if len(sys.argv) < 3:
print("Incorrect number of arguments")
exit(1)
cells1 = read_cells(sys.argv[1])
cells2 = read_cells(sys.argv[2])
if cells1 == cells2:
exit(0)
else:
exit(1)
if __name__ == "__main__":
main()
|
apache-2.0
|
Python
|
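Aside: besides printing both sides on mismatch, the commit narrows the comparison to a fixed parameter whitelist. The filtering step in isolation, with made-up cell data in the shape read_cells() produces:
parameters = ["IOSTANDARD", "DRIVE", "SLEW", "IN_TERM"]

cells = {
    "obuf_0": {"IOSTANDARD": "LVCMOS33", "SLEW": "FAST"},
    "lut_1": {"INIT": "0110"},   # has parameters, but none that are tracked
    "ff_2": {},                  # empty parameter dict, skipped outright
}

kept = {cell: attrs for cell, attrs in cells.items()
        if attrs and any(p in attrs for p in parameters)}
assert list(kept) == ["obuf_0"]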
fa28919d3d968fead742138484fcc81a6fca46d4
|
Add tests for hubsync.sync.yesno_as_boolean
|
Mariocj89/hubsync
|
tests/unit/sync_test.py
|
tests/unit/sync_test.py
|
"""Sync module tests"""
import unittest
from hubsync import sync
class SyncTestCase(unittest.TestCase):
def test_yesno_as_boolean_yes(self):
self.assertTrue(sync.yesno_as_boolean("yes"))
def test_yesno_as_boolean_no(self):
self.assertFalse(sync.yesno_as_boolean("no"))
class ZipPairsTestCase(unittest.TestCase):
def test_empty_lists(self):
self.assertEqual(
[],
list(sync.zip_pairs([], []))
)
def test_empty_first_list(self):
self.assertEqual(
[(1, None)],
list(sync.zip_pairs([1], []))
)
def test_empty_second_list(self):
self.assertEqual(
[(None, 1)],
list(sync.zip_pairs([], [1]))
)
def test_single_element_lists(self):
self.assertEqual(
set([(1, 1), (1, None)]),
set(sync.zip_pairs([1, 1], [1]))
)
def test_non_matching_elements(self):
self.assertEqual(
set([(None, 2), (1, None)]),
set(sync.zip_pairs([1], [2]))
)
def test_unordered_matching(self):
self.assertEqual(
set([(1, 1), (2, 2)]),
set(sync.zip_pairs([1, 2], [2, 1]))
)
def test_diff_length_non_matching_lower(self):
self.assertEqual(
set([('etcaterva', 'etcaterva'), ('aa', None)]),
set(sync.zip_pairs(['aa', 'etcaterva'], ['etcaterva']))
)
def test_diff_length_non_matching_higher(self):
self.assertEqual(
set([('zz', None), ('etcaterva', 'etcaterva')]),
set(sync.zip_pairs(['zz', 'etcaterva'], ['etcaterva']))
)
if __name__ == '__main__':
unittest.main()
|
"""Sync module tests"""
import unittest
from hubsync import sync
class ZipPairsTestCase(unittest.TestCase):
def test_empty_lists(self):
self.assertEqual(
[],
list(sync.zip_pairs([], []))
)
def test_empty_first_list(self):
self.assertEqual(
[(1, None)],
list(sync.zip_pairs([1], []))
)
def test_empty_second_list(self):
self.assertEqual(
[(None, 1)],
list(sync.zip_pairs([], [1]))
)
def test_single_element_lists(self):
self.assertEqual(
set([(1, 1), (1, None)]),
set(sync.zip_pairs([1, 1], [1]))
)
def test_non_matching_elements(self):
self.assertEqual(
set([(None, 2), (1, None)]),
set(sync.zip_pairs([1], [2]))
)
def test_unordered_matching(self):
self.assertEqual(
set([(1, 1), (2, 2)]),
set(sync.zip_pairs([1, 2], [2, 1]))
)
def test_diff_length_non_matching_lower(self):
self.assertEqual(
set([('etcaterva', 'etcaterva'), ('aa', None)]),
set(sync.zip_pairs(['aa', 'etcaterva'], ['etcaterva']))
)
def test_diff_length_non_matching_higher(self):
self.assertEqual(
set([('zz', None), ('etcaterva', 'etcaterva')]),
set(sync.zip_pairs(['zz', 'etcaterva'], ['etcaterva']))
)
if __name__ == '__main__':
unittest.main()
|
mit
|
Python
|
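Aside: the two new tests pin down yesno_as_boolean without showing it. A minimal implementation consistent with them (hypothetical; hubsync's real function may accept more spellings):
def yesno_as_boolean(answer):
    # Hypothetical sketch: only 'yes' maps to True; anything else is False.
    return answer.strip().lower() == "yes"

assert yesno_as_boolean("yes") is True
assert yesno_as_boolean("no") is False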
816d6bcd5660d539c4482ea76f1adcf69c23cc92
|
add inverse test
|
adrn/streams,adrn/streams
|
streams/coordinates/tests/test_core.py
|
streams/coordinates/tests/test_core.py
|
# coding: utf-8
"""
Test conversions in core.py
"""
from __future__ import absolute_import, division, print_function
__author__ = "adrn <[email protected]>"
import os
import pytest
import numpy as np
import astropy.coordinates as coord
import astropy.units as u
from astropy.io import ascii
from ..core import *
this_path = os.path.split(__file__)[0]
data = ascii.read(os.path.join(this_path, "idl_vgsr_vhel.txt"))
def test_gsr_to_hel():
for row in data:
l = row["lon"] * u.degree
b = row["lat"] * u.degree
v_gsr = row["vgsr"] * u.km/u.s
v_sun_lsr = [row["vx"],row["vy"],row["vz"]]*u.km/u.s
v_hel = vgsr_to_vhel(l, b, v_gsr,
v_sun_lsr=v_sun_lsr,
v_circ=row["vcirc"]*u.km/u.s)
np.testing.assert_almost_equal(v_hel.value, row['vhelio'], decimal=4)
def test_hel_to_gsr():
for row in data:
l = row["lon"] * u.degree
b = row["lat"] * u.degree
v_hel = row["vhelio"] * u.km/u.s
v_sun_lsr = [row["vx"],row["vy"],row["vz"]]*u.km/u.s
v_gsr = vhel_to_vgsr(l, b, v_hel,
v_sun_lsr=v_sun_lsr,
v_circ=row["vcirc"]*u.km/u.s)
np.testing.assert_almost_equal(v_gsr.value, row['vgsr'], decimal=4)
|
# coding: utf-8
"""
Test conversions in core.py
"""
from __future__ import absolute_import, division, print_function
__author__ = "adrn <[email protected]>"
import os
import pytest
import numpy as np
import astropy.coordinates as coord
import astropy.units as u
from astropy.io import ascii
from ..core import *
this_path = os.path.split(__file__)[0]
data = ascii.read(os.path.join(this_path, "idl_vgsr_vhel.txt"))
def test_gsr_to_hel():
for row in data:
l = row["lon"] * u.degree
b = row["lat"] * u.degree
v_gsr = row["vgsr"] * u.km/u.s
v_sun_lsr = [row["vx"],row["vy"],row["vz"]]*u.km/u.s
v_hel = vgsr_to_vhel(l, b, v_gsr,
v_sun_lsr=v_sun_lsr,
v_circ=row["vcirc"]*u.km/u.s)
np.testing.assert_almost_equal(v_hel, row['vhelio'], decimal=2)
|
mit
|
Python
|
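Aside: the new test_hel_to_gsr closes the loop on the existing test, since vgsr_to_vhel and vhel_to_vgsr should be exact inverses. In schematic form, using a deliberately simplified model with a constant line-of-sight projection (the real conversion depends on l, b and the solar motion):
import numpy as np

V_PROJ = 120.0  # assumed constant solar-motion projection in km/s, illustrative only

def vgsr_to_vhel(v_gsr):
    return v_gsr - V_PROJ

def vhel_to_vgsr(v_hel):
    return v_hel + V_PROJ

# The round-trip property the paired tests establish against the IDL table:
v = 42.0
np.testing.assert_almost_equal(vhel_to_vgsr(vgsr_to_vhel(v)), v, decimal=12)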
2a4adaa7e5bca8bc8bd3f552f23a46d73ef04a86
|
fix authentication bug in token_Data and start_session
|
InPermutation/droste,InPermutation/droste
|
cheezapi.py
|
cheezapi.py
|
from flask import url_for, session
import os
import requests
def redirect_uri():
redirect_uri = url_for('cheez', _external=True)
# API includes protocol as part of URL matching. Use this to force HTTPS:
if os.environ.get('FORCE_HTTPS') == 'True':
redirect_uri = redirect_uri.replace('http://', 'https://')
return redirect_uri
def client_id():
return os.environ.get('CHZ_CLIENT_ID')
def client_secret():
return os.environ.get('CHZ_CLIENT_SECRET')
def token_data(code):
r = requests.post("https://api.cheezburger.com/oauth/access_token",
data={'client_id': client_id(), 'client_secret': client_secret(),
'code': code, 'grant_type': 'authorization_code'})
return r.json()
def start_session(code):
tdata = token_data(code)
session['access_token'] = tdata['access_token']
def user():
if 'access_token' in session:
r = requests.get('https://api.cheezburger.com/v1/me',
params = {'access_token': session['access_token']})
json = r.json()
if 'items' in json:
return json['items'][0]
return None
|
from flask import url_for, session
import os
import requests
def redirect_uri():
redirect_uri = url_for('cheez', _external=True)
# API includes protocol as part of URL matching. Use this to force HTTPS:
if os.environ.get('FORCE_HTTPS') == 'True':
redirect_uri = redirect_uri.replace('http://', 'https://')
return redirect_uri
def client_id():
return os.environ.get('CHZ_CLIENT_ID')
def client_secret():
return os.environ.get('CHZ_CLIENT_SECRET')
def token_data():
r = requests.post("https://api.cheezburger.com/oauth/access_token",
data={'client_id': client_id(), 'client_secret': client_secret(),
'code': code, 'grant_type': 'authorization_code'})
return r.json()
def start_session(code):
token_data = token_data(code)
session['access_token'] = token_data['access_token']
def user():
if 'access_token' in session:
r = requests.get('https://api.cheezburger.com/v1/me',
params = {'access_token': session['access_token']})
json = r.json()
if 'items' in json:
return json['items'][0]
return None
|
bsd-3-clause
|
Python
|
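Aside: the old token_data()/start_session() pair failed twice over: token_data took no code argument yet referenced one, and start_session rebound the name token_data locally before calling it. The second failure mode in isolation:
def fetch():
    return {'access_token': 'abc'}

def broken_start_session():
    # Assigning to `fetch` anywhere in this body makes it local for the
    # whole function, so the call below raises UnboundLocalError.
    fetch = fetch()
    return fetch['access_token']

try:
    broken_start_session()
except UnboundLocalError as exc:
    print("shadowing bug:", exc)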
b4eb149099b64bcfccc8a8f2fd0c5008c74a4fe0
|
move write() out of the scope of an IOError catch clause not meant for it
|
dstufft/ooni-backend,dstufft/ooni-backend
|
oonib/deck/handlers.py
|
oonib/deck/handlers.py
|
import glob
import json
import os
import re
import yaml
from oonib import errors as e
from oonib.handlers import OONIBHandler
from oonib import log
from oonib.config import config
class DeckDescHandler(OONIBHandler):
def get(self, deckID):
# note:
# we don't have to sanitize deckID, because it's already checked
# against matching a certain pattern in the handler.
bn = os.path.basename(deckID + '.desc')
try:
with open(os.path.join(config.main.deck_dir, bn)) as f:
response = {}
deckDesc = yaml.safe_load(f)
for k in ['name', 'description', 'version', 'author', 'date']:
response[k] = deckDesc[k]
except IOError:
log.err("Deck %s missing" % deckID)
raise e.MissingDeck
except KeyError:
log.err("Deck %s missing required keys!" % deckID)
raise e.MissingDeckKeys
self.write(response)
class DeckListHandler(OONIBHandler):
def get(self):
if not config.main.deck_dir:
self.set_status(501)
raise e.NoDecksConfigured
path = os.path.abspath(config.main.deck_dir) + "/*"
decknames = map(os.path.basename, glob.iglob(path))
decknames = filter(lambda y: re.match("[a-z0-9]{64}.desc", y), decknames)
deckList = []
for deckname in decknames:
with open(os.path.join(config.main.deck_dir, deckname)) as f:
d = yaml.safe_load(f)
deckList.append({
'id': deckname,
'name': d['name'],
'description': d['description']
})
self.write(deckList)
|
import glob
import json
import os
import re
import yaml
from oonib import errors as e
from oonib.handlers import OONIBHandler
from oonib import log
from oonib.config import config
class DeckDescHandler(OONIBHandler):
def get(self, deckID):
# note:
# we don't have to sanitize deckID, because it's already checked
# against matching a certain pattern in the handler.
bn = os.path.basename(deckID + '.desc')
try:
with open(os.path.join(config.main.deck_dir, bn)) as f:
response = {}
deckDesc = yaml.safe_load(f)
for k in ['name', 'description', 'version', 'author', 'date']:
response[k] = deckDesc[k]
self.write(response)
except IOError:
log.err("Deck %s missing" % deckID)
raise e.MissingDeck
except KeyError:
log.err("Deck %s missing required keys!" % deckID)
raise e.MissingDeckKeys
class DeckListHandler(OONIBHandler):
def get(self):
if not config.main.deck_dir:
self.set_status(501)
raise e.NoDecksConfigured
path = os.path.abspath(config.main.deck_dir) + "/*"
decknames = map(os.path.basename, glob.iglob(path))
decknames = filter(lambda y: re.match("[a-z0-9]{64}.desc", y), decknames)
deckList = []
for deckname in decknames:
with open(os.path.join(config.main.deck_dir, deckname)) as f:
d = yaml.safe_load(f)
deckList.append({
'id': deckname,
'name': d['name'],
'description': d['description']
})
self.write(deckList)
|
bsd-2-clause
|
Python
|
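Aside: the rationale for hoisting self.write(response) out of the try block, in miniature: any IOError raised by the write itself would have been caught by the clause meant for a missing deck file and misreported.
def write(data):
    raise IOError("broken pipe")  # a failure unrelated to a missing deck

try:
    response = {"name": "example"}
    write(response)               # old placement: inside the try...
except IOError:
    print("deck missing")         # ...so a write failure is misdiagnosed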
fd456d55ceb4cd084c9ca754771c7b12235dcb5e
|
reset key
|
Watfaq/add-egg
|
s.py
|
s.py
|
#encoding: utf-8
import os
from apscheduler.schedulers.blocking import BlockingScheduler
import requests
MAILGUN_KEY = os.environ.get('MAILGUN_KEY')
sched = BlockingScheduler()
@sched.scheduled_job('cron', day_of_week='mon-fri', hour=12)
def add_egg():
print(send_mail(get_text(get_price())))
@sched.scheduled_job('calc_lost_money', 'interval', minutes=1, id='calc_lost_money')
def calc_lost_money():
price = get_price()
sell = price['sell']
lost = _calc_lost_money(float(sell))
print 'Current lost %s...' % lost
if lost > 10000:
send_mail('Lost > %s' % lost)
if lost < -50000:
send_mail('Win 5w!!!!!!!!')
send_mail('Win 5w!!!!!!!!')
send_mail('Win 5w!!!!!!!!')
def _calc_lost_money(x):
return ((16.72 - x) / 16.72 + 0.0002) * 40000
def get_price():
r = requests.get('https://yunbi.com/api/v2/tickers').json()
eos = r['eoscny']
return eos['ticker']
def get_text(price):
return '''
Cool!
Eos Summary:
Buy: {buy}
Sell: {sell},
Low: {low},
High: {high},
Last: {last},
Vol: {vol}
Add an egg for your lunch!
'''.format(**price)
def send_mail(text):
api_host = 'https://api.mailgun.net/v3/no-reply.alipay-inc.xyz/messages'
token = MAILGUN_KEY
sender = 'NoReply <[email protected]>'
subject = u'加个蛋'
to = 'Jiatai <[email protected]>'
cc = 'Yuwei <[email protected]>'
text = text
r = requests.post(api_host, auth=('api', token), data={
'from': sender,
'to': to,
'cc': cc,
'text': text,
})
return r.status_code, r.content
if __name__ == '__main__':
# sched.start()
calc_lost_money()
|
#encoding: utf-8
import os
from apscheduler.schedulers.blocking import BlockingScheduler
import requests
MAILGUN_KEY = os.environ.get('MAILGUN_KEY')
sched = BlockingScheduler()
@sched.scheduled_job('cron', day_of_week='mon-fri', hour=12)
def add_egg():
print(send_mail(get_text(get_price())))
@sched.scheduled_job('calc_lost_money', 'interval', minutes=1, id='calc_lost_money')
def calc_lost_money():
price = get_price()
sell = price['sell']
lost = _calc_lose_money(float(sell))
print 'Current lost %s...' % lost
if lost > 10000:
send_mail('Lost > %s' % lost)
if lost < -50000:
send_mail('Win 5w!!!!!!!!')
send_mail('Win 5w!!!!!!!!')
send_mail('Win 5w!!!!!!!!')
def _calc_lose_money(x):
return ((16.72 - x) / 16.72 + 0.0002) * 40000
def get_price():
r = requests.get('https://yunbi.com/api/v2/tickers').json()
eos = r['eoscny']
return eos['ticker']
def get_text(price):
return '''
Cool!
Eos Sumary:
Buy: {buy}
Sell: {sell},
Low: {low},
High: {high},
Last: {last},
Vol: {vol}
Add an egg for your lunch!
'''.format(**price)
def send_mail(text):
api_host = 'https://api.mailgun.net/v3/no-reply.alipay-inc.xyz/messages'
token = MAILGUN_KEY
sender = 'NoReply <[email protected]>'
subject = u'加个蛋'
to = 'Jiatai <[email protected]>'
cc = 'Yuwei <[email protected]>'
text = text
r = requests.post(api_host, auth=('api', token), data={
'from': sender,
'to': to,
'cc': cc,
'text': text,
})
return r.status_code, r.content
if __name__ == '__main__':
# sched.start()
calc_lose_money()
|
unlicense
|
Python
|
c3fb87846d1f1a38fe2e37521464dea59832ff6c
|
remove unused import from distutils
|
PyWavelets/pywt,rgommers/pywt,rgommers/pywt,rgommers/pywt,PyWavelets/pywt,grlee77/pywt,rgommers/pywt,grlee77/pywt
|
pywt/__init__.py
|
pywt/__init__.py
|
# flake8: noqa
# Copyright (c) 2006-2012 Filip Wasilewski <http://en.ig.ma/>
# Copyright (c) 2012-2016 The PyWavelets Developers
# <https://github.com/PyWavelets/pywt>
# See COPYING for license details.
"""
Discrete forward and inverse wavelet transform, stationary wavelet transform,
wavelet packets signal decomposition and reconstruction module.
"""
from __future__ import division, print_function, absolute_import
from ._extensions._pywt import *
from ._functions import *
from ._multilevel import *
from ._multidim import *
from ._thresholding import *
from ._wavelet_packets import *
from ._dwt import *
from ._swt import *
from ._cwt import *
from . import data
__all__ = [s for s in dir() if not s.startswith('_')]
try:
# In Python 2.x the name of the tempvar leaks out of the list
# comprehension. Delete it to not make it show up in the main namespace.
del s
except NameError:
pass
from pywt.version import version as __version__
from ._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
|
# flake8: noqa
# Copyright (c) 2006-2012 Filip Wasilewski <http://en.ig.ma/>
# Copyright (c) 2012-2016 The PyWavelets Developers
# <https://github.com/PyWavelets/pywt>
# See COPYING for license details.
"""
Discrete forward and inverse wavelet transform, stationary wavelet transform,
wavelet packets signal decomposition and reconstruction module.
"""
from __future__ import division, print_function, absolute_import
from distutils.version import LooseVersion
from ._extensions._pywt import *
from ._functions import *
from ._multilevel import *
from ._multidim import *
from ._thresholding import *
from ._wavelet_packets import *
from ._dwt import *
from ._swt import *
from ._cwt import *
from . import data
__all__ = [s for s in dir() if not s.startswith('_')]
try:
# In Python 2.x the name of the tempvar leaks out of the list
# comprehension. Delete it to not make it show up in the main namespace.
del s
except NameError:
pass
from pywt.version import version as __version__
from ._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
|
mit
|
Python
|
b82f21ea92aad44ca101744a3f5300280f081524
|
Fix site when logged in
|
GNOME/extensions-web,magcius/sweettooth,GNOME/extensions-web,magcius/sweettooth,GNOME/extensions-web,GNOME/extensions-web
|
sweettooth/review/context_processors.py
|
sweettooth/review/context_processors.py
|
from extensions.models import ExtensionVersion
def n_unreviewed_extensions(request):
if not request.user.has_perm("review.can-review-extensions"):
return dict()
return dict(n_unreviewed_extensions=ExtensionVersion.objects.unreviewed().count())
|
from extensions.models import ExtensionVersion
def n_unreviewed_extensions(request):
if not request.user.has_perm("review.can-review-extensions"):
return dict()
return dict(n_unreviewed_extensions=ExtensionVersion.unreviewed().count())
|
agpl-3.0
|
Python
|
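Aside: the fix routes the call through .objects because unreviewed() lives on the model manager, not on the model class. The shape of the bug in plain Python (a sketch of the relationship, not the Django models):
class Manager:
    def __init__(self, versions):
        self._versions = versions

    def unreviewed(self):
        return [v for v in self._versions if not v["reviewed"]]

class ExtensionVersion:
    objects = Manager([{"reviewed": False}, {"reviewed": True}])

assert len(ExtensionVersion.objects.unreviewed()) == 1

# The pre-fix spelling fails, because the method lives on the manager:
try:
    ExtensionVersion.unreviewed()
except AttributeError:
    print("ExtensionVersion has no attribute 'unreviewed'")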
13ab494e0caaca6a460a49528c3aae4c7707042a
|
add a bit more docstring
|
chenjiandongx/pyecharts,chenjiandongx/pyecharts,chenjiandongx/pyecharts
|
pyecharts/custom/page.py
|
pyecharts/custom/page.py
|
#!/usr/bin/env python
# coding=utf-8
from pyecharts import template
class Page(object):
def __init__(self):
self.__charts = []
def add(self, achart_or_charts):
"""
Append chart(s) to the rendering page
:param achart_or_charts:
:return:
"""
if isinstance(achart_or_charts, list):
self.__charts.extend(achart_or_charts)
else:
self.__charts.append(achart_or_charts)
def render(self, path="render.html"):
"""
Produce rendered charts in a html file
:param path:
:return:
"""
template_name = "multicharts.html"
chart_content = self.render_embed()
tmp = template.JINJA2_ENV.get_template(template_name)
html = tmp.render(multi_chart_content=chart_content)
html = template.freeze_js(html)
template.write_utf8_html_file(path, html)
def render_embed(self):
"""
Produce rendered charts in html for embedding purpose
:return:
"""
chart_content = ""
for chart in self.__charts:
chart_content += chart.render_embed()
return chart_content
|
#!/usr/bin/env python
# coding=utf-8
from pyecharts import template
class Page(object):
def __init__(self):
self.__charts = []
def add(self, achart_or_charts):
"""
:param achart_or_charts:
:return:
"""
if isinstance(achart_or_charts, list):
self.__charts.extend(achart_or_charts)
else:
self.__charts.append(achart_or_charts)
def render(self, path="render.html"):
"""
:param path:
:return:
"""
template_name = "multicharts.html"
chart_content = self.render_embed()
tmp = template.JINJA2_ENV.get_template(template_name)
html = tmp.render(multi_chart_content=chart_content)
html = template.freeze_js(html)
template.write_utf8_html_file(path, html)
def render_embed(self):
"""
:return:
"""
chart_content = ""
for chart in self.__charts:
chart_content += chart.render_embed()
return chart_content
|
mit
|
Python
|
96e3d2720a805a08190936a78c91a8c9746daab9
|
Update Keras.py
|
paperrune/Neural-Networks,paperrune/Neural-Networks
|
Depthwise-Separable-Convolution/Keras.py
|
Depthwise-Separable-Convolution/Keras.py
|
from keras.datasets import mnist
from keras.initializers import RandomUniform
from keras.layers import Conv2D, Dense, DepthwiseConv2D, Flatten, MaxPooling2D
from keras.models import Sequential
from keras.optimizers import SGD
from keras.utils import to_categorical
# input image dimensions
img_rows, img_cols = 28, 28
batch_size = 128
epochs = 30
learning_rate = 0.05
momentum = 0.9
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape([x_train.shape[0], img_rows, img_cols, 1]).astype('float32') / 255
y_train = to_categorical(y_train, num_classes=10)
x_test = x_test.reshape([x_test.shape[0], img_rows, img_cols, 1]).astype('float32') / 255
y_test = to_categorical(y_test, num_classes=10)
model = Sequential()
model.add(Conv2D(24,
activation='relu',
kernel_initializer='he_normal',
kernel_size=(5, 5),
input_shape=(img_rows, img_cols, 1)))
model.add(MaxPooling2D())
# depthwise
model.add(DepthwiseConv2D(activation='relu',
depthwise_initializer='he_normal',
kernel_size=(5, 5)))
# pointwise
model.add(Conv2D(48,
activation='relu',
kernel_initializer='he_normal',
kernel_size=(1, 1)))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(512,
activation='relu',
kernel_initializer='he_normal'))
model.add(Dense(10,
activation='softmax',
kernel_initializer='glorot_uniform'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=SGD(lr=learning_rate, momentum=momentum, nesterov=True),
metrics=['accuracy'])
history = model.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
|
from keras.datasets import mnist
from keras.initializers import RandomUniform
from keras.layers import Conv2D, Dense, DepthwiseConv2D, Flatten, MaxPooling2D
from keras.models import Sequential
from keras.optimizers import SGD
from keras.utils import to_categorical
# input image dimensions
img_rows, img_cols = 28, 28
batch_size = 128
epochs = 30
learning_rate = 0.05
momentum = 0.9
num_classes = 10
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape([x_train.shape[0], img_rows, img_cols, 1]).astype('float32') / 255
y_train = to_categorical(y_train, num_classes)
x_test = x_test.reshape([x_test.shape[0], img_rows, img_cols, 1]).astype('float32') / 255
y_test = to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(24,
activation='relu',
kernel_initializer='he_normal',
kernel_size=(5, 5),
input_shape=(img_rows, img_cols, 1)))
model.add(MaxPooling2D())
# depthwise
model.add(DepthwiseConv2D(activation='relu',
depthwise_initializer='he_normal',
kernel_size=(5, 5)))
# pointwise
model.add(Conv2D(48,
activation='relu',
kernel_initializer='he_normal',
kernel_size=(1, 1)))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(512,
activation='relu',
kernel_initializer='he_normal'))
model.add(Dense(num_classes,
activation='softmax',
kernel_initializer='glorot_uniform'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=SGD(lr=learning_rate, momentum=momentum, nesterov=True),
metrics=['accuracy'])
history = model.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
|
mit
|
Python
|
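Aside: the point of the depthwise-plus-pointwise pair above is parameter economy. Counting weights for the exact shapes used (24 channels in, 5x5 kernel, 48 channels out):
k, c_in, c_out = 5, 24, 48

depthwise = k * k * c_in + c_in            # 624: one 5x5 filter per channel, plus biases
pointwise = c_in * c_out + c_out           # 1200: 1x1 mixing across channels, plus biases
standard = k * k * c_in * c_out + c_out    # 28848: a full 5x5 Conv2D for comparison

print(depthwise + pointwise, "vs", standard)   # 1824 vs 28848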
efbb841bb0968abeb2d3bba5a535cb8619131b2b
|
Remove dupe licence header
|
yaybu/touchdown
|
touchdown/config/ini.py
|
touchdown/config/ini.py
|
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from six.moves import configparser
from touchdown.core.plan import Plan
from touchdown.core import argument, resource
from touchdown.interfaces import File, FileNotFound
class IniFile(resource.Resource):
resource_name = "ini_file"
name = argument.String()
file = argument.Resource(File)
class Describe(Plan):
resource = IniFile
name = "describe"
def write(self, c):
fp = self.runner.get_plan(self.resource.file)
s = six.StringIO()
c.write(s)
fp.write(s.getvalue())
def read(self):
fp = self.runner.get_plan(self.resource.file)
config = configparser.ConfigParser()
try:
config.readfp(fp.read())
except FileNotFound:
pass
return config
def get_actions(self):
self.object = self.read()
return []
|
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from six.moves import configparser
from touchdown.core.plan import Plan
from touchdown.core import argument, resource
from touchdown.interfaces import File, FileNotFound
class IniFile(resource.Resource):
resource_name = "ini_file"
name = argument.String()
file = argument.Resource(File)
class Describe(Plan):
resource = IniFile
name = "describe"
def write(self, c):
fp = self.runner.get_plan(self.resource.file)
s = six.StringIO()
c.write(s)
fp.write(s.getvalue())
def read(self):
fp = self.runner.get_plan(self.resource.file)
config = configparser.ConfigParser()
try:
config.readfp(fp.read())
except FileNotFound:
pass
return config
def get_actions(self):
self.object = self.read()
return []
|
apache-2.0
|
Python
|
1e21ba5101fe1e47fec5acacd8ac9329a71fc9bb
|
Change __init__
|
khrapovs/mygmm
|
__init__.py
|
__init__.py
|
from .mygmm import *
|
from .mygmm.mygmm import *
|
mit
|
Python
|
c26c44f044a2e48cc53a0f52adce366807c87e2d
|
Add version number.
|
vmlaker/coils
|
__init__.py
|
__init__.py
|
__version__ = '1.0'
from .Averager import Averager
from .Config import Config
from .RateTicker import RateTicker
from .Ring import Ring
from .SocketTalk import SocketTalk
from .SortedList import SortedList
from .String import string2time, time2string, time2levels, time2dir, time2fname
from .Timer import Timer
from .UserInput import user_input
|
from .Averager import Averager
from .Config import Config
from .RateTicker import RateTicker
from .Ring import Ring
from .SocketTalk import SocketTalk
from .SortedList import SortedList
from .String import string2time, time2string, time2levels, time2dir, time2fname
from .Timer import Timer
from .UserInput import user_input
|
mit
|
Python
|
bde3b3d1d90338e23e2550bc6fdd317e5e696f0f
|
Add command-line arguments: root, hosts and name with regexp
|
aleksandr-vin/zk-find
|
zk-find.py
|
zk-find.py
|
#
# This is a FIND utility for Zookeeper
#
# Author: Aleksandr Vinokurov <[email protected]>
# Url: https://github.com/aleksandr-vin/zk-find
#
import logging
import logging.config
import argparse
try:
logging.config.fileConfig('logging.conf')
except:
logging.basicConfig()
logger = logging.getLogger('zk-find')
from kazoo.client import KazooClient
from kazoo.client import KazooState
from kazoo.exceptions import NoNodeError
import re
def list_children(parent,prog):
try:
for node in zk.get_children(parent):
path = parent + "/" + node
if prog:
if prog.search(node):
print path
list_children(path,prog)
except NoNodeError:
pass
except ValueError as e:
print 'ValueError: %s' % (e)
def my_listener(state):
if state == KazooState.LOST:
logger.debug('Session lost')
elif state == KazooState.SUSPENDED:
logger.debug('Session suspended')
else:
logger.info('Session connected')
# defaults
defaults = {
'hosts' : '127.0.0.1:2181'
,'root' : ''
}
parser = argparse.ArgumentParser(epilog='''
Report (and track progress on fixing) bugs via the github issues
page at https://github.com/aleksandr-vin/zk-find/issues or,
if you have no web access, by sending email to <[email protected]>.
''')
parser.add_argument('root', nargs='?', type=str,
help='root of the search', default='%s' % defaults['root'],);
parser.add_argument('--hosts', default='%s' % defaults['hosts'],
type=str, metavar='HOST:PORT[,HOST:PORT]', dest='hosts', required=False,
help='comma-separated list of hosts to connect to (default: %s)' % defaults['hosts'])
parser.add_argument('--name',
type=str, metavar='REGEXP', dest='name',
help='regexp for matching node names')
if __name__ == "__main__":
# setting run-time args by the command-line parameters
settings = parser.parse_args()
zk = KazooClient(hosts=settings.hosts)
zk.add_listener(my_listener)
zk.start()
global prog
prog = None
if (settings.name):
prog = re.compile(settings.name)
list_children(settings.root,prog)
zk.stop()
|
#
# This is a FIND utility for Zookeeper
#
# Author: Aleksandr Vinokurov <[email protected]>
# Url: https://github.com/aleksandr-vin/zk-find
#
import logging
import logging.config
try:
logging.config.fileConfig('logging.conf')
except:
logging.basicConfig()
logger = logging.getLogger('zk-find')
from kazoo.client import KazooClient
from kazoo.client import KazooState
from kazoo.exceptions import NoNodeError
def list_children(parent):
try:
for node in zk.get_children(parent):
path = parent + "/" + node
print path
list_children(path)
except NoNodeError:
pass
from sys import argv
path = ''
hosts = '127.0.0.1:2181'
if len(argv) > 2:
hosts = argv[1]
path = argv[2]
elif len(argv) > 1:
path = argv[1]
def my_listener(state):
if state == KazooState.LOST:
logger.debug('Session lost')
elif state == KazooState.SUSPENDED:
logger.debug('Session suspended')
else:
logger.info('Session connected')
zk = KazooClient(hosts=hosts)
zk.add_listener(my_listener)
zk.start()
list_children(path)
zk.stop()
|
mit
|
Python
|
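Aside: with --name, only the node's own name (not its full path) is tested, and re.search matches anywhere in the name. The matching step in isolation:
import re

prog = re.compile('task')          # what --name task compiles to
assert prog.search('task-0001')    # matched, so list_children prints the path
assert prog.search('my-task')      # search is unanchored, so this matches too
assert not prog.search('locks')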
b6da8865c9a12b9ce88d809d2fa4dfb601be01d0
|
make sure same timezone is used when calculating delta
|
DataDog/integrations-extras,DataDog/integrations-extras,DataDog/integrations-extras,DataDog/integrations-extras,DataDog/integrations-extras
|
reboot_required/check.py
|
reboot_required/check.py
|
# vim: ts=4:sw=4:et
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# project
from checks import AgentCheck
from os import stat, utime, remove
from os.path import isfile
from stat import ST_MTIME
from datetime import datetime, timedelta
class RebootRequiredCheck(AgentCheck):
REBOOT_SIGNAL_FILE = '/var/run/reboot-required'
CREATED_AT_FILE = '/var/run/reboot-required.created_at'
def check(self, instance):
status, days_since, msg = self._check(instance)
self.service_check('system.reboot_required', status, message=msg)
def _check(self, instance):
reboot_signal_file = instance.get('reboot_signal_file', self.REBOOT_SIGNAL_FILE)
created_at_file = instance.get('created_at_file', self.CREATED_AT_FILE)
warning_days = int(instance.get('days_warning', 7))
critical_days = int(instance.get('days_critical', 14))
return self._get_status(critical_days, warning_days, self._days_since(reboot_signal_file, created_at_file))
def _days_since(self, reboot_signal_file, created_at_file):
if isfile(reboot_signal_file):
if isfile(created_at_file):
created_at = self._get_created_at(created_at_file)
return datetime.utcnow() - datetime.utcfromtimestamp(created_at)
else:
self._touch(created_at_file)
elif isfile(created_at_file):
remove(created_at_file)
return timedelta()
def _get_status(self, critical_days, warning_days, deltatime):
if deltatime.days > critical_days:
return AgentCheck.CRITICAL, deltatime.days, "Reboot is critical: security patches applied {0} days ago"\
.format(deltatime.days)
elif deltatime.days > warning_days:
return AgentCheck.WARNING, deltatime.days, "Reboot is necessary; security patches applied {0} days ago"\
.format(deltatime.days)
else:
return AgentCheck.OK, 0, ''
def _get_created_at(self, fname):
file_stat = stat(fname)
created_at = file_stat[ST_MTIME]
return created_at
def _touch(self, fname, times=None):
open(fname, 'a').close()
utime(fname, times)
|
# vim: ts=4:sw=4:et
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# project
from checks import AgentCheck
from os import stat, utime, remove
from os.path import isfile
from stat import ST_MTIME
from datetime import datetime, timedelta
class RebootRequiredCheck(AgentCheck):
REBOOT_SIGNAL_FILE = '/var/run/reboot-required'
CREATED_AT_FILE = '/var/run/reboot-required.created_at'
def check(self, instance):
status, days_since, msg = self._check(instance)
self.service_check('system.reboot_required', status, message=msg)
def _check(self, instance):
reboot_signal_file = instance.get('reboot_signal_file', self.REBOOT_SIGNAL_FILE)
created_at_file = instance.get('created_at_file', self.CREATED_AT_FILE)
warning_days = int(instance.get('days_warning', 7))
critical_days = int(instance.get('days_critical', 14))
return self._get_status(critical_days, warning_days, self._days_since(reboot_signal_file, created_at_file))
def _days_since(self, reboot_signal_file, created_at_file):
if isfile(reboot_signal_file):
if isfile(created_at_file):
created_at = self._get_created_at(created_at_file)
return datetime.utcnow() - datetime.fromtimestamp(created_at)
else:
self._touch(created_at_file)
elif isfile(created_at_file):
remove(created_at_file)
return timedelta()
def _get_status(self, critical_days, warning_days, deltatime):
if deltatime.days > critical_days:
return AgentCheck.CRITICAL, deltatime.days, "Reboot is critical: security patches applied {0} days ago"\
.format(deltatime.days)
elif deltatime.days > warning_days:
return AgentCheck.WARNING, deltatime.days, "Reboot is necessary; security patches applied {0} days ago"\
.format(deltatime.days)
else:
return AgentCheck.OK, 0, ''
def _get_created_at(self, fname):
file_stat = stat(fname)
created_at = file_stat[ST_MTIME]
return created_at
def _touch(self, fname, times=None):
open(fname, 'a').close()
utime(fname, times)
|
bsd-3-clause
|
Python
|
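Aside: the one-word fix matters because datetime.utcnow() and datetime.fromtimestamp() live in different timezones. On a non-UTC host their difference is off by the local UTC offset, which can push the computed delta past a warning or critical threshold. Demonstrated:
import time
from datetime import datetime

ts = time.time()
mixed = datetime.utcnow() - datetime.fromtimestamp(ts)      # skewed by the UTC offset
fixed = datetime.utcnow() - datetime.utcfromtimestamp(ts)   # ~0 everywhere

# On a UTC host both print ~0; elsewhere `mixed` shows the offset in seconds.
print(abs(mixed.total_seconds()), abs(fixed.total_seconds()))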
6f7fc9067df57c4c15204a3208768acf4b76ed85
|
Update version to 0.2.0.dev0
|
jimporter/bfg9000,jimporter/bfg9000,jimporter/bfg9000,jimporter/bfg9000
|
bfg9000/version.py
|
bfg9000/version.py
|
version = '0.2.0.dev0'
|
version = '0.1.0'
|
bsd-3-clause
|
Python
|
ef156eca331203696f38b2f829314c48eeb5f207
|
Update version to 0.1.0
|
jimporter/bfg9000,jimporter/bfg9000,jimporter/bfg9000,jimporter/bfg9000
|
bfg9000/version.py
|
bfg9000/version.py
|
version = '0.1.0'
|
version = '0.1.0-dev'
|
bsd-3-clause
|
Python
|
8099c35b31e67643e14dcd3cd30fa104fcca6fb5
|
Revert accidental change out of version.py
|
newville/scikit-image,emmanuelle/scikits.image,WarrenWeckesser/scikits-image,vighneshbirodkar/scikit-image,keflavich/scikit-image,almarklein/scikit-image,pratapvardhan/scikit-image,vighneshbirodkar/scikit-image,almarklein/scikit-image,SamHames/scikit-image,youprofit/scikit-image,chriscrosscutler/scikit-image,blink1073/scikit-image,emmanuelle/scikits.image,jwiggins/scikit-image,youprofit/scikit-image,michaelpacer/scikit-image,Midafi/scikit-image,paalge/scikit-image,bsipocz/scikit-image,SamHames/scikit-image,emmanuelle/scikits.image,rjeli/scikit-image,SamHames/scikit-image,Britefury/scikit-image,oew1v07/scikit-image,paalge/scikit-image,warmspringwinds/scikit-image,newville/scikit-image,Hiyorimi/scikit-image,chriscrosscutler/scikit-image,pratapvardhan/scikit-image,chintak/scikit-image,bsipocz/scikit-image,dpshelio/scikit-image,chintak/scikit-image,oew1v07/scikit-image,bennlich/scikit-image,almarklein/scikit-image,Britefury/scikit-image,GaZ3ll3/scikit-image,ofgulban/scikit-image,michaelpacer/scikit-image,paalge/scikit-image,juliusbierk/scikit-image,ClinicalGraphics/scikit-image,jwiggins/scikit-image,Midafi/scikit-image,vighneshbirodkar/scikit-image,ofgulban/scikit-image,chintak/scikit-image,ClinicalGraphics/scikit-image,dpshelio/scikit-image,emon10005/scikit-image,emmanuelle/scikits.image,michaelaye/scikit-image,WarrenWeckesser/scikits-image,robintw/scikit-image,GaelVaroquaux/scikits.image,SamHames/scikit-image,robintw/scikit-image,blink1073/scikit-image,bennlich/scikit-image,juliusbierk/scikit-image,almarklein/scikit-image,rjeli/scikit-image,ofgulban/scikit-image,warmspringwinds/scikit-image,ajaybhat/scikit-image,ajaybhat/scikit-image,chintak/scikit-image,michaelaye/scikit-image,GaelVaroquaux/scikits.image,Hiyorimi/scikit-image,keflavich/scikit-image,rjeli/scikit-image,emon10005/scikit-image,GaZ3ll3/scikit-image
|
scikits/image/version.py
|
scikits/image/version.py
|
version='unbuilt-dev'
|
# THIS FILE IS GENERATED FROM THE SCIKITS.IMAGE SETUP.PY
version='0.2dev'
|
bsd-3-clause
|
Python
|
fc51c36b636d4a396faac02285605dafe0779104
|
Bump version to 18.0.0a5
|
genialis/resolwe-bio,genialis/resolwe-bio,genialis/resolwe-bio,genialis/resolwe-bio
|
resolwe_bio/__about__.py
|
resolwe_bio/__about__.py
|
"""Central place for package metadata."""
# NOTE: We use __title__ instead of simply __name__ since the latter would
# interfere with a global variable __name__ denoting object's name.
__title__ = 'resolwe-bio'
__summary__ = 'Bioinformatics pipelines for the Resolwe platform'
__url__ = 'https://github.com/genialis/resolwe-bio'
# Semantic versioning is used. For more information see:
# https://packaging.python.org/en/latest/distributing/#semantic-versioning-preferred
__version__ = "18.0.0a5"
__author__ = 'Genialis, Inc.'
__email__ = '[email protected]'
__license__ = 'Apache License (2.0)'
__copyright__ = '2015-2019, ' + __author__
__all__ = (
"__title__", "__summary__", "__url__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
)
|
"""Central place for package metadata."""
# NOTE: We use __title__ instead of simply __name__ since the latter would
# interfere with a global variable __name__ denoting object's name.
__title__ = 'resolwe-bio'
__summary__ = 'Bioinformatics pipelines for the Resolwe platform'
__url__ = 'https://github.com/genialis/resolwe-bio'
# Semantic versioning is used. For more information see:
# https://packaging.python.org/en/latest/distributing/#semantic-versioning-preferred
__version__ = "18.0.0a4"
__author__ = 'Genialis, Inc.'
__email__ = '[email protected]'
__license__ = 'Apache License (2.0)'
__copyright__ = '2015-2019, ' + __author__
__all__ = (
"__title__", "__summary__", "__url__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
)
|
apache-2.0
|
Python
|
15a2ed134f32cefcba5e38fa9d043ab80fc36172
|
work on getClasses()
|
JPTrey/Scheduler,JPTrey/Scheduler
|
scheduler.py
|
scheduler.py
|
# Scheduler.py by Jon Paul, 2014
# Handles user input and returns all schedules fitting user filters
""" Global Variables """
# schedules = [] # all schedules fit to user criteria
# courses = [] # all courses within a schedule
""" Classes """
# Course object representing a class entry
class Course:
def __init__(self, name, number, credits, total_hours, meetings, required):
self.name = name # Course name
self.number = number # Course reference number
self.credits = credits # Number of credit hours
self.meetings = meetings # Array of meeting times/days
self.required = required # True: Course must be added to a schedule
self.marked = False # False: Course has yet to be added to a schedule
def __str__(self):
return self.name
# (elective only) Called when course has been added to any schedule
def mark(self):
self.marked = True
# Called when criteria for schedules has changed
def unmark(self):
self.marked = False
class Schedule:
def __init__(self, classes, target_days, target_credits, target_hours, total_hours):
self.classes = classes # Classes Associated With This Schedule
self.target_days = target_days # Days Limited by User
self.target_credits = target_credits # Credits Preferred By User
self.target_hours = target_hours # Hours in Class Preferred By User
self.total_hours = total_hours # Total Number of Hours Spent in Class
class Meeting:
def __init__(self, start_time, end_time, day):
self.start_time = start_time
self.end_time = end_time
self.day = day # 1 = Monday, ... , 5 = Friday
""" Methods """
# Called when program launches
# returns: a list of classes to be placed into schedules
def getClasses():
courses = []
done = False
while done is False:
name = raw_input("Course Name: ")
number = int(raw_input("Course Number: "))
credits = int(raw_input("Credits: "))
duration = int(raw_input("80 or 55 mins: "))
perweek = int(raw_input("Meetings per week: "))
total_hours = duration * perweek
meetings = []
required = raw_input("Required? ('True' or 'False') ") == 'True'
newCourse = Course(name, number, credits, total_hours, meetings, required)
courses.append(newCourse)
print courses
done = True
return courses
# Called when either courses are first added, or user changes preferences
# returns: a list of schedules fitting the criteria
# TODO: deal with all targets
# prevent infinite loop condition
def getSchedules(courses, target_days, target_credits, target_hours):
schedules = [] # schedules fitting the specified criteria
marked = 0 # courses already within a schedule
while marked < len(courses): # while: unique schedules can be constructed
newSchedule = []
days = []
credits = 0
hours = 0
# add required courses
for i in range(len(courses)):
if courses[i].credits + credits > target_credits:
break
elif courses[i].required:
newSchedule.append(courses[i])
credits += courses[i].credits
marked += 1
# add unused courses
for i in range(len(courses)):
if courses[i].credits + credits > target_credits:
break
elif courses[i].marked is False:
newSchedule.append(courses[i])
courses[i].marked = True
# add remaining courses
for i in range(len(courses)):
if courses[i].credits + credits > target_credits:
break
# if credits >= target_credits:
# break
else:
newSchedule.append(courses[i])
schedules.append(newSchedule)
return schedules
def displaySchedules(schedules):
for schedule in schedules:
print [course.name for course in schedule]
if __name__ == '__main__':
classes = getClasses()
schedules = getSchedules(classes, [1,5], 16, 0)
displaySchedules(schedules)
|
# Scheduler.py by Jon Paul, 2014
# Handles user input and returns all schedules fitting user filters
from array import *
""" Global Variables """
schedules = [] # all schedules fit to user criteria
courses = [] # all courses within a schedule
""" Classes """
# Course object representing a class entry
class Course:
def __init__(self, name, number, credits, total_hours, meetings, required):
self.name = name # Course name
self.number = number # Course reference number
self.credits = credits # Number of credit hours
self.meetings = meetings # Array of meeting times/days
self.required = required # True: Course must be added to a schedule
self.marked = False # False: Course has yet to be added to a schedule
def __str__():
print self.name
# Called when course has been added to any schedule
def mark():
self.marked = True
# Called when criteria for schedules has changed
def unmark():
self.marked = False
class Schedule:
def __init__(self, classes, target_days, target_credits, target_hours, total_hours):
self.classes = Classes # Classes Associated With This Schedule
self.target_days = target_days # Days Limited by User
self.target_credits = target_credits # Credits Preferred By User
self.target_hours = target_hours # Hours in Class Preferred By User
self.total_hours = total_hours # Total Number of Hours Spent in Class
class Meeting:
def __init__(self, start_time, end_time, day):
self.start_time = start_time
self.end_time = end_time
self.day = day # 1 = Monday, ... , 5 = Friday
""" Methods """
def getClasses():
global courses
done = False
while done is False:
name = raw_input("Course Name: ")
number = int(raw_input("Course Number: "))
credits = int(raw_input("Credits: "))
duration = int(raw_input("80 or 55 mins: "))
perweek = int(raw_input("Meetings per week: "))
total_hours = duration * perweek
meetings = []
required = raw_input("Required? ('True' or 'False') ") == 'True'
newCourse = Course(name, number, credits, total_hours, meetings, required)
courses.append(newCourse)
print courses
done = True
for course in courses:
print course.name
def buildSchedules():
global courses
global schedules
marked = [] # courses already within a schedule
while len(marked) < len(courses): # while: unique schedules can be constructed
pass # TODO: construct the schedules
#def displaySchedules():
getClasses()
#buildSchedules()
#displaySchedules()
|
mit
|
Python
|
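The getClasses routines above read exactly one course before setting done = True; a sentinel-controlled loop is the usual way to collect an open-ended list. A minimal Python 3 sketch (names and fields are illustrative, not from the original script):

def collect_courses():
    courses = []
    while True:
        name = input("Course Name (blank to finish): ")
        if not name:
            break  # sentinel: an empty name ends the loop
        credits = int(input("Credits: "))
        courses.append({"name": name, "credits": credits})
    return courses

if __name__ == "__main__":
    for course in collect_courses():
        print(course["name"], course["credits"])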
5bdd659768ad5e5a50f85113a2d354ac51653b42
|
Update ds_list_contains_duplicate.py
|
ngovindaraj/Python
|
leetcode/ds_list_contains_duplicate.py
|
leetcode/ds_list_contains_duplicate.py
|
# @file Contains Duplicate
# @brief Given an array of numbers find if there are any duplicates
# https://leetcode.com/problems/contains-duplicate/
'''
Given an array of integers, find if the array contains any duplicates.
Your function should return true if any value appears at least twice in the
array, and it should return false if every element is distinct.
'''
# Brute force - 2 loop approach (complexity: time = O(n^2), space = O(1))
# Note: This is not accepted by leetcode because of high time complexity
def containsDuplicate1(self, nums):
if len(nums) < 2:
return False
for i in range(len(nums)-1):
for j in range(i+1, len(nums)):
if nums[i] == nums[j]:
return True
return False
# Sort approach (complexity: time = O(n log n), space = O(1))
def containsDuplicate2(self, nums):
if len(nums) < 2: #if num elements is less than 2, no duplicates
return False
nums.sort()
for i in range(len(nums)-1):
if nums[i] == nums[i+1]:
return True
return False
# Dictionary approach (complexity: time = O(n), space = O(n))
# Use a dictionary to store all numbers; if a number is seen a 2nd time, return immediately.
# Time complexity is O(n) since a single for-loop looks at all numbers.
def containsDuplicate3(self, nums):
seen = {}
for num in nums:
if num not in seen: seen[num] = 1
else: return True
return False
# Set approach (complexity: time = O(n), space = O(n))
def containsDuplicate4(self, nums):
return len(nums) != len(set(nums))
|
# @file Contains Duplicate
# @brief Given an array of numbers find if there are any duplicates
# https://leetcode.com/problems/contains-duplicate/
'''
Given an array of integers, find if the array contains any duplicates.
Your function should return true if any value appears at least twice in the
array, and it should return false if every element is distinct.
'''
# Brute force - 2 loop approach (complexity: time = O(n^2), space = O(1))
# Note: This is not accepted by leetcode because of high time complexity
def containsDuplicate1(self, nums):
if len(nums) < 2:
return False
for i in range(len(nums)-1):
for j in range(i+1, len(nums)):
if nums[i] == nums[j]:
return True
return False
# Sort approach (complexity: time = O(n log n), space = O(1))
def containsDuplicate2(self, nums):
if len(nums) < 2: #if num elements is less than 2, no duplicates
return False
nums.sort()
for i in range(len(nums)-1):
if nums[i] == nums[i+1]:
return True
return False
# Dictionary approach (complexity: time = O(n), space = O(n))
def containsDuplicate3(self, nums):
seen = {}
for elem in nums:
seen[elem] = seen.get(elem, 0) + 1
if seen[elem] > 1:
return True
return False
# Set approach (complexity: time = O(n), space = O(n))
def containsDuplicate4(self, nums):
return len(nums) != len(set(nums))
|
mit
|
Python
|
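A set supports the same early-exit idea as the dictionary approach above with less bookkeeping (time O(n), space O(n)); a small self-contained sketch:

def contains_duplicate(nums):
    seen = set()
    for num in nums:
        if num in seen:
            return True
        seen.add(num)
    return False

assert contains_duplicate([1, 2, 3, 1]) is True
assert contains_duplicate([1, 2, 3]) is False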
e10195f93cb39afcc432ec25466073cec093b2bb
|
remove debug output
|
anthraxx/arch-security-tracker,anthraxx/arch-security-tracker,anthraxx/arch-security-tracker,archlinux/arch-security-tracker,jelly/arch-security-tracker,archlinux/arch-security-tracker,jelly/arch-security-tracker
|
app/form/validators.py
|
app/form/validators.py
|
from wtforms.validators import ValidationError
from app.pacman import get_pkg
from app.util import multiline_to_list
from pyalpm import vercmp
class ValidPackageName(object):
def __init__(self):
self.message = u'Unknown package.'
def __call__(self, form, field):
versions = get_pkg(field.data)
if not versions:
raise ValidationError(self.message)
class ValidPackageNames(object):
def __init__(self):
self.message = u'Unknown package {}.'
def __call__(self, form, field):
pkgnames = multiline_to_list(field.data)
for pkgname in pkgnames:
versions = get_pkg(pkgname)
if not versions:
raise ValidationError(self.message.format(pkgname))
class SamePackageVersions(object):
def __init__(self):
self.message = u'Mismatching version {}.'
def __call__(self, form, field):
pkgnames = multiline_to_list(field.data)
ref_version = None
for pkgname in pkgnames:
versions = get_pkg(pkgname)
ref_version = ref_version if ref_version else versions[0]
if not versions or 0 != vercmp(ref_version.version, versions[0].version):
raise ValidationError(self.message.format(pkgname))
|
from wtforms.validators import ValidationError
from app.pacman import get_pkg
from app.util import multiline_to_list
from pyalpm import vercmp
class ValidPackageName(object):
def __init__(self):
self.message = u'Unknown package.'
def __call__(self, form, field):
versions = get_pkg(field.data)
if not versions:
raise ValidationError(self.message)
class ValidPackageNames(object):
def __init__(self):
self.message = u'Unknown package {}.'
def __call__(self, form, field):
pkgnames = multiline_to_list(field.data)
print(pkgnames)
for pkgname in pkgnames:
print(pkgname)
versions = get_pkg(pkgname)
if not versions:
raise ValidationError(self.message.format(pkgname))
class SamePackageVersions(object):
def __init__(self):
self.message = u'Mismatching version {}.'
def __call__(self, form, field):
pkgnames = multiline_to_list(field.data)
ref_version = None
for pkgname in pkgnames:
versions = get_pkg(pkgname)
ref_version = ref_version if ref_version else versions[0]
if not versions or 0 != vercmp(ref_version.version, versions[0].version):
raise ValidationError(self.message.format(pkgname))
|
mit
|
Python
|
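The validators above follow WTForms' callable-class convention: __init__ stores the message and __call__ raises ValidationError. A minimal sketch of the same pattern with an invented MaxLines validator (the class name and limit are illustrative, only wtforms itself is assumed):

from wtforms.validators import ValidationError

class MaxLines(object):
    def __init__(self, limit=10):
        self.limit = limit
        self.message = u'Too many lines: {} (max {}).'

    def __call__(self, form, field):
        # count the lines the user submitted and reject oversized input
        lines = (field.data or '').splitlines()
        if len(lines) > self.limit:
            raise ValidationError(self.message.format(len(lines), self.limit))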
e8b4f7d6917647d4158ed39991cd00cfe45a0264
|
Add some support for bigg.ucsd.edu/api/v2
|
biosustain/cameo,biosustain/cameo,KristianJensen/cameo
|
cameo/webmodels.py
|
cameo/webmodels.py
|
# Copyright 2014 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
WebModels API
-------------
A high-level API for retrieving models from the
http://darwin.di.uminho.pt/models database
"""
from __future__ import absolute_import, print_function
import json
import requests
from pandas import DataFrame
import tempfile
import logging
logger = logging.getLogger(__name__)
class NotFoundException(Exception):
def __init__(self, type, index, *args, **kwargs):
message = "Could not retrieve %s for entry with index %i" % (type, index)
Exception.__init__(self, message, *args, **kwargs)
def index_models(host="http://darwin.di.uminho.pt/models"):
"""
Retrieves a summary of all models in the database.
Parameters
----------
host: the service host (optional, default: http://darwin.di.uminho.pt/models)
Returns
-------
pandas.DataFrame
summary of the models in the database
"""
uri = host + "/models.json"
try:
response = requests.get(uri)
except requests.ConnectionError as e:
logger.error("Cannot reach %s. Are you sure that you are connected to the internet?" % host)
raise e
if response.status_code == 200:
response = json.loads(response.text)
return DataFrame(response, columns=["id", "name", "doi", "author", "year", "formats", "organism", "taxonomy"])
else:
raise Exception("Could not index available models. %s returned status code %d" % (host, response.status_code))
def get_sbml_file(index, host="http://darwin.di.uminho.pt/models"):
temp = tempfile.NamedTemporaryFile()
uri = host + "/models/%i.sbml" % index
response = requests.get(uri)
if response.status_code == 200:
temp.write(response.text.encode('utf-8'))
temp.flush()
return temp
raise NotFoundException("sbml", index)
def index_models_bigg():
response = requests.get('http://bigg.ucsd.edu/api/v2/models')
if response.ok:
return DataFrame.from_dict(response.json()['results'])
else:
raise Exception("Could not index available models. bigg.ucsd.edu returned status code {}".format(response.status_code))
def get_model_from_bigg(id):
response = requests.get('http://bigg.ucsd.edu/api/v2/models/{}/download'.format(id))
if response.ok:
return DataFrame.from_dict(response.json()['results'])
else:
raise Exception("Could not download model {}. bigg.ucsd.edu returned status code {}".format(id, response.status_code))
if __name__ == "__main__":
print(index_models())
from cameo import load_model
model = load_model(get_sbml_file(2))
print(model.objective)
|
# Copyright 2014 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
WebModels API
-------------
A high-level API for retrieving models from the
http://darwin.di.uminho.pt/models database
"""
from __future__ import absolute_import, print_function
import json
import requests
from pandas import DataFrame
import tempfile
import logging
logger = logging.getLogger(__name__)
class NotFoundException(Exception):
def __init__(self, type, index, *args, **kwargs):
message = "Could not retrieve %s for entry with index %i" % (type, index)
Exception.__init__(self, message, *args, **kwargs)
def index_models(host="http://darwin.di.uminho.pt/models"):
"""
Retrieves a summary of all models in the database.
Parameters
----------
host: the service host (optional, default: http://darwin.di.uminho.pt/models)
Returns
-------
pandas.DataFrame
summary of the models in the database
"""
uri = host + "/models.json"
try:
response = requests.get(uri)
except requests.ConnectionError as e:
logger.error("Cannot reach %s. Are you sure that you are connected to the internet?" % host)
raise e
if response.status_code == 200:
response = json.loads(response.text)
return DataFrame(response, columns=["id", "name", "doi", "author", "year", "formats", "organism", "taxonomy"])
else:
raise Exception("Could not index available models. %s returned status code %d" % (host, response.status_code))
def get_sbml_file(index, host="http://darwin.di.uminho.pt/models"):
temp = tempfile.NamedTemporaryFile()
uri = host + "/models/%i.sbml" % index
response = requests.get(uri)
if response.status_code == 200:
temp.write(response.text.encode('utf-8'))
temp.flush()
return temp
raise NotFoundException("sbml", index)
if __name__ == "__main__":
print(index_models())
from cameo import load_model
model = load_model(get_sbml_file(2))
print(model.objective)
|
apache-2.0
|
Python
|
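index_models_bigg and get_model_from_bigg above repeat a status-check-or-raise pattern; a hedged sketch of the same idea using response.raise_for_status() and an explicit timeout (the endpoint URL is taken from the diff, the helper name is mine):

import requests

def fetch_json(url, timeout=10):
    response = requests.get(url, timeout=timeout)
    response.raise_for_status()  # raises requests.HTTPError on 4xx/5xx
    return response.json()

# models = fetch_json('http://bigg.ucsd.edu/api/v2/models')['results']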
314931a10f74afc6660b132db6eb2f9aaa3d640c
|
Detach mcpat.py
|
learning-on-chip/studio
|
sniper/scripts/bullet.py
|
sniper/scripts/bullet.py
|
import os, sim, sys, time
output = sim.config.output_dir
period = 1e6 * sim.util.Time.NS
mcpat_bin = os.path.join(os.getenv('TOOLS_ROOT'), 'mcpat.py')
if not os.path.exists(mcpat_bin): die('cannot find mcpat.py')
bullet_bin = os.path.join(os.getenv('BULLET_ROOT'), 'bin', 'bullet')
if not os.path.exists(bullet_bin): die('cannot find bullet')
redis_bin = 'redis-cli'
server = '127.0.0.1:6379'
queue = 'bullet-queue'
database = os.path.join(output, 'database.sqlite3')
class Bullet:
def setup(self, args):
bullet_start()
self.t_last = 0
sim.util.Every(period, self.periodic, roi_only = True)
def periodic(self, time, elapsed):
report('Time %.2f ms, elapsed %.2f ms' % (
time / sim.util.Time.MS, elapsed / sim.util.Time.MS
))
self.process(time)
def hook_sim_end(self):
self.process(sim.stats.get('performance_model', 0, 'elapsed_time'))
bullet_stop()
def process(self, time):
time = coarse(time)
sim.stats.write(str(time))
if self.t_last: self.compute_power(self.t_last, time)
self.t_last = time
def compute_power(self, t0, t1):
filebase = os.path.join(output, 'power-%s-%s-%s' % (t0, t1, t1 - t0))
bullet_send(filebase, t0, t1)
def coarse(time):
return long(long(time) / sim.util.Time.NS)
def bullet_send(filebase, t0, t1):
prepare = "%s -o %s -d %s --partial=%s:%s" % (mcpat_bin, filebase, output, t0, t1)
enqueue = "(%s RPUSH %s %s > /dev/null)" % (redis_bin, queue, filebase + '.xml')
run('unset PYTHONHOME && %s && %s &' % (prepare, enqueue))
def bullet_start():
run('%s -s %s -d %s -c &' % (bullet_bin, server, database))
def bullet_stop():
run('%s RPUSH %s bullet:halt > /dev/null' % (redis_bin, queue))
def die(message):
print('Error: %s.' % message)
sys.exit(1)
def report(message):
print('-------> %s' % message)
def run(command):
if os.system(command) != 0: die('failed to run `%s`' % command)
sim.util.register(Bullet())
|
import os, sim, sys, time
output = sim.config.output_dir
period = 1e6 * sim.util.Time.NS
mcpat_bin = os.path.join(os.getenv('TOOLS_ROOT'), 'mcpat.py')
if not os.path.exists(mcpat_bin): die('cannot find mcpat.py')
bullet_bin = os.path.join(os.getenv('BULLET_ROOT'), 'bin', 'bullet')
if not os.path.exists(bullet_bin): die('cannot find bullet')
redis_bin = 'redis-cli'
server = '127.0.0.1:6379'
job_queue = 'bullet-queue'
database = os.path.join(output, 'database.sqlite3')
class Bullet:
def setup(self, args):
bullet_run()
self.t_last = 0
sim.util.Every(period, self.periodic, roi_only = True)
def periodic(self, time, elapsed):
report('Time %.2f ms, elapsed %.2f ms' % (
time / sim.util.Time.MS, elapsed / sim.util.Time.MS
))
self.process(time)
def hook_sim_end(self):
self.process(sim.stats.get('performance_model', 0, 'elapsed_time'))
redis_run('bullet:halt')
def process(self, time):
time = coarse(time)
sim.stats.write(str(time))
if self.t_last: self.compute_power(self.t_last, time)
self.t_last = time
def compute_power(self, t0, t1):
filename = os.path.join(output, 'power-%s-%s-%s' % (t0, t1, t1 - t0))
mcpat_run(filename, t0, t1)
redis_run(filename + '.xml')
def coarse(time):
return long(long(time) / sim.util.Time.NS)
def bullet_run():
run('%s -s %s -d %s -c &' % (bullet_bin, server, database))
def die(message):
print('Error: %s.' % message)
sys.exit(1)
def mcpat_run(filename, t0, t1):
run('unset PYTHONHOME; %s -o %s -d %s --partial=%s:%s' % (
mcpat_bin, filename, output, t0, t1
))
def redis_run(filename):
run('%s RPUSH %s %s > /dev/null' % (redis_bin, job_queue, filename))
def report(message):
print('-------> %s' % message)
def run(command):
if os.system(command) != 0: die('failed to run `%s`' % command)
sim.util.register(Bullet())
|
mit
|
Python
|
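bullet.py enqueues work by shelling out to redis-cli RPUSH; a sketch of the equivalent push with the redis-py client, which is an assumption here (the original deliberately sticks to the CLI):

import redis  # pip install redis

def enqueue(filename, queue='bullet-queue', host='127.0.0.1', port=6379):
    client = redis.Redis(host=host, port=port)
    client.rpush(queue, filename)  # equivalent of: redis-cli RPUSH <queue> <filename>

# enqueue('power-0-1000000-1000000.xml')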
0fe6e79f9bc201b3c63ad6f1ce400c9d79bd484a
|
Fix path issues in example script
|
djmattyg007/IdiotScript
|
bin/idiotscript.py
|
bin/idiotscript.py
|
#!/usr/bin/python3
import os, sys
def alter_path():
script_path = os.path.dirname(os.path.realpath(__file__))
try:
path_index = sys.path.index(script_path)
except ValueError:
return
sys.path.pop(path_index)
alter_path()
if len(sys.argv) < 2 or len(sys.argv) > 3:
print("Invalid number of arguments.")
sys.exit()
if not os.path.isfile(sys.argv[1]):
print("IdiotScript program does not exist.")
sys.exit()
import io
import idiotscript
from idiotscript import InstructionSet, Collector, ScriptParser, ScriptRunner, InputContainer
from idiotscript import formatters
def get_input():
if len(sys.argv) == 3:
# We've been passed a filename for the input content
with open(sys.argv[2], "r", encoding = "utf-8") as input_file:
return input_file.read()
else:
# Assume we're receiving data from stdin
from io import StringIO
try:
stdin_file = sys.stdin.buffer.read()
except AttributeError:
stdin_file = sys.stdin.read()
io_obj = StringIO(stdin_file.decode("utf-8"))
return io_obj.read()
# Prepare the default instruction set.
my_iset = idiotscript.load_default_instruction_set(InstructionSet())
# Initialise the script parser with the default instruction set.
# We need to pass it an instruction list factory, as it's going to
# be creating lots of them.
parser = ScriptParser(my_iset, idiotscript.ilist_factory)
# Load the IdiotScript program into memory
with open(sys.argv[1]) as program_file:
program = program_file.read()
my_ilist = parser.parse(program)
inputtext = get_input()
my_collector = Collector()
runner = ScriptRunner(InputContainer(inputtext))
runner.run(my_ilist, my_collector)
nl_formatter = formatters.NewlineFormatter()
print(nl_formatter.format(my_collector))
|
#!/usr/bin/python3
import os, sys
if len(sys.argv) < 2 or len(sys.argv) > 3:
print("Invalid number of arguments.")
sys.exit()
if not os.path.isfile(sys.argv[1]):
print("IdiotScript program does not exist.")
sys.exit()
import io
import idiotscript
from idiotscript import InstructionSet, Collector, ScriptParser, ScriptRunner, InputContainer
from idiotscript import formatters
def get_input():
if len(sys.argv) == 3:
# We've been passed a filename for the input content
with open(sys.argv[2], "r", encoding = "utf-8") as input_file:
return input_file.read()
else:
# Assume we're receiving data from stdin
from io import StringIO
try:
stdin_file = sys.stdin.buffer.read()
except AttributeError:
stdin_file = sys.stdin.read()
io_obj = StringIO(stdin_file.decode("utf-8"))
return io_obj.read()
# Prepare the default instruction set.
my_iset = idiotscript.load_default_instruction_set(InstructionSet())
# Initialise the script parser with the default instruction set.
# We need to pass it an instruction list factory, as it's going to
# be creating lots of them.
parser = ScriptParser(my_iset, idiotscript.ilist_factory)
# Load the IdiotScript program into memory
with open(sys.argv[1]) as program_file:
program = program_file.read()
my_ilist = parser.parse(program)
inputtext = get_input()
my_collector = Collector()
runner = ScriptRunner(InputContainer(inputtext))
runner.run(my_ilist, my_collector)
nl_formatter = formatters.NewlineFormatter()
print(nl_formatter.format(my_collector))
|
unlicense
|
Python
|
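alter_path above removes the script's own directory from sys.path so a local module cannot shadow an installed package of the same name; list.remove gives a slightly shorter version of the same fix. A standalone sketch:

import os
import sys

def drop_script_dir():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    try:
        sys.path.remove(script_dir)  # raises ValueError if already absent
    except ValueError:
        pass

drop_script_dir()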
d926f321a26fe7c6b72513f88fe60bc4f3c899e4
|
Update model cnes bed
|
daniel1409/dataviva-api,DataViva/dataviva-api
|
app/models/cnes_bed.py
|
app/models/cnes_bed.py
|
from sqlalchemy import Column, Integer, String, func
from app import db
class CnesBed(db.Model):
__tablename__ = 'cnes_bed'
year = Column(Integer, primary_key=True)
region = Column(String(1), primary_key=True)
mesoregion = Column(String(4), primary_key=True)
microregion = Column(String(5), primary_key=True)
state = Column(String(2), primary_key=True)
municipality = Column(String(7), primary_key=True)
establishment = Column(String(7), primary_key=True)
unit_type = Column(String(2), primary_key=True)
bed_type = Column(String(1), primary_key=True)
bed_type_per_specialty = Column(String(2), primary_key=True)
number_existing_bed = Column(Integer, primary_key=True)
number_existing_contract = Column(Integer, primary_key=True)
number_sus_bed = Column(Integer, primary_key=True)
number_non_sus_bed = Column(Integer, primary_key=True)
health_region = Column(String(5), primary_key=True)
@classmethod
def dimensions(cls):
return [
'year',
'region',
'mesoregion',
'microregion',
'state',
'municipality',
'establishment',
'bed_type',
'bed_type_per_specialty',
'health_region'
]
@classmethod
def aggregate(cls, value):
return {
'beds': func.count()
}[value]
@classmethod
def values(cls):
return ['beds']
|
from sqlalchemy import Column, Integer, String, func
from app import db
class CnesBed(db.Model):
__tablename__ = 'cnes_bed'
year = Column(Integer, primary_key=True)
region = Column(String(1), primary_key=True)
mesoregion = Column(String(4), primary_key=True)
microregion = Column(String(5), primary_key=True)
state = Column(String(2), primary_key=True)
municipality = Column(String(7), primary_key=True)
cnes = Column(String(7), primary_key=True)
bed_type = Column(String(7), primary_key=True)
@classmethod
def dimensions(cls):
return [
'year',
'region',
'mesoregion',
'microregion',
'state',
'municipality',
'cnes',
'bed_type',
]
@classmethod
def aggregate(cls, value):
return {
'beds': func.count()
}[value]
@classmethod
def values(cls):
return ['beds']
|
mit
|
Python
|
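The dimensions()/aggregate() classmethods above define a small query contract; a sketch of how a caller might consume it to build a grouped count (the session object and helper name are assumed, not part of the repo):

def grouped_beds(session, model, dimension):
    column = getattr(model, dimension)  # e.g. CnesBed.state
    query = session.query(column, model.aggregate('beds')).group_by(column)
    return query.all()

# rows = grouped_beds(session, CnesBed, 'state')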
8fb80540499d0f303d68150304fd896367313f94
|
remove incorrect is instance check in children_changed
|
cornhundred/ipywidgets,cornhundred/ipywidgets,cornhundred/ipywidgets,ipython/ipywidgets,SylvainCorlay/ipywidgets,ipython/ipywidgets,jupyter-widgets/ipywidgets,SylvainCorlay/ipywidgets,cornhundred/ipywidgets,cornhundred/ipywidgets,SylvainCorlay/ipywidgets,jupyter-widgets/ipywidgets,SylvainCorlay/ipywidgets,ipython/ipywidgets,ipython/ipywidgets,jupyter-widgets/ipywidgets,jupyter-widgets/ipywidgets,ipython/ipywidgets
|
IPython/html/widgets/widget_container.py
|
IPython/html/widgets/widget_container.py
|
"""ContainerWidget class.
Represents a container that can be used to group other widgets.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .widget import DOMWidget
from IPython.utils.traitlets import Unicode, Tuple, Instance, TraitError
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class TupleOfDOMWidgets(Tuple):
"""Like Tuple(Instance(DOMWidget)), but without checking length."""
def validate_elements(self, obj, value):
for v in value:
if not isinstance(v, DOMWidget):
raise TraitError("Container.children must be DOMWidgets, not %r" % v)
return value
class ContainerWidget(DOMWidget):
_view_name = Unicode('ContainerView', sync=True)
# Keys, all private and managed by helper methods. Flexible box model
# classes...
children = TupleOfDOMWidgets()
_children = TupleOfDOMWidgets(sync=True)
def _children_changed(self, name, old, new):
"""Validate children list.
Makes sure only one instance of any given model can exist in the
children list.
An excellent post on uniqifiers is available at
http://www.peterbe.com/plog/uniqifiers-benchmark
which provides the inspiration for using this implementation. Below
I've implemented the `f5` algorithm using Python comprehensions."""
if new is not None:
seen = {}
def add_item(i):
seen[i.model_id] = True
return i
self._children = [add_item(i) for i in new if i.model_id not in seen]
class PopupWidget(ContainerWidget):
_view_name = Unicode('PopupView', sync=True)
description = Unicode(sync=True)
button_text = Unicode(sync=True)
|
"""ContainerWidget class.
Represents a container that can be used to group other widgets.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .widget import DOMWidget
from IPython.utils.traitlets import Unicode, Tuple, Instance, TraitError
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class TupleOfDOMWidgets(Tuple):
"""Like Tuple(Instance(DOMWidget)), but without checking length."""
def validate_elements(self, obj, value):
for v in value:
if not isinstance(v, DOMWidget):
raise TraitError("Container.children must be DOMWidgets, not %r" % v)
return value
class ContainerWidget(DOMWidget):
_view_name = Unicode('ContainerView', sync=True)
# Keys, all private and managed by helper methods. Flexible box model
# classes...
children = TupleOfDOMWidgets()
_children = TupleOfDOMWidgets(sync=True)
def _children_changed(self, name, old, new):
"""Validate children list.
Makes sure only one instance of any given model can exist in the
children list.
An excellent post on uniqifiers is available at
http://www.peterbe.com/plog/uniqifiers-benchmark
which provides the inspiration for using this implementation. Below
I've implemented the `f5` algorithm using Python comprehensions."""
if new is not None and isinstance(new, list):
seen = {}
def add_item(i):
seen[i.model_id] = True
return i
self._children = [add_item(i) for i in new if i.model_id not in seen]
class PopupWidget(ContainerWidget):
_view_name = Unicode('PopupView', sync=True)
description = Unicode(sync=True)
button_text = Unicode(sync=True)
|
bsd-3-clause
|
Python
|
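The _children_changed docstring above cites the "f5" uniqifier; a plain-Python sketch of that order-preserving de-duplication, keyed by a caller-supplied function instead of model_id:

def unique_by(items, key):
    seen = {}
    def add(item):
        seen[key(item)] = True  # register the key as a side effect
        return item
    # membership is checked before add() runs, so the first occurrence wins
    return [add(item) for item in items if key(item) not in seen]

assert unique_by([1, 2, 1, 3, 2], key=lambda x: x) == [1, 2, 3]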
d24f3a1f008e4d2bef262a0f1253da04071a180d
|
move dataset crop into generic as it is used in several datasets
|
GuessWhatGame/generic,GuessWhatGame/generic
|
data_provider/dataset.py
|
data_provider/dataset.py
|
import copy
class AbstractDataset(object):
def __init__(self, games):
self.games = games
def get_data(self, indices=list()):
if len(indices) > 0:
return [self.games[i] for i in indices]
else:
return self.games
def n_examples(self):
return len(self.games)
class CropDataset(AbstractDataset):
"""
Each game contains no question/answers but a new object
"""
def __init__(self, dataset, expand_objects):
old_games = dataset.get_data()
new_games = []
for g in old_games:
if expand_objects:
new_games += self.split(g)
else:
new_games += self.update_ref(g)
super(CropDataset, self).__init__(new_games)
@staticmethod
def load(dataset_cls, expand_objects, **kwargs):
return CropDataset(dataset_cls(**kwargs), expand_objects=expand_objects)
def split(self, game):
games = []
for obj in game.objects:
new_game = copy.copy(game)
# select new object
new_game.object = obj
new_game.object_id = obj.id
# Hack the image id to differentiate objects
new_game.image = copy.copy(game.image)
new_game.image.id = obj.id
games.append(new_game)
return games
def update_ref(self, game):
new_game = copy.copy(game)
# Hack the image id to differentiate objects
new_game.image = copy.copy(game.image)
new_game.image.id = game.object_id
return [new_game]
|
class AbstractDataset(object):
def __init__(self, games):
self.games = games
def get_data(self, indices=list()):
if len(indices) > 0:
return [self.games[i] for i in indices]
else:
return self.games
def n_examples(self):
return len(self.games)
class DatasetMerger(AbstractDataset):
def __init__(self, datasets):
games = []
for d in datasets:
games += d.get_data()
super(DatasetMerger, self).__init__(games)
|
apache-2.0
|
Python
|
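CropDataset relies on copy.copy() producing a shallow copy, which is why the image is copied a second time before its id is overwritten. A reduced demonstration with stand-in classes (Game and Image here are not the repo's):

import copy

class Image(object):
    def __init__(self, id):
        self.id = id

class Game(object):
    def __init__(self, image):
        self.image = image

original = Game(Image(1))
clone = copy.copy(original)
clone.image = copy.copy(original.image)  # without this, both games share one Image
clone.image.id = 99
assert original.image.id == 1 and clone.image.id == 99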
7907aeb6a006655ad96d5d3995b5fdbd4bf00d16
|
fix usage of get_all_enabled_projects
|
google/llvm-premerge-checks,google/llvm-premerge-checks
|
scripts/pipeline_main.py
|
scripts/pipeline_main.py
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the the Apache License v2.0 with LLVM Exceptions (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script runs in checked out llvm-project directory.
import os
from typing import Dict
from steps import generic_linux, generic_windows, from_shell_output, extend_steps_env, bazel
from sync_fork import sync_fork
import git
import yaml
from choose_projects import ChooseProjects
steps_generators = [
'${BUILDKITE_BUILD_CHECKOUT_PATH}/libcxx/utils/ci/buildkite-pipeline-snapshot.sh',
]
if __name__ == '__main__':
scripts_refspec = os.getenv("ph_scripts_refspec", "main")
no_cache = os.getenv('ph_no_cache') is not None
log_level = os.getenv('ph_log_level', 'WARNING')
notify_emails = list(filter(None, os.getenv('ph_notify_emails', '').split(',')))
# Sync the LLVM fork so that pipelines started from upstream llvm-project
# that then trigger a build on the fork will observe the commit.
sync_fork(os.path.join(os.getenv('BUILDKITE_BUILD_PATH', ''), 'llvm-project-fork'), [os.getenv('BUILDKITE_BRANCH'), 'main'])
steps = []
env: Dict[str, str] = {}
for e in os.environ:
if e.startswith('ph_'):
env[e] = os.getenv(e, '')
repo = git.Repo('.')
cp = ChooseProjects(None)
linux_projects = cp.get_all_enabled_projects('linux')
steps.extend(generic_linux(os.getenv('ph_projects', ';'.join(linux_projects)), check_diff=False))
windows_projects = cp.get_all_enabled_projects('windows')
steps.extend(generic_windows(os.getenv('ph_projects', ';'.join(windows_projects))))
steps.extend(bazel([], force=True))
if os.getenv('ph_skip_generated') is None:
env = os.environ.copy()
# BUILDKITE_COMMIT might be an alias, e.g. "HEAD". Resolve it to make the build hermetic.
if ('BUILDKITE_COMMIT' not in env) or (env['BUILDKITE_COMMIT'] == "HEAD"):
env['BUILDKITE_COMMIT'] = repo.head.commit.hexsha
for gen in steps_generators:
steps.extend(from_shell_output(gen, env=env))
notify = []
for e in notify_emails:
notify.append({'email': e})
extend_steps_env(steps, env)
print(yaml.dump({'steps': steps, 'notify': notify}))
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the the Apache License v2.0 with LLVM Exceptions (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script runs in checked out llvm-project directory.
import os
from typing import Dict
from steps import generic_linux, generic_windows, from_shell_output, extend_steps_env, bazel
from sync_fork import sync_fork
import git
import yaml
from choose_projects import ChooseProjects
steps_generators = [
'${BUILDKITE_BUILD_CHECKOUT_PATH}/libcxx/utils/ci/buildkite-pipeline-snapshot.sh',
]
if __name__ == '__main__':
scripts_refspec = os.getenv("ph_scripts_refspec", "main")
no_cache = os.getenv('ph_no_cache') is not None
log_level = os.getenv('ph_log_level', 'WARNING')
notify_emails = list(filter(None, os.getenv('ph_notify_emails', '').split(',')))
# Sync the LLVM fork so that pipelines started from upstream llvm-project
# that then trigger a build on the fork will observe the commit.
sync_fork(os.path.join(os.getenv('BUILDKITE_BUILD_PATH', ''), 'llvm-project-fork'), [os.getenv('BUILDKITE_BRANCH'), 'main'])
steps = []
env: Dict[str, str] = {}
for e in os.environ:
if e.startswith('ph_'):
env[e] = os.getenv(e, '')
repo = git.Repo('.')
cp = ChooseProjects(None)
linux_projects, _ = cp.get_all_enabled_projects('linux')
steps.extend(generic_linux(os.getenv('ph_projects', ';'.join(linux_projects)), check_diff=False))
windows_projects, _ = cp.get_all_enabled_projects('windows')
steps.extend(generic_windows(os.getenv('ph_projects', ';'.join(windows_projects))))
steps.extend(bazel([], force=True))
if os.getenv('ph_skip_generated') is None:
env = os.environ.copy()
# BUILDKITE_COMMIT might be an alias, e.g. "HEAD". Resolve it to make the build hermetic.
if ('BUILDKITE_COMMIT' not in env) or (env['BUILDKITE_COMMIT'] == "HEAD"):
env['BUILDKITE_COMMIT'] = repo.head.commit.hexsha
for gen in steps_generators:
steps.extend(from_shell_output(gen, env=env))
notify = []
for e in notify_emails:
notify.append({'email': e})
extend_steps_env(steps, env)
print(yaml.dump({'steps': steps, 'notify': notify}))
|
apache-2.0
|
Python
|
c38ad4f02d6f036f23ef6d3c1e033e9843fb068a
|
comment unused tests
|
simodalla/mezzanine_nowait,simodalla/mezzanine_nowait,simodalla/mezzanine_nowait
|
functional_tests/test_admins.py
|
functional_tests/test_admins.py
|
# -*- coding: utf-8 -*-
# from __future__ import unicode_literals, absolute_import
#
# from django.contrib.admin.templatetags.admin_urls import admin_urlname
#
# from .base import FunctionalTest
# from nowait.tests.factories import AdminF
# from nowait.models import BookingType
#
#
# class AdminTest(FunctionalTest):
# def setUp(self):
# super(AdminTest, self).setUp()
# self.admin = AdminF()
# self.create_pre_authenticated_session(self.admin)
#
# def test_add_booking_type(self):
# self.browser.get(
# self.get_url(admin_urlname(BookingType._meta, 'changelist')))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from unittest import skip
from django.contrib.admin.templatetags.admin_urls import admin_urlname
from .base import FunctionalTest
from nowait.tests.factories import AdminF
from nowait.models import BookingType
@skip
class AdminTest(FunctionalTest):
def setUp(self):
super(AdminTest, self).setUp()
self.admin = AdminF()
self.create_pre_authenticated_session(self.admin)
def test_add_booking_type(self):
self.browser.get(
self.get_url(admin_urlname(BookingType._meta, 'changelist')))
|
bsd-3-clause
|
Python
|
18bc54f964a2925005543df8b4989271ad4464be
|
Fix inheritance in soc.models.base module. FieldsProxy inherited from DbModelForm which was deleted in previous commits (replace that with BaseForm).
|
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
|
app/soc/models/base.py
|
app/soc/models/base.py
|
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing enhanced db.Model classes.
The classes in this module are intended to serve as base classes for all
Melange Datastore Models.
"""
__authors__ = [
'"Todd Larsen" <[email protected]>',
]
from google.appengine.ext import db
from soc.views.helper import forms as forms_helper
class ModelWithFieldAttributes(db.Model):
"""A db.Model extension that provides access to Model properties attributes.
Due to the way the Property class in Google App Engine implements __get__()
and __set__(), it is not possible to access attributes of Model properties,
such as verbose_name, from within a Django template. This class works
around that limitation by creating an inner Form class per Model class,
since an unbound Form object contains (most of?) the property attributes
attached to each corresponding Form field.
Some attributes are renamed during the conversion from a Model Property
to a Form field; for example, verbose_name becomes label. This is tolerable
because any actual Form code refers to these new names, so they should
be familiar to view creators.
"""
_fields_cache = None
@classmethod
def fields(cls):
"""Called by the Django template engine during template instantiation.
Since the attribute names use the Form fields naming instead of the
Property attribute naming, accessing, for example:
{{ entity.property.verbose_name }}
is accomplished using:
{{ entity.fields.property.label }}
Args:
cls: Model class, so that each Model class can create its own
unbound Form the first time fields() is called by the Django
template engine.
Returns:
A (created-on-first-use) unbound Form object that can be used to
access Property attributes that are not accessible from the
Property itself via the Model entity.
"""
if not cls._fields_cache or (cls != cls._fields_cache.__class__.Meta.model):
class FieldsProxy(forms_helper.BaseForm):
"""Form used as a proxy to access User model properties attributes.
"""
class Meta:
"""Inner Meta class that pairs the User Model with this "form".
"""
#: db.Model subclass for which to access model properties attributes
model = cls
cls._fields_cache = FieldsProxy()
return cls._fields_cache
|
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing enhanced db.Model classes.
The classes in this module are intended to serve as base classes for all
Melange Datastore Models.
"""
__authors__ = [
'"Todd Larsen" <[email protected]>',
]
from google.appengine.ext import db
from soc.views.helper import forms as forms_helper
class ModelWithFieldAttributes(db.Model):
"""A db.Model extension that provides access to Model properties attributes.
Due to the way the Property class in Google App Engine implements __get__()
and __set__(), it is not possible to access attributes of Model properties,
such as verbose_name, from within a Django template. This class works
around that limitation by creating an inner Form class per Model class,
since an unbound Form object contains (most of?) the property attributes
attached to each corresponding Form field.
Some attributes are renamed during the conversion from a Model Property
to a Form field; for example, verbose_name becomes label. This is tolerable
because any actual Form code refers to these new names, so they should
be familiar to view creators.
"""
_fields_cache = None
@classmethod
def fields(cls):
"""Called by the Django template engine during template instantiation.
Since the attribute names use the Form fields naming instead of the
Property attribute naming, accessing, for example:
{{ entity.property.verbose_name }}
is accomplished using:
{{ entity.fields.property.label }}
Args:
cls: Model class, so that each Model class can create its own
unbound Form the first time fields() is called by the Django
template engine.
Returns:
A (created-on-first-use) unbound Form object that can be used to
access Property attributes that are not accessible from the
Property itself via the Model entity.
"""
if not cls._fields_cache or (cls != cls._fields_cache.__class__.Meta.model):
class FieldsProxy(forms_helper.DbModelForm):
"""Form used as a proxy to access User model properties attributes.
"""
class Meta:
"""Inner Meta class that pairs the User Model with this "form".
"""
#: db.Model subclass for which to access model properties attributes
model = cls
cls._fields_cache = FieldsProxy()
return cls._fields_cache
|
apache-2.0
|
Python
|
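fields() above caches one unbound form per Model class and rebuilds it when called from a different class than the one that populated it; a reduced sketch of that per-class cache check with illustrative names:

class Base(object):
    _proxy_cache = None

    @classmethod
    def proxy(cls):
        # rebuild when empty or when the cache belongs to another class
        if cls._proxy_cache is None or cls._proxy_cache['model'] is not cls:
            cls._proxy_cache = {'model': cls}
        return cls._proxy_cache

class A(Base):
    pass

class B(Base):
    pass

assert A.proxy()['model'] is A
assert B.proxy()['model'] is B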
b4acc5d4c5f6b3e94225ca5926d06a50c511173c
|
add json exporter
|
Informationretrieval2016/furnito
|
furnito_crawler/json_manager.py
|
furnito_crawler/json_manager.py
|
import json
class Json_Manager:
def __init__(self):
pass
def export_json(self, path, json_content):
'''
@usage: export a dict to json and store it on local storage
@arg: path, path to store the json file, string, e.g. 'downloads/1.json'
@arg: json_content, the content to export, dictionary format
'''
with open(path, 'w') as json_file:
json.dump(json_content, json_file, ensure_ascii=True, indent=2)
|
class Json_Manager:
def __init__(self):
pass
|
mit
|
Python
|
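A usage sketch for the exporter above, assuming Json_Manager is importable; the path and payload are illustrative:

manager = Json_Manager()
manager.export_json('example.json', {'name': 'chair', 'price': 42})
# example.json now contains the dict, ASCII-escaped and indented by 2 spaces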
5bde6ca1fd62277463156875e874c4c6843923fd
|
Use the correct variable for the test
|
luzfcb/cookiecutter-pytest-plugin,pytest-dev/cookiecutter-pytest-plugin,s0undt3ch/cookiecutter-pytest-plugin
|
pytest-{{cookiecutter.plugin_name}}/tests/test_{{cookiecutter.plugin_name}}.py
|
pytest-{{cookiecutter.plugin_name}}/tests/test_{{cookiecutter.plugin_name}}.py
|
# -*- coding: utf-8 -*-
def test_bar_fixture(testdir):
"""Make sure that pytest accepts our fixture."""
# create a temporary pytest test module
testdir.makepyfile("""
def test_sth(bar):
assert bar == "europython2015"
""")
# run pytest with the following cmd args
result = testdir.runpytest(
'--foo=something',
'-v'
)
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'*::test_sth PASSED*',
])
# make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
def test_help_message(testdir):
result = testdir.runpytest(
'--help',
)
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'{{cookiecutter.plugin_name}}:',
'*--foo=DEST_FOO*Set the value for the fixture "bar".',
])
|
# -*- coding: utf-8 -*-
def test_bar_fixture(testdir):
"""Make sure that pytest accepts our fixture."""
# create a temporary pytest test module
testdir.makepyfile("""
def test_sth(bar):
assert bar == "europython2015"
""")
# run pytest with the following cmd args
result = testdir.runpytest(
'--foo=something',
'-v'
)
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'*::test_sth PASSED*',
])
# make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
def test_help_message(testdir):
result = testdir.runpytest(
'--help',
)
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'cat:',
'*--foo=DEST_FOO*Set the value for the fixture "bar".',
])
|
mit
|
Python
|
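The tests above use pytest's testdir fixture to run a generated test file in a subprocess and fnmatch its output; a minimal self-contained sketch (enabling the pytester plugin explicitly):

pytest_plugins = "pytester"

def test_passing_example(testdir):
    # write a throwaway test module into a temporary directory
    testdir.makepyfile("""
        def test_ok():
            assert 1 + 1 == 2
    """)
    result = testdir.runpytest("-v")
    result.stdout.fnmatch_lines(["*::test_ok PASSED*"])
    assert result.ret == 0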
164262400dbb265a3363eb9f1415284b391c079c
|
Remove duplicate call
|
henfredemars/Fork-Lang,henfredemars/Fork-Lang,henfredemars/Fork-Lang,henfredemars/Fork-Lang,henfredemars/Fork-Lang,henfredemars/Fork-Lang,henfredemars/Fork-Lang
|
fc.py
|
fc.py
|
#!/usr/bin/python3
#Fork compiler toolchain script
import argparse
import re, os
from sys import exit
def main():
#Parse arguments
parser = argparse.ArgumentParser(description='Fork toolchain command line parser...')
parser.add_argument('-v',action='store_true',help='Use valgrind')
parser.add_argument('-c',action='store_true',help='Compile and link static binary')
parser.add_argument('files',metavar='filename',type=str,nargs='+',help='files to process')
regex_delete = re.compile("(^\s*//.*)|(^\s*$)")
args = parser.parse_args()
files = args.files
#Check that parser exists
if not os.path.exists("./parser"):
print("Parser binary not found in current directory.")
exit(1)
#Preprocessing
temp_files = [x + '.wrapper_tmp_file' for x in files]
for file_in,file_out in zip(files,temp_files):
try:
f_in = open(file_in,'r')
f_out = open(file_out,'w')
except (FileNotFoundError,PermissionError) as e:
print(e)
exit(2)
for line in f_in:
if not regex_delete.match(line):
f_out.write(line)
f_in.close()
f_out.close()
#Build temp_files
for file in temp_files:
if args.v:
print("Please ignore GC_INIT() uninitialized memory.")
os.system("valgrind --vgdb=no ./parser {}".format(file))
else:
basename = file[0:-20]
if args.c:
os.system("""echo "./parser {0} 3>&1 1>&2 2>&3 | tee {1}.ll" | bash """.format(file,basename))
print("Attemping to compile and link IR statically.")
print("Compile LLVM IR to local architecture assembly...")
os.system("llvm/build/Release+Asserts/bin/llc -O2 {0}.ll; echo ; cat {0}.s".format(basename))
print("\nInvoking GCC assembler for static compilation...")
os.system("gcc -c {0}.s -o {0}.o".format(basename))
print("Linking executable...")
os.system("g++ -std=c++11 -fomit-frame-pointer -rdynamic -fvisibility-inlines-hidden -fno-exceptions -fno-rtti -fPIC -ffunction-sections -fdata-sections -Wl,-rpath=. -o {0}.bin {0}.o lib.o".format(basename))
#Postprocessing
for file in temp_files:
os.remove(file)
if __name__=='__main__':
print('Running Fork Compiler...')
main()
|
#!/usr/bin/python3
#Fork compiler toolchain script
import argparse
import re, os
from sys import exit
def main():
#Parse arguments
parser = argparse.ArgumentParser(description='Fork toolchain command line parser...')
parser.add_argument('-v',action='store_true',help='Use valgrind')
parser.add_argument('-c',action='store_true',help='Compile and link static binary')
parser.add_argument('files',metavar='filename',type=str,nargs='+',help='files to process')
regex_delete = re.compile("(^\s*//.*)|(^\s*$)")
args = parser.parse_args()
files = args.files
#Check that parser exists
if not os.path.exists("./parser"):
print("Parser binary not found in current directory.")
exit(1)
#Preprocessing
temp_files = [x + '.wrapper_tmp_file' for x in files]
for file_in,file_out in zip(files,temp_files):
try:
f_in = open(file_in,'r')
f_out = open(file_out,'w')
except (FileNotFoundError,PermissionError) as e:
print(e)
exit(2)
for line in f_in:
if not regex_delete.match(line):
f_out.write(line)
f_in.close()
f_out.close()
#Build temp_files
for file in temp_files:
if args.v:
print("Please ignore GC_INIT() uninitialized memory.")
os.system("valgrind --vgdb=no ./parser {}".format(file))
else:
basename = file[0:-20]
os.system("./parser {0}".format(file,basename))
if args.c:
os.system("""echo "./parser {0} 3>&1 1>&2 2>&3 | tee {1}.ll" | bash """.format(file,basename))
print("Attemping to compile and link IR statically.")
print("Compile LLVM IR to local architecture assembly...")
os.system("llvm/build/Release+Asserts/bin/llc -O2 {0}.ll; echo ; cat {0}.s".format(basename))
print("\nInvoking GCC assembler for static compilation...")
os.system("gcc -c {0}.s -o {0}.o".format(basename))
print("Linking executable...")
os.system("g++ -std=c++11 -fomit-frame-pointer -rdynamic -fvisibility-inlines-hidden -fno-exceptions -fno-rtti -fPIC -ffunction-sections -fdata-sections -Wl,-rpath=. -o {0}.bin {0}.o lib.o".format(basename))
#Postprocessing
for file in temp_files:
os.remove(file)
if __name__=='__main__':
print('Running Fork Compiler...')
main()
|
apache-2.0
|
Python
|
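run() above wraps os.system; a sketch of the same fail-fast helper built on subprocess, keeping shell=True because the existing command strings rely on pipes and redirection:

import subprocess
import sys

def run(command):
    result = subprocess.run(command, shell=True)
    if result.returncode != 0:
        # sys.exit with a string prints it to stderr and exits with status 1
        sys.exit('failed to run `%s`' % command)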
1e14d68c86e0cacb9bedc51884081ef0a1cfdcdc
|
Fix for trevis
|
FedericoPonzi/Isitdown.site,FedericoPonzi/Isitdown.site,FedericoPonzi/Isitdown.site
|
isitdown/config.py
|
isitdown/config.py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
DEBUG = False
TESTING = False
DATABASE_URI = os.environ.get("ISITDOWN_DATABASE_URI", 'sqlite:///' + os.path.join(basedir, 'app.db'))
SECRET_KEY = os.environ.get('ISITDOWN_SECRET_KEY', 'you-will-never-guess')
SQLALCHEMY_DATABASE_URI = str(DATABASE_URI)
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = False
BACKOFF_API_CALL_TIME = 30 * 1e3 # ms
class DevelopmentConfig(Config):
DEBUG = True
BACKOFF_API_CALL_TIME = 2 * 1e3 # ms
class TestingConfig(Config):
TESTING = True
BACKOFF_API_CALL_TIME = 0 # ms
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
DEBUG = False
TESTING = False
DATABASE_URI = os.environ["ISITDOWN_DATABASE_URI"] or 'sqlite:///' + os.path.join(basedir, 'app.db')
SECRET_KEY = os.environ['ISITDOWN_SECRET_KEY'] or 'you-will-never-guess'
SQLALCHEMY_DATABASE_URI = str(DATABASE_URI)
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = False
BACKOFF_API_CALL_TIME = 30 * 1e3 # ms
class DevelopmentConfig(Config):
DEBUG = True
BACKOFF_API_CALL_TIME = 2 * 1e3 # ms
class TestingConfig(Config):
TESTING = True
BACKOFF_API_CALL_TIME = 0 # ms
|
apache-2.0
|
Python
|
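The fix above replaces os.environ[...] (which raises KeyError when a variable is unset) with os.environ.get(...) and a default; a tiny demonstration with an illustrative variable name:

import os

os.environ.pop('DEMO_SECRET', None)          # make sure it is unset
# os.environ['DEMO_SECRET']                  # would raise KeyError here
assert os.environ.get('DEMO_SECRET', 'fallback') == 'fallback'

os.environ['DEMO_SECRET'] = 's3cret'
assert os.environ.get('DEMO_SECRET', 'fallback') == 's3cret'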
16d87a91bcd6eb5cdb23d6aeb45e48ca7baf181a
|
remove unnecessary import
|
dschmaryl/golf-flask,dschmaryl/golf-flask,dschmaryl/golf-flask
|
createdb.py
|
createdb.py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask('app')
app.config.from_object('config')
db = SQLAlchemy(app)
db.create_all()
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask('app')
app.config.from_object('config')
db = SQLAlchemy(app)
from app import models
db.create_all()
|
mit
|
Python
|
ee1b02d7327eeeeb65115c705a0df1ffd7c82034
|
Make random election view a bit more random
|
DemocracyClub/Website,DemocracyClub/Website,DemocracyClub/Website,DemocracyClub/Website
|
democracy_club/apps/everyelection/views.py
|
democracy_club/apps/everyelection/views.py
|
import random
from django.db.models import Count
from django.shortcuts import get_object_or_404
from django.views.generic import RedirectView, UpdateView
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from braces.views import LoginRequiredMixin
from .models import AuthorityElection, AuthorityElectionPosition
from .forms import AuthorityAreaForm, AuthorityElectionSkippedForm
class RandomAuthority(RedirectView):
permanent = False
def get_redirect_url(self, *args, **kwargs):
authority_elections = list(AuthorityElection.objects.annotate(
position_count=Count('authorityelectionposition')
).order_by('position_count').values_list('election_id', flat=True))
half = authority_elections[0:int(len(authority_elections)/2)]
authority_election = random.choice(half)
return reverse('everyelection:authority', kwargs={
'pk': authority_election})
class AuthorityEdit(LoginRequiredMixin, UpdateView):
template_name = "everyelection/authority.html"
form_class = AuthorityAreaForm
model = AuthorityElection
def get_success_url(self):
return reverse('everyelection:random_election')
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def post(self, *args, **kwargs):
if 'skipped_form' in self.request.POST:
form = AuthorityElectionSkippedForm({
'user': self.request.user.pk,
'authority_election': self.get_object().pk,
'notes': self.request.POST['notes'],
})
if form.is_valid():
form.save()
url = reverse('everyelection:random_election')
return redirect(url)
return super().post(*args, **kwargs)
def get_context_data(self, **kwargs):
kwargs['elections_researched'] = \
AuthorityElectionPosition.objects.filter(
user=self.request.user)\
.values('authority_election')\
.distinct().count()
kwargs['areas_researched'] = AuthorityElectionPosition.objects.filter(
user=self.request.user
).count()
kwargs['skip_form'] = AuthorityElectionSkippedForm()
return super().get_context_data(**kwargs)
|
from django.db.models import Count
from django.shortcuts import get_object_or_404
from django.views.generic import RedirectView, UpdateView
from django.core.urlresolvers import reverse
from braces.views import LoginRequiredMixin
from .models import AuthorityElection, AuthorityElectionPosition
from .forms import AuthorityAreaForm
class RandomAuthority(RedirectView):
permanent = False
def get_redirect_url(self, *args, **kwargs):
authority_election = AuthorityElection.objects.annotate(
position_count=Count('authorityelectionposition')
).order_by('position_count').first()
return reverse('everyelection:authority', kwargs={
'pk': authority_election.pk})
class AuthorityEdit(LoginRequiredMixin, UpdateView):
template_name = "everyelection/authority.html"
form_class = AuthorityAreaForm
model = AuthorityElection
def get_success_url(self):
return reverse('everyelection:random_election')
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def post(self, *args, **kwargs):
if 'skipped_form' in self.request.POST:
form = AuthorityElectionSkippedForm({
'user': self.request.user.pk,
'authority_election': self.get_object().pk,
'notes': self.request.POST['notes'],
})
if form.is_valid():
form.save()
url = reverse('everyelection:random_election')
return redirect(url)
return super().post(*args, **kwargs)
def get_context_data(self, **kwargs):
kwargs['elections_researched'] = \
AuthorityElectionPosition.objects.filter(
user=self.request.user)\
.values('authority_election')\
.distinct().count()
kwargs['areas_researched'] = AuthorityElectionPosition.objects.filter(
user=self.request.user
).count()
kwargs['skip_form'] = AuthorityElectionSkippedForm()
return super().get_context_data(**kwargs)
|
bsd-3-clause
|
Python
|
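A plain-Python sketch of the "a bit more random" strategy above: order the elections by how much research each already has, keep the least-covered half, then choose one at random. The dict-based input is a stand-in for the annotated queryset:

import random

def pick_under_researched(counts_by_id):
    ordered = sorted(counts_by_id, key=counts_by_id.get)
    half = ordered[:max(1, len(ordered) // 2)]  # least-covered half, never empty
    return random.choice(half)

picked = pick_under_researched({'a': 5, 'b': 0, 'c': 2, 'd': 9})
assert picked in {'b', 'c'}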
2549a66b6785d5a0ed0658a4f375a21c486792df
|
Raise explicit exception on no type match
|
alisaifee/sifr,alisaifee/sifr
|
sifr/util.py
|
sifr/util.py
|
import datetime
from dateutil import parser
import six
def normalize_time(t):
try:
if isinstance(t, datetime.datetime):
return t
elif isinstance(t, datetime.date):
return datetime.datetime(t.year, t.month, t.day)
elif isinstance(t, (int, float)):
return datetime.datetime.fromtimestamp(t)
elif isinstance(t, six.string_types):
return parser.parse(t)
else:
raise TypeError
except: # noqa
raise TypeError(
"time must be represented as either a timestamp (int,float), "
"a datetime.datetime or datetime.date object, "
"or an iso-8601 formatted string"
)
|
import datetime
from dateutil import parser
import six
def normalize_time(t):
try:
if isinstance(t, datetime.datetime):
return t
elif isinstance(t, datetime.date):
return datetime.datetime(t.year, t.month, t.day)
elif isinstance(t, (int, float)):
return datetime.datetime.fromtimestamp(t)
elif isinstance(t, six.string_types):
return parser.parse(t)
else:
raise
except: # noqa
raise TypeError(
"time must be represented as either a timestamp (int,float), "
"a datetime.datetime or datetime.date object, "
"or an iso-8601 formatted string"
)
|
mit
|
Python
|
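A usage sketch for normalize_time above (python-dateutil and six assumed installed, and normalize_time importable from the module); every accepted input type normalizes to datetime.datetime:

import datetime

assert normalize_time(datetime.date(2020, 1, 2)) == datetime.datetime(2020, 1, 2)
assert normalize_time("2020-01-02T03:04:05") == datetime.datetime(2020, 1, 2, 3, 4, 5)
assert isinstance(normalize_time(0.0), datetime.datetime)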
62ede23b0e13ab907b3eab620193921de29e162b
|
Bump version to 4.3.2b1
|
platformio/platformio,platformio/platformio-core,platformio/platformio-core
|
platformio/__init__.py
|
platformio/__init__.py
|
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 3, "2b1")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A new generation ecosystem for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"RISC-V, FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "[email protected]"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
__pioaccount_api__ = "https://api.accounts.platformio.org"
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"
|
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 3, "2a4")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A new generation ecosystem for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"RISC-V, FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "[email protected]"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
__pioaccount_api__ = "https://api.accounts.platformio.org"
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"
|
apache-2.0
|
Python
|
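For reference, the version string above is built by joining the mixed int/str VERSION tuple; a minimal standalone check:

VERSION = (4, 3, "2b1")
__version__ = ".".join([str(s) for s in VERSION])
assert __version__ == "4.3.2b1"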
d4af985eb8786f6531e319e837fcc8d7b3e33ece
|
Bump version to 4.0.0a5
|
platformio/platformio-core,platformio/platformio,platformio/platformio-core
|
platformio/__init__.py
|
platformio/__init__.py
|
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 0, "0a5")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"An open source ecosystem for IoT development. "
"Cross-platform IDE and unified debugger. "
"Remote unit testing and firmware updates. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "[email protected]"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
|
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 0, "0a4")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"An open source ecosystem for IoT development. "
"Cross-platform IDE and unified debugger. "
"Remote unit testing and firmware updates. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "[email protected]"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
|
apache-2.0
|
Python
|
454cd8d7847074988bf967f6240d59f25bdf310e
|
Bump version to 5.0.2rc1
|
platformio/platformio-core,platformio/platformio,platformio/platformio-core
|
platformio/__init__.py
|
platformio/__init__.py
|
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
VERSION = (5, 0, "2rc1")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A professional collaborative platform for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"IoT, Arduino, CMSIS, ESP-IDF, FreeRTOS, libOpenCM3, mbedOS, Pulp OS, SPL, "
"STM32Cube, Zephyr RTOS, ARM, AVR, Espressif (ESP8266/ESP32), FPGA, "
"MCS-51 (8051), MSP430, Nordic (nRF51/nRF52), NXP i.MX RT, PIC32, RISC-V, "
"STMicroelectronics (STM8/STM32), Teensy"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "[email protected]"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__accounts_api__ = "https://api.accounts.platformio.org"
__registry_api__ = [
"https://api.registry.platformio.org",
"https://api.registry.ns1.platformio.org",
]
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"
__default_requests_timeout__ = (10, None) # (connect, read)
__core_packages__ = {
"contrib-piohome": "~3.3.1",
"contrib-pysite": "~2.%d%d.0" % (sys.version_info.major, sys.version_info.minor),
"tool-unity": "~1.20500.0",
"tool-scons": "~2.20501.7" if sys.version_info.major == 2 else "~4.40001.0",
"tool-cppcheck": "~1.210.0",
"tool-clangtidy": "~1.100000.0",
"tool-pvs-studio": "~7.9.0",
}
__check_internet_hosts__ = [
"185.199.110.153", # Github.com
"88.198.170.159", # platformio.org
"github.com",
"platformio.org",
]
|
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
VERSION = (5, 0, "2b5")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A professional collaborative platform for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"IoT, Arduino, CMSIS, ESP-IDF, FreeRTOS, libOpenCM3, mbedOS, Pulp OS, SPL, "
"STM32Cube, Zephyr RTOS, ARM, AVR, Espressif (ESP8266/ESP32), FPGA, "
"MCS-51 (8051), MSP430, Nordic (nRF51/nRF52), NXP i.MX RT, PIC32, RISC-V, "
"STMicroelectronics (STM8/STM32), Teensy"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "[email protected]"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__accounts_api__ = "https://api.accounts.platformio.org"
__registry_api__ = [
"https://api.registry.platformio.org",
"https://api.registry.ns1.platformio.org",
]
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"
__default_requests_timeout__ = (10, None) # (connect, read)
__core_packages__ = {
"contrib-piohome": "~3.3.1",
"contrib-pysite": "~2.%d%d.0" % (sys.version_info.major, sys.version_info.minor),
"tool-unity": "~1.20500.0",
"tool-scons": "~2.20501.7" if sys.version_info.major == 2 else "~4.40001.0",
"tool-cppcheck": "~1.210.0",
"tool-clangtidy": "~1.100000.0",
"tool-pvs-studio": "~7.9.0",
}
__check_internet_hosts__ = [
"185.199.110.153", # Github.com
"88.198.170.159", # platformio.org
"github.com",
"platformio.org",
]
|
apache-2.0
|
Python
|
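The contrib-pysite pin above is derived from the running interpreter, so its value varies per install. A small sketch of what the interpolation produces (example output assumes Python 3.8):

import sys

spec = "~2.%d%d.0" % (sys.version_info.major, sys.version_info.minor)
print(spec)  # "~2.38.0" on Python 3.8, "~2.39.0" on 3.9, and so on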
dc14bd73623f453d8ef272a9cd46df3733fcfad9
|
Bump version to 6.1.5rc1
|
platformio/platformio-core,platformio/platformio-core
|
platformio/__init__.py
|
platformio/__init__.py
|
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
VERSION = (6, 1, "5rc1")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A professional collaborative platform for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"IoT, Arduino, CMSIS, ESP-IDF, FreeRTOS, libOpenCM3, mbedOS, Pulp OS, SPL, "
"STM32Cube, Zephyr RTOS, ARM, AVR, Espressif (ESP8266/ESP32), FPGA, "
"MCS-51 (8051), MSP430, Nordic (nRF51/nRF52), NXP i.MX RT, PIC32, RISC-V, "
"STMicroelectronics (STM8/STM32), Teensy"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO Labs"
__email__ = "[email protected]"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO Labs"
__accounts_api__ = "https://api.accounts.platformio.org"
__registry_mirror_hosts__ = [
"registry.platformio.org",
"registry.nm1.platformio.org",
]
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"
__core_packages__ = {
"contrib-piohome": "~3.4.2",
"contrib-pysite": "~2.%d%d.0" % (sys.version_info.major, sys.version_info.minor),
"tool-scons": "~4.40400.0",
"tool-cppcheck": "~1.270.0",
"tool-clangtidy": "~1.120001.0",
"tool-pvs-studio": "~7.18.0",
}
__check_internet_hosts__ = [
"185.199.110.153", # Github.com
"88.198.170.159", # platformio.org
"github.com",
] + __registry_mirror_hosts__
|
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
VERSION = (6, 1, "5a4")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A professional collaborative platform for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"IoT, Arduino, CMSIS, ESP-IDF, FreeRTOS, libOpenCM3, mbedOS, Pulp OS, SPL, "
"STM32Cube, Zephyr RTOS, ARM, AVR, Espressif (ESP8266/ESP32), FPGA, "
"MCS-51 (8051), MSP430, Nordic (nRF51/nRF52), NXP i.MX RT, PIC32, RISC-V, "
"STMicroelectronics (STM8/STM32), Teensy"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO Labs"
__email__ = "[email protected]"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO Labs"
__accounts_api__ = "https://api.accounts.platformio.org"
__registry_mirror_hosts__ = [
"registry.platformio.org",
"registry.nm1.platformio.org",
]
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"
__core_packages__ = {
"contrib-piohome": "~3.4.2",
"contrib-pysite": "~2.%d%d.0" % (sys.version_info.major, sys.version_info.minor),
"tool-scons": "~4.40400.0",
"tool-cppcheck": "~1.270.0",
"tool-clangtidy": "~1.120001.0",
"tool-pvs-studio": "~7.18.0",
}
__check_internet_hosts__ = [
"185.199.110.153", # Github.com
"88.198.170.159", # platformio.org
"github.com",
] + __registry_mirror_hosts__
|
apache-2.0
|
Python
|
1800e98f0570bfd1029d9df881fb144fdd943e72
|
Remove leftover debug print from Melnor (#78870)
|
mezz64/home-assistant,w1ll1am23/home-assistant,nkgilley/home-assistant,w1ll1am23/home-assistant,mezz64/home-assistant,nkgilley/home-assistant
|
tests/components/melnor/test_number.py
|
tests/components/melnor/test_number.py
|
"""Test the Melnor sensors."""
from __future__ import annotations
from .conftest import (
mock_config_entry,
patch_async_ble_device_from_address,
patch_async_register_callback,
patch_melnor_device,
)
async def test_manual_watering_minutes(hass):
"""Test the manual watering switch."""
entry = mock_config_entry(hass)
with patch_async_ble_device_from_address(), patch_melnor_device() as device_patch, patch_async_register_callback():
device = device_patch.return_value
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
number = hass.states.get("number.zone_1_manual_minutes")
assert number.state == "0"
assert number.attributes["max"] == 360
assert number.attributes["min"] == 1
assert number.attributes["step"] == 1.0
assert number.attributes["icon"] == "mdi:timer-cog-outline"
assert device.zone1.manual_watering_minutes == 0
await hass.services.async_call(
"number",
"set_value",
{"entity_id": "number.zone_1_manual_minutes", "value": 10},
blocking=True,
)
number = hass.states.get("number.zone_1_manual_minutes")
assert number.state == "10"
assert device.zone1.manual_watering_minutes == 10
|
"""Test the Melnor sensors."""
from __future__ import annotations
from .conftest import (
mock_config_entry,
patch_async_ble_device_from_address,
patch_async_register_callback,
patch_melnor_device,
)
async def test_manual_watering_minutes(hass):
"""Test the manual watering switch."""
entry = mock_config_entry(hass)
with patch_async_ble_device_from_address(), patch_melnor_device() as device_patch, patch_async_register_callback():
device = device_patch.return_value
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
number = hass.states.get("number.zone_1_manual_minutes")
print(number)
assert number.state == "0"
assert number.attributes["max"] == 360
assert number.attributes["min"] == 1
assert number.attributes["step"] == 1.0
assert number.attributes["icon"] == "mdi:timer-cog-outline"
assert device.zone1.manual_watering_minutes == 0
await hass.services.async_call(
"number",
"set_value",
{"entity_id": "number.zone_1_manual_minutes", "value": 10},
blocking=True,
)
number = hass.states.get("number.zone_1_manual_minutes")
assert number.state == "10"
assert device.zone1.manual_watering_minutes == 10
|
apache-2.0
|
Python
|
ae6025a4be24637e57c0545aa860d96a0447aa89
|
Raise any exceptions from ended listeners in workers
|
andrewgodwin/django-channels,django/channels,andrewgodwin/channels
|
channels/worker.py
|
channels/worker.py
|
import asyncio
from asgiref.server import StatelessServer
class Worker(StatelessServer):
"""
ASGI protocol server that surfaces events sent to specific channels
on the channel layer into a single application instance.
"""
def __init__(self, application, channels, channel_layer, max_applications=1000):
super().__init__(application, max_applications)
self.channels = channels
self.channel_layer = channel_layer
if self.channel_layer is None:
raise ValueError("Channel layer is not valid")
async def handle(self):
"""
Listens on all the provided channels and handles the messages.
"""
# For each channel, launch its own listening coroutine
listeners = []
for channel in self.channels:
listeners.append(asyncio.ensure_future(
self.listener(channel)
))
# Wait for them all to exit
await asyncio.wait(listeners)
# See if any of the listeners had an error (e.g. channel layer error)
[listener.result() for listener in listeners]
async def listener(self, channel):
"""
Single-channel listener
"""
while True:
message = await self.channel_layer.receive(channel)
if not message.get("type", None):
raise ValueError("Worker received message with no type.")
# Make a scope and get an application instance for it
scope = {"type": "channel", "channel": channel}
instance_queue = self.get_or_create_application_instance(channel, scope)
# Run the message into the app
await instance_queue.put(message)
|
import asyncio
from asgiref.server import StatelessServer
class Worker(StatelessServer):
"""
ASGI protocol server that surfaces events sent to specific channels
on the channel layer into a single application instance.
"""
def __init__(self, application, channels, channel_layer, max_applications=1000):
super().__init__(application, max_applications)
self.channels = channels
self.channel_layer = channel_layer
if self.channel_layer is None:
raise ValueError("Channel layer is not valid")
async def handle(self):
"""
Listens on all the provided channels and handles the messages.
"""
# For each channel, launch its own listening coroutine
listeners = []
for channel in self.channels:
listeners.append(asyncio.ensure_future(
self.listener(channel)
))
# Wait for them all to exit
await asyncio.wait(listeners)
async def listener(self, channel):
"""
Single-channel listener
"""
while True:
message = await self.channel_layer.receive(channel)
if not message.get("type", None):
raise ValueError("Worker received message with no type.")
# Make a scope and get an application instance for it
scope = {"type": "channel", "channel": channel}
instance_queue = self.get_or_create_application_instance(channel, scope)
# Run the message into the app
await instance_queue.put(message)
|
bsd-3-clause
|
Python
|
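The one-line addition above works because a finished asyncio task stores any exception its coroutine raised, and calling .result() re-raises it; asyncio.wait by itself only gathers the tasks and swallows their errors. A minimal self-contained sketch of the same pattern (not Channels code; names are illustrative):

import asyncio

async def listener(name):
    if name == "bad":
        raise RuntimeError("channel layer error in %s" % name)

async def handle():
    listeners = [asyncio.ensure_future(listener(n)) for n in ("ok", "bad")]
    await asyncio.wait(listeners)       # returns normally even though "bad" failed
    [l.result() for l in listeners]     # re-raises the RuntimeError here

# asyncio.get_event_loop().run_until_complete(handle())  # raises RuntimeError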
6149cef01900d6710efa322ef934965c5f1379f8
|
Fix pylint
|
marcore/pok-eco,marcore/pok-eco
|
xapi/utils.py
|
xapi/utils.py
|
from opaque_keys.edx.keys import CourseKey
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.django import modulestore
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from courseware.courses import get_course_by_id
from courseware.module_render import get_module
def get_request_for_user(user):
"""Create a request object for user."""
request = RequestFactory()
request.user = user
request.COOKIES = {}
request.META = {}
request.is_secure = lambda: True
request.get_host = lambda: "edx.org"
request.method = 'GET'
return request
def get_course_key(course_id):
course_key = ""
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
return course_key
def get_course(course_id):
course_key = get_course_key(course_id)
course = get_course_by_id(course_key)
return course
def get_course_title(course_id):
course_key = get_course_key(course_id)
title = CourseOverview.get_from_id(course_key).display_name
return title
def get_course_description(course_id, user_id):
course = get_course(course_id)
    user = User.objects.get(pk=user_id)
request = get_request_for_user(user)
module = get_module(user, request, course.location.replace(category='about', name="short_description"), [])
return module.data
def get_usage_key(course_id, module_id):
"""
Get the usage key of sequential block
Can be :
i4x://test/TEST101/sequential/45b889d710424143aa7f13e7c4bc0446
or
block-v1:ORG+TEST101+RUNCODE+type@sequential+block@45b889d710424143aa7f13e7c4bc0446
depending on modulestore
"""
course_key = CourseKey.from_string(course_id)
items = modulestore().get_items(course_key, qualifiers={'name': module_id})
return items[0].location.to_deprecated_string()
|
from opaque_keys.edx.keys import CourseKey
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.django import modulestore
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from courseware.courses import get_course_by_id, get_course_about_section
from courseware.module_render import get_module
def get_request_for_user(user):
"""Create a request object for user."""
request = RequestFactory()
request.user = user
request.COOKIES = {}
request.META = {}
request.is_secure = lambda: True
request.get_host = lambda: "edx.org"
request.method = 'GET'
return request
def get_course_key(course_id):
course_key = ""
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
return course_key
def get_course(course_id):
course_key = get_course_key(course_id)
course = get_course_by_id(course_key)
return course
def get_course_title(course_id):
course_key = get_course_key(course_id)
title = CourseOverview.get_from_id(course_key).display_name
return title
def get_course_description(course_id, user_id):
course = get_course(course_id)
    user = User.objects.get(pk=user_id)
request = get_request_for_user(user)
module = get_module(user, request, course.location.replace(category='about', name="short_description"), [])
return module.data
def get_usage_key(course_id, module_id):
"""
Get the usage key of sequential block
Can be :
i4x://test/TEST101/sequential/45b889d710424143aa7f13e7c4bc0446
or
block-v1:ORG+TEST101+RUNCODE+type@sequential+block@45b889d710424143aa7f13e7c4bc0446
depending on modulestore
"""
course_key = CourseKey.from_string(course_id)
items = modulestore().get_items(course_key, qualifiers={'name': module_id})
return items[0].location.to_deprecated_string()
|
agpl-3.0
|
Python
|
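get_course_key above is a parse-then-fallback: try the modern opaque key format first, and fall back to the legacy slash-separated one. Since the edX key classes are only importable inside an Open edX install, here is a dependency-free sketch of the same shape with a hypothetical stand-in parser:

def parse_course_id(course_id):
    """Hypothetical stand-in for CourseKey / SlashSeparatedCourseKey."""
    if course_id.startswith("course-v1:"):      # modern opaque format
        return tuple(course_id[len("course-v1:"):].split("+"))
    if course_id.count("/") == 2:               # legacy org/course/run format
        return tuple(course_id.split("/"))
    raise ValueError("unrecognised course id: %r" % course_id)

assert parse_course_id("course-v1:ORG+TEST101+RUN") == ("ORG", "TEST101", "RUN")
assert parse_course_id("test/TEST101/run") == ("test", "TEST101", "run")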
0b707c2e3a4d00aef5dad683c535498cb8bc1e21
|
use the default date formatter for logging
|
xgfone/xutils,xgfone/pycom
|
xutils/log.py
|
xutils/log.py
|
# -*- coding: utf-8 -*-
import os
import os.path
import logging
import logging.config
from logging.handlers import RotatingFileHandler
def init(logger=None, level="INFO", file=None, handler_cls=None, process=False,
max_count=30, propagate=True, file_config=None, dict_config=None):
root = logging.getLogger()
if not logger:
logger = root
# Initialize the argument logger with the arguments, level and log_file.
if logger:
fmt = ("%(asctime)s - %(process)d - %(pathname)s - %(funcName)s - "
"%(lineno)d - %(levelname)s - %(message)s")
formatter = logging.Formatter(fmt=fmt)
level = getattr(logging, level.upper())
if file:
if process:
filename, ext = os.path.splitext(file)
file = "{0}.{1}{2}".format(filename, os.getpid(), ext)
if handler_cls:
handler = handler_cls(file, max_count)
else:
handler = RotatingFileHandler(file, maxBytes=1024**3, backupCount=max_count)
else:
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(formatter)
root.setLevel(level)
root.addHandler(handler)
loggers = logger if isinstance(logger, (list, tuple)) else [logger]
for logger in loggers:
if logger is root:
continue
logger.propagate = propagate
logger.setLevel(level)
logger.addHandler(handler)
# Initialize logging by the configuration file, file_config.
if file_config:
logging.config.fileConfig(file_config, disable_existing_loggers=False)
# Initialize logging by the dict configuration, dict_config.
if dict_config and hasattr(logging.config, "dictConfig"):
logging.config.dictConfig(dict_config)
|
# -*- coding: utf-8 -*-
import os
import os.path
import logging
import logging.config
from logging.handlers import RotatingFileHandler
def init(logger=None, level="INFO", file=None, handler_cls=None, process=False,
max_count=30, propagate=True, file_config=None, dict_config=None):
root = logging.getLogger()
if not logger:
logger = root
# Initialize the argument logger with the arguments, level and log_file.
if logger:
fmt = ("%(asctime)s - %(process)d - %(pathname)s - %(funcName)s - "
"%(lineno)d - %(levelname)s - %(message)s")
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
level = getattr(logging, level.upper())
if file:
if process:
filename, ext = os.path.splitext(file)
file = "{0}.{1}{2}".format(filename, os.getpid(), ext)
if handler_cls:
handler = handler_cls(file, max_count)
else:
handler = RotatingFileHandler(file, maxBytes=1024**3, backupCount=max_count)
else:
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(formatter)
root.setLevel(level)
root.addHandler(handler)
loggers = logger if isinstance(logger, (list, tuple)) else [logger]
for logger in loggers:
if logger is root:
continue
logger.propagate = propagate
logger.setLevel(level)
logger.addHandler(handler)
# Initialize logging by the configuration file, file_config.
if file_config:
logging.config.fileConfig(file_config, disable_existing_loggers=False)
# Initialize logging by the dict configuration, dict_config.
if dict_config and hasattr(logging.config, "dictConfig"):
logging.config.dictConfig(dict_config)
|
mit
|
Python
|
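The commit above drops the explicit datefmt so logging falls back to its default asctime rendering, which is the same layout plus a millisecond suffix. A standalone comparison (timestamps are whatever "now" is when you run it):

import logging

record = logging.LogRecord("demo", logging.INFO, __file__, 1, "msg", None, None)
default_fmt = logging.Formatter("%(asctime)s %(message)s")
custom_fmt = logging.Formatter("%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
print(default_fmt.format(record))   # e.g. 2018-01-01 12:00:00,123 msg
print(custom_fmt.format(record))    # e.g. 2018-01-01 12:00:00 msg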
7d8380a65523e7d6ef77d39eaf125823e3e10812
|
Bump version to 5.0.0dev0
|
chardet/chardet,chardet/chardet
|
chardet/version.py
|
chardet/version.py
|
"""
This module exists only to simplify retrieving the version number of chardet
from within setup.py and from chardet subpackages.
:author: Dan Blanchard ([email protected])
"""
__version__ = "5.0.0dev0"
VERSION = __version__.split('.')
|
"""
This module exists only to simplify retrieving the version number of chardet
from within setup.py and from chardet subpackages.
:author: Dan Blanchard ([email protected])
"""
__version__ = "4.0.0"
VERSION = __version__.split('.')
|
lgpl-2.1
|
Python
|
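Note that splitting a pre-release version on '.' leaves the dev suffix attached to the last component, so VERSION is not purely numeric here:

__version__ = "5.0.0dev0"
VERSION = __version__.split('.')
assert VERSION == ['5', '0', '0dev0']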
4cb36450aa4ddabe0a6fa48300dc37edc053dd13
|
fix bug in isOpen for reports
|
clarkerubber/irwin,clarkerubber/irwin
|
modules/queue/ModReport.py
|
modules/queue/ModReport.py
|
"""Queue item for basic analysis by irwin"""
from collections import namedtuple
from datetime import datetime, timedelta
import pymongo
class ModReport(namedtuple('ModReport', ['id', 'processed', 'created'])):
@staticmethod
def new(userId):
return ModReport(
id=userId,
processed=False,
created=datetime.now())
class ModReportBSONHandler:
@staticmethod
def reads(bson):
return ModReport(
id=bson['_id'],
processed=bson['processed'],
created=bson['created'])
@staticmethod
def writes(modReport):
return {
'_id': modReport.id,
'processed': modReport.processed,
'created': modReport.created,
'updated': datetime.now()
}
class ModReportDB(namedtuple('ModReportDB', ['modReportColl'])):
def write(self, modReport):
self.modReportColl.update_one(
{'_id': modReport.id},
{'$set': ModReportBSONHandler.writes(modReport)},
upsert=True)
def close(self, userId):
self.modReportColl.update_one(
{'_id': userId},
{'$set': {'processed': True, 'updated': datetime.now()}},
upsert=False)
def isOpen(self, userId):
modReportBSON = self.modReportColl.find_one({'_id': userId})
processed = True
if modReportBSON is not None:
processed = modReportBSON['processed']
return not processed
def allOpen(self, limit=None):
return [ModReportBSONHandler.reads(bson) for bson in self.modReportColl.find({'processed': False}, batch_size=limit)]
def oldestUnprocessed(self):
modReportBSON = self.modReportColl.find_one_and_update(
filter={'processed': False, 'updated': {'$lt': datetime.now() - timedelta(days=2)}},
update={'$set': {'updated': datetime.now()}},
sort=[('updated', pymongo.ASCENDING)])
return None if modReportBSON is None else ModReportBSONHandler.reads(modReportBSON)
|
"""Queue item for basic analysis by irwin"""
from collections import namedtuple
from datetime import datetime, timedelta
import pymongo
class ModReport(namedtuple('ModReport', ['id', 'processed', 'created'])):
@staticmethod
def new(userId):
return ModReport(
id=userId,
processed=False,
created=datetime.now())
class ModReportBSONHandler:
@staticmethod
def reads(bson):
return ModReport(
id=bson['_id'],
processed=bson['processed'],
created=bson['created'])
@staticmethod
def writes(modReport):
return {
'_id': modReport.id,
'processed': modReport.processed,
'created': modReport.created,
'updated': datetime.now()
}
class ModReportDB(namedtuple('ModReportDB', ['modReportColl'])):
def write(self, modReport):
self.modReportColl.update_one(
{'_id': modReport.id},
{'$set': ModReportBSONHandler.writes(modReport)},
upsert=True)
def close(self, userId):
self.modReportColl.update_one(
{'_id': userId},
{'$set': {'processed': True, 'updated': datetime.now()}},
upsert=False)
def isOpen(self, userId):
modReportBSON = self.modReportColl.find_one({'_id': userId})
processed = False
if modReportBSON is not None:
processed = modReportBSON['processed']
return processed
def allOpen(self, limit=None):
return [ModReportBSONHandler.reads(bson) for bson in self.modReportColl.find({'processed': False}, batch_size=limit)]
def oldestUnprocessed(self):
modReportBSON = self.modReportColl.find_one_and_update(
filter={'processed': False, 'updated': {'$lt': datetime.now() - timedelta(days=2)}},
update={'$set': {'updated': datetime.now()}},
sort=[('updated', pymongo.ASCENDING)])
return None if modReportBSON is None else ModReportBSONHandler.reads(modReportBSON)
|
agpl-3.0
|
Python
|
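The isOpen fix above inverts both the default and the return value: a user with no report is now treated as closed, and an unprocessed report as open. A dict-backed sketch of the corrected behaviour (a stand-in for the Mongo collection, not the project's code):

def is_open(reports, user_id):
    bson = reports.get(user_id)          # None when no report exists
    processed = True                     # default: nothing left to process
    if bson is not None:
        processed = bson['processed']
    return not processed

reports = {'alice': {'processed': False}, 'bob': {'processed': True}}
assert is_open(reports, 'alice') is True     # unprocessed -> open
assert is_open(reports, 'bob') is False      # processed -> closed
assert is_open(reports, 'carol') is False    # no report at all -> closed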
6326cfa9ad5cb203eeade0c5875a005e06bbe932
|
fix isort test
|
aioworkers/aioworkers,aamalev/aioworkers
|
tests/test_core_base_multi_executor.py
|
tests/test_core_base_multi_executor.py
|
from datetime import timedelta
import pytest
@pytest.fixture
def config_yaml(unused_port):
return """
e:
cls: aioworkers.core.base.MultiExecutorEntity
executors:
get: 1
put: 1
none: none
x: null
"""
async def test_multiexecutor(context):
assert await context.e.run_in_executor('get', timedelta, days=1)
assert await context.e.run_in_executor('none', timedelta, hours=2)
assert await context.e.run_in_executor('put', timedelta, minutes=1)
assert await context.e.run_in_executor('x', timedelta, seconds=1)
|
import pytest
from datetime import timedelta
@pytest.fixture
def config_yaml(unused_port):
return """
e:
cls: aioworkers.core.base.MultiExecutorEntity
executors:
get: 1
put: 1
none: none
x: null
"""
async def test_multiexecutor(context):
assert await context.e.run_in_executor('get', timedelta, days=1)
assert await context.e.run_in_executor('none', timedelta, hours=2)
assert await context.e.run_in_executor('put', timedelta, minutes=1)
assert await context.e.run_in_executor('x', timedelta, seconds=1)
|
apache-2.0
|
Python
|
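The isort failure here is purely about section order: standard-library imports belong in a section before third-party ones, so datetime must precede pytest. The convention in miniature:

# standard library section first
from datetime import timedelta

# third-party section second
import pytest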
94260ec953de44ae9a4108b964a9a607b0457148
|
fix new search index
|
MuckRock/muckrock,MuckRock/muckrock,MuckRock/muckrock,MuckRock/muckrock
|
muckrock/news/search_indexes.py
|
muckrock/news/search_indexes.py
|
"""
Search Index for the news application
"""
from haystack.indexes import SearchIndex, CharField, DateTimeField
from haystack import site
from muckrock.news.models import Article
class ArticleIndex(SearchIndex):
"""Search index for news articles"""
text = CharField(document=True, use_template=True)
authors = CharField(model_attr='authors')
pub_date = DateTimeField(model_attr='pub_date')
def get_queryset(self):
"""Used when the entire index for model is updated."""
# pylint: disable=R0201
return Article.objects.get_published()
site.register(Article, ArticleIndex)
|
"""
Search Index for the news application
"""
from haystack.indexes import SearchIndex, CharField, DateTimeField
from haystack import site
from muckrock.news.models import Article
class ArticleIndex(SearchIndex):
"""Search index for news articles"""
text = CharField(document=True, use_template=True)
author = CharField(model_attr='author')
pub_date = DateTimeField(model_attr='pub_date')
def get_queryset(self):
"""Used when the entire index for model is updated."""
return Article.objects.get_published()
site.register(Article, ArticleIndex)
|
agpl-3.0
|
Python
|
2bafdb04dc4e04c5a2cf9136135dfa130ac6e78b
|
read full buffer, not byte by byte
|
stharding/serialSniffer
|
serialSniffer/sniffer.py
|
serialSniffer/sniffer.py
|
from serial import Serial
from concurrent.futures import ThreadPoolExecutor
class Sniffer(object):
"""
TODO: write docstring
"""
def __init__(self, virtual_comm='COM7', physical_comm='COM1'):
self.virtual_comm = Serial(virtual_comm)
self.physical_comm = Serial(physical_comm)
self.pool = ThreadPoolExecutor(4)
self.v_bytes = []
self.p_bytes = []
self.pool.submit(self.read_physical)
self.pool.submit(self.read_virtual)
def read_physical(self):
"""
TODO: write docstring
"""
while True:
n = self.physical_comm.inWaiting()
if n:
                msg = self.physical_comm.read(n)  # read the whole buffered chunk, not one byte
self.virtual_comm.write(msg)
self.p_bytes.append(msg)
# TODO: store the data somewhere
def read_virtual(self):
"""
TODO: write docstring
"""
while True:
n = self.virtual_comm.inWaiting()
if n:
                msg = self.virtual_comm.read(n)  # read the whole buffered chunk, not one byte
self.physical_comm.write(msg)
self.v_bytes.append(msg)
|
from serial import Serial
from concurrent.futures import ThreadPoolExecutor
class Sniffer(object):
"""
TODO: write docstring
"""
def __init__(self, virtual_comm='COM7', physical_comm='COM1'):
self.virtual_comm = Serial(virtual_comm)
self.physical_comm = Serial(physical_comm)
self.pool = ThreadPoolExecutor(4)
self.v_bytes = []
self.p_bytes = []
self.pool.submit(self.read_physical)
self.pool.submit(self.read_virtual)
def read_physical(self):
"""
TODO: write docstring
"""
while True:
p_byte = self.physical_comm.read()
self.virtual_comm.write(p_byte)
self.p_bytes.append(p_byte)
# TODO: store the data somewhere
def read_virtual(self):
"""
TODO: write docstring
"""
while True:
v_byte = self.virtual_comm.read()
self.physical_comm.write(v_byte)
self.v_bytes.append(v_byte)
|
mit
|
Python
|
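With the buffered read above, inWaiting() reports how many bytes are queued and read(n) drains them in one call; pyserial's read() defaults to size=1, which is what made the old version byte-by-byte. The core pattern in isolation (port name is hypothetical; on pyserial 3.x the in_waiting property replaces inWaiting()):

from serial import Serial

port = Serial('COM1')        # hypothetical port
n = port.inWaiting()         # number of bytes currently buffered
if n:
    chunk = port.read(n)     # one call returns the whole buffered chunk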
eac867faba8d4653fa580ee0c2bd708ff83b13ee
|
Remove test workaround
|
MACSIFS/IFS,MACSIFS/IFS,MACSIFS/IFS,MACSIFS/IFS
|
server/tests/test_api.py
|
server/tests/test_api.py
|
import json
from server.tests.base import BaseTestCase
from server.models import db, Lecturer, Course, Lecture, Comment
class GetCommentsApiTest(BaseTestCase):
def setUp(self):
super(GetCommentsApiTest, self).setUp()
simon = Lecturer('Simon', 'McCallum')
db.session.add(simon)
imt3601 = Course('IMT3601 - Game Programming', simon)
db.session.add(imt3601)
imt3601_l1 = Lecture('Lecture 1', imt3601)
db.session.add(imt3601_l1)
imt3601_l1_c1 = Comment('This is boring', imt3601_l1)
imt3601_l1_c2 = Comment('This is fun!', imt3601_l1)
db.session.add(imt3601_l1_c1)
db.session.add(imt3601_l1_c2)
db.session.commit()
def test_success(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.status_code == 200
def test_lecture_not_found(self):
rv = self.app.get('/api/0/lectures/2/comments')
assert rv.status_code == 404
def test_list(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.headers['Content-Type'] == 'application/json'
response = json.loads(rv.data.decode('utf-8'))
assert len(response['comments']) == 2
def test_content(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.headers['Content-Type'] == 'application/json'
response = json.loads(rv.data.decode('utf-8'))
assert response['comments'][0]['content'] == 'This is boring'
|
import json
from server.tests.base import BaseTestCase
from server.models import db, Lecturer, Course, Lecture, Comment
class GetCommentsApiTest(BaseTestCase):
def setUp(self):
super(GetCommentsApiTest, self).setUp()
simon = Lecturer('Simon', 'McCallum')
db.session.add(simon)
imt3601 = Course('IMT3601 - Game Programming', simon)
db.session.add(imt3601)
imt3601_l1 = Lecture('Lecture 1', imt3601)
db.session.add(imt3601_l1)
imt3601_l1_c1 = Comment('This is boring', imt3601_l1)
imt3601_l1_c2 = Comment('This is fun!', imt3601_l1)
db.session.add(imt3601_l1_c1)
db.session.add(imt3601_l1_c2)
db.session.commit()
def test_all(self):
self._test_success()
self._test_lecture_not_found()
self._test_list()
self._test_content()
def _test_success(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.status_code == 200
def _test_lecture_not_found(self):
rv = self.app.get('/api/0/lectures/2/comments')
assert rv.status_code == 404
def _test_list(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.headers['Content-Type'] == 'application/json'
response = json.loads(rv.data.decode('utf-8'))
assert len(response['comments']) == 2
def _test_content(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.headers['Content-Type'] == 'application/json'
response = json.loads(rv.data.decode('utf-8'))
assert response['comments'][0]['content'] == 'This is boring'
|
mit
|
Python
|
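The workaround existed because unittest only collects methods named test_*, and each collected test gets a fresh setUp; once every method carries the test_ prefix, the _test_* helpers and their aggregator are redundant. In miniature:

import unittest

class Demo(unittest.TestCase):
    def setUp(self):
        self.state = []              # rebuilt before every test_ method

    def test_one(self):              # discovered and run automatically
        self.state.append(1)
        self.assertEqual(self.state, [1])

    def test_two(self):              # gets its own setUp, independent of test_one
        self.assertEqual(self.state, [])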
cf8cba155edb7f1d27fce7b20aaafce044415a22
|
Update 35b556aef8ef_add_news_flash_table.py
|
hasadna/anyway,hasadna/anyway,hasadna/anyway,hasadna/anyway
|
alembic/versions/35b556aef8ef_add_news_flash_table.py
|
alembic/versions/35b556aef8ef_add_news_flash_table.py
|
"""add new table
Revision ID: 35b556aef8ef
Revises: 423a7ea74c0a
Create Date: 2018-12-10 11:31:29.518909
"""
# revision identifiers, used by Alembic.
revision = '35b556aef8ef'
down_revision = '423a7ea74c0a'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('news_flash',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('accident', sa.Boolean(), nullable=True),
sa.Column('author', sa.Text(), nullable=True),
sa.Column('date', sa.TIMESTAMP(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('lat', sa.Float(), nullable=True),
sa.Column('link', sa.Text(), nullable=True),
sa.Column('lon', sa.Float(), nullable=True),
sa.Column('title', sa.Text(), nullable=True),
sa.Column('source', sa.Text(), nullable=True),
sa.Column('location', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('news_flash')
### end Alembic commands ###
|
"""add new table
Revision ID: 35b556aef8ef
Revises: 3c8ad66233c0
Create Date: 2018-12-10 11:31:29.518909
"""
# revision identifiers, used by Alembic.
revision = '35b556aef8ef'
down_revision = '423a7ea74c0a'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('news_flash',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('accident', sa.Boolean(), nullable=True),
sa.Column('author', sa.Text(), nullable=True),
sa.Column('date', sa.TIMESTAMP(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('lat', sa.Float(), nullable=True),
sa.Column('link', sa.Text(), nullable=True),
sa.Column('lon', sa.Float(), nullable=True),
sa.Column('title', sa.Text(), nullable=True),
sa.Column('source', sa.Text(), nullable=True),
sa.Column('location', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('news_flash')
### end Alembic commands ###
|
mit
|
Python
|
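The edit above re-syncs the docstring's "Revises:" line with down_revision; only the latter is read by Alembic when building the migration graph, while the docstring is documentation for humans. The pair that must agree:

# Docstring (informational):  Revises: 423a7ea74c0a
revision = '35b556aef8ef'        # this migration's identifier
down_revision = '423a7ea74c0a'   # its parent, which defines ordering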
5f12bac216f5380e55dc65ccfcd16e369c575dac
|
Use redis on localhost
|
emmetog/page-hit-counter
|
app.py
|
app.py
|
from flask import Flask
from redis import Redis
import os
app = Flask(__name__)
redis = Redis(host="localhost", port=6379)
@app.route('/')
def hello():
    redis.incr('hits')
    return 'This page has been seen {0} times'.format(redis.get('hits'))
if __name__ == "__main__":
    app.run(host="0.0.0.0", debug=True)
|
from flask import Flask
from redis import Redis
import os
app = Flask(__name__)
redis = Redis(host="redis", port=6379)
@app.route('/')
def hello():
    redis.incr('hits')
    return 'This page has been seen {0} times'.format(redis.get('hits'))
if __name__ == "__main__":
    app.run(host="0.0.0.0", debug=True)
|
mit
|
Python
|
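One caveat with the handler above: on Python 3, redis-py returns bytes, so the page renders as "seen b'1' times" unless the value is decoded or the client is constructed with decode_responses=True. A minimal sketch of the latter:

from redis import Redis

redis = Redis(host="localhost", port=6379, decode_responses=True)
redis.incr('hits')
print('This page has been seen {0} times'.format(redis.get('hits')))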
a1c558027cd17eec69a2babb786e35b147ffae6b
|
add start.html
|
abhiram304/APIWorldHackathon-SmartAssistant,abhiram304/APIWorldHackathon-SmartAssistant
|
app.py
|
app.py
|
#!flask/bin/python
from flask import request, render_template, Flask
import os, sys, json
import requests
app = Flask(__name__)
from random import randint
from telesign.messaging import MessagingClient
from telesign.voice import VoiceClient
from flask import request
@app.route('/')
def index():
return render_template('start.html')
@app.route('/send')
def send():
args = request.args
customer_id = "2C1097F6-3917-4A53-9D38-C45A3C8ADD2B"
api_key = "FTgHUVjcPWvgzCvtksi2v+tMLTAXbh5LLVEl1Wcl4NAtszxElZL4HS/ZwJqJufRkEmRpwUTwULxsZgL2c649vQ=="
phone_number = "14084299128"
message = args['msg']
message_type = "ARN"
messaging = MessagingClient(customer_id, api_key)
response2 = messaging.message(phone_number, message, message_type)
voice = VoiceClient(customer_id, api_key)
response1 = voice.call(phone_number, message, message_type)
return "success"
if __name__ == '__main__':
app.secret_key = os.urandom(12)
#app.run(debug=True)
|
#!flask/bin/python
from flask import request, render_template, Flask
import os, sys, json
import requests
app = Flask(__name__)
from random import randint
from telesign.messaging import MessagingClient
from telesign.voice import VoiceClient
from flask import request
@app.route('/')
def index():
return render_template('start.html')
@app.route('/profile')
def index():
@app.route('/send')
def send():
args = request.args
customer_id = "2C1097F6-3917-4A53-9D38-C45A3C8ADD2B"
api_key = "FTgHUVjcPWvgzCvtksi2v+tMLTAXbh5LLVEl1Wcl4NAtszxElZL4HS/ZwJqJufRkEmRpwUTwULxsZgL2c649vQ=="
phone_number = "14084299128"
message = args['msg']
message_type = "ARN"
messaging = MessagingClient(customer_id, api_key)
response2 = messaging.message(phone_number, message, message_type)
voice = VoiceClient(customer_id, api_key)
response1 = voice.call(phone_number, message, message_type)
return "success"
if __name__ == '__main__':
app.secret_key = os.urandom(12)
#app.run(debug=True)
|
apache-2.0
|
Python
|