commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---|
139cfb7756aa6c01d547c0a88cac939c6e88e926
|
Print where file is saved to.
|
nmabhi/Webface,Alexx-G/openface,xinfang/face-recognize,nmabhi/Webface,francisleunggie/openface,xinfang/face-recognize,cmusatyalab/openface,nhzandi/openface,francisleunggie/openface,Alexx-G/openface,francisleunggie/openface,Alexx-G/openface,nhzandi/openface,nmabhi/Webface,cmusatyalab/openface,Alexx-G/openface,cmusatyalab/openface,nmabhi/Webface,xinfang/face-recognize,nhzandi/openface
|
util/tsne.py
|
util/tsne.py
|
#!/usr/bin/env python2
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.style.use('bmh')
import os
import sys
import argparse
print("""
Note: This example assumes that `name i` corresponds to `label i`
in `labels.csv`.
""")
parser = argparse.ArgumentParser()
parser.add_argument('workDir', type=str)
parser.add_argument('--names', type=str, nargs='+', required=True)
args = parser.parse_args()
y = pd.read_csv("{}/labels.csv".format(args.workDir)).as_matrix()[:, 0]
X = pd.read_csv("{}/reps.csv".format(args.workDir)).as_matrix()
target_names = np.array(args.names)
colors = cm.gnuplot2(np.linspace(0, 0.7, len(target_names)))
X_pca = PCA(n_components=50).fit_transform(X, X)
tsne = TSNE(n_components=2, init='random', random_state=0)
X_r = tsne.fit_transform(X_pca)
for c, i, target_name in zip(colors,
                             list(range(1, len(target_names) + 1)),
                             target_names):
    plt.scatter(X_r[y == i, 0], X_r[y == i, 1],
                c=c, label=target_name)
plt.legend()
out = "{}/tsne.pdf".format(args.workDir)
plt.savefig(out)
print("Saved to: {}".format(out))
|
#!/usr/bin/env python2
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.style.use('bmh')
import os
import sys
import argparse
print("""
Note: This example assumes that `name i` corresponds to `label i`
in `labels.csv`.
""")
parser = argparse.ArgumentParser()
parser.add_argument('workDir', type=str)
parser.add_argument('--names', type=str, nargs='+', required=True)
args = parser.parse_args()
y = pd.read_csv("{}/labels.csv".format(args.workDir)).as_matrix()[:, 0]
X = pd.read_csv("{}/reps.csv".format(args.workDir)).as_matrix()
target_names = np.array(args.names)
colors = cm.gnuplot2(np.linspace(0, 0.7, len(target_names)))
X_pca = PCA(n_components=50).fit_transform(X, X)
tsne = TSNE(n_components=2, init='random', random_state=0)
X_r = tsne.fit_transform(X_pca)
for c, i, target_name in zip(colors,
                             list(range(1, len(target_names) + 1)),
                             target_names):
    plt.scatter(X_r[y == i, 0], X_r[y == i, 1],
                c=c, label=target_name)
plt.legend()
plt.savefig("{}/tsne.pdf".format(args.workDir))
|
apache-2.0
|
Python
|
e844847323a39f8bfd1870a21071f9f07f110274
|
manage password
|
eregnier/checkme,eregnier/checkme,eregnier/checkme
|
models/user.py
|
models/user.py
|
from peewee import CharField, DateTimeField
from flask_login import UserMixin
from hashlib import sha1
from time import mktime
import datetime
from models.base import BaseModel
class User(BaseModel, UserMixin):
    created = DateTimeField(default=datetime.datetime.now)
    email = CharField(max_length=50)
    password = CharField(max_length=50)

    @staticmethod
    def create(email, password, fullname=None):
        user = User(email=email)
        user.update_password(password)
        return user

    def update_password(self, password):
        self.password = sha1(password.encode('utf-8')).hexdigest()

    def check(self, password):
        return self.password == sha1(password.encode('utf-8')).hexdigest()

    def to_json(self):
        return {
            'id': self.id,
            'created': mktime(self.created.timetuple()) * 1000,
            'email': self.email
        }
|
from peewee import CharField, DateTimeField
from flask_login import UserMixin
from hashlib import sha1
from time import mktime
import datetime
from models.base import BaseModel
class User(BaseModel, UserMixin):
    created = DateTimeField(default=datetime.datetime.now)
    email = CharField(max_length=50)
    password = CharField(max_length=50)

    @staticmethod
    def create(email, password, fullname=None):
        user = User(email=email)
        user.password = sha1(password.encode('utf-8')).hexdigest()
        return user

    def check(self, password):
        return self.password == sha1(password.encode('utf-8')).hexdigest()

    def to_json(self):
        return {
            'id': self.id,
            'created': mktime(self.created.timetuple()) * 1000,
            'email': self.email
        }
|
mit
|
Python
|
31381728cb8d76314c82833d4400b4140fcc573f
|
Change parameter name so it does not conflict with an url parameter called "name".
|
akx/django-jinja,glogiotatidis/django-jinja,glogiotatidis/django-jinja,akx/django-jinja,akx/django-jinja,glogiotatidis/django-jinja,niwinz/django-jinja,glogiotatidis/django-jinja,niwinz/django-jinja,akx/django-jinja,niwinz/django-jinja
|
django_jinja/builtins/global_context.py
|
django_jinja/builtins/global_context.py
|
# -*- coding: utf-8 -*-
import logging
from django.conf import settings
from django.core.urlresolvers import reverse as django_reverse, NoReverseMatch
from django.contrib.staticfiles.storage import staticfiles_storage
JINJA2_MUTE_URLRESOLVE_EXCEPTIONS = getattr(settings, "JINJA2_MUTE_URLRESOLVE_EXCEPTIONS", False)
logger = logging.getLogger(__name__)
def url(view_name, *args, **kwargs):
    """
    Shortcut filter for reverse url on templates. Is a alternative to
    django {% url %} tag, but more simple.

    Usage example:
        {{ url('web:timeline', userid=2) }}

    This is a equivalent to django:
        {% url 'web:timeline' userid=2 %}
    """
    try:
        return django_reverse(view_name, args=args, kwargs=kwargs)
    except NoReverseMatch as exc:
        logger.error('Error: %s', exc)
        if not JINJA2_MUTE_URLRESOLVE_EXCEPTIONS:
            raise
        return ''

def static(path):
    return staticfiles_storage.url(path)
|
# -*- coding: utf-8 -*-
import logging
from django.conf import settings
from django.core.urlresolvers import reverse as django_reverse, NoReverseMatch
from django.contrib.staticfiles.storage import staticfiles_storage
JINJA2_MUTE_URLRESOLVE_EXCEPTIONS = getattr(settings, "JINJA2_MUTE_URLRESOLVE_EXCEPTIONS", False)
logger = logging.getLogger(__name__)
def url(name, *args, **kwargs):
    """
    Shortcut filter for reverse url on templates. Is a alternative to
    django {% url %} tag, but more simple.

    Usage example:
        {{ url('web:timeline', userid=2) }}

    This is a equivalent to django:
        {% url 'web:timeline' userid=2 %}
    """
    try:
        return django_reverse(name, args=args, kwargs=kwargs)
    except NoReverseMatch as exc:
        logger.error('Error: %s', exc)
        if not JINJA2_MUTE_URLRESOLVE_EXCEPTIONS:
            raise
        return ''

def static(path):
    return staticfiles_storage.url(path)
|
bsd-3-clause
|
Python
|
6dffa2d22fa5da3b2d8fbcdff04477ff0116bfc1
|
Resolve a bug in the write function
|
razvanvasile/Work-Mini-Projects,razvanvasile/Work-Mini-Projects,razvanvasile/Work-Mini-Projects
|
utilities.py
|
utilities.py
|
# Function to return a list of pvs from a given file
import pkg_resources
pkg_resources.require('aphla')
import aphla as ap
def get_pv_names(mode):
    ''' Given a certain ring mode as a string, return all available pvs '''
    ap.machines.load(mode)
    result = set()
    elements = ap.getElements('*')
    for element in elements:
        pvs = element.pv()
        if(len(pvs) > 0):
            pv_name = pvs[0].split(':')[0]
            result.add(pv_name)
    return result

def get_pvs_from_file(filepath):
    ''' Return a list of pvs from a given file '''
    with open(filepath) as f:
        contents = f.read().splitlines()
    return contents

def write_pvs_to_file(filename, data):
    ''' Write given pvs to file '''
    f = open(filename, 'w')
    for element in data:
        f.write(element + '\n')
    f.close()
|
# Function to return a list of pvs from a given file
import pkg_resources
pkg_resources.require('aphla')
import aphla as ap
def get_pv_names(mode):
    ''' Given a certain ring mode as a string, return all available pvs '''
    ap.machines.load(mode)
    result = set()
    elements = ap.getElements('*')
    for element in elements:
        pvs = element.pv()
        if(len(pvs) > 0):
            pv_name = pvs[0].split(':')[0]
            result.add(pv_name)
    return result

def get_pvs_from_file(filepath):
    ''' Return a list of pvs from a given file '''
    with open(filepath) as f:
        contents = f.read().splitlines()
    return contents

def write_pvs_to_file(filename, data):
    ''' Write given pvs to file '''
    f = open(filename, 'w')
    for element in data:
        f.write(element, '\n')
    f.close()
|
apache-2.0
|
Python
|
55f4507c2285b5927e911a455065dd9c6d60112a
|
add a Node.__repr__ method
|
bootc/pypuppetdbquery
|
pypuppetdbquery/ast.py
|
pypuppetdbquery/ast.py
|
# -*- coding: utf-8 -*-
#
# This file is part of pypuppetdbquery.
# Copyright © 2016 Chris Boot <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
class Node(object):
    def __repr__(self):
        # Represent the variables defined in the constructor in the same order
        # that they are listed in the constructor.
        members = []
        for var in inspect.getargspec(self.__init__).args:
            if var == 'self':
                continue
            members.append(repr(getattr(self, var)))

        # Put it together with the class name
        return "{cls}({members})".format(
            cls=self.__class__.__name__, members=', '.join(members))

class Literal(Node):
    def __init__(self, value):
        self.value = value

class Date(Literal):
    pass

class Query(Node):
    def __init__(self, expression):
        self.expression = expression

class Expression(Node):
    pass

class UnaryExpression(Node):
    def __init__(self, expression):
        self.expression = expression

class BinaryExpression(Node):
    def __init__(self, left, right):
        self.left = left
        self.right = right

class AndExpression(BinaryExpression):
    pass

class OrExpression(BinaryExpression):
    pass

class NotExpression(UnaryExpression):
    pass

class ParenthesizedExpression(UnaryExpression):
    pass

class BlockExpression(UnaryExpression):
    pass

class Comparison(Expression):
    def __init__(self, operator, left, right):
        self.operator = operator
        self.left = left
        self.right = right

class Identifier(Node):
    def __init__(self, name):
        self.name = name

class RegexpIdentifier(Identifier):
    pass

class IdentifierPath(Node):
    def __init__(self, component):
        self.components = [component]

class Subquery(Node):
    def __init__(self, endpoint, expression):
        self.endpoint = endpoint
        self.expression = expression

class Resource(Expression):
    def __init__(self, res_type, title, exported, parameters=None):
        self.res_type = res_type
        self.title = title
        self.exported = exported
        self.parameters = parameters

class RegexpNodeMatch(Expression):
    def __init__(self, value):
        self.value = value
|
# -*- coding: utf-8 -*-
#
# This file is part of pypuppetdbquery.
# Copyright © 2016 Chris Boot <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Node(object):
    pass

class Literal(Node):
    def __init__(self, value):
        self.value = value

class Date(Literal):
    pass

class Query(Node):
    def __init__(self, expression):
        self.expression = expression

class Expression(Node):
    pass

class UnaryExpression(Node):
    def __init__(self, expression):
        self.expression = expression

class BinaryExpression(Node):
    def __init__(self, left, right):
        self.left = left
        self.right = right

class AndExpression(BinaryExpression):
    pass

class OrExpression(BinaryExpression):
    pass

class NotExpression(UnaryExpression):
    pass

class ParenthesizedExpression(UnaryExpression):
    pass

class BlockExpression(UnaryExpression):
    pass

class Comparison(Expression):
    def __init__(self, operator, left, right):
        self.operator = operator
        self.left = left
        self.right = right

class Identifier(Node):
    def __init__(self, name):
        self.name = name

class RegexpIdentifier(Identifier):
    pass

class IdentifierPath(Node):
    def __init__(self, component):
        self.components = [component]

class Subquery(Node):
    def __init__(self, endpoint, expression):
        self.endpoint = endpoint
        self.expression = expression

class Resource(Expression):
    def __init__(self, res_type, title, exported, parameters=None):
        self.res_type = res_type
        self.title = title
        self.exported = exported
        self.parameters = parameters

class RegexpNodeMatch(Expression):
    def __init__(self, value):
        self.value = value
|
apache-2.0
|
Python
|
4be8aea26f5dbec2c93413f6e545a47e850a7382
|
Mark test as xfail due to new connection factory behavior
|
sim0629/irc
|
irc/tests/test_client.py
|
irc/tests/test_client.py
|
import datetime
import random
import pytest
import mock
import irc.client
def test_version():
    assert 'VERSION' in vars(irc.client)
    assert isinstance(irc.client.VERSION, tuple)
    assert irc.client.VERSION, "No VERSION detected."

def test_delayed_command_order():
    """
    delayed commands should be sorted by delay time
    """
    null = lambda: None
    delays = [random.randint(0, 99) for x in xrange(5)]
    cmds = sorted([
        irc.client.DelayedCommand(delay, null, tuple())
        for delay in delays
    ])
    assert [c.delay.seconds for c in cmds] == sorted(delays)

def test_periodic_command_fixed_delay():
    """
    Test that we can construct a periodic command with a fixed initial
    delay.
    """
    fd = irc.client.PeriodicCommandFixedDelay.at_time(
        at = datetime.datetime.now(),
        delay = datetime.timedelta(seconds=2),
        function = lambda: None,
        arguments = [],
    )
    assert fd.due() == True
    assert fd.next().due() == False

@mock.patch('irc.connection.socket')
def test_privmsg_sends_msg(socket_mod):
    pytest.xfail("Fails because server finds 'write' method on mock socket")
    server = irc.client.IRC().server()
    server.connect('foo', 6667, 'bestnick')
    server.privmsg('#best-channel', 'You are great')
    socket_mod.socket.return_value.send.assert_called_with(
        b'PRIVMSG #best-channel :You are great\r\n')

@mock.patch('irc.connection.socket')
def test_privmsg_fails_on_embedded_carriage_returns(socket_mod):
    server = irc.client.IRC().server()
    server.connect('foo', 6667, 'bestnick')
    with pytest.raises(ValueError):
        server.privmsg('#best-channel', 'You are great\nSo are you')
|
import datetime
import random
import pytest
import mock
import irc.client
def test_version():
    assert 'VERSION' in vars(irc.client)
    assert isinstance(irc.client.VERSION, tuple)
    assert irc.client.VERSION, "No VERSION detected."

def test_delayed_command_order():
    """
    delayed commands should be sorted by delay time
    """
    null = lambda: None
    delays = [random.randint(0, 99) for x in xrange(5)]
    cmds = sorted([
        irc.client.DelayedCommand(delay, null, tuple())
        for delay in delays
    ])
    assert [c.delay.seconds for c in cmds] == sorted(delays)

def test_periodic_command_fixed_delay():
    """
    Test that we can construct a periodic command with a fixed initial
    delay.
    """
    fd = irc.client.PeriodicCommandFixedDelay.at_time(
        at = datetime.datetime.now(),
        delay = datetime.timedelta(seconds=2),
        function = lambda: None,
        arguments = [],
    )
    assert fd.due() == True
    assert fd.next().due() == False

@mock.patch('irc.connection.socket')
def test_privmsg_sends_msg(socket_mod):
    server = irc.client.IRC().server()
    server.connect('foo', 6667, 'bestnick')
    server.privmsg('#best-channel', 'You are great')
    socket_mod.socket.return_value.send.assert_called_with(
        b'PRIVMSG #best-channel :You are great\r\n')

@mock.patch('irc.connection.socket')
def test_privmsg_fails_on_embedded_carriage_returns(socket_mod):
    server = irc.client.IRC().server()
    server.connect('foo', 6667, 'bestnick')
    with pytest.raises(ValueError):
        server.privmsg('#best-channel', 'You are great\nSo are you')
|
lgpl-2.1
|
Python
|
0314334373b380c41e72ed41bfef1f7cbc65b894
|
Add CAN_DETECT
|
chriscoyfish/coala-bears,dosarudaniel/coala-bears,coala/coala-bears,yashtrivedi96/coala-bears,meetmangukiya/coala-bears,Asnelchristian/coala-bears,refeed/coala-bears,mr-karan/coala-bears,dosarudaniel/coala-bears,coala/coala-bears,sounak98/coala-bears,dosarudaniel/coala-bears,shreyans800755/coala-bears,sounak98/coala-bears,yash-nisar/coala-bears,kaustubhhiware/coala-bears,chriscoyfish/coala-bears,SanketDG/coala-bears,damngamerz/coala-bears,srisankethu/coala-bears,Vamshi99/coala-bears,Vamshi99/coala-bears,naveentata/coala-bears,yash-nisar/coala-bears,chriscoyfish/coala-bears,ku3o/coala-bears,gs0510/coala-bears,horczech/coala-bears,Asnelchristian/coala-bears,ankit01ojha/coala-bears,gs0510/coala-bears,aptrishu/coala-bears,shreyans800755/coala-bears,incorrectusername/coala-bears,SanketDG/coala-bears,yashtrivedi96/coala-bears,refeed/coala-bears,seblat/coala-bears,chriscoyfish/coala-bears,incorrectusername/coala-bears,mr-karan/coala-bears,naveentata/coala-bears,mr-karan/coala-bears,aptrishu/coala-bears,aptrishu/coala-bears,Vamshi99/coala-bears,chriscoyfish/coala-bears,SanketDG/coala-bears,damngamerz/coala-bears,seblat/coala-bears,vijeth-aradhya/coala-bears,srisankethu/coala-bears,shreyans800755/coala-bears,SanketDG/coala-bears,kaustubhhiware/coala-bears,sounak98/coala-bears,coala-analyzer/coala-bears,madhukar01/coala-bears,refeed/coala-bears,yashtrivedi96/coala-bears,gs0510/coala-bears,refeed/coala-bears,LWJensen/coala-bears,SanketDG/coala-bears,Shade5/coala-bears,aptrishu/coala-bears,yashtrivedi96/coala-bears,srisankethu/coala-bears,coala-analyzer/coala-bears,meetmangukiya/coala-bears,LWJensen/coala-bears,Vamshi99/coala-bears,coala/coala-bears,Shade5/coala-bears,dosarudaniel/coala-bears,seblat/coala-bears,yashtrivedi96/coala-bears,coala-analyzer/coala-bears,LWJensen/coala-bears,incorrectusername/coala-bears,Vamshi99/coala-bears,Asnelchristian/coala-bears,dosarudaniel/coala-bears,ku3o/coala-bears,horczech/coala-bears,SanketDG/coala-bears,damngamerz/coala-bears,refeed/coala-bears,coala-analyzer/coala-bears,horczech/coala-bears,chriscoyfish/coala-bears,sounak98/coala-bears,LWJensen/coala-bears,refeed/coala-bears,Vamshi99/coala-bears,sounak98/coala-bears,vijeth-aradhya/coala-bears,ankit01ojha/coala-bears,coala/coala-bears,damngamerz/coala-bears,LWJensen/coala-bears,meetmangukiya/coala-bears,ku3o/coala-bears,kaustubhhiware/coala-bears,madhukar01/coala-bears,naveentata/coala-bears,sounak98/coala-bears,aptrishu/coala-bears,naveentata/coala-bears,yash-nisar/coala-bears,damngamerz/coala-bears,ankit01ojha/coala-bears,aptrishu/coala-bears,arjunsinghy96/coala-bears,srisankethu/coala-bears,coala/coala-bears,arjunsinghy96/coala-bears,incorrectusername/coala-bears,coala-analyzer/coala-bears,srisankethu/coala-bears,gs0510/coala-bears,mr-karan/coala-bears,kaustubhhiware/coala-bears,dosarudaniel/coala-bears,ankit01ojha/coala-bears,madhukar01/coala-bears,sounak98/coala-bears,horczech/coala-bears,srisankethu/coala-bears,ankit01ojha/coala-bears,shreyans800755/coala-bears,Shade5/coala-bears,shreyans800755/coala-bears,horczech/coala-bears,shreyans800755/coala-bears,vijeth-aradhya/coala-bears,Asnelchristian/coala-bears,Asnelchristian/coala-bears,ankit01ojha/coala-bears,madhukar01/coala-bears,sounak98/coala-bears,coala/coala-bears,kaustubhhiware/coala-bears,gs0510/coala-bears,srisankethu/coala-bears,coala-analyzer/coala-bears,vijeth-aradhya/coala-bears,LWJensen/coala-bears,ankit01ojha/coala-bears,incorrectusername/coala-bears,Vamshi99/coala-bears,horczech/coala-bears,kaustubhhiware/coala-bears,kaustubhhiware/coala-bears,seblat/coala-bears,Vamshi99/coala-bears,naveentata/coala-bears,arjunsinghy96/coala-bears,aptrishu/coala-bears,yash-nisar/coala-bears,meetmangukiya/coala-bears,coala/coala-bears,Asnelchristian/coala-bears,arjunsinghy96/coala-bears,aptrishu/coala-bears,damngamerz/coala-bears,dosarudaniel/coala-bears,vijeth-aradhya/coala-bears,ankit01ojha/coala-bears,refeed/coala-bears,refeed/coala-bears,dosarudaniel/coala-bears,vijeth-aradhya/coala-bears,mr-karan/coala-bears,SanketDG/coala-bears,seblat/coala-bears,yashtrivedi96/coala-bears,SanketDG/coala-bears,naveentata/coala-bears,srisankethu/coala-bears,Vamshi99/coala-bears,gs0510/coala-bears,kaustubhhiware/coala-bears,Asnelchristian/coala-bears,horczech/coala-bears,gs0510/coala-bears,refeed/coala-bears,meetmangukiya/coala-bears,incorrectusername/coala-bears,yash-nisar/coala-bears,arjunsinghy96/coala-bears,shreyans800755/coala-bears,srisankethu/coala-bears,dosarudaniel/coala-bears,damngamerz/coala-bears,coala/coala-bears,refeed/coala-bears,yashtrivedi96/coala-bears,shreyans800755/coala-bears,vijeth-aradhya/coala-bears,madhukar01/coala-bears,incorrectusername/coala-bears,gs0510/coala-bears,srisankethu/coala-bears,mr-karan/coala-bears,ankit01ojha/coala-bears,incorrectusername/coala-bears,yash-nisar/coala-bears,ku3o/coala-bears,LWJensen/coala-bears,shreyans800755/coala-bears,arjunsinghy96/coala-bears,horczech/coala-bears,yash-nisar/coala-bears,LWJensen/coala-bears,ankit01ojha/coala-bears,Shade5/coala-bears,ku3o/coala-bears,naveentata/coala-bears,gs0510/coala-bears,SanketDG/coala-bears,madhukar01/coala-bears,arjunsinghy96/coala-bears,Asnelchristian/coala-bears,coala-analyzer/coala-bears,naveentata/coala-bears,aptrishu/coala-bears,yash-nisar/coala-bears,horczech/coala-bears,seblat/coala-bears,horczech/coala-bears,ku3o/coala-bears,srisankethu/coala-bears,meetmangukiya/coala-bears,refeed/coala-bears,arjunsinghy96/coala-bears,ku3o/coala-bears,kaustubhhiware/coala-bears,LWJensen/coala-bears,ku3o/coala-bears,Shade5/coala-bears,mr-karan/coala-bears,Shade5/coala-bears,yash-nisar/coala-bears,coala/coala-bears,mr-karan/coala-bears,madhukar01/coala-bears,Vamshi99/coala-bears,yash-nisar/coala-bears,ankit01ojha/coala-bears,naveentata/coala-bears,shreyans800755/coala-bears,incorrectusername/coala-bears,seblat/coala-bears,meetmangukiya/coala-bears,coala/coala-bears,Vamshi99/coala-bears,damngamerz/coala-bears,arjunsinghy96/coala-bears,Shade5/coala-bears,Shade5/coala-bears,damngamerz/coala-bears,yashtrivedi96/coala-bears,aptrishu/coala-bears,madhukar01/coala-bears,shreyans800755/coala-bears,aptrishu/coala-bears,madhukar01/coala-bears,sounak98/coala-bears,coala/coala-bears,Shade5/coala-bears,damngamerz/coala-bears,seblat/coala-bears,chriscoyfish/coala-bears,coala/coala-bears,damngamerz/coala-bears,vijeth-aradhya/coala-bears,meetmangukiya/coala-bears,coala-analyzer/coala-bears,yash-nisar/coala-bears,meetmangukiya/coala-bears,yashtrivedi96/coala-bears,horczech/coala-bears,coala-analyzer/coala-bears,ku3o/coala-bears,vijeth-aradhya/coala-bears
|
bears/yml/YAMLLintBear.py
|
bears/yml/YAMLLintBear.py
|
from coalib.bearlib.abstractions.Linter import linter
from coalib.bears.requirements.PipRequirement import PipRequirement
@linter(executable='yamllint',
        output_format="regex",
        output_regex=r'.+:(?P<line>\d+):(?P<column>\d+): '
                     r'\[(?P<severity>error|warning)\] (?P<message>.+)')
class YAMLLintBear:
    """
    Check yaml code for errors and possible problems.

    You can read more about capabilities at
    <http://yamllint.readthedocs.org/en/latest/rules.html>.
    """

    LANGUAGES = {"YAML"}
    REQUIREMENTS = {PipRequirement('yamllint', '1.*')}
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'[email protected]'}
    LICENSE = 'AGPL-3.0'
    CAN_DETECT = {'Syntax', 'Formatting'}

    @staticmethod
    def create_arguments(filename, file, config_file, yamllint_config: str=''):
        """
        :param yamllint_config: Path to a custom configuration file.
        """
        args = ('-f', 'parsable', filename)
        if yamllint_config:
            args += ('--config=' + yamllint_config,)
        return args
|
from coalib.bearlib.abstractions.Linter import linter
from coalib.bears.requirements.PipRequirement import PipRequirement
@linter(executable='yamllint',
        output_format="regex",
        output_regex=r'.+:(?P<line>\d+):(?P<column>\d+): '
                     r'\[(?P<severity>error|warning)\] (?P<message>.+)')
class YAMLLintBear:
    """
    Check yaml code for errors and possible problems.

    You can read more about capabilities at
    <http://yamllint.readthedocs.org/en/latest/rules.html>.
    """

    LANGUAGES = {"YAML"}
    REQUIREMENTS = {PipRequirement('yamllint', '1.*')}
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'[email protected]'}
    LICENSE = 'AGPL-3.0'

    @staticmethod
    def create_arguments(filename, file, config_file, yamllint_config: str=''):
        """
        :param yamllint_config: Path to a custom configuration file.
        """
        args = ('-f', 'parsable', filename)
        if yamllint_config:
            args += ('--config=' + yamllint_config,)
        return args
|
agpl-3.0
|
Python
|
0d9b29c80502f8c4f23920ec65bc89093d553e47
|
Fix package version number
|
mstuttgart/python-sigep,mstuttgart/pysigep
|
pysigep/__version__.py
|
pysigep/__version__.py
|
__title__ = 'pysigep'
__description__ = 'API python para uso dos serviços fornecidos pelo ' \
                  'SIGEPWeb dos Correios '
__version__ = '0.1.0'
__url__ = 'https://github.com/mstuttgart/pysigep'
__download_url__ = 'https://github.com/mstuttgart/pysigep'
__author__ = 'Michell Stuttgart'
__author_email__ = '[email protected]'
__maintainer__ = 'Michell Stuttgart'
__maintainer_email__ = '[email protected]'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2016-2017 Michell Stuttgart'
__status__ = 'Development'
|
__title__ = 'pysigep'
__description__ = 'API python para uso dos serviços fornecidos pelo ' \
                  'SIGEPWeb dos Correios '
__version__ = '0.4.4'
__url__ = 'https://github.com/mstuttgart/pysigep'
__download_url__ = 'https://github.com/mstuttgart/pysigep'
__author__ = 'Michell Stuttgart'
__author_email__ = '[email protected]'
__maintainer__ = 'Michell Stuttgart'
__maintainer_email__ = '[email protected]'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2016-2017 Michell Stuttgart'
__status__ = 'Development'
|
mit
|
Python
|
2373734b9eda5c887621ee64a2ca755850685699
|
test c-model
|
Oscarlight/PiNN_Caffe2,Oscarlight/PiNN_Caffe2,Oscarlight/PiNN_Caffe2,Oscarlight/PiNN_Caffe2
|
transiNXOR_modeling/transixor_predictor.py
|
transiNXOR_modeling/transixor_predictor.py
|
import sys
sys.path.append('../')
import numpy as np
from itertools import product
from pinn_api import predict_ids_grads, predict_ids
import matplotlib.pyplot as plt
import glob
## ------------ True data ---------------
ids_file = glob.glob('./transiXOR_data/current_D9.npy')
# ids_file = glob.glob('./transiXOR_data/*_id_*.npy')
# vds, vbg, vtg, id
ids_data = np.load(ids_file[0])
print(ids_data.shape)
## ------------ Prediction ---------------
# vds = np.linspace(-0.1, 0.3, 41)
# vbg = np.linspace(0.1, 0.1, 1)
# vtg = np.linspace(0.2, 0.2, 1)
vds = np.linspace(0.2, 0.2, 1)
vbg = np.linspace(0.1, 0.1, 1)
vtg = np.linspace(-0.1, 0.3, 41)
iter_lst = list(product(vds, vbg, vtg))
vds_pred = np.expand_dims(np.array([e[0] for e in iter_lst], dtype=np.float32), axis=1)
vbg_pred = np.array([e[1] for e in iter_lst], dtype=np.float32)
vtg_pred = np.array([e[2] for e in iter_lst], dtype=np.float32)
vg_pred = np.column_stack((vtg_pred, vbg_pred))
vg_pred = np.sum(vg_pred, axis=1, keepdims=True)
# vg_pred = np.sum(vg_pred, axis=1, keepdims=True)
## If trained with adjoint builder
# ids_pred, _, _ = predict_ids_grads(
# './transiXOR_Models/bise_h16', vg_pred, vds_pred)
## If trained with origin builder
ids_pred = predict_ids(
    './transiXOR_Models/bise_ext_sym_h264_0', vg_pred, vds_pred)
# ids_true = ids_data[:, 30, 20]
# vds_true = np.linspace(-0.1, 0.3, 41)
# plt.plot(vds, ids_pred, 'r')
# plt.plot(vds_true, ids_true)
# plt.show()
# plt.semilogy(vds, np.abs(ids_pred), 'r')
# plt.semilogy(vds_true, np.abs(ids_true))
# plt.show()
# ids_true = ids_data[30, 20, :]
# vtg_true = np.linspace(-0.1, 0.3, 41)
# plt.plot(vtg, ids_pred, 'r')
# plt.plot(vtg_true, ids_true)
# plt.show()
# plt.semilogy(vtg, np.abs(ids_pred), 'r')
# plt.semilogy(vtg_true, np.abs(ids_true))
# plt.show()
## Point test
ids_pred = predict_ids(
    './transiXOR_Models/bise_ext_sym_h264_0',
    np.array([0.2+0.2]), np.array([0.2]))
print(ids_pred)
ids_pred = predict_ids(
    './transiXOR_Models/bise_ext_sym_h264_0',
    np.array([0.0+0.0]), np.array([0.2]))
print(ids_pred)
ids_pred = predict_ids(
    './transiXOR_Models/bise_ext_sym_h264_0',
    np.array([0.0+0.1]), np.array([0.2]))
print(ids_pred)
ids_pred = predict_ids(
    './transiXOR_Models/bise_ext_sym_h264_0',
    np.array([0.1+0.0]), np.array([0.2]))
print(ids_pred)
|
import sys
sys.path.append('../')
import numpy as np
from itertools import product
from pinn_api import predict_ids_grads, predict_ids
import matplotlib.pyplot as plt
import glob
## ------------ True data ---------------
ids_file = glob.glob('./transiXOR_data/current_D9.npy')
# ids_file = glob.glob('./transiXOR_data/*_id_*.npy')
# vds, vbg, vtg, id
ids_data = np.load(ids_file[0])
print(ids_data.shape)
## ------------ Prediction ---------------
# vds = np.linspace(-0.1, 0.3, 41)
# vbg = np.linspace(0.1, 0.1, 1)
# vtg = np.linspace(0.2, 0.2, 1)
vds = np.linspace(0.2, 0.2, 1)
vbg = np.linspace(0.1, 0.1, 1)
vtg = np.linspace(-0.1, 0.3, 41)
iter_lst = list(product(vds, vbg, vtg))
vds_pred = np.expand_dims(np.array([e[0] for e in iter_lst], dtype=np.float32), axis=1)
vbg_pred = np.array([e[1] for e in iter_lst], dtype=np.float32)
vtg_pred = np.array([e[2] for e in iter_lst], dtype=np.float32)
vg_pred = np.column_stack((vtg_pred, vbg_pred))
vg_pred = np.sum(vg_pred, axis=1, keepdims=True)
# vg_pred = np.sum(vg_pred, axis=1, keepdims=True)
## If trained with adjoint builder
# ids_pred, _, _ = predict_ids_grads(
# './transiXOR_Models/bise_h16', vg_pred, vds_pred)
## If trained with origin builder
ids_pred = predict_ids(
    './transiXOR_Models/bise_ext_sym_h264_0', vg_pred, vds_pred)
# ids_true = ids_data[:, 30, 20]
# vds_true = np.linspace(-0.1, 0.3, 41)
# plt.plot(vds, ids_pred, 'r')
# plt.plot(vds_true, ids_true)
# plt.show()
# plt.semilogy(vds, np.abs(ids_pred), 'r')
# plt.semilogy(vds_true, np.abs(ids_true))
# plt.show()
ids_true = ids_data[30, 20, :]
vtg_true = np.linspace(-0.1, 0.3, 41)
plt.plot(vtg, ids_pred, 'r')
plt.plot(vtg_true, ids_true)
plt.show()
plt.semilogy(vtg, np.abs(ids_pred), 'r')
plt.semilogy(vtg_true, np.abs(ids_true))
plt.show()
## Point test
ids_pred = predict_ids(
    './transiXOR_Models/bise_ext_sym_h264_0',
    np.array([0.2+0.2]), np.array([0.2]))
print(ids_pred)
|
mit
|
Python
|
d692ed6c48fc36b296b9a3e952dd1f70b133210c
|
add migrate script to remove ezid from suggestions
|
DOAJ/doaj,DOAJ/doaj,DOAJ/doaj,DOAJ/doaj
|
portality/migrate/p1p2/suggestionrestructure.py
|
portality/migrate/p1p2/suggestionrestructure.py
|
from portality import models, settings
import requests, json
# first thing to do is delete suggestions which are marked "waiting for answer"
q = {
    "query" : {
        "bool" : {
            "must" : [
                {"term" : {"admin.application_status.exact" : "waiting for answer"}}
            ]
        }
    }
}
url = settings.ELASTIC_SEARCH_HOST + "/" + settings.ELASTIC_SEARCH_DB + "/suggestion/_query"
resp = requests.delete(url, data=json.dumps(q))

deletable = models.Suggestion.iterate(q, page_size=15000, wrap=False)
for d in deletable:
    id = d.get("id")
    if id is not None:
        models.Suggestion.remove_by_id(id)
        print "removing", id

batch_size = 1000
total=0
batch = []
suggestion_iterator = models.Suggestion.iterall(page_size=10000)
for s in suggestion_iterator:
    # remove any author-pays stuff
    if "author_pays" in s.data.get("bibjson"):
        del s.data["bibjson"]["author_pays"]
    if "author_pays_url" in s.data.get("bibjson"):
        del s.data["bibjson"]["author_pays_url"]

    # normalise the application statuses
    if s.application_status == "answer received":
        s.set_application_status("in progress")

    # remove any EzID from the persistent identifier schemes
    pids = s.bibjson().persistent_identifier_scheme
    if "EzID" in pids:
        i = pids.index("EzID")
        del pids[i]
        s.bibjson().persistent_identifier_scheme = pids

    s.prep()
    batch.append(s.data)
    if len(batch) >= batch_size:
        total += len(batch)
        print "writing", len(batch), "; total so far", total
        models.Suggestion.bulk(batch)
        batch = []

if len(batch) > 0:
    total += len(batch)
    print "writing", len(batch), "; total so far", total
    models.Suggestion.bulk(batch)
|
from portality import models, settings
import requests, json
# first thing to do is delete suggestions which are marked "waiting for answer"
q = {
    "query" : {
        "bool" : {
            "must" : [
                {"term" : {"admin.application_status.exact" : "waiting for answer"}}
            ]
        }
    }
}
url = settings.ELASTIC_SEARCH_HOST + "/" + settings.ELASTIC_SEARCH_DB + "/suggestion/_query"
resp = requests.delete(url, data=json.dumps(q))

deletable = models.Suggestion.iterate(q, page_size=15000, wrap=False)
for d in deletable:
    id = d.get("id")
    if id is not None:
        models.Suggestion.remove_by_id(id)
        print "removing", id

batch_size = 1000
total=0
batch = []
suggestion_iterator = models.Suggestion.iterall(page_size=10000)
for s in suggestion_iterator:
    # remove any author-pays stuff
    if "author_pays" in s.data.get("bibjson"):
        del s.data["bibjson"]["author_pays"]
    if "author_pays_url" in s.data.get("bibjson"):
        del s.data["bibjson"]["author_pays_url"]

    # normalise the application statuses
    if s.application_status == "answer received":
        s.set_application_status("in progress")

    s.prep()
    batch.append(s.data)
    if len(batch) >= batch_size:
        total += len(batch)
        print "writing", len(batch), "; total so far", total
        models.Suggestion.bulk(batch)
        batch = []

if len(batch) > 0:
    total += len(batch)
    print "writing", len(batch), "; total so far", total
    models.Suggestion.bulk(batch)
|
apache-2.0
|
Python
|
4bc871aaa72fa1d793203e5627a2ac5f859ae27d
|
add dependencies; still incomplete
|
Minhmo/tardis,kaushik94/tardis,Minhmo/tardis,Tobychev/tardis,Tobychev/tardis,Tobychev/tardis,wkerzendorf/tardis,kaushik94/tardis,kaushik94/tardis,Minhmo/tardis,orbitfold/tardis,wkerzendorf/tardis,utkbansal/tardis,utkbansal/tardis,orbitfold/tardis,utkbansal/tardis,orbitfold/tardis,kaushik94/tardis,orbitfold/tardis,wkerzendorf/tardis
|
tardis/montecarlo/setup_package.py
|
tardis/montecarlo/setup_package.py
|
#setting the right include
from setuptools import Extension
import numpy as np
import os
from astropy_helpers.setup_helpers import get_distutils_option
from glob import glob
if get_distutils_option('with_openmp', ['build', 'install', 'develop']) is not None:
    compile_args = ['-fopenmp', '-W', '-Wall', '-Wmissing-prototypes', '-std=c99']
    link_args = ['-fopenmp']
    define_macros = [('WITHOPENMP', None)]
else:
    compile_args = ['-W', '-Wall', '-Wmissing-prototypes', '-std=c99']
    link_args = []
    define_macros = []

def get_extensions():
    sources = ['tardis/montecarlo/montecarlo.pyx']
    sources += [os.path.relpath(fname) for fname in glob(
        os.path.join(os.path.dirname(__file__), 'src', '*.c'))]
    sources += [os.path.relpath(fname) for fname in glob(
        os.path.join(os.path.dirname(__file__), 'src/randomkit', '*.c'))]
    deps = [os.path.relpath(fname) for fname in glob(
        os.path.join(os.path.dirname(__file__), 'src', '*.h'))]
    deps += [os.path.relpath(fname) for fname in glob(
        os.path.join(os.path.dirname(__file__), 'src/randomkit', '*.h'))]

    return [Extension('tardis.montecarlo.montecarlo', sources,
                      include_dirs=['tardis/montecarlo/src',
                                    'tardis/montecarlo/src/randomkit',
                                    np.get_include()],
                      depends=deps,
                      extra_compile_args=compile_args,
                      extra_link_args=link_args,
                      define_macros=define_macros)]
|
#setting the right include
from setuptools import Extension
import numpy as np
import os
from astropy_helpers.setup_helpers import get_distutils_option
from glob import glob
if get_distutils_option('with_openmp', ['build', 'install', 'develop']) is not None:
    compile_args = ['-fopenmp', '-W', '-Wall', '-Wmissing-prototypes', '-std=c99']
    link_args = ['-fopenmp']
    define_macros = [('WITHOPENMP', None)]
else:
    compile_args = ['-W', '-Wall', '-Wmissing-prototypes', '-std=c99']
    link_args = []
    define_macros = []

def get_extensions():
    sources = ['tardis/montecarlo/montecarlo.pyx']
    sources += [os.path.relpath(fname) for fname in glob(
        os.path.join(os.path.dirname(__file__), 'src', '*.c'))]
    sources += [os.path.relpath(fname) for fname in glob(
        os.path.join(os.path.dirname(__file__), 'src/randomkit', '*.c'))]

    return [Extension('tardis.montecarlo.montecarlo', sources,
                      include_dirs=['tardis/montecarlo/src',
                                    'tardis/montecarlo/src/randomkit',
                                    np.get_include()],
                      extra_compile_args=compile_args,
                      extra_link_args=link_args,
                      define_macros=define_macros)]
|
bsd-3-clause
|
Python
|
df16f3e9c49ba2fb3cdbfdc62e120c6358eb25f9
|
Add 'dump_header' function
|
edgedb/edgedb,edgedb/edgedb,edgedb/edgedb
|
edgedb/lang/common/markup/__init__.py
|
edgedb/lang/common/markup/__init__.py
|
##
# Copyright (c) 2011 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
from . import elements, serializer, renderers
from .serializer import serialize
from .serializer import base as _base_serializer
from semantix.exceptions import ExceptionContext as _ExceptionContext
from semantix.utils import abc
@serializer.serializer(method='as_markup')
class MarkupExceptionContext(_ExceptionContext, metaclass=abc.AbstractMeta):
    @abc.abstractclassmethod
    def as_markup(cls, *, ctx):
        pass

def _serialize(obj, trim=True):
    ctx = _base_serializer.Context(trim=trim)
    try:
        return serialize(obj, ctx=ctx)
    finally:
        ctx.reset()

def dumps(obj, header=None, trim=True):
    markup = _serialize(obj, trim=trim)
    if header is not None:
        markup = elements.doc.Section(title=header, body=[markup])
    return renderers.terminal.renders(markup)

def _dump(markup, header, file):
    if header is not None:
        markup = elements.doc.Section(title=header, body=[markup])
    renderers.terminal.render(markup, file=file)

def dump_header(header, file=None):
    markup = elements.doc.Section(title=header, body=[])
    renderers.terminal.render(markup, file=file)

def dump(obj, *, header=None, file=None, trim=True):
    markup = _serialize(obj, trim=trim)
    _dump(markup, header, file)

def dump_code(code:str, *, lexer='python', header=None, file=None):
    markup = serializer.serialize_code(code, lexer=lexer)
    _dump(markup, header, file)
|
##
# Copyright (c) 2011 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
from . import elements, serializer, renderers
from .serializer import serialize
from .serializer import base as _base_serializer
from semantix.exceptions import ExceptionContext as _ExceptionContext
from semantix.utils import abc
@serializer.serializer(method='as_markup')
class MarkupExceptionContext(_ExceptionContext, metaclass=abc.AbstractMeta):
    @abc.abstractclassmethod
    def as_markup(cls, *, ctx):
        pass

def _serialize(obj, trim=True):
    ctx = _base_serializer.Context(trim=trim)
    try:
        return serialize(obj, ctx=ctx)
    finally:
        ctx.reset()

def dumps(obj, header=None, trim=True):
    markup = _serialize(obj, trim=trim)
    if header is not None:
        markup = elements.doc.Section(title=header, body=[markup])
    return renderers.terminal.renders(markup)

def _dump(markup, header, file):
    if header is not None:
        markup = elements.doc.Section(title=header, body=[markup])
    renderers.terminal.render(markup, file=file)

def dump(obj, *, header=None, file=None, trim=True):
    markup = _serialize(obj, trim=trim)
    _dump(markup, header, file)

def dump_code(code:str, *, lexer='python', header=None, file=None):
    markup = serializer.serialize_code(code, lexer=lexer)
    _dump(markup, header, file)
|
apache-2.0
|
Python
|
ab6e5754283999ece4e77da959c6f9c868b964a7
|
Add Security Manager information
|
Shosta/Report-Creation-on-Python
|
variables.py
|
variables.py
|
"""
Define the variables in a module.
"""
NOT_EVALUATED_PHASE = 'Not Evaluated'
NOT_STARTED_PHASE = 'Not Started'
IN_PROGRESS_PHASE = 'In Progress'
DONE_PHASE = 'Done'
SECURITY_MANAGER_NAME = 'Rémi Lavedrine'
SECURITY_MANAGER_EMAIL = '[email protected]'
SECURITY_MANAGER_PHONE = '06 31 17 80 39'
|
"""
Define the variables in a module.
"""
NOT_EVALUATED_PHASE = 'Not Evaluated'
NOT_STARTED_PHASE = 'Not Started'
IN_PROGRESS_PHASE = 'In Progress'
DONE_PHASE = 'Done'
|
mpl-2.0
|
Python
|
e44021fff840435fe49aaef1a1531cb2ccf44e43
|
Add back to "rebuild_data" command
|
barberscore/barberscore-api,dbinetti/barberscore-django,barberscore/barberscore-api,dbinetti/barberscore-django,dbinetti/barberscore,barberscore/barberscore-api,dbinetti/barberscore,barberscore/barberscore-api
|
project/api/management/commands/rebuild_data.py
|
project/api/management/commands/rebuild_data.py
|
# Django
from django.apps import apps
from django.core.management.base import BaseCommand
from django.utils import timezone
import datetime
class Command(BaseCommand):
    help = "Command to rebuild denorms."

    def add_arguments(self, parser):
        parser.add_argument(
            '--days',
            type=int,
            dest='days',
            nargs='?',
            const=1,
            help='Number of days to update.',
        )
        parser.add_argument(
            '--hours',
            type=int,
            dest='hours',
            nargs='?',
            const=1,
            help='Number of hours to update.',
        )
        parser.add_argument(
            '--minutes',
            type=int,
            dest='minutes',
            nargs='?',
            const=1,
            help='Number of hours to update.',
        )

    def handle(self, *args, **options):
        # Set Cursor
        if options['days']:
            cursor = timezone.now() - datetime.timedelta(days=options['days'], hours=1)
        elif options['hours']:
            cursor = timezone.now() - datetime.timedelta(hours=options['hours'], minutes=5)
        elif options['minutes']:
            cursor = timezone.now() - datetime.timedelta(minutes=options['minutes'], seconds=5)
        else:
            cursor = None
        Group = apps.get_model('api.group')
        Group.objects.denormalize(cursor=cursor)
        Group.objects.sort_tree()
        Group.objects.update_seniors()
        Award = apps.get_model('api.award')
        Award.objects.sort_tree()
        return
|
# Django
from django.apps import apps
from django.core.management.base import BaseCommand
from django.utils import timezone
import datetime
class Command(BaseCommand):
    help = "Command to rebuild denorms."

    def add_arguments(self, parser):
        parser.add_argument(
            '--days',
            type=int,
            dest='days',
            nargs='?',
            const=1,
            help='Number of days to update.',
        )
        parser.add_argument(
            '--hours',
            type=int,
            dest='hours',
            nargs='?',
            const=1,
            help='Number of hours to update.',
        )
        parser.add_argument(
            '--minutes',
            type=int,
            dest='minutes',
            nargs='?',
            const=1,
            help='Number of hours to update.',
        )

    def handle(self, *args, **options):
        # Set Cursor
        if options['days']:
            cursor = timezone.now() - datetime.timedelta(days=options['days'], hours=1)
        elif options['hours']:
            cursor = timezone.now() - datetime.timedelta(hours=options['hours'], minutes=5)
        elif options['minutes']:
            cursor = timezone.now() - datetime.timedelta(minutes=options['minutes'], seconds=5)
        else:
            cursor = None
        Group = apps.get_model('api.group')
        # Group.objects.denormalize(cursor=cursor)
        # Group.objects.sort_tree()
        # Group.objects.update_seniors()
        Award = apps.get_model('api.award')
        Award.objects.sort_tree()
        return
|
bsd-2-clause
|
Python
|
48b33bedda0da0ad324f8f7a3ac2fbafa8e6f665
|
change issue commit to markdown
|
ZhangBohan/BotMoment
|
moment/main.py
|
moment/main.py
|
from sanic import Sanic
from sanic.response import json as response_json
import aiohttp
import json
from moment.gitlab_message_dict import get_dingtalk_data
app = Sanic(__name__)
async def post(url, json_data):
    headers = {
        "Content-Type": "application/json"
    }
    conn = aiohttp.TCPConnector(verify_ssl=False)
    async with aiohttp.ClientSession(connector=conn, headers=headers) as session:
        async with session.post(url, data=json.dumps(json_data)) as resp:
            return await resp.json()

@app.post("/gitlab")
async def test(request):
    access_token = request.args.get('access_token')
    request_data = request.json
    print(f'request: {request.body.decode()}')
    url = f'https://oapi.dingtalk.com/robot/send?access_token={access_token}'
    data = get_dingtalk_data(request_data)
    response = await post(url, data)
    print(f'{url}: {response}')
    return response_json(request.json)
|
from sanic import Sanic
from sanic.response import json as response_json
import aiohttp
import json
from moment.gitlab_message_dict import get_dingtalk_data
app = Sanic(__name__)
async def post(url, json_data):
    headers = {
        "Content-Type": "application/json"
    }
    conn = aiohttp.TCPConnector(verify_ssl=False)
    async with aiohttp.ClientSession(connector=conn, headers=headers) as session:
        async with session.post(url, data=json.dumps(json_data)) as resp:
            return await resp.json()

@app.post("/gitlab")
async def test(request):
    access_token = request.args.get('access_token')
    request_data = request.json
    print(f'request: {request.body}')
    url = f'https://oapi.dingtalk.com/robot/send?access_token={access_token}'
    data = get_dingtalk_data(request_data)
    response = await post(url, data)
    print(f'{url}: {response}')
    return response_json(request.json)
|
mit
|
Python
|
8ec6b8b6c2f099261f85a3f68b5d6e87cbdb1c25
|
set context to none for ws://
|
Vaelor/python-mattermost-driver
|
src/mattermostdriver/websocket.py
|
src/mattermostdriver/websocket.py
|
import json
import ssl
import asyncio
import logging
import websockets
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('mattermostdriver.websocket')
class Websocket:
    def __init__(self, options, token):
        self.options = options
        self._token = token

    @asyncio.coroutine
    def connect(self, event_handler):
        """
        Connect to the websocket and authenticate it.
        When the authentication has finished, start the loop listening for messages,
        sending a ping to the server to keep the connection alive.

        :param event_handler: Every websocket event will be passed there
        :type event_handler: Function
        :return:
        """
        context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
        if not self.options['verify']:
            context.verify_mode = ssl.CERT_NONE
        scheme = 'wss://'
        if self.options['scheme'] != 'https':
            scheme = 'ws://'
            context = None
        url = scheme + self.options['url'] + ':' + str(self.options['port']) + self.options['basepath'] + '/websocket'
        websocket = yield from websockets.connect(
            url,
            ssl=context,
        )
        yield from self._authenticate_websocket(websocket, event_handler)
        yield from self._start_loop(websocket, event_handler)

    @asyncio.coroutine
    def _start_loop(self, websocket, event_handler):
        """
        We will listen for websockets events, sending a heartbeat/pong everytime
        we react a TimeoutError. If we don't the webserver would close the idle connection,
        forcing us to reconnect.
        """
        log.debug('Starting websocket loop')
        while True:
            try:
                yield from asyncio.wait_for(
                    self._wait_for_message(websocket, event_handler),
                    timeout=self.options['timeout']
                )
            except asyncio.TimeoutError:
                yield from websocket.pong()
                log.debug("Sending heartbeat...")
                continue

    @asyncio.coroutine
    def _authenticate_websocket(self, websocket, event_handler):
        """
        Sends a authentication challenge over a websocket.
        This is not needed when we just send the cookie we got on login
        when connecting to the websocket.
        """
        json_data = json.dumps({
            "seq": 1,
            "action": "authentication_challenge",
            "data": {
                "token": self._token
            }
        }).encode('utf8')
        yield from websocket.send(json_data)
        while True:
            message = yield from websocket.recv()
            status = json.loads(message)
            log.debug(status)
            # We want to pass the events to the event_handler already
            # because the hello event could arrive before the authentication ok response
            yield from event_handler(message)
            if ('status' in status and status['status'] == 'OK') and \
                    ('seq_reply' in status and status['seq_reply'] == 1):
                log.info('Websocket authentification OK')
                return True

    @asyncio.coroutine
    def _wait_for_message(self, websocket, event_handler):
        while True:
            message = yield from websocket.recv()
            yield from event_handler(message)
|
import json
import ssl
import asyncio
import logging
import websockets
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('mattermostdriver.websocket')
class Websocket:
    def __init__(self, options, token):
        self.options = options
        self._token = token

    @asyncio.coroutine
    def connect(self, event_handler):
        """
        Connect to the websocket and authenticate it.
        When the authentication has finished, start the loop listening for messages,
        sending a ping to the server to keep the connection alive.

        :param event_handler: Every websocket event will be passed there
        :type event_handler: Function
        :return:
        """
        context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
        if not self.options['verify']:
            context.verify_mode = ssl.CERT_NONE
        scheme = 'wss://'
        if self.options['scheme'] != 'https':
            scheme = 'ws://'
        url = scheme + self.options['url'] + ':' + str(self.options['port']) + self.options['basepath'] + '/websocket'
        websocket = yield from websockets.connect(
            url,
            ssl=context,
        )
        yield from self._authenticate_websocket(websocket, event_handler)
        yield from self._start_loop(websocket, event_handler)

    @asyncio.coroutine
    def _start_loop(self, websocket, event_handler):
        """
        We will listen for websockets events, sending a heartbeat/pong everytime
        we react a TimeoutError. If we don't the webserver would close the idle connection,
        forcing us to reconnect.
        """
        log.debug('Starting websocket loop')
        while True:
            try:
                yield from asyncio.wait_for(
                    self._wait_for_message(websocket, event_handler),
                    timeout=self.options['timeout']
                )
            except asyncio.TimeoutError:
                yield from websocket.pong()
                log.debug("Sending heartbeat...")
                continue

    @asyncio.coroutine
    def _authenticate_websocket(self, websocket, event_handler):
        """
        Sends a authentication challenge over a websocket.
        This is not needed when we just send the cookie we got on login
        when connecting to the websocket.
        """
        json_data = json.dumps({
            "seq": 1,
            "action": "authentication_challenge",
            "data": {
                "token": self._token
            }
        }).encode('utf8')
        yield from websocket.send(json_data)
        while True:
            message = yield from websocket.recv()
            status = json.loads(message)
            log.debug(status)
            # We want to pass the events to the event_handler already
            # because the hello event could arrive before the authentication ok response
            yield from event_handler(message)
            if ('status' in status and status['status'] == 'OK') and \
                    ('seq_reply' in status and status['seq_reply'] == 1):
                log.info('Websocket authentification OK')
                return True

    @asyncio.coroutine
    def _wait_for_message(self, websocket, event_handler):
        while True:
            message = yield from websocket.recv()
            yield from event_handler(message)
|
mit
|
Python
|
238c49d4fb1fe67ffd63ed7b9dc5dce0915ae389
|
remove internationalisation of uri. fix issue #2
|
ramusus/django-authopenid,ramusus/django-authopenid,psi29a/django-authopenid,ramusus/django-authopenid,psi29a/django-authopenid
|
django_authopenid/urls.py
|
django_authopenid/urls.py
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
from django.utils.translation import ugettext as _
urlpatterns = patterns('django_authopenid.views',
    # yadis rdf
    url(r'^yadis.xrdf$', 'xrdf', name='yadis_xrdf'),

    # manage account registration
    url(r'^signin/$', 'signin', name='user_signin'),
    url(r'^signout/$', 'signout', name='user_signout'),
    url(r'^signin/complete/$', 'complete_signin', name='user_complete_signin'),
    url(r'^register/$', 'register', name='user_register'),
)
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
from django.utils.translation import ugettext as _
urlpatterns = patterns('django_authopenid.views',
    # yadis rdf
    url(r'^yadis.xrdf$', 'xrdf', name='yadis_xrdf'),

    # manage account registration
    url(r'^%s$' % _('signin/'), 'signin', name='user_signin'),
    url(r'^%s$' % _('signout/'), 'signout', name='user_signout'),
    url(r'^%s%s$' % (_('signin/'), _('complete/')), 'complete_signin',
        name='user_complete_signin'),
    url(r'^%s$' % _('register/'), 'register', name='user_register'),
)
|
apache-2.0
|
Python
|
fe314468c4a8c02650b3b983a239acd06bfc003f
|
Improve config file handling on the job.
|
matz-e/lobster,matz-e/lobster,matz-e/lobster
|
lobster/cmssw/data/job.py
|
lobster/cmssw/data/job.py
|
#!/usr/bin/env python
import base64
import json
import os
import pickle
import shutil
import subprocess
import sys
fragment = """import FWCore.ParameterSet.Config as cms
process.source.fileNames = cms.untracked.vstring({input_files})
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))
process.source.lumisToProcess = cms.untracked.VLuminosityBlockRange({lumis})"""
def edit_process_source(cmssw_config_file, config_params):
    (dataset_files, lumis) = config_params
    with open(cmssw_config_file, 'a') as config:
        frag = fragment.format(input_files=repr([str(f) for f in dataset_files]), lumis=[str(l) for l in lumis])
        print "--- config file fragment:"
        print frag
        print "---"
        config.write(frag)

(config, inputs) = sys.argv[1:3]
args = sys.argv[3:]

configfile = config.replace(".py", "_mod.py")
shutil.copy2(config, configfile)

for d in os.listdir('.'):
    if d.startswith('CMSSW'):
        break
env = os.environ
env['X509_USER_PROXY'] = os.path.join(d, 'proxy')

edit_process_source(configfile, pickle.loads(base64.b64decode(inputs)))

exit_code = subprocess.call('cmsRun -j report.xml "{0}" {1} > cmssw.log 2>&1'.format(configfile, ' '.join(args)), shell=True, env=env)
sys.exit(exit_code)
|
#!/usr/bin/env python
import base64
import json
import os
import pickle
import subprocess
import sys
def edit_process_source(cmssw_config_file, config_params):
    (dataset_files, lumis) = config_params
    config = open(cmssw_config_file, 'a')
    with open(cmssw_config_file, 'a') as config:
        fragment = ('import FWCore.ParameterSet.Config as cms'
                    '\nprocess.source.fileNames = cms.untracked.vstring({input_files})'
                    '\nprocess.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))'
                    '\nprocess.source.lumisToProcess = cms.untracked.VLuminosityBlockRange({lumis})')
        config.write(fragment.format(input_files=repr([str(f) for f in dataset_files]), lumis=[str(l) for l in lumis]))

(configfile, inputs) = sys.argv[1:3]
args = sys.argv[3:]

for d in os.listdir('.'):
    if d.startswith('CMSSW'):
        break
env = os.environ
env['X509_USER_PROXY'] = os.path.join(d, 'proxy')

edit_process_source(configfile, pickle.loads(base64.b64decode(inputs)))

exit_code = subprocess.call('cmsRun -j report.xml "{0}" {1} > cmssw.log 2>&1'.format(configfile, ' '.join(args)), shell=True, env=env)
sys.exit(exit_code)
|
mit
|
Python
|
abffd85d6038494eea93b277b2d25af816dc2b78
|
Enable bidi tests for Firefox 86+
|
joshmgrant/selenium,SeleniumHQ/selenium,valfirst/selenium,valfirst/selenium,HtmlUnit/selenium,titusfortner/selenium,titusfortner/selenium,titusfortner/selenium,titusfortner/selenium,joshmgrant/selenium,titusfortner/selenium,HtmlUnit/selenium,valfirst/selenium,joshmgrant/selenium,HtmlUnit/selenium,HtmlUnit/selenium,HtmlUnit/selenium,valfirst/selenium,SeleniumHQ/selenium,HtmlUnit/selenium,valfirst/selenium,SeleniumHQ/selenium,HtmlUnit/selenium,HtmlUnit/selenium,valfirst/selenium,joshmgrant/selenium,SeleniumHQ/selenium,joshmgrant/selenium,joshmgrant/selenium,valfirst/selenium,joshmgrant/selenium,titusfortner/selenium,SeleniumHQ/selenium,SeleniumHQ/selenium,SeleniumHQ/selenium,valfirst/selenium,joshmgrant/selenium,titusfortner/selenium,joshmgrant/selenium,SeleniumHQ/selenium,HtmlUnit/selenium,SeleniumHQ/selenium,valfirst/selenium,SeleniumHQ/selenium,valfirst/selenium,SeleniumHQ/selenium,titusfortner/selenium,titusfortner/selenium,valfirst/selenium,titusfortner/selenium,titusfortner/selenium,joshmgrant/selenium,HtmlUnit/selenium,joshmgrant/selenium
|
py/test/selenium/webdriver/common/bidi_tests.py
|
py/test/selenium/webdriver/common/bidi_tests.py
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.common.exceptions import InvalidSelectorException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
import pytest
@pytest.mark.xfail_safari
@pytest.mark.xfail_remote
async def test_check_console_messages(driver, pages):
    pages.load("javascriptPage.html")
    from selenium.webdriver.common.bidi.console import Console

    async with driver.log.add_listener(Console.ALL) as messages:
        driver.execute_script("console.log('I love cheese')")

    assert messages["message"] == "I love cheese"

@pytest.mark.xfail_safari
@pytest.mark.xfail_remote
async def test_check_error_console_messages(driver, pages):
    pages.load("javascriptPage.html")
    from selenium.webdriver.common.bidi.console import Console

    async with driver.log.add_listener(Console.ERROR) as messages:
        driver.execute_script("console.error(\"I don't cheese\")")
        driver.execute_script("console.log('I love cheese')")

    assert messages["message"] == "I don't cheese"

@pytest.mark.xfail_firefox
@pytest.mark.xfail_safari
@pytest.mark.xfail_remote
async def test_collect_js_exceptions(driver, pages):
    pages.load("javascriptPage.html")

    async with driver.log.add_js_error_listener() as exceptions:
        driver.find_element(By.ID, "throwing-mouseover").click()

    assert exceptions is not None
    assert exceptions.exception_details.stack_trace.call_frames[0].function_name == "onmouseover"

@pytest.mark.xfail_firefox
@pytest.mark.xfail_safari
@pytest.mark.xfail_remote
async def test_collect_log_mutations(driver, pages):
    async with driver.log.mutation_events() as event:
        pages.load("dynamic.html")
        driver.find_element(By.ID, "reveal").click()
        WebDriverWait(driver, 5, ignored_exceptions=InvalidSelectorException)\
            .until(EC.visibility_of(driver.find_element(By.ID, "revealed")))

    assert event["attribute_name"] == "style"
    assert event["current_value"] == ""
    assert event["old_value"] == "display:none;"
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.common.exceptions import InvalidSelectorException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
import pytest
@pytest.mark.xfail_safari
@pytest.mark.xfail_firefox(reason="This is not in release firefox yet")
@pytest.mark.xfail_remote
async def test_check_console_messages(driver, pages):
pages.load("javascriptPage.html")
from selenium.webdriver.common.bidi.console import Console
async with driver.log.add_listener(Console.ALL) as messages:
driver.execute_script("console.log('I love cheese')")
assert messages["message"] == "I love cheese"
@pytest.mark.xfail_safari
@pytest.mark.xfail_firefox(reason="This is not in release firefox yet")
@pytest.mark.xfail_remote
async def test_check_error_console_messages(driver, pages):
pages.load("javascriptPage.html")
from selenium.webdriver.common.bidi.console import Console
async with driver.log.add_listener(Console.ERROR) as messages:
driver.execute_script("console.error(\"I don't cheese\")")
driver.execute_script("console.log('I love cheese')")
assert messages["message"] == "I don't cheese"
@pytest.mark.xfail_firefox
@pytest.mark.xfail_safari
@pytest.mark.xfail_remote
async def test_collect_js_exceptions(driver, pages):
pages.load("javascriptPage.html")
async with driver.log.add_js_error_listener() as exceptions:
driver.find_element(By.ID, "throwing-mouseover").click()
assert exceptions is not None
assert exceptions.exception_details.stack_trace.call_frames[0].function_name == "onmouseover"
@pytest.mark.xfail_firefox
@pytest.mark.xfail_safari
@pytest.mark.xfail_remote
async def test_collect_log_mutations(driver, pages):
async with driver.log.mutation_events() as event:
pages.load("dynamic.html")
driver.find_element(By.ID, "reveal").click()
WebDriverWait(driver, 5, ignored_exceptions=InvalidSelectorException)\
.until(EC.visibility_of(driver.find_element(By.ID, "revealed")))
assert event["attribute_name"] == "style"
assert event["current_value"] == ""
assert event["old_value"] == "display:none;"
|
apache-2.0
|
Python
|
9722016a0117682fa7d0d5599a8dc2f1a75f7c6a
|
remove softmax / centroidloss
|
pyannote/pyannote-audio,pyannote/pyannote-audio,pyannote/pyannote-audio
|
pyannote/audio/embedding/approaches/__init__.py
|
pyannote/audio/embedding/approaches/__init__.py
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2017-2018 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from .triplet_loss import TripletLoss
# from .centroid_loss import CentroidLoss
# from .softmax import Softmax
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2017-2018 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from .triplet_loss import TripletLoss
from .centroid_loss import CentroidLoss
from .softmax import Softmax
# from .agg_triplet_loss import AggTripletLoss
|
mit
|
Python
|
57a1e59f034b0edbabaa76376ba6475d6e4d0297
|
Add code to work out the Julian representation of a date.
|
jwg4/calexicon,jwg4/qual
|
qual/calendars/main.py
|
qual/calendars/main.py
|
from datetime import date, timedelta
from qual.helpers import ordinal, month_string
from date import DateWithCalendar, InvalidDate
from base import Calendar
class ProlepticGregorianCalendar(Calendar):
display_name = "Proleptic Gregorian Calendar"
def date(self, year, month, day):
try:
d = date(year, month, day)
except ValueError as e:
raise InvalidDate(e.message)
return self.from_date(d)
@staticmethod
def date_display_string(d):
return "%s %s %s" % (ordinal(d.day), month_string(d.month), d.year)
class JulianCalendar(Calendar):
display_name = "Julian Calendar"
@staticmethod
def is_julian_leap_year(y):
return (y % 4) == 0
@staticmethod
def is_gregorian_leap_year(y):
if (y % 400) == 0:
return True
if (y % 100) == 0:
return False
if (y % 4) == 0:
return True
return False
@staticmethod
def date_display_string(d):
year, month, day = JulianCalendar.julian_representation(d)
return "%s %s %s" % (ordinal(day), month_string(month), year)
@staticmethod
def julian_representation(d):
offset = JulianCalendar.number_of_extra_leap_days(d)
d = d - timedelta(days=offset)
return (d.year, d.month, d.day)
@staticmethod
def number_of_extra_leap_days(end, start=date(200, 3, 1)):
count = 0
for x in range(start.year, end.year + 1, 100):
if not JulianCalendar.is_gregorian_leap_year(x):
leap_day = date(x, 2, 28)
if start < leap_day < end:
count = count + 1
return count
def date(self, year, month, day):
if day == 29 and month == 2 and self.is_julian_leap_year(year):
d = date(year, 2, 28)
offset = self.number_of_extra_leap_days(d) + 1
else:
d = date(year, month, day)
offset = self.number_of_extra_leap_days(d)
d = d + timedelta(days=offset)
return self.from_date(d)
|
from datetime import date, timedelta
from qual.helpers import ordinal, month_string
from date import DateWithCalendar, InvalidDate
from base import Calendar
class ProlepticGregorianCalendar(Calendar):
display_name = "Proleptic Gregorian Calendar"
def date(self, year, month, day):
try:
d = date(year, month, day)
except ValueError as e:
raise InvalidDate(e.message)
return self.from_date(d)
@staticmethod
def date_display_string(d):
return "%s %s %s" % (ordinal(d.day), month_string(d.month), d.year)
class JulianCalendar(Calendar):
display_name = "Julian Calendar"
@staticmethod
def is_julian_leap_year(y):
return (y % 4) == 0
@staticmethod
def is_gregorian_leap_year(y):
if (y % 400) == 0:
return True
if (y % 100) == 0:
return False
if (y % 4) == 0:
return True
return False
def number_of_extra_leap_days(self, end, start=date(200, 3, 1)):
count = 0
for x in range(start.year, end.year + 1, 100):
if not self.is_gregorian_leap_year(x):
leap_day = date(x, 2, 28)
if start < leap_day < end:
count = count + 1
return count
def date(self, year, month, day):
if day == 29 and month == 2 and self.is_julian_leap_year(year):
d = date(year, 2, 28)
offset = self.number_of_extra_leap_days(d) + 1
else:
d = date(year, month, day)
offset = self.number_of_extra_leap_days(d)
d = d + timedelta(days=offset)
return self.from_date(d)
|
apache-2.0
|
Python
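A minimal usage sketch for the JulianCalendar added above; the import path is assumed from the file path qual/calendars/main.py (the row only gives the path, and the module itself uses Python 2-style imports), and the date value is a placeholder:
from datetime import date
from qual.calendars.main import JulianCalendar
# Convert a proleptic Gregorian date to its Julian (year, month, day) triple;
# the offset is the count of century years that are Julian but not Gregorian leap years.
print(JulianCalendar.julian_representation(date(1700, 3, 11)))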
|
82f563d7ed8dc53d00edf361af1f607f9a89b918
|
Add the rv32mi tests.
|
AngelTerrones/Algol,AngelTerrones/Algol
|
Simulation/core/conftest.py
|
Simulation/core/conftest.py
|
#!/usr/bin/env python
# Copyright (c) 2015 Angel Terrones (<[email protected]>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import glob
def auto_int(value):
return int(value, 0)
def pytest_addoption(parser):
parser.addoption('--hex_file', type=str, action='append', default=[],
help='Memory image in HEX format')
parser.addoption('--all', action='store_true', default=False, help='Run all RV32 tests')
parser.addoption('--vcd', action='store_true', default=False, help='Generate VCD files')
def pytest_generate_tests(metafunc):
if 'hex_file' in metafunc.fixturenames:
if metafunc.config.option.all:
list_hex = glob.glob("Simulation/tests/rv32mi-p-*.hex")
list_hex = list_hex + glob.glob("Simulation/tests/rv32ui-*.hex")
metafunc.parametrize('hex_file', list_hex)
else:
metafunc.parametrize('hex_file', metafunc.config.option.hex_file)
if 'vcd' in metafunc.fixturenames:
metafunc.parametrize('vcd', [metafunc.config.option.vcd])
# Local Variables:
# flycheck-flake8-maximum-line-length: 120
# flycheck-flake8rc: ".flake8rc"
# End:
|
#!/usr/bin/env python
# Copyright (c) 2015 Angel Terrones (<[email protected]>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import glob
def auto_int(value):
return int(value, 0)
def pytest_addoption(parser):
parser.addoption('--hex_file', type=str, action='append', default=[],
help='Memory image in HEX format')
parser.addoption('--all', action='store_true', default=False, help='Run all RV32 tests')
parser.addoption('--vcd', action='store_true', default=False, help='Generate VCD files')
def pytest_generate_tests(metafunc):
if 'hex_file' in metafunc.fixturenames:
if metafunc.config.option.all:
list_hex = glob.glob("Simulation/tests/rv32ui-*.hex")
metafunc.parametrize('hex_file', list_hex)
else:
metafunc.parametrize('hex_file', metafunc.config.option.hex_file)
if 'vcd' in metafunc.fixturenames:
metafunc.parametrize('vcd', [metafunc.config.option.vcd])
# Local Variables:
# flycheck-flake8-maximum-line-length: 120
# flycheck-flake8rc: ".flake8rc"
# End:
|
mit
|
Python
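A hedged sketch of driving the options this conftest defines, via pytest's Python entry point; the hex image path below is a placeholder, not a file shipped with the row:
import pytest
# Run one memory image with VCD dumps enabled (--hex_file uses action='append', so it may repeat).
pytest.main(['Simulation/core', '--hex_file', 'Simulation/tests/rv32mi-p-csr.hex', '--vcd'])
# Or let the conftest glob every rv32mi-p-* and rv32ui-* image:
pytest.main(['Simulation/core', '--all'])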
|
b193f9ccabb1093db8a803f7994adb14a85caf5a
|
Update __init__.py
|
KerkhoffTechnologies/django-connectwise,KerkhoffTechnologies/django-connectwise
|
djconnectwise/__init__.py
|
djconnectwise/__init__.py
|
# -*- coding: utf-8 -*-
VERSION = (0, 3, 32, 'final')
# pragma: no cover
if VERSION[-1] != "final":
__version__ = '.'.join(map(str, VERSION))
else:
# pragma: no cover
__version__ = '.'.join(map(str, VERSION[:-1]))
default_app_config = 'djconnectwise.apps.DjangoConnectwiseConfig'
|
# -*- coding: utf-8 -*-
VERSION = (0, 3, 31, 'final')
# pragma: no cover
if VERSION[-1] != "final":
__version__ = '.'.join(map(str, VERSION))
else:
# pragma: no cover
__version__ = '.'.join(map(str, VERSION[:-1]))
default_app_config = 'djconnectwise.apps.DjangoConnectwiseConfig'
|
mit
|
Python
|
b8e1d2419a1dbe065e1828599e60867bc845f0e3
|
Add some docs to nm root package
|
bastings/neuralmonkey,ufal/neuralmonkey,bastings/neuralmonkey,bastings/neuralmonkey,ufal/neuralmonkey,juliakreutzer/bandit-neuralmonkey,ufal/neuralmonkey,bastings/neuralmonkey,ufal/neuralmonkey,juliakreutzer/bandit-neuralmonkey,juliakreutzer/bandit-neuralmonkey,bastings/neuralmonkey,juliakreutzer/bandit-neuralmonkey,juliakreutzer/bandit-neuralmonkey,ufal/neuralmonkey
|
neuralmonkey/__init__.py
|
neuralmonkey/__init__.py
|
"""The neuralmonkey package is the root package of this project."""
|
bsd-3-clause
|
Python
|
|
aa741b5a2b18a7df402325b53476eba36e448b40
|
Update to 0.0.49
|
KerkhoffTechnologies/django-connectwise,KerkhoffTechnologies/django-connectwise,AparatTechnologies/django-connectwise
|
djconnectwise/__init__.py
|
djconnectwise/__init__.py
|
# -*- coding: utf-8 -*-
VERSION = (0, 0, 49, 'alpha')
# pragma: no cover
if VERSION[-1] != "final":
__version__ = '.'.join(map(str, VERSION))
else:
# pragma: no cover
__version__ = '.'.join(map(str, VERSION[:-1]))
|
# -*- coding: utf-8 -*-
VERSION = (0, 0, 48, 'alpha')
# pragma: no cover
if VERSION[-1] != "final":
__version__ = '.'.join(map(str, VERSION))
else:
# pragma: no cover
__version__ = '.'.join(map(str, VERSION[:-1]))
|
mit
|
Python
|
95f09bc7d61d6ea0a1228229a5092e2bff889855
|
make website_multi_company_demo hidden
|
it-projects-llc/website-addons,it-projects-llc/website-addons,it-projects-llc/website-addons
|
website_multi_company_demo/__manifest__.py
|
website_multi_company_demo/__manifest__.py
|
# -*- coding: utf-8 -*-
{
"name": """Demo Data for \"Real Multi Website\"""",
"summary": """Provides demo websites""",
"category": "Hidden",
# "live_test_URL": "",
"images": [],
"version": "1.0.0",
"application": False,
"author": "IT-Projects LLC, Ivan Yelizariev",
"support": "[email protected]",
"website": "https://it-projects.info",
"license": "LGPL-3",
# "price": 9.00,
# "currency": "EUR",
"depends": [
"website_multi_company",
"website_sale",
"theme_bootswatch",
],
"external_dependencies": {"python": [], "bin": []},
"data": [
],
"qweb": [
],
"demo": [
"demo/res.company.csv",
"demo/website.csv",
"demo/product.template.csv",
"demo/ir.ui.view.csv",
"demo/website.menu.csv",
"demo/website_templates.xml",
],
"post_load": None,
"pre_init_hook": None,
"post_init_hook": None,
"auto_install": False,
"installable": True,
}
|
# -*- coding: utf-8 -*-
{
"name": """Demo Data for \"Real Multi Website\"""",
"summary": """Provides demo websites""",
"category": "eCommerce",
# "live_test_URL": "",
"images": [],
"version": "1.0.0",
"application": False,
"author": "IT-Projects LLC, Ivan Yelizariev",
"support": "[email protected]",
"website": "https://it-projects.info",
"license": "LGPL-3",
# "price": 9.00,
# "currency": "EUR",
"depends": [
"website_multi_company",
"website_sale",
"theme_bootswatch",
],
"external_dependencies": {"python": [], "bin": []},
"data": [
],
"qweb": [
],
"demo": [
"demo/res.company.csv",
"demo/website.csv",
"demo/product.template.csv",
"demo/ir.ui.view.csv",
"demo/website.menu.csv",
"demo/website_templates.xml",
],
"post_load": None,
"pre_init_hook": None,
"post_init_hook": None,
"auto_install": False,
"installable": True,
}
|
mit
|
Python
|
a8333a5c3e9c6b07df2b04782c9e0cc3c4b6e60c
|
Bump Version
|
nluedtke/brochat-bot
|
common.py
|
common.py
|
VERSION_YEAR = 2017
VERSION_MONTH = 10
VERSION_DAY = 5
VERSION_REV = 0
whos_in = None
twitter = None
users = {}
twilio_client = None
ARGS = {}
smmry_api_key = None
# Variable hold trumps last tweet id
last_id = 0
trump_chance_roll_rdy = False
# Runtime stats
duels_conducted = 0
items_awarded = 0
trump_tweets_seen = 0
# Shot_duel acceptance and active
accepted = False
shot_duel_running = False
vict_name = ""
# News handles to pull from
news_handles = ['mashable', 'cnnbrk', 'whitehouse', 'cnn', 'nytimes',
'foxnews', 'reuters', 'npr', 'usatoday', 'cbsnews',
'abc', 'washingtonpost', 'msnbc', 'ap', 'aphealthscience',
'lifehacker', 'cnnnewsroom', 'theonion']
# Delays for Newsfeed and Check_trump, These are in minutes
# remember that news_del is fuzzed + (0-10)
trump_del = 15
news_del = 55
# Location of db.json and tokens.config
data_dir = "/data"
# Create/Load Local Database
db_file = '{}/db.json'.format(data_dir)
db = {}
# Global toggle for news feed
NEWS_FEED_ON = False
NEWS_FEED_CREATED = False
async def trigger_social(ctx):
"""Triggers a social """
for m in ctx.bot.get_all_members():
if m.display_name != 'brochat-bot' and m.status == 'online':
add_drink(m.display_name)
glass = ":tumbler_glass:"
await ctx.bot.say("Ah shit that's three in a row! ITS A SOCIAL! SHOTS! "
"SHOTS! SHOTS!\n{}{}{}".format(glass, glass, glass))
def add_drink(user):
"""
Adds a drink for the user.
:param user: users display name
:return:
"""
if user not in users:
users[user] = {}
if "drinks_owed" in users[user]:
users[user]['drinks_owed'] += 1
else:
users[user]['drinks_owed'] = 1
return users[user]['drinks_owed']
|
VERSION_YEAR = 2017
VERSION_MONTH = 10
VERSION_DAY = 2
VERSION_REV = 1
whos_in = None
twitter = None
users = {}
twilio_client = None
ARGS = {}
smmry_api_key = None
# Variable hold trumps last tweet id
last_id = 0
trump_chance_roll_rdy = False
# Runtime stats
duels_conducted = 0
items_awarded = 0
trump_tweets_seen = 0
# Shot_duel acceptance and active
accepted = False
shot_duel_running = False
vict_name = ""
# News handles to pull from
news_handles = ['mashable', 'cnnbrk', 'whitehouse', 'cnn', 'nytimes',
'foxnews', 'reuters', 'npr', 'usatoday', 'cbsnews',
'abc', 'washingtonpost', 'msnbc', 'ap', 'aphealthscience',
'lifehacker', 'cnnnewsroom', 'theonion']
# Delays for Newsfeed and Check_trump, These are in minutes
# remember that news_del is fuzzed + (0-10)
trump_del = 15
news_del = 55
# Location of db.json and tokens.config
data_dir = "/data"
# Create/Load Local Database
db_file = '{}/db.json'.format(data_dir)
db = {}
# Global toggle for news feed
NEWS_FEED_ON = False
NEWS_FEED_CREATED = False
async def trigger_social(ctx):
"""Triggers a social """
for m in ctx.bot.get_all_members():
if m.display_name != 'brochat-bot' and m.status == 'online':
add_drink(m.display_name)
glass = ":tumbler_glass:"
await ctx.bot.say("Ah shit that's three in a row! ITS A SOCIAL! SHOTS! "
"SHOTS! SHOTS!\n{}{}{}".format(glass, glass, glass))
def add_drink(user):
"""
Adds a drink for the user.
:param user: users display name
:return:
"""
if user not in users:
users[user] = {}
if "drinks_owed" in users[user]:
users[user]['drinks_owed'] += 1
else:
users[user]['drinks_owed'] = 1
return users[user]['drinks_owed']
|
mit
|
Python
|
7047816b5edc7911685219d53970c892728d0220
|
add os to config
|
pcd1193182/hauler-tool,pcd1193182/hauler-tool,pcd1193182/hauler-tool
|
config.py
|
config.py
|
# -*- encoding: utf-8 -*-
import datetime
import os
# -----------------------------------------------------
# Application configurations
# ------------------------------------------------------
DEBUG = True
SECRET_KEY = os.environ['SECRET_KEY']
PORT = os.environ['PORT']
HOST = os.environ['HOST']
# -----------------------------------------------------
# SQL Alchemy configs
# -----------------------------------------------------
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
# -----------------------------------------------------
# ESI Configs
# -----------------------------------------------------
ESI_DATASOURCE = 'tranquility' # Change it to 'singularity' to use the test server
ESI_SWAGGER_JSON = 'https://esi.tech.ccp.is/latest/swagger.json?datasource=%s' % ESI_DATASOURCE
ESI_SECRET_KEY = os.environ['ESI_SECRET_KEY'] # your secret key
ESI_CLIENT_ID = os.environ['ESI_CLIENT_ID'] # your client ID
ESI_CALLBACK = 'http://%s:%d/sso/callback' % (HOST, PORT) # the callback URI you gave CCP
ESI_USER_AGENT = 'hauler-packing-tool'
# ------------------------------------------------------
# Session settings for flask login
# ------------------------------------------------------
PERMANENT_SESSION_LIFETIME = datetime.timedelta(days=30)
# ------------------------------------------------------
# DO NOT EDIT
# Fix warnings from flask-sqlalchemy / others
# ------------------------------------------------------
SQLALCHEMY_TRACK_MODIFICATIONS = True
|
# -*- encoding: utf-8 -*-
import datetime
# -----------------------------------------------------
# Application configurations
# ------------------------------------------------------
DEBUG = True
SECRET_KEY = os.environ['SECRET_KEY']
PORT = os.environ['PORT']
HOST = os.environ['HOST']
# -----------------------------------------------------
# SQL Alchemy configs
# -----------------------------------------------------
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
# -----------------------------------------------------
# ESI Configs
# -----------------------------------------------------
ESI_DATASOURCE = 'tranquility' # Change it to 'singularity' to use the test server
ESI_SWAGGER_JSON = 'https://esi.tech.ccp.is/latest/swagger.json?datasource=%s' % ESI_DATASOURCE
ESI_SECRET_KEY = os.environ['ESI_SECRET_KEY'] # your secret key
ESI_CLIENT_ID = os.environ['ESI_CLIENT_ID'] # your client ID
ESI_CALLBACK = 'http://%s:%d/sso/callback' % (HOST, PORT) # the callback URI you gave CCP
ESI_USER_AGENT = 'hauler-packing-tool'
# ------------------------------------------------------
# Session settings for flask login
# ------------------------------------------------------
PERMANENT_SESSION_LIFETIME = datetime.timedelta(days=30)
# ------------------------------------------------------
# DO NOT EDIT
# Fix warnings from flask-sqlalchemy / others
# ------------------------------------------------------
SQLALCHEMY_TRACK_MODIFICATIONS = True
|
mit
|
Python
|
5560a37e222f8bf8aade08c17d1a4edb1b821d8a
|
Advance version number 0.9.1
|
TRUFA-rnaseq/trufa-web,TRUFA-rnaseq/trufa-web,TRUFA-rnaseq/trufa-web,TRUFA-rnaseq/trufa-web
|
config.py
|
config.py
|
PROJECT_NAME = "trufa"
VERSION = "0.9.1"
REMOTEHOST = "[email protected]"
# for testing
REMOTEHOME = "testing"
DATADIR = "testing"
# for stable
#REMOTEHOME = "users"
#DATADIR = "/gpfs/res_projects/cvcv/webserver/users/"
# for testing
PIPE_LAUNCH = "../server_side/pipe_launcher.py"
# for stable
#PIPE_LAUNCH = "../../server_side/stable/pipe_launcher.py"
## Database Configurations
DB_RESET = True
DB_DATABASE = 'database.db'
DB_PASSFILE = 'htpasswd.db'
USELOGFILE = False
LOGFILE = "trufa.log"
#LOGFILE = "/var/genorama/log/trufa.log"
LOGFILEBYTES = 500*1024
USEWLOGFILE = True
WLOGFILE = "trufa_web.log"
#WLOGFILE = "/var/genorama/log/trufa_web.log"
import logging
logging.getLogger().setLevel( logging.DEBUG )
|
PROJECT_NAME = "trufa"
VERSION = "0.9.0"
REMOTEHOST = "[email protected]"
# for testing
REMOTEHOME = "testing"
DATADIR = "testing"
# for stable
#REMOTEHOME = "users"
#DATADIR = "/gpfs/res_projects/cvcv/webserver/users/"
# for testing
PIPE_LAUNCH = "../server_side/pipe_launcher.py"
# for stable
#PIPE_LAUNCH = "../../server_side/stable/pipe_launcher.py"
## Database Configurations
DB_RESET = True
DB_DATABASE = 'database.db'
DB_PASSFILE = 'htpasswd.db'
USELOGFILE = False
LOGFILE = "trufa.log"
#LOGFILE = "/var/genorama/log/trufa.log"
LOGFILEBYTES = 500*1024
USEWLOGFILE = True
WLOGFILE = "trufa_web.log"
#WLOGFILE = "/var/genorama/log/trufa_web.log"
import logging
logging.getLogger().setLevel( logging.DEBUG )
|
bsd-3-clause
|
Python
|
9a09b6fdcd26fbacfa73574835da1fe27a8760f6
|
Add separate config for preview.
|
LandRegistry/lc-alpha-b2b-processor,LandRegistry/lc-automatic-process
|
config.py
|
config.py
|
class Config(object):
DEBUG = False
class DevelopmentConfig(Config):
DEBUG = True
RULES_ENGINE_URL = "http://localhost:5005"
BANKRUPTCY_DATABASE_API = "http://localhost:5004"
CASEWORK_DATABASE_API = "http://localhost:5006"
class PreviewConfig(Config):
RULES_ENGINE_URL = "http://localhost:5005"
BANKRUPTCY_DATABASE_API = "http://localhost:5004"
CASEWORK_DATABASE_API = "http://localhost:5006"
|
class Config(object):
DEBUG = False
class DevelopmentConfig(object):
DEBUG = True
RULES_ENGINE_URL = "http://localhost:5005"
BANKRUPTCY_DATABASE_API = "http://localhost:5004"
CASEWORK_DATABASE_API = "http://localhost:5006"
|
mit
|
Python
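A sketch of how the new PreviewConfig might be selected at startup; this assumes a Flask application object, which the repo name suggests but this row does not show:
from flask import Flask
app = Flask(__name__)
# DevelopmentConfig and PreviewConfig now both inherit from the Config base class.
app.config.from_object('config.PreviewConfig')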
|
7f13b29cc918f63c4d1fc24717c0a0b5d2f5f8ad
|
Fix problem with array values.
|
jcsharp/DriveIt
|
filter.py
|
filter.py
|
import numpy as np
class LowPassFilter(object):
'''
First order discrete IIR filter.
'''
def __init__(self, feedback_gain, initial_value=0.0):
self.feedback_gain = np.ones_like(initial_value) * feedback_gain
self.initial_value = initial_value
self.output_gain = 1.0 - feedback_gain
self.input = np.nan
self.output = initial_value
self.feedback_value = initial_value / self.output_gain
def filter(self, value):
        #if not math.isnan(value) and not math.isinf(value):
self.input = value
self.feedback_value = value + self.feedback_gain * self.feedback_value
self.output = self.output_gain * self.feedback_value
return self.output
class MovingAverage(object):
'''
Moving average filter.
'''
def __init__(self, lifetime, sampling_time):
self.lifetime = lifetime
self.sampling_time = sampling_time
self.exp = np.exp(-sampling_time / lifetime)
self.last_value = None
self.mean_value = None
def filter(self, value):
self.last_value = value
if self.mean_value is None:
self.mean_value = value
else:
self.mean_value = value + self.exp * (self.mean_value - value)
return self.mean_value
|
import numpy as np
class LowPassFilter(object):
'''
First order discrete IIR filter.
'''
def __init__(self, feedback_gain, initial_value=0.0):
self.feedback_gain = np.ones_like(initial_value) * feedback_gain
self.initial_value = initial_value
self.output_gain = 1.0 - feedback_gain
self.input = np.nan
self.output = initial_value
self.feedback_value = initial_value / self.output_gain
def filter(self, value):
        #if not math.isnan(value) and not math.isinf(value):
self.input = value
self.feedback_value = value + self.feedback_gain * self.feedback_value
self.output = self.output_gain * self.feedback_value
return self.output
class MovingAverage(object):
'''
Moving average filter.
'''
def __init__(self, lifetime, sampling_time):
self.lifetime = lifetime
self.sampling_time = sampling_time
self.exp = np.exp(-sampling_time / lifetime)
self.last_value = np.nan
self.mean_value = np.nan
def filter(self, value):
self.last_value = value
if np.isnan(self.mean_value):
self.mean_value = value
else:
self.mean_value = value + self.exp * (self.mean_value - value)
return self.mean_value
|
mit
|
Python
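A small sketch of the array case this commit fixes, assuming the module imports as filter (the file is a top-level filter.py): the old np.isnan(self.mean_value) guard raised "truth value of an array is ambiguous" once mean_value held an array, while the None check accepts scalars and arrays alike.
import numpy as np
from filter import MovingAverage
ma = MovingAverage(lifetime=1.0, sampling_time=0.1)
print(ma.filter(np.array([1.0, 2.0])))  # first sample seeds the mean with the value
print(ma.filter(np.array([2.0, 4.0])))  # elementwise exponential moving average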
|
9378ee0d414321bd557b478ffb6725ee899bc9b0
|
simplify code and add comment
|
CaptainDesAstres/Simple-Blender-Render-Manager
|
TaskList/FileInfo/FileInfo.py
|
TaskList/FileInfo/FileInfo.py
|
#!/usr/bin/python3.4
# -*-coding:Utf-8 -*
'''module to manage blender file info'''
import xml.etree.ElementTree as xmlMod
from TaskList.FileInfo.Scene import *
from usefullFunctions import XML
import os
class FileInfo:
'''class to manage blender file info'''
def __init__(self, xml):
'''initialize blender file info with default settings or saved settings'''
self.fromXml(xml)
def fromXml(self, xml):
        '''initialize blender file info with saved settings'''
self.active = XML.decode(xml.get('active'))
self.scenes = {}
for scene in xml.findall('scene'):
self.scenes[scene.get('name')] = Scene(scene)
def toXml(self):
'''export blender file info into xml syntaxed string'''
xml = ' <fileInfo active="'+XML.encode(self.active)+'">\n'
for scene in self.scenes.values():
xml += scene.toXml()
xml += ' </fileInfo>\n'
return xml
def sceneChoice(self, log):
'''choose between render the active scene or all the scene'''
scenes = len(self.scenes)
# can't add empty task file
if scenes == 0:
log.error(' no scene in this file… Abort')
return None
# no need to choose if there is only one scene in the file
if scenes == 1:
log.write(' Only one scene to render in file.')
return True
# get user choice
log.menuIn('Scene Choice')
while True:
choice = input('there is '+str(scenes)+''' scenes in this file. Do you want to:
1- Render all scenes
2- Render active scene «'''+self.active+'''»
0- Abort''').strip().lower()
# quit and abort task adding
if choice in [ '', 'q', '0' ]:
log.menuOut()
log.write(' Abort task adding')
return None
# quit and render all scene
if choice == '1':
log.menuOut()
log.write(' Set to render all task scene')
return True
# quit and render only active scene
if choice == '2':
log.menuOut()
log.write(' Set to render task active scene only')
return False
            log.error('invalid choice')
|
#!/usr/bin/python3.4
# -*-coding:Utf-8 -*
'''module to manage blender file info'''
import xml.etree.ElementTree as xmlMod
from TaskList.FileInfo.Scene import *
from usefullFunctions import XML
import os
class FileInfo:
'''class to manage blender file info'''
def __init__(self, xml):
'''initialize blender file info with default settings or saved settings'''
self.fromXml(xml)
def fromXml(self, xml):
        '''initialize blender file info with saved settings'''
self.active = XML.decode(xml.get('active'))
self.scenes = {}
for scene in xml.findall('scene'):
self.scenes[scene.get('name')] = Scene(scene)
def toXml(self):
'''export blender file info into xml syntaxed string'''
xml = ' <fileInfo active="'+XML.encode(self.active)+'">\n'
for scene in self.scenes.values():
xml += scene.toXml()
xml += ' </fileInfo>\n'
return xml
def sceneChoice(self, log):
'''choose between render the active scene or all the scene'''
scenes = len(self.scenes)
if scenes == 0:
log.error(' no scene in this file… Abort')
return None
if scenes == 1:
log.write(' Only one scene in file. All scene will be rendered.')
return True
log.menuIn('Scene Choice')
while True:
choice = input('there is '+str(scenes)+''' scenes in this file. Do you want to:
1- Render all scenes
2- Render active scene «'''+self.active+'''»
0- Abort''').strip().lower()
if choice in [ '', 'q', '0' ]:
log.menuOut()
log.write(' Abort task adding')
return None
elif choice == '1':
log.menuOut()
log.write(' Set to render all task scene')
return True
elif choice == '2':
log.menuOut()
log.write(' Set to render task active scene only')
return False
else:
                log.error('invalid choice')
|
mit
|
Python
|
b0f25b7263a42fbd1e90cf7ebe3dcba50f9cfe42
|
use the correct class name
|
sassoftware/amiconfig,sassoftware/amiconfig
|
amiconfig/plugins/rmakeserver.py
|
amiconfig/plugins/rmakeserver.py
|
#
# Copyright (c) 2008 rPath, Inc.
#
import os
from rmakeplugin import rMakePlugin
class AMIConfigPlugin(rMakePlugin):
name = 'rmakeserver'
def pluginMethod(self):
self._setupProxy()
self._setuprBuilder()
self._setupRepoUrl()
def _setupProxy(self):
proxycfg = '/etc/rmake/server.d/proxy'
if 'conaryproxy' in self.rmakecfg:
proxy = self.rmakecfg['conaryproxy']
else:
host = self.id.getLocalHostname()
fh = open(proxycfg, 'w')
fh.write('proxy http://%s:7778/\n' % host)
def _setuprBuilder(self):
rbuildercfg = '/etc/rmake/server.d/rbuilder'
if 'rbuilderurl' in self.rmakecfg:
fh = open(rbuildercfg, 'w')
fh.write('rbuilderUrl %s\n' % self.rmakecfg['rbuilderurl'])
def _setupRepoUrl(self):
repourlcfg = '/etc/rmake/server.d/serverurl'
if 'serverurl' in self.rmakecfg:
url = self.rmakecfg['serverurl']
else:
url = 'http://%s/conary/' % self.id.getLocalHostname()
fh = open(repourlcfg, 'w')
fh.write('serverUrl %s\n' % url)
|
#
# Copyright (c) 2008 rPath, Inc.
#
import os
from rmakeplugin import rMakePlugin
class rMakeServer(rMakePlugin):
name = 'rmakeserver'
def pluginMethod(self):
self._setupProxy()
self._setuprBuilder()
self._setupRepoUrl()
def _setupProxy(self):
proxycfg = '/etc/rmake/server.d/proxy'
if 'conaryproxy' in self.rmakecfg:
proxy = self.rmakecfg['conaryproxy']
else:
host = self.id.getLocalHostname()
fh = open(proxycfg, 'w')
fh.write('proxy http://%s:7778/\n' % host)
def _setuprBuilder(self):
rbuildercfg = '/etc/rmake/server.d/rbuilder'
if 'rbuilderurl' in self.rmakecfg:
fh = open(rbuildercfg, 'w')
fh.write('rbuilderUrl %s\n' % self.rmakecfg['rbuilderurl'])
def _setupRepoUrl(self):
repourlcfg = '/etc/rmake/server.d/serverurl'
if 'serverurl' in self.rmakecfg:
url = self.rmakecfg['serverurl']
else:
url = 'http://%s/conary/' % self.id.getLocalHostname()
fh = open(repourlcfg, 'w')
fh.write('serverUrl %s\n' % url)
|
apache-2.0
|
Python
|
ba31be554d3cc4fd51b7189434071596143b686c
|
add audio.load.readrecf
|
NickleDave/hybrid-vocal-classifier
|
hvc/audio/load.py
|
hvc/audio/load.py
|
import numpy as np
def read_cbin(filename):
"""
loads .cbin files output by EvTAF
"""
data = np.fromfile(filename,dtype=">d") # ">d" means big endian, double
return data
def readrecf(filename):
"""
reads .rec files output by EvTAF
"""
rec_dict = {}
with open(filename,'r') as recfile:
line_tmp = ""
while 1:
if line_tmp == "":
line = recfile.readline()
else:
                line = line_tmp
line_tmp = ""
if line == "": # if End Of File
break
elif line == "\n": # if blank line
continue
elif "Catch" in line:
ind = line.find('=')
rec_dict['iscatch'] = line[ind+1:]
elif "Chans" in line:
ind = line.find('=')
rec_dict['num_channels'] = int(line[ind+1:])
elif "ADFREQ" in line:
ind = line.find('=')
rec_dict['sample_freq'] = int(line[ind+1:])
elif "Samples" in line:
ind = line.find('=')
rec_dict['num_samples'] = int(line[ind+1:])
elif "T after" in line:
ind = line.find('=')
rec_dict['time_after'] = float(line[ind+1:])
elif "T Before" in line:
ind = line.find('=')
                rec_dict['time_before'] = float(line[ind+1:])
elif "Output Sound File" in line:
ind = line.find('=')
rec_dict['outfile'] = int(line[ind+1:])
elif "thresholds" in line:
th_list = []
while 1:
                    line = recfile.readline()
if line == "":
break
try:
                        th_list.append(float(line))
except ValueError: # because we reached next section
line_tmp = line
break
rec_dict['thresholds'] = th_list
if line = "":
break
elif "feedback information" in line:
fb_dict = {}
while 1:
line = recfile.readline()
if line = "":
break
elif line = "\n":
continue
ind = line.find("msec")
time = float(line[:ind-1])
ind = line.find(":")
fb_type = line[ind+2:]
fb_dict[time] = fb_type
rec_dict['feedback_info'] = fb_dict
if line = "":
break
elif "trigger times" in line:
pass
elif "file created" in line:
pass
return rec_dict
|
import numpy as np
def read_cbin(filename):
"""
loads .cbin files output by EvTAF
"""
data = np.fromfile(filename,dtype=">d") # ">d" means big endian, double
return data
def readrecf(filename):
"""
reads .rec files output by EvTAF
"""
|
bsd-3-clause
|
Python
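A hedged usage sketch for the new readrecf; the filename is a placeholder and the import path is assumed from hvc/audio/load.py:
from hvc.audio import load
rec = load.readrecf('some_song.rec')  # parse an EvTAF .rec sidecar file into a dict
print(rec['sample_freq'], rec['num_samples'])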
|
4b46c07b795e3e16c16a8897ac42a0755e88c213
|
Use trial logging.
|
lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment
|
analysis/sanity-count-markers.py
|
analysis/sanity-count-markers.py
|
#!/usr/bin/env python
import climate
import collections
import joblib
import lmj.cubes
import lmj.plot
import numpy as np
logging = climate.get_logger('count')
def count(trial):
trial.load()
trial.mask_dropouts()
total = len(trial.df)
markers = {m: trial.df[m + '-c'].count() / total for m in trial.marker_columns}
full = len(trial.df[[m + '-c' for m in markers]].dropna(axis=0))
trial.log('%d rows, %d full (%.1f%%)', total, full, 100 * full / total)
return markers
PERCENTILES = [1, 2, 5, 10, 20, 50, 80, 90, 95, 98, 99]
def main(root):
trials = lmj.cubes.Experiment(root).trials_matching('*')
counts = collections.defaultdict(int)
percents = collections.defaultdict(list)
f = joblib.delayed(count)
for markers in joblib.Parallel(-1)(f(t) for t in trials):
for m in markers:
counts[m] += markers[m] > 0.1
percents[m].append(markers[m])
for m, c in counts.items():
print(m, c, *np.percentile(percents[m], PERCENTILES), sep='\t')
return
with lmj.plot.axes(spines=True) as ax:
for m, values in percents.items():
ax.hist(values, bins=np.linspace(0, 1, 127), alpha=0.5, lw=0, label=m[9:])
ax.legend(ncol=3, loc=0)
if __name__ == '__main__':
climate.call(main)
|
#!/usr/bin/env python
import climate
import collections
import joblib
import lmj.cubes
import lmj.plot
import numpy as np
logging = climate.get_logger('count')
def count(trial):
trial.load()
trial.mask_dropouts()
total = len(trial.df)
markers = {m: trial.df[m + '-c'].count() / total for m in trial.marker_columns}
full = len(trial.df[[m + '-c' for m in markers]].dropna(axis=0))
logging.info('%s %s %s: %d rows, %d full (%.1f%%)',
trial.subject.key, trial.block.key, trial.key,
total, full, 100 * full / total)
return markers
PERCENTILES = [1, 2, 5, 10, 20, 50, 80, 90, 95, 98, 99]
def main(root):
trials = lmj.cubes.Experiment(root).trials_matching('*')
counts = collections.defaultdict(int)
percents = collections.defaultdict(list)
f = joblib.delayed(count)
for markers in joblib.Parallel(-1)(f(t) for t in trials):
for m in markers:
counts[m] += markers[m] > 0.1
percents[m].append(markers[m])
for m, c in counts.items():
print(m, c, *np.percentile(percents[m], PERCENTILES), sep='\t')
return
with lmj.plot.axes(spines=True) as ax:
for m, values in percents.items():
ax.hist(values, bins=np.linspace(0, 1, 127), alpha=0.5, lw=0, label=m[9:])
ax.legend(ncol=3, loc=0)
if __name__ == '__main__':
climate.call(main)
|
mit
|
Python
|
be291475601657cbcd3903679c77c2860b543308
|
fix doc
|
deepchem/deepchem,deepchem/deepchem,peastman/deepchem,peastman/deepchem
|
deepchem/feat/tests/test_dummy_featurizer.py
|
deepchem/feat/tests/test_dummy_featurizer.py
|
import unittest
import deepchem as dc
import numpy as np
class TestDummyFeaturizer(unittest.TestCase):
"""
Test for DummyFeaturizer.
"""
def test_featurize(self):
"""
Test the featurize method on an array of inputs.
"""
input_array = np.array([[
"N#C[S-].O=C(CBr)c1ccc(C(F)(F)F)cc1>CCO.[K+]",
"N#CSCC(=O)c1ccc(C(F)(F)F)cc1"
], [
"C1COCCN1.FCC(Br)c1cccc(Br)n1>CCN(C(C)C)C(C)C.CN(C)C=O.O",
"FCC(c1cccc(Br)n1)N1CCOCC1"
]])
featurizer = dc.feat.DummyFeaturizer()
out = featurizer.featurize(input_array)
assert (type(out) == np.ndarray)
assert (out.shape == input_array.shape)
|
import unittest
import deepchem as dc
import numpy as np
class TestDummyFeaturizer(unittest.TestCase):
"""
Test for DummyFeaturizer.
"""
def test_featurize(self):
"""
Test the featurize method on a list of inputs.
"""
input_array = np.array([[
"N#C[S-].O=C(CBr)c1ccc(C(F)(F)F)cc1>CCO.[K+]",
"N#CSCC(=O)c1ccc(C(F)(F)F)cc1"
], [
"C1COCCN1.FCC(Br)c1cccc(Br)n1>CCN(C(C)C)C(C)C.CN(C)C=O.O",
"FCC(c1cccc(Br)n1)N1CCOCC1"
]])
featurizer = dc.feat.DummyFeaturizer()
out = featurizer.featurize(input_array)
assert (type(out) == np.ndarray)
assert (out.shape == input_array.shape)
|
mit
|
Python
|
c8a97a33449eedc110169cb9b3f0120124d95e49
|
Add tiny test for ToPickle (#6021)
|
dask/distributed,dask/distributed,dask/distributed,dask/distributed
|
distributed/protocol/tests/test_to_pickle.py
|
distributed/protocol/tests/test_to_pickle.py
|
from typing import Dict
import dask.config
from dask.highlevelgraph import HighLevelGraph, MaterializedLayer
from distributed.client import Client
from distributed.protocol import dumps, loads
from distributed.protocol.serialize import ToPickle
from distributed.utils_test import gen_cluster
def test_ToPickle():
class Foo:
def __init__(self, data):
self.data = data
msg = {"x": ToPickle(Foo(123))}
frames = dumps(msg)
out = loads(frames)
assert out["x"].data == 123
class NonMsgPackSerializableLayer(MaterializedLayer):
"""Layer that uses non-msgpack-serializable data"""
def __dask_distributed_pack__(self, *args, **kwargs):
ret = super().__dask_distributed_pack__(*args, **kwargs)
# Some info that contains a `list`, which msgpack will convert to
# a tuple if getting the chance.
ret["myinfo"] = ["myinfo"]
return ToPickle(ret)
@classmethod
def __dask_distributed_unpack__(cls, state, *args, **kwargs):
assert state["myinfo"] == ["myinfo"]
return super().__dask_distributed_unpack__(state, *args, **kwargs)
@gen_cluster(client=True)
async def test_non_msgpack_serializable_layer(c: Client, s, w1, w2):
with dask.config.set({"distributed.scheduler.allowed-imports": "test_to_pickle"}):
a = NonMsgPackSerializableLayer({"x": 42})
layers = {"a": a}
dependencies: Dict[str, set] = {"a": set()}
hg = HighLevelGraph(layers, dependencies)
res = await c.get(hg, "x", sync=False)
assert res == 42
|
from typing import Dict
import dask.config
from dask.highlevelgraph import HighLevelGraph, MaterializedLayer
from distributed.client import Client
from distributed.protocol.serialize import ToPickle
from distributed.utils_test import gen_cluster
class NonMsgPackSerializableLayer(MaterializedLayer):
"""Layer that uses non-msgpack-serializable data"""
def __dask_distributed_pack__(self, *args, **kwargs):
ret = super().__dask_distributed_pack__(*args, **kwargs)
# Some info that contains a `list`, which msgpack will convert to
# a tuple if getting the chance.
ret["myinfo"] = ["myinfo"]
return ToPickle(ret)
@classmethod
def __dask_distributed_unpack__(cls, state, *args, **kwargs):
assert state["myinfo"] == ["myinfo"]
return super().__dask_distributed_unpack__(state, *args, **kwargs)
@gen_cluster(client=True)
async def test_non_msgpack_serializable_layer(c: Client, s, w1, w2):
with dask.config.set({"distributed.scheduler.allowed-imports": "test_to_pickle"}):
a = NonMsgPackSerializableLayer({"x": 42})
layers = {"a": a}
dependencies: Dict[str, set] = {"a": set()}
hg = HighLevelGraph(layers, dependencies)
res = await c.get(hg, "x", sync=False)
assert res == 42
|
bsd-3-clause
|
Python
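A standalone sketch of the round-trip that the new test_ToPickle exercises, using the same dumps/loads the test itself imports:
from distributed.protocol import dumps, loads
from distributed.protocol.serialize import ToPickle
# ToPickle forces pickle serialization for one message value instead of msgpack.
frames = dumps({'x': ToPickle({'answer': 42})})
assert loads(frames)['x'] == {'answer': 42}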
|
288d02bccf08ff0498767aafca9bd37509213ec3
|
Update forms.py
|
carthage-college/django-djforms,carthage-college/django-djforms,carthagecollege/django-djforms,carthage-college/django-djforms,carthagecollege/django-djforms,carthagecollege/django-djforms,carthage-college/django-djforms,carthagecollege/django-djforms
|
djforms/communications/printrequest/forms.py
|
djforms/communications/printrequest/forms.py
|
# -*- coding: utf-8 -*-
from django import forms
from django.conf import settings
from localflavor.us.forms import USPhoneNumberField
from djforms.communications.printrequest.models import PrintRequest, FORMATS
class PrintRequestForm(forms.ModelForm):
phone = USPhoneNumberField(
label = "Phone number",
max_length=12,
required=True,
widget=forms.TextInput(attrs={'class': 'required phoneUS'})
)
print_format = forms.MultipleChoiceField(
label = "What is the format of your finished piece",
choices=FORMATS,
help_text="Check all that apply"
)
def clean(self):
cleaned_data = super(PrintRequestForm, self).clean()
is_mailing = cleaned_data.get("is_mailing")
who_mailing = cleaned_data.get("who_mailing")
how_mailing = cleaned_data.get("how_mailing")
speed_mailing = cleaned_data.get("speed_mailing")
print_format = cleaned_data.get("print_format")
print_format_other = cleaned_data.get("print_format_other")
if is_mailing == "Yes":
msg = "Required"
if who_mailing == "":
self._errors["who_mailing"] = self.error_class(["Required field."])
if how_mailing == "":
self._errors["how_mailing"] = self.error_class(["Required field."])
if speed_mailing == "":
self._errors["speed_mailing"] = self.error_class(["Required field."])
if print_format == "Other":
if print_format_other == "":
self._errors["print_format_other"] = self.error_class(["Required field."])
return cleaned_data
class Meta:
model = PrintRequest
widgets = {
'phone': forms.TextInput(attrs={
'placeholder': 'eg. 123-456-7890', 'class': 'phoneUS'
}),
}
exclude = (
'user','updated_by','date_created','date_updated'
)
|
# -*- coding: utf-8 -*-
from django import forms
from django.conf import settings
from localflavor.us.forms import USPhoneNumberField
from djforms.communications.printrequest.models import PrintRequest, FORMATS
class PrintRequestForm(forms.ModelForm):
phone = USPhoneNumberField(
label = "Phone number",
max_length=12,
required=True,
widget=forms.TextInput(attrs={'class': 'required phoneUS'})
)
print_format = forms.MultipleChoiceField(
label = "What is the format of your finished piece",
choices=FORMATS,
help_text="Check all that apply"
)
def clean(self):
cleaned_data = super(PrintRequestForm, self).clean()
is_mailing = cleaned_data.get("is_mailing")
who_mailing = cleaned_data.get("who_mailing")
how_mailing = cleaned_data.get("how_mailing")
speed_mailing = cleaned_data.get("speed_mailing")
if is_mailing == "Yes":
msg = "Required"
if who_mailing == "":
self._errors["who_mailing"] = self.error_class(["Required field."])
if how_mailing == "":
self._errors["how_mailing"] = self.error_class(["Required field."])
if speed_mailing == "":
self._errors["speed_mailing"] = self.error_class(["Required field."])
return cleaned_data
class Meta:
model = PrintRequest
widgets = {
'phone': forms.TextInput(attrs={
'placeholder': 'eg. 123-456-7890', 'class': 'phoneUS'
}),
}
exclude = (
'user','updated_by','date_created','date_updated'
)
|
unlicense
|
Python
|
1b13a929122c2bcb7e524b39183610ac3e57f191
|
Mark Show.upcoming as @staticmethod
|
Karspexet/Karspexet,Karspexet/Karspexet,Karspexet/Karspexet,Karspexet/Karspexet,Karspexet/Karspexet
|
karspexet/show/models.py
|
karspexet/show/models.py
|
from django.db import models
import datetime
class Production(models.Model):
name = models.CharField(max_length=100)
description = models.TextField(blank=True)
def __str__(self):
return self.name
class Show(models.Model):
production = models.ForeignKey(Production, on_delete=models.PROTECT)
date = models.DateTimeField()
venue = models.ForeignKey('venue.Venue', on_delete=models.PROTECT)
@staticmethod
def upcoming():
return Show.objects.filter(date__gte=datetime.date.today())
def date_string(self):
return self.date.strftime("%Y-%m-%d %H:%M")
def __str__(self):
return self.production.name + " " + self.date_string()
class Meta:
ordering = ('date',)
|
from django.db import models
import datetime
class Production(models.Model):
name = models.CharField(max_length=100)
description = models.TextField(blank=True)
def __str__(self):
return self.name
class Show(models.Model):
production = models.ForeignKey(Production, on_delete=models.PROTECT)
date = models.DateTimeField()
venue = models.ForeignKey('venue.Venue', on_delete=models.PROTECT)
def upcoming():
return Show.objects.filter(date__gte=datetime.date.today())
def date_string(self):
return self.date.strftime("%Y-%m-%d %H:%M")
def __str__(self):
return self.production.name + " " + self.date_string()
class Meta:
ordering = ('date',)
|
mit
|
Python
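A short sketch of what the decorator changes; this requires a configured Django project, so it is illustrative only:
from karspexet.show.models import Show
Show.upcoming()                    # worked before and after the change
Show.objects.first().upcoming()    # before: TypeError, upcoming() takes 0 positional arguments but 1 was given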
|
18282c34d3497934da6d1f9a36d3feb5bdf74297
|
Adjust sos-report tests for label change
|
Scribery/cockpit,stefwalter/cockpit,larskarlitski/cockpit,SotolitoLabs/cockpit,larskarlitski/cockpit,harishanand95/cockpit,moraleslazaro/cockpit,andreasn/cockpit,cockpit-project/cockpit,SotolitoLabs/cockpit,moraleslazaro/cockpit,moraleslazaro/cockpit,xhad/cockpit,SotolitoLabs/cockpit,andreasn/cockpit,mvollmer/cockpit,SotolitoLabs/cockpit,garrett/cockpit,garrett/cockpit,petervo/cockpit,SotolitoLabs/cockpit,larsu/cockpit,cockpituous/cockpit,xhad/cockpit,michalskrivanek/cockpit,xhad/cockpit,moraleslazaro/cockpit,stefwalter/cockpit,andreasn/cockpit,Scribery/cockpit,deryni/cockpit,stefwalter/cockpit,andreasn/cockpit,larskarlitski/cockpit,michalskrivanek/cockpit,harishanand95/cockpit,petervo/cockpit,deryni/cockpit,petervo/cockpit,martinpitt/cockpit,garrett/cockpit,mvollmer/cockpit,deryni/cockpit,larskarlitski/cockpit,moraleslazaro/cockpit,Scribery/cockpit,michalskrivanek/cockpit,larsu/cockpit,martinpitt/cockpit,garrett/cockpit,xhad/cockpit,andreasn/cockpit,Scribery/cockpit,petervo/cockpit,cockpituous/cockpit,mvollmer/cockpit,Scribery/cockpit,garrett/cockpit,cockpituous/cockpit,cockpituous/cockpit,harishanand95/cockpit,mareklibra/cockpit,petervo/cockpit,cockpit-project/cockpit,xhad/cockpit,michalskrivanek/cockpit,andreasn/cockpit,mvollmer/cockpit,cockpituous/cockpit,michalskrivanek/cockpit,harishanand95/cockpit,mvollmer/cockpit,petervo/cockpit,mareklibra/cockpit,larsu/cockpit,mareklibra/cockpit,deryni/cockpit,moraleslazaro/cockpit,larskarlitski/cockpit,larsu/cockpit,stefwalter/cockpit,stefwalter/cockpit,michalskrivanek/cockpit,larsu/cockpit,andreasn/cockpit,Scribery/cockpit,cockpituous/cockpit,harishanand95/cockpit,deryni/cockpit,mareklibra/cockpit,harishanand95/cockpit,petervo/cockpit,mareklibra/cockpit,stefwalter/cockpit,harishanand95/cockpit,xhad/cockpit,martinpitt/cockpit,cockpit-project/cockpit,mareklibra/cockpit,martinpitt/cockpit,larskarlitski/cockpit,SotolitoLabs/cockpit,deryni/cockpit,SotolitoLabs/cockpit,larsu/cockpit,cockpit-project/cockpit,moraleslazaro/cockpit,Scribery/cockpit,stefwalter/cockpit,martinpitt/cockpit,deryni/cockpit,larskarlitski/cockpit,xhad/cockpit,cockpit-project/cockpit,mareklibra/cockpit,michalskrivanek/cockpit
|
test/avocado/selenium-sosreport.py
|
test/avocado/selenium-sosreport.py
|
#!/usr/bin/python
# we need to be able to find and import seleniumlib, so add this directory
import os
import sys
machine_test_dir = os.path.dirname(os.path.abspath(__file__))
if not machine_test_dir in sys.path:
sys.path.insert(1, machine_test_dir)
from avocado import main
from avocado.utils import process
from seleniumlib import *
from timeoutlib import Retry
class SosReportingTab(SeleniumTest):
"""
:avocado: enable
"""
def test10SosReport(self):
self.login()
self.wait_id("sidebar")
self.click(self.wait_link('Diagnostic Report', cond=clickable))
self.wait_frame("sosreport")
self.wait_text("This tool will collect system configuration and diagnostic")
self.click(self.wait_xpath('//button[@data-target="#sos"]', cond=clickable))
self.wait_id("sos")
self.wait_text("Generating report")
@Retry(attempts = 10, timeout = 3, error = Exception('Timeout: sosreport did not start'))
def waitforsosreportstarted():
process.run("pgrep sosreport", shell=True)
waitforsosreportstarted()
        # duration of report generation depends on the target system - as long as sosreport is active, we don't want to timeout
# it is also important to call some selenium method there to ensure that connection to HUB will not be lost
@Retry(attempts = 30, timeout = 10, error = Exception('Timeout: sosreport did not finish'), inverse = True)
def waitforsosreport():
process.run("pgrep sosreport", shell=True)
self.wait_text("Generating report", overridetry=5)
waitforsosreport()
element = self.wait_id("sos-download")
self.wait_xpath('//button[contains(text(), "%s")]' % "Download", cond=clickable, baseelement=element)
self.click(self.wait_id("sos-cancel", cond=clickable))
self.wait_text("This tool will collect system configuration and diagnostic")
self.mainframe()
self.error = False
if __name__ == '__main__':
main()
|
#!/usr/bin/python
# we need to be able to find and import seleniumlib, so add this directory
import os
import sys
machine_test_dir = os.path.dirname(os.path.abspath(__file__))
if not machine_test_dir in sys.path:
sys.path.insert(1, machine_test_dir)
from avocado import main
from avocado.utils import process
from seleniumlib import *
from timeoutlib import Retry
class SosReportingTab(SeleniumTest):
"""
:avocado: enable
"""
def test10SosReport(self):
self.login()
self.wait_id("sidebar")
self.click(self.wait_link('Diagnostic report', cond=clickable))
self.wait_frame("sosreport")
self.wait_text("This tool will collect system configuration and diagnostic")
self.click(self.wait_xpath('//button[@data-target="#sos"]', cond=clickable))
self.wait_id("sos")
self.wait_text("Generating report")
@Retry(attempts = 10, timeout = 3, error = Exception('Timeout: sosreport did not start'))
def waitforsosreportstarted():
process.run("pgrep sosreport", shell=True)
waitforsosreportstarted()
        # duration of report generation depends on the target system - as long as sosreport is active, we don't want to timeout
# it is also important to call some selenium method there to ensure that connection to HUB will not be lost
@Retry(attempts = 30, timeout = 10, error = Exception('Timeout: sosreport did not finish'), inverse = True)
def waitforsosreport():
process.run("pgrep sosreport", shell=True)
self.wait_text("Generating report", overridetry=5)
waitforsosreport()
element = self.wait_id("sos-download")
self.wait_xpath('//button[contains(text(), "%s")]' % "Download", cond=clickable, baseelement=element)
self.click(self.wait_id("sos-cancel", cond=clickable))
self.wait_text("This tool will collect system configuration and diagnostic")
self.mainframe()
self.error = False
if __name__ == '__main__':
main()
|
lgpl-2.1
|
Python
|
5906946b0287536976f816884169e3a3c91df043
|
Add a verbose_name and help_text to the User.id Property.
|
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
|
app/soc/models/user.py
|
app/soc/models/user.py
|
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the User Model."""
__authors__ = [
'"Todd Larsen" <[email protected]>',
'"Sverre Rabbelier" <[email protected]>',
'"Pawel Solyga" <[email protected]>',
]
import logging
from google.appengine.api import users
from google.appengine.ext import db
from django.utils.translation import ugettext_lazy
from soc.models import base
from soc.views.helpers import forms_helpers
class User(base.ModelWithFieldAttributes):
"""A user and associated login credentials, the fundamental identity entity.
User is a separate Model class from Person because the same login
ID may be used to, for example, serve as Contributor in one Program
and a Reviewer in another.
Also, this allows a Person to, in the future, re-associate that
Person entity with a different Google Account if necessary.
A User entity participates in the following relationships implemented
as a db.ReferenceProperty elsewhere in another db.Model:
persons) a 1:many relationship of Person entities identified by the
User. This relation is implemented as the 'persons' back-reference
Query of the Person model 'user' reference.
"""
#: A Google Account, which also provides a "private" email address.
#: This email address is only used in an automated fashion by
#: Melange web applications and is not made visible to other users
#: of any Melange application.
id = db.UserProperty(required=True,
verbose_name=ugettext_lazy('User account'))
id.help_text = ugettext_lazy(
'Email address of a valid user (Google Account).')
#: A list (possibly empty) of former Google Accounts associated with
#: this User.
former_ids = db.ListProperty(users.User)
#: Required field storing a nickname; displayed publicly.
#: Nicknames can be any valid UTF-8 text.
nick_name = db.StringProperty(required=True,
verbose_name=ugettext_lazy('Nick name'))
#: Required field storing linkname used in URLs to identify user.
#: Lower ASCII characters only.
link_name = db.StringProperty(required=True,
verbose_name=ugettext_lazy('Link name'))
link_name.help_text = ugettext_lazy(
'Field used in URLs to identify user. '
'Lower ASCII characters only.')
#: field storing whether User is a Developer with site-wide access.
is_developer = db.BooleanProperty(
verbose_name=ugettext_lazy('Is Developer'))
is_developer.help_text = ugettext_lazy(
'Field used to indicate user with site-wide "Developer" access.')
|
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the User Model."""
__authors__ = [
'"Todd Larsen" <[email protected]>',
'"Sverre Rabbelier" <[email protected]>',
'"Pawel Solyga" <[email protected]>',
]
import logging
from google.appengine.api import users
from google.appengine.ext import db
from django.utils.translation import ugettext_lazy
from soc.models import base
from soc.views.helpers import forms_helpers
class User(base.ModelWithFieldAttributes):
"""A user and associated login credentials, the fundamental identity entity.
User is a separate Model class from Person because the same login
ID may be used to, for example, serve as Contributor in one Program
and a Reviewer in another.
Also, this allows a Person to, in the future, re-associate that
Person entity with a different Google Account if necessary.
A User entity participates in the following relationships implemented
as a db.ReferenceProperty elsewhere in another db.Model:
persons) a 1:many relationship of Person entities identified by the
User. This relation is implemented as the 'persons' back-reference
Query of the Person model 'user' reference.
"""
#: A Google Account, which also provides a "private" email address.
#: This email address is only used in an automated fashion by
#: Melange web applications and is not made visible to other users
#: of any Melange application.
id = db.UserProperty(required=True)
#: A list (possibly empty) of former Google Accounts associated with
#: this User.
former_ids = db.ListProperty(users.User)
#: Required field storing a nickname; displayed publicly.
#: Nicknames can be any valid UTF-8 text.
nick_name = db.StringProperty(required=True,
verbose_name=ugettext_lazy('Nick name'))
#: Required field storing linkname used in URLs to identify user.
#: Lower ASCII characters only.
link_name = db.StringProperty(required=True,
verbose_name=ugettext_lazy('Link name'))
link_name.help_text = ugettext_lazy(
'Field used in URLs to identify user. '
'Lower ASCII characters only.')
#: field storing whether User is a Developer with site-wide access.
is_developer = db.BooleanProperty(
verbose_name=ugettext_lazy('Is Developer'))
is_developer.help_text = ugettext_lazy(
'Field used to indicate user with site-wide "Developer" access.')
|
apache-2.0
|
Python
|
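The change above attaches help_text to a class-level property object right after it is assigned. A plain-Python analogue of that pattern, where Field is a hypothetical stand-in for App Engine's db.UserProperty:
class Field(object):
    # Hypothetical stand-in for a db property object
    def __init__(self, verbose_name=None):
        self.verbose_name = verbose_name

class User(object):
    id = Field(verbose_name='User account')
    # A class body executes like ordinary code, so attributes can be
    # attached to the property object immediately after the assignment.
    id.help_text = 'Email address of a valid user (Google Account).'

print(User.id.verbose_name)   # User account
print(User.id.help_text)      # Email address of a valid user (Google Account).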
b717696b5cff69e3586e06c399be7d06c057e503
|
Make spawn_n() stub properly ignore errors in the child thread's work
|
barnsnake351/nova,dawnpower/nova,alvarolopez/nova,JioCloud/nova_test_latest,joker946/nova,apporc/nova,cyx1231st/nova,dims/nova,klmitch/nova,mgagne/nova,openstack/nova,orbitfp7/nova,phenoxim/nova,rajalokan/nova,Stavitsky/nova,akash1808/nova_test_latest,apporc/nova,projectcalico/calico-nova,devendermishrajio/nova_test_latest,noironetworks/nova,maelnor/nova,j-carpentier/nova,dims/nova,ted-gould/nova,tangfeixiong/nova,zaina/nova,rajalokan/nova,rahulunair/nova,takeshineshiro/nova,LoHChina/nova,cloudbase/nova,fnordahl/nova,mikalstill/nova,fnordahl/nova,viggates/nova,gooddata/openstack-nova,vmturbo/nova,saleemjaveds/https-github.com-openstack-nova,alaski/nova,Metaswitch/calico-nova,JioCloud/nova_test_latest,devendermishrajio/nova,belmiromoreira/nova,eonpatapon/nova,cloudbase/nova-virtualbox,felixma/nova,JioCloud/nova,CEG-FYP-OpenStack/scheduler,alaski/nova,thomasem/nova,hanlind/nova,noironetworks/nova,klmitch/nova,edulramirez/nova,isyippee/nova,mmnelemane/nova,jeffrey4l/nova,CloudServer/nova,silenceli/nova,eayunstack/nova,sebrandon1/nova,tealover/nova,scripnichenko/nova,Francis-Liu/animated-broccoli,projectcalico/calico-nova,angdraug/nova,tianweizhang/nova,rahulunair/nova,BeyondTheClouds/nova,scripnichenko/nova,belmiromoreira/nova,iuliat/nova,berrange/nova,double12gzh/nova,viggates/nova,openstack/nova,mahak/nova,tangfeixiong/nova,nikesh-mahalka/nova,Stavitsky/nova,LoHChina/nova,zhimin711/nova,alexandrucoman/vbox-nova-driver,petrutlucian94/nova,klmitch/nova,angdraug/nova,CEG-FYP-OpenStack/scheduler,berrange/nova,tianweizhang/nova,devendermishrajio/nova,petrutlucian94/nova,vmturbo/nova,blueboxgroup/nova,badock/nova,adelina-t/nova,virtualopensystems/nova,nikesh-mahalka/nova,redhat-openstack/nova,ruslanloman/nova,yosshy/nova,mmnelemane/nova,NeCTAR-RC/nova,whitepages/nova,varunarya10/nova_test_latest,jianghuaw/nova,bgxavier/nova,affo/nova,jeffrey4l/nova,blueboxgroup/nova,dawnpower/nova,hanlind/nova,barnsnake351/nova,akash1808/nova,sebrandon1/nova,Francis-Liu/animated-broccoli,eayunstack/nova,cernops/nova,mandeepdhami/nova,phenoxim/nova,rajalokan/nova,Yusuke1987/openstack_template,sebrandon1/nova,JioCloud/nova,akash1808/nova_test_latest,affo/nova,joker946/nova,kimjaejoong/nova,Juniper/nova,MountainWei/nova,double12gzh/nova,mgagne/nova,mandeepdhami/nova,yosshy/nova,watonyweng/nova,cernops/nova,yatinkumbhare/openstack-nova,cyx1231st/nova,rajalokan/nova,zzicewind/nova,CCI-MOC/nova,yatinkumbhare/openstack-nova,BeyondTheClouds/nova,bigswitch/nova,jianghuaw/nova,redhat-openstack/nova,vladikr/nova_drafts,Tehsmash/nova,cloudbase/nova,cernops/nova,JianyuWang/nova,orbitfp7/nova,devendermishrajio/nova_test_latest,felixma/nova,mahak/nova,Metaswitch/calico-nova,Tehsmash/nova,TwinkleChawla/nova,vladikr/nova_drafts,gooddata/openstack-nova,ruslanloman/nova,tealover/nova,BeyondTheClouds/nova,zaina/nova,watonyweng/nova,bigswitch/nova,MountainWei/nova,thomasem/nova,kimjaejoong/nova,bgxavier/nova,Juniper/nova,cloudbase/nova-virtualbox,alvarolopez/nova,TwinkleChawla/nova,raildo/nova,edulramirez/nova,ted-gould/nova,Juniper/nova,zhimin711/nova,saleemjaveds/https-github.com-openstack-nova,alexandrucoman/vbox-nova-driver,zzicewind/nova,tudorvio/nova,vmturbo/nova,virtualopensystems/nova,gooddata/openstack-nova,Juniper/nova,isyippee/nova,hanlind/nova,takeshineshiro/nova,silenceli/nova,gooddata/openstack-nova,NeCTAR-RC/nova,CloudServer/nova,eonpatapon/nova,shail2810/nova,Yusuke1987/openstack_template,CCI-MOC/nova,badock/nova,rahulunair/nova,shail2810/nova,mikalstill/nova,jianghuaw/nova,tudorvio/nova,varunarya10/nova_test_latest,kl
mitch/nova,maelnor/nova,iuliat/nova,j-carpentier/nova,whitepages/nova,akash1808/nova,jianghuaw/nova,adelina-t/nova,vmturbo/nova,openstack/nova,raildo/nova,cloudbase/nova,mikalstill/nova,JianyuWang/nova,mahak/nova
|
nova/tests/fake_utils.py
|
nova/tests/fake_utils.py
|
# Copyright (c) 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This modules stubs out functions in nova.utils."""
from nova import utils
def stub_out_utils_spawn_n(stubs):
"""Stubs out spawn_n with a blocking version.
This aids testing async processes by blocking until they're done.
"""
def no_spawn(func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
# NOTE(danms): This is supposed to simulate spawning
# of a thread, which would run separate from the parent,
# and die silently on error. If we don't catch and discard
# any exceptions here, we're not honoring the usual
# behavior.
pass
stubs.Set(utils, 'spawn_n', no_spawn)
|
# Copyright (c) 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This modules stubs out functions in nova.utils."""
from nova import utils
def stub_out_utils_spawn_n(stubs):
"""Stubs out spawn_n with a blocking version.
This aids testing async processes by blocking until they're done.
"""
def no_spawn(func, *args, **kwargs):
return func(*args, **kwargs)
stubs.Set(utils, 'spawn_n', no_spawn)
|
apache-2.0
|
Python
|
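The point of the fix: work spawned with spawn_n runs detached, so an exception in it dies silently, and a synchronous test stub must swallow exceptions too or tests would observe failures that production code never propagates. A self-contained illustration (flaky is a made-up example function):
def flaky():
    raise RuntimeError('boom')

def no_spawn(func, *args, **kwargs):
    # Synchronous stand-in for spawn_n: run the work inline, but discard
    # errors the way a detached thread would.
    try:
        return func(*args, **kwargs)
    except Exception:
        pass

no_spawn(flaky)           # returns None quietly, like the real spawn_n
print('still running')    # the caller is unaffected by the child error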
1f65be1f67867fc445b692df0f9390d6aa576e02
|
Fix import in common/utils
|
Zloool/manyfaced-honeypot
|
manyfaced/common/utils.py
|
manyfaced/common/utils.py
|
import time
import pickle
from socket import error as socket_error
from status import CLIENT_TIMEOUT
def dump_file(data):
try:
with file('temp.db') as f:
string_file = f.read()
db = pickle.loads(string_file)
except:
db = list()
db.append(data)
with open('temp.db', "w") as f:
f.write(str(pickle.dumps(db)))
def receive_timeout(the_socket, timeout=CLIENT_TIMEOUT):
# make socket non blocking
the_socket.setblocking(0)
# total data partwise in an array
total_data = []
# beginning time
begin = time.time()
while True:
# if you got some data, then break after timeout
if total_data and time.time() - begin > timeout:
break
# if you got no data at all, wait a little longer, twice the timeout
elif time.time() - begin > timeout * 2:
break
# recv something
try:
data = the_socket.recv(8192)
if data:
total_data.append(data)
# change the beginning time for measurement
begin = time.time()
else:
# sleep for sometime to indicate a gap
time.sleep(0.1)
except socket_error:
pass
# join all parts to make final string
return ''.join(total_data)
|
import time
import pickle
from socket import error as socket_error
from manyfaced.common.status import CLIENT_TIMEOUT
def dump_file(data):
try:
with file('temp.db') as f:
string_file = f.read()
db = pickle.loads(string_file)
except:
db = list()
db.append(data)
with open('temp.db', "w") as f:
f.write(str(pickle.dumps(db)))
def receive_timeout(the_socket, timeout=CLIENT_TIMEOUT):
# make socket non blocking
the_socket.setblocking(0)
# total data partwise in an array
total_data = []
# beginning time
begin = time.time()
while True:
# if you got some data, then break after timeout
if total_data and time.time() - begin > timeout:
break
# if you got no data at all, wait a little longer, twice the timeout
elif time.time() - begin > timeout * 2:
break
# recv something
try:
data = the_socket.recv(8192)
if data:
total_data.append(data)
# change the beginning time for measurement
begin = time.time()
else:
# sleep for sometime to indicate a gap
time.sleep(0.1)
except socket_error:
pass
# join all parts to make final string
return ''.join(total_data)
|
mit
|
Python
|
03f99a79941ade157689534e7ed0d0d196dd4d56
|
fix grep command
|
stevenschlansker/Singularity,nvoron23/Singularity,stevenschlansker/Singularity,tejasmanohar/Singularity,grepsr/Singularity,andrhamm/Singularity,evertrue/Singularity,hs-jenkins-bot/Singularity,acbellini/Singularity,acbellini/Singularity,andrhamm/Singularity,grepsr/Singularity,calebTomlinson/Singularity,grepsr/Singularity,hs-jenkins-bot/Singularity,grepsr/Singularity,HubSpot/Singularity,mjball/Singularity,tejasmanohar/Singularity,nvoron23/Singularity,calebTomlinson/Singularity,calebTomlinson/Singularity,mjball/Singularity,calebTomlinson/Singularity,nvoron23/Singularity,calebTomlinson/Singularity,mjball/Singularity,HubSpot/Singularity,HubSpot/Singularity,stevenschlansker/Singularity,nvoron23/Singularity,calebTomlinson/Singularity,grepsr/Singularity,HubSpot/Singularity,stevenschlansker/Singularity,mjball/Singularity,hs-jenkins-bot/Singularity,andrhamm/Singularity,stevenschlansker/Singularity,andrhamm/Singularity,stevenschlansker/Singularity,acbellini/Singularity,hs-jenkins-bot/Singularity,grepsr/Singularity,tejasmanohar/Singularity,nvoron23/Singularity,nvoron23/Singularity,evertrue/Singularity,tejasmanohar/Singularity,acbellini/Singularity,evertrue/Singularity,tejasmanohar/Singularity,andrhamm/Singularity,HubSpot/Singularity,evertrue/Singularity,evertrue/Singularity,hs-jenkins-bot/Singularity,evertrue/Singularity,acbellini/Singularity,tejasmanohar/Singularity,mjball/Singularity,acbellini/Singularity
|
scripts/logfetch/grep.py
|
scripts/logfetch/grep.py
|
import os
import sys
from termcolor import colored
GREP_COMMAND_FORMAT = 'xargs -n {0} {1} < {2}'
DEFAULT_GREP_COMMAND = 'grep --color=always \'{0}\''
def grep_files(args, all_logs):
if args.grep:
greplist_filename = '{0}/.greplist'.format(args.dest)
create_greplist(args, all_logs, greplist_filename)
command = grep_command(args, all_logs, greplist_filename)
sys.stderr.write(colored('Running "{0}" this might take a minute'.format(command), 'blue') + '\n')
sys.stdout.write(os.popen(command).read() + '\n')
remove_greplist(greplist_filename)
sys.stderr.write(colored('Finished grep, exiting', 'green') + '\n')
def create_greplist(args, all_logs, greplist_filename):
greplist_file = open(greplist_filename, 'wb')
for log in all_logs:
greplist_file.write('{0}\n'.format(log))
greplist_file.close()
def remove_greplist(greplist_filename):
if os.path.isfile(greplist_filename):
os.remove(greplist_filename)
def grep_command(args, all_logs, greplist_filename):
if 'grep' in args.grep:
return GREP_COMMAND_FORMAT.format(len(all_logs), args.grep, greplist_filename)
else:
return GREP_COMMAND_FORMAT.format(len(all_logs), DEFAULT_GREP_COMMAND.format(args.grep), greplist_filename)
|
import os
import sys
from termcolor import colored
GREP_COMMAND_FORMAT = 'xargs -n {0} {1} < {2}'
DEFAULT_GREP_COMMAND = 'grep --color=always \'{1}\''
def grep_files(args, all_logs):
if args.grep:
greplist_filename = '{0}/.greplist'.format(args.dest)
create_greplist(args, all_logs, greplist_filename)
command = grep_command(args, all_logs, greplist_filename)
sys.stderr.write(colored('Running "{0}" this might take a minute'.format(command), 'blue') + '\n')
sys.stdout.write(os.popen(command).read() + '\n')
remove_greplist(greplist_filename)
sys.stderr.write(colored('Finished grep, exiting', 'green') + '\n')
def create_greplist(args, all_logs, greplist_filename):
greplist_file = open(greplist_filename, 'wb')
for log in all_logs:
greplist_file.write('{0}\n'.format(log))
greplist_file.close()
def remove_greplist(greplist_filename):
if os.path.isfile(greplist_filename):
os.remove(greplist_filename)
def grep_command(args, all_logs, greplist_filename):
if 'grep' in args.grep:
return GREP_COMMAND_FORMAT.format(len(all_logs), args.grep, greplist_filename)
else:
return GREP_COMMAND_FORMAT.format(len(all_logs), DEFAULT_GREP_COMMAND.format(args.grep), greplist_filename)
|
apache-2.0
|
Python
|
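The bug being fixed sits in the placeholder index: DEFAULT_GREP_COMMAND.format(args.grep) supplies a single positional argument, so only {0} is valid and {1} raises IndexError. A quick demonstration:
ok = "grep --color=always '{0}'"
bad = "grep --color=always '{1}'"

print(ok.format('ERROR'))        # grep --color=always 'ERROR'
try:
    bad.format('ERROR')          # the lone argument is index 0
except IndexError as exc:
    print('IndexError:', exc)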
3eea445a445a9154758cd82c11c52751f2804eca
|
add axis to 3d example
|
aringh/odl,odlgroup/odl,kohr-h/odl,odlgroup/odl,kohr-h/odl,aringh/odl
|
examples/tomo/xray_trafo_parallel_3d.py
|
examples/tomo/xray_trafo_parallel_3d.py
|
# Copyright 2014, 2015 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Example using the X-ray transform with 3d parallel beam geometry."""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
import numpy as np
import odl
# Discrete reconstruction space: discretized functions on the cube
# [-20, 20]^3 with 300 samples per dimension.
reco_space = odl.uniform_discr(
min_corner=[-20, -20, -20], max_corner=[20, 20, 20],
nsamples=[300, 300, 300], dtype='float32')
# Make a parallel beam geometry with flat detector
# Angles: uniformly spaced, n = 360, min = 0, max = 2 * pi
angle_partition = odl.uniform_partition(0, 2 * np.pi, 360)
# Detector: uniformly sampled, n = (558, 558), min = (-30, -30), max = (30, 30)
detector_partition = odl.uniform_partition([-30, -30], [30, 30], [558, 558])
# Discrete reconstruction space
# Astra cannot handle axis aligned origin_to_det unless it is aligned
# with the third coordinate axis. See issue #18 at ASTRA's github.
# This is fixed in new versions of astra; with older versions, this could
# give a zero result.
geometry = odl.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition,
axis=[0, 1, 0])
# X-ray transform aka forward projection. We use ASTRA CUDA backend.
xray_trafo = odl.tomo.XrayTransform(reco_space, geometry, impl='astra_cuda')
# Create a discrete Shepp-Logan phantom (modified version)
phantom = odl.util.phantom.shepp_logan(reco_space, True)
# Create projection data by calling the ray transform on the phantom
proj_data = xray_trafo(phantom)
# Back-projection can be done by simply calling the adjoint operator on the
# projection data (or any element in the projection space).
backproj = xray_trafo.adjoint(proj_data)
# Shows a slice of the phantom, projections, and reconstruction
phantom.show(indices=np.s_[:, :, 150], title='Phantom, middle z slice')
proj_data.show(indices=np.s_[0, :, :], title='Projection 0')
proj_data.show(indices=np.s_[90, :, :], title='Projection 90')
backproj.show(indices=np.s_[:, :, 150],
title='back-projection, middle z slice')
|
# Copyright 2014, 2015 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Example using the X-ray transform with 3d parallel beam geometry."""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
import numpy as np
import odl
# Discrete reconstruction space: discretized functions on the cube
# [-20, 20]^3 with 300 samples per dimension.
reco_space = odl.uniform_discr(
min_corner=[-20, -20, -20], max_corner=[20, 20, 20],
nsamples=[300, 300, 300], dtype='float32')
# Make a parallel beam geometry with flat detector
# Angles: uniformly spaced, n = 360, min = 0, max = 2 * pi
angle_partition = odl.uniform_partition(0, 2 * np.pi, 360)
# Detector: uniformly sampled, n = (558, 558), min = (-30, -30), max = (30, 30)
detector_partition = odl.uniform_partition([-30, -30], [30, 30], [558, 558])
# Discrete reconstruction space
# Astra cannot handle axis aligned origin_to_det unless it is aligned
# with the third coordinate axis. See issue #18 at ASTRA's github.
# This is fixed in new versions of astra; with older versions, this could
# give a zero result.
geometry = odl.tomo.Parallel3dSingleAxisGeometry(angle_partition,
detector_partition)
# X-ray transform aka forward projection. We use ASTRA CUDA backend.
xray_trafo = odl.tomo.XrayTransform(reco_space, geometry, impl='astra_cuda')
# Create a discrete Shepp-Logan phantom (modified version)
phantom = odl.util.phantom.shepp_logan(reco_space, True)
# Create projection data by calling the ray transform on the phantom
proj_data = xray_trafo(phantom)
# Back-projection can be done by simply calling the adjoint operator on the
# projection data (or any element in the projection space).
backproj = xray_trafo.adjoint(proj_data)
# Shows a slice of the phantom, projections, and reconstruction
phantom.show(indices=np.s_[:, :, 150], title='Phantom, middle z slice')
proj_data.show(indices=np.s_[0, :, :], title='Projection 0')
proj_data.show(indices=np.s_[90, :, :], title='Projection 90')
backproj.show(indices=np.s_[:, :, 150],
title='back-projection, middle z slice')
|
mpl-2.0
|
Python
|
e76777897bed5b9396d126e384555ea230b35784
|
Use StaticFileStorage to determine source directories
|
jrief/django-sass-processor,jrief/django-sass-processor
|
sass_processor/apps.py
|
sass_processor/apps.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.apps import apps, AppConfig
from django.conf import settings
from django.core.files.storage import get_storage_class
APPS_INCLUDE_DIRS = []
class SassProcessorConfig(AppConfig):
name = 'sass_processor'
verbose_name = "Sass Processor"
_sass_exts = ('.scss', '.sass')
_storage = get_storage_class(import_path=settings.STATICFILES_STORAGE)()
def ready(self):
app_configs = apps.get_app_configs()
for app_config in app_configs:
static_dir = os.path.join(app_config.path, self._storage.base_url.strip(os.path.sep))
if os.path.isdir(static_dir):
self.traverse_tree(static_dir)
@classmethod
def traverse_tree(cls, static_dir):
"""traverse the static folders an look for at least one file ending in .scss/.sass"""
for root, dirs, files in os.walk(static_dir):
for filename in files:
basename, ext = os.path.splitext(filename)
if basename.startswith('_') and ext in cls._sass_exts:
APPS_INCLUDE_DIRS.append(static_dir)
return
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.apps import apps, AppConfig
APPS_INCLUDE_DIRS = []
class SassProcessorConfig(AppConfig):
name = 'sass_processor'
verbose_name = "Sass Processor"
_static_dir = 'static'
_sass_exts = ('.scss', '.sass')
def ready(self):
app_configs = apps.get_app_configs()
for app_config in app_configs:
static_dir = os.path.join(app_config.path, self._static_dir)
if os.path.isdir(static_dir):
self.traverse_tree(static_dir)
print(APPS_INCLUDE_DIRS)
@classmethod
def traverse_tree(cls, static_dir):
"""traverse the static folders an look for at least one file ending in .scss/.sass"""
for root, dirs, files in os.walk(static_dir):
for filename in files:
basename, ext = os.path.splitext(filename)
if basename.startswith('_') and ext in cls._sass_exts:
APPS_INCLUDE_DIRS.append(static_dir)
return
|
mit
|
Python
|
81622074d2d7544b897cec196257b130904f06b7
|
Comment about JSON
|
virajs/selenium-1,winhamwr/selenium,akiellor/selenium,akiellor/selenium,virajs/selenium-1,virajs/selenium-1,virajs/selenium-1,winhamwr/selenium,virajs/selenium-1,akiellor/selenium,virajs/selenium-1,akiellor/selenium,winhamwr/selenium,akiellor/selenium,akiellor/selenium,winhamwr/selenium,virajs/selenium-1,virajs/selenium-1,winhamwr/selenium,winhamwr/selenium,akiellor/selenium,winhamwr/selenium,virajs/selenium-1,winhamwr/selenium,akiellor/selenium
|
firefox/src/py/extensionconnection.py
|
firefox/src/py/extensionconnection.py
|
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Communication with the firefox extension."""
import logging
import socket
import time
try:
import json
except ImportError: # Python < 2.6
import simplejson as json
# Some old JSON libraries don't have "dumps", make sure we have a good one
if not hasattr(json, 'dumps'):
import simplejson as json
from selenium.remote.command import Command
from selenium.remote.remote_connection import RemoteConnection
_DEFAULT_TIMEOUT = 20
_DEFAULT_PORT = 7055
LOGGER = logging.getLogger("webdriver.ExtensionConnection")
class ExtensionConnection(RemoteConnection):
"""This class maintains a connection to the firefox extension.
"""
def __init__(self, timeout=_DEFAULT_TIMEOUT):
RemoteConnection.__init__(
self, "http://localhost:%d/hub" % _DEFAULT_PORT)
LOGGER.debug("extension connection initiated")
self.timeout = timeout
def quit(self, sessionId=None):
self.execute(Command.QUIT, {'sessionId':sessionId})
while self.is_connectable():
logging.info("waiting to quit")
time.sleep(1)
def connect(self):
"""Connects to the extension and retrieves the session id."""
return self.execute(Command.NEW_SESSION, {'desiredCapabilities':{
'browserName': 'firefox',
'platform': 'ANY',
'version': '',
'javascriptEnabled': True}})
def connect_and_quit(self):
"""Connects to an running browser and quit immediately."""
self._request('%s/extensions/firefox/quit' % self._url)
def is_connectable(self):
"""Trys to connect to the extension but do not retrieve context."""
try:
socket_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_.settimeout(1)
socket_.connect(("localhost", _DEFAULT_PORT))
socket_.close()
return True
except socket.error:
return False
class ExtensionConnectionError(Exception):
"""An internal error occurred int the extension.
Might be caused by bad input or bugs in webdriver
"""
pass
|
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Communication with the firefox extension."""
import logging
import socket
import time
try:
import json
except ImportError: # Python < 2.6
import simplejson as json
# FIXME: What is this?
if not hasattr(json, 'dumps'):
import simplejson as json
from selenium.remote.command import Command
from selenium.remote.remote_connection import RemoteConnection
_DEFAULT_TIMEOUT = 20
_DEFAULT_PORT = 7055
LOGGER = logging.getLogger("webdriver.ExtensionConnection")
class ExtensionConnection(RemoteConnection):
"""This class maintains a connection to the firefox extension.
"""
def __init__(self, timeout=_DEFAULT_TIMEOUT):
RemoteConnection.__init__(
self, "http://localhost:%d/hub" % _DEFAULT_PORT)
LOGGER.debug("extension connection initiated")
self.timeout = timeout
def quit(self, sessionId=None):
self.execute(Command.QUIT, {'sessionId':sessionId})
while self.is_connectable():
logging.info("waiting to quit")
time.sleep(1)
def connect(self):
"""Connects to the extension and retrieves the session id."""
return self.execute(Command.NEW_SESSION, {'desiredCapabilities':{
'browserName': 'firefox',
'platform': 'ANY',
'version': '',
'javascriptEnabled': True}})
def connect_and_quit(self):
"""Connects to an running browser and quit immediately."""
self._request('%s/extensions/firefox/quit' % self._url)
def is_connectable(self):
"""Trys to connect to the extension but do not retrieve context."""
try:
socket_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_.settimeout(1)
socket_.connect(("localhost", _DEFAULT_PORT))
socket_.close()
return True
except socket.error:
return False
class ExtensionConnectionError(Exception):
"""An internal error occurred int the extension.
Might be caused by bad input or bugs in webdriver
"""
pass
|
apache-2.0
|
Python
|
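The commit only rewords the comment, but the pattern it documents is worth spelling out: try the stdlib module first, fall back to a compatible third-party one, then verify that the attribute actually needed is present. A self-contained restatement (on any modern Python the first import simply wins and the fallbacks never run):
try:
    import json                    # stdlib, available since Python 2.6
except ImportError:
    import simplejson as json      # drop-in replacement on older Pythons

if not hasattr(json, 'dumps'):     # guard against old 'json' libs lacking dumps
    import simplejson as json

print(json.dumps({'ok': True}))    # {"ok": true}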
b6b514d385e8e18d03b939cf5fae9873c9f02a21
|
add constraint for price_list_item
|
bank-netforce/netforce,bank-netforce/netforce,bank-netforce/netforce,bank-netforce/netforce,bank-netforce/netforce,bank-netforce/netforce
|
netforce_product/netforce_product/models/price_list_item.py
|
netforce_product/netforce_product/models/price_list_item.py
|
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields
class PriceListItem(Model):
_name = "price.list.item"
_string = "Price List Item"
_key = ["list_id","product_id","price"]
_fields = {
"list_id": fields.Many2One("price.list", "Price List", required=True, on_delete="cascade", search=True),
"type": fields.Selection([["sale", "Sales"], ["purchase", "Purchasing"]], "Type", function="_get_related", function_context={"path": "list_id.type"}, search=True),
"currency_id": fields.Many2One("currency", "Currency", function="_get_related", function_context={"path": "list_id.currency_id"}, search=True),
"product_id": fields.Many2One("product", "Product", required=True, search=True, on_delete="cascade"),
"price": fields.Decimal("Price", required=True, scale=6),
"discount_percent": fields.Decimal("Discount %"),
"min_qty": fields.Decimal("Min Qty"),
"max_qty": fields.Decimal("Max Qty"),
"comments": fields.One2Many("message", "related_id", "Comments"),
"discount_text": fields.Char("Discount Text"),
}
_sql_constraints = [
("key_uniq", "unique (list_id,product_id,price)", "The price list, product and type must be unique!")
]
PriceListItem.register()
|
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields
class PriceListItem(Model):
_name = "price.list.item"
_string = "Price List Item"
_key = ["list_id","product_id","price"]
_fields = {
"list_id": fields.Many2One("price.list", "Price List", required=True, on_delete="cascade", search=True),
"type": fields.Selection([["sale", "Sales"], ["purchase", "Purchasing"]], "Type", function="_get_related", function_context={"path": "list_id.type"}, search=True),
"currency_id": fields.Many2One("currency", "Currency", function="_get_related", function_context={"path": "list_id.currency_id"}, search=True),
"product_id": fields.Many2One("product", "Product", required=True, search=True, on_delete="cascade"),
"price": fields.Decimal("Price", required=True, scale=6),
"discount_percent": fields.Decimal("Discount %"),
"min_qty": fields.Decimal("Min Qty"),
"max_qty": fields.Decimal("Max Qty"),
"comments": fields.One2Many("message", "related_id", "Comments"),
"discount_text": fields.Char("Discount Text"),
}
PriceListItem.register()
|
mit
|
Python
|
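The added _sql_constraints entry presumably maps to a composite UNIQUE constraint on the underlying table. Its effect can be illustrated with plain sqlite3; the schema below is a stand-in for illustration, not Netforce's actual DDL:
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute(
    "CREATE TABLE price_list_item ("
    " list_id INTEGER, product_id INTEGER, price NUMERIC,"
    " UNIQUE (list_id, product_id, price))"   # mirrors the key_uniq constraint
)
conn.execute("INSERT INTO price_list_item VALUES (1, 1, 9.99)")
try:
    conn.execute("INSERT INTO price_list_item VALUES (1, 1, 9.99)")  # duplicate key
except sqlite3.IntegrityError as exc:
    print('rejected:', exc)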
577c0bff1e7333fe0f0fd5e45ce7c7cf19710605
|
Fix migration [WAL-904]
|
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
|
nodeconductor/structure/migrations/0052_customer_subnets.py
|
nodeconductor/structure/migrations/0052_customer_subnets.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-07 08:52
from __future__ import unicode_literals
from django.db import migrations, models
import nodeconductor.core.validators
class Migration(migrations.Migration):
dependencies = [
('structure', '0051_add_customer_email_phone_agreement_number'),
]
operations = [
migrations.AddField(
model_name='customer',
name='access_subnets',
field=models.TextField(blank=True, default='', validators=[nodeconductor.core.validators.validate_cidr_list], help_text='Enter a comma separated list of IPv4 or IPv6 CIDR addresses from which connection to self-service is allowed.'),
),
]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-07 08:52
from __future__ import unicode_literals
from django.db import migrations, models
import nodeconductor.core.validators
class Migration(migrations.Migration):
dependencies = [
('structure', '0051_add_customer_email_phone_agreement_number'),
]
operations = [
migrations.AddField(
model_name='customer',
name='access_subnets',
field=models.TextField(blank=True, default='', validators=[nodeconductor.core.validators.validate_cidr_list], help_text='Enter a comma separated list of IPv4 or IPv6 subnets from which connection to self-service is allowed.'),
),
]
|
mit
|
Python
|
f9a02492ca8f902ca349e60ce42dee4cadbd35c0
|
Make run under Python 2.4.
|
rhaberkorn/scintilla-mirror,rhaberkorn/scintilla-mirror,rhaberkorn/scintilla-mirror,rhaberkorn/scintilla-mirror,rhaberkorn/scintilla-mirror,rhaberkorn/scintilla-mirror,rhaberkorn/scintilla-mirror,rhaberkorn/scintilla-mirror
|
include/HFacer.py
|
include/HFacer.py
|
# HFacer.py - regenerate the Scintilla.h and SciLexer.h files from the Scintilla.iface interface
# definition file.
# The header files are copied to a temporary file apart from the section between a //++Autogenerated
# comment and a //--Autogenerated comment which is generated by the printHFile and printLexHFile
# functions. After the temporary file is created, it is copied back to the original file name.
import string
import sys
import os
import Face
def Contains(s,sub):
return string.find(s, sub) != -1
def printLexHFile(f,out):
for name in f.order:
v = f.features[name]
if v["FeatureType"] in ["val"]:
if Contains(name, "SCE_") or Contains(name, "SCLEX_"):
out.write("#define " + name + " " + v["Value"] + "\n")
def printHFile(f,out):
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
if v["FeatureType"] in ["fun", "get", "set"]:
featureDefineName = "SCI_" + string.upper(name)
out.write("#define " + featureDefineName + " " + v["Value"] + "\n")
elif v["FeatureType"] in ["evt"]:
featureDefineName = "SCN_" + string.upper(name)
out.write("#define " + featureDefineName + " " + v["Value"] + "\n")
elif v["FeatureType"] in ["val"]:
if not (Contains(name, "SCE_") or Contains(name, "SCLEX_")):
out.write("#define " + name + " " + v["Value"] + "\n")
def CopyWithInsertion(input, output, genfn, definition):
copying = 1
for line in input.readlines():
if copying:
output.write(line)
if Contains(line, "//++Autogenerated"):
copying = 0
genfn(definition, output)
if Contains(line, "//--Autogenerated"):
copying = 1
output.write(line)
def contents(filename):
f = file(filename)
t = f.read()
f.close()
return t
def Regenerate(filename, genfn, definition):
inText = contents(filename)
tempname = "HFacer.tmp"
out = open(tempname,"w")
hfile = open(filename)
CopyWithInsertion(hfile, out, genfn, definition)
out.close()
hfile.close()
outText = contents(tempname)
if inText == outText:
os.unlink(tempname)
else:
os.unlink(filename)
os.rename(tempname, filename)
f = Face.Face()
try:
f.ReadFromFile("Scintilla.iface")
Regenerate("Scintilla.h", printHFile, f)
Regenerate("SciLexer.h", printLexHFile, f)
print "Maximum ID is", max([x for x in f.values if int(x) < 3000])
except:
raise
|
# HFacer.py - regenerate the Scintilla.h and SciLexer.h files from the Scintilla.iface interface
# definition file.
# The header files are copied to a temporary file apart from the section between a //++Autogenerated
# comment and a //--Autogenerated comment which is generated by the printHFile and printLexHFile
# functions. After the temporary file is created, it is copied back to the original file name.
import string
import sys
import os
import Face
def Contains(s,sub):
return string.find(s, sub) != -1
def printLexHFile(f,out):
for name in f.order:
v = f.features[name]
if v["FeatureType"] in ["val"]:
if Contains(name, "SCE_") or Contains(name, "SCLEX_"):
out.write("#define " + name + " " + v["Value"] + "\n")
def printHFile(f,out):
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
if v["FeatureType"] in ["fun", "get", "set"]:
featureDefineName = "SCI_" + string.upper(name)
out.write("#define " + featureDefineName + " " + v["Value"] + "\n")
elif v["FeatureType"] in ["evt"]:
featureDefineName = "SCN_" + string.upper(name)
out.write("#define " + featureDefineName + " " + v["Value"] + "\n")
elif v["FeatureType"] in ["val"]:
if not (Contains(name, "SCE_") or Contains(name, "SCLEX_")):
out.write("#define " + name + " " + v["Value"] + "\n")
def CopyWithInsertion(input, output, genfn, definition):
copying = 1
for line in input.readlines():
if copying:
output.write(line)
if Contains(line, "//++Autogenerated"):
copying = 0
genfn(definition, output)
if Contains(line, "//--Autogenerated"):
copying = 1
output.write(line)
def contents(filename):
f = file(filename)
t = f.read()
f.close()
return t
def Regenerate(filename, genfn, definition):
inText = contents(filename)
tempname = "HFacer.tmp"
out = open(tempname,"w")
hfile = open(filename)
CopyWithInsertion(hfile, out, genfn, definition)
out.close()
hfile.close()
outText = contents(tempname)
if inText == outText:
os.unlink(tempname)
else:
os.unlink(filename)
os.rename(tempname, filename)
f = Face.Face()
try:
f.ReadFromFile("Scintilla.iface")
Regenerate("Scintilla.h", printHFile, f)
Regenerate("SciLexer.h", printLexHFile, f)
print "Maximum ID is", max(x for x in f.values if int(x) < 3000)
except:
raise
|
isc
|
Python
|
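Context for the change: generator expressions only appeared in Python 2.4 (PEP 289), while list comprehensions go back to Python 2.0, so the bracketed spelling is the portable one for old interpreters. The two forms are otherwise equivalent here:
values = ['10', '2500', '3100']

# List comprehension: available since Python 2.0
print(max([x for x in values if int(x) < 3000]))   # 2500

# Generator expression: needs Python >= 2.4 (PEP 289)
print(max(x for x in values if int(x) < 3000))     # 2500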
8683400f5c76a5afd71655c67eef89e98b39c19c
|
make test runnable standalone
|
radical-cybertools/radical.ensemblemd,radical-cybertools/radical.ensemblemd
|
tests/test_issues/test_issue_26.py
|
tests/test_issues/test_issue_26.py
|
#!/usr/bin/env python
from radical.entk import Pipeline, Stage, Task, AppManager
from radical.entk import states
from radical.entk.exceptions import *
import pytest
import os
hostname = os.environ.get('RMQ_HOSTNAME','localhost')
port = int(os.environ.get('RMQ_PORT',5672))
# MLAB = 'mongodb://entk:[email protected]:43511/entk_0_7_4_release'
MLAB = os.environ.get('RADICAL_PILOT_DBURL')
# ------------------------------------------------------------------------------
#
def test_issue_26():
# --------------------------------------------------------------------------
#
def create_pipeline():
p = Pipeline()
s = Stage()
t1 = Task()
t1.name = 'simulation'
t1.executable = ['/bin/echo']
t1.arguments = ['hello']
t1.copy_input_data = []
t1.copy_output_data = []
s.add_tasks(t1)
p.add_stages(s)
return p
res_dict = {
'resource': 'local.localhost',
'walltime': 10,
'cpus' : 1,
'project' : ''
}
os.environ['RADICAL_PILOT_DBURL'] = MLAB
appman = AppManager(hostname=hostname, port=port, autoterminate=False)
appman.resource_desc = res_dict
p1 = create_pipeline()
appman.workflow = [p1]
appman.run()
print p1.uid, p1.stages[0].uid
p2 = create_pipeline()
appman.workflow = [p2]
appman.run()
print p2.uid, p2.stages[0].uid
appman.resource_terminate()
lhs = int(p1.stages[0].uid.split('.')[-1]) + 1
rhs = int(p2.stages[0].uid.split('.')[-1])
assert lhs == rhs
for t in p1.stages[0].tasks:
for tt in p2.stages[0].tasks:
lhs = int(t.uid.split('.')[-1]) + 1
rhs = int(tt.uid.split('.')[-1])
assert lhs == rhs
# ------------------------------------------------------------------------------
#
if __name__ == '__main__':
test_issue_26()
# ------------------------------------------------------------------------------
|
from radical.entk import Pipeline, Stage, Task, AppManager
from radical.entk import states
from radical.entk.exceptions import *
import pytest
import os
hostname = os.environ.get('RMQ_HOSTNAME','localhost')
port = int(os.environ.get('RMQ_PORT',5672))
# MLAB = 'mongodb://entk:[email protected]:43511/entk_0_7_4_release'
MLAB = os.environ.get('RADICAL_PILOT_DBURL')
def test_issue_26():
def create_pipeline():
p = Pipeline()
s = Stage()
t1 = Task()
t1.name = 'simulation'
t1.executable = ['/bin/echo']
t1.arguments = ['hello']
t1.copy_input_data = []
t1.copy_output_data = []
s.add_tasks(t1)
p.add_stages(s)
return p
res_dict = {
'resource': 'local.localhost',
'walltime': 10,
'cpus': 1,
'project': ''
}
os.environ['RADICAL_PILOT_DBURL'] = MLAB
appman = AppManager(hostname=hostname, port=port, autoterminate=False)
appman.resource_desc = res_dict
p1 = create_pipeline()
appman.workflow = [p1]
appman.run()
print p1.uid, p1.stages[0].uid
p2 = create_pipeline()
appman.workflow = [p2]
appman.run()
print p2.uid, p2.stages[0].uid
appman.resource_terminate()
lhs = int(p1.stages[0].uid.split('.')[-1]) + 1
rhs = int(p2.stages[0].uid.split('.')[-1])
assert lhs == rhs
for t in p1.stages[0].tasks:
for tt in p2.stages[0].tasks:
lhs = int(t.uid.split('.')[-1]) + 1
rhs = int(tt.uid.split('.')[-1])
assert lhs == rhs
|
mit
|
Python
|
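The whole change is the shebang plus the __main__ guard: the module stays importable, so pytest still collects test_issue_26, while running the file directly now executes the test. The generic shape of the pattern:
def test_addition():
    # Collected by pytest on import like any other test_* function.
    assert 1 + 1 == 2

if __name__ == '__main__':
    # Reached only when the file is executed directly, never on import.
    test_addition()
    print('ok')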
e5d13f315624be780fd60a04ff255f6682bdd84b
|
Update set_student_guardian.py
|
geekroot/erpnext,gsnbng/erpnext,geekroot/erpnext,indictranstech/erpnext,indictranstech/erpnext,gsnbng/erpnext,njmube/erpnext,gsnbng/erpnext,gsnbng/erpnext,Aptitudetech/ERPNext,njmube/erpnext,njmube/erpnext,indictranstech/erpnext,geekroot/erpnext,indictranstech/erpnext,njmube/erpnext,geekroot/erpnext
|
erpnext/patches/v7_1/set_student_guardian.py
|
erpnext/patches/v7_1/set_student_guardian.py
|
import frappe
def execute():
if frappe.db.exists("DocType", "Guardian"):
frappe.reload_doc("schools", "doctype", "student")
frappe.reload_doc("schools", "doctype", "student_guardian")
frappe.reload_doc("schools", "doctype", "student_sibling")
if "student" not in frappe.db.get_table_columns("Guardian"):
return
guardian = frappe.get_all("Guardian", fields=["name", "student"])
for d in guardian:
if d.student:
student = frappe.get_doc("Student", d.student)
if student:
student.append("guardians", {"guardian": d.name})
student.save()
|
import frappe
def execute():
if frappe.db.exists("DocType", "Guardian"):
frappe.reload_doc("schools", "doctype", "student")
frappe.reload_doc("schools", "doctype", "student_guardian")
frappe.reload_doc("schools", "doctype", "student_sibling")
guardian = frappe.get_all("Guardian", fields=["name", "student"])
for d in guardian:
if d.student:
student = frappe.get_doc("Student", d.student)
if student:
student.append("guardians", {"guardian": d.name})
student.save()
|
agpl-3.0
|
Python
|
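The added get_table_columns check makes the patch idempotent: on a site where the legacy student column is already gone from Guardian, the patch returns instead of querying a missing column. A generic sketch of the same guard using sqlite3, since frappe's own API is only partially visible here:
import sqlite3

def table_columns(conn, table):
    # Rough analogue of frappe.db.get_table_columns for sqlite
    return [row[1] for row in conn.execute('PRAGMA table_info(%s)' % table)]

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE guardian (name TEXT)')   # legacy column absent

def execute(conn):
    if 'student' not in table_columns(conn, 'guardian'):
        return   # column already migrated away; nothing to do
    # ... the actual data migration would run here ...

execute(conn)
print('patch ran safely')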
9986da1599e5beaaad49d389b2a2ee3d4f308991
|
fix https://github.com/BackofenLab/AlgoDat/issues/61
|
TobiasFaller/AlgoDat,BackofenLab/AlgoDat,BackofenLab/AlgoDat,BackofenLab/AlgoDat,TobiasFaller/AlgoDat,TobiasFaller/AlgoDat,TobiasFaller/AlgoDat,BackofenLab/AlgoDat,BackofenLab/AlgoDat,TobiasFaller/AlgoDat
|
Lecture-4/Code/AssociativeArray.py
|
Lecture-4/Code/AssociativeArray.py
|
# creates a new map (called dictionary)
countries = {"DE" : "Deutschland", \
"EN" : "England"}
# check if element exists
if "EN" in countries:
print("Found %s!" % countries["EN"])
# map key "DE" to value "Germany"
countries["DE"] = "Germany"
# delete key "DE"
del countries["DE"]
|
# creates a new map (called dictionary)
countries = {"DE" : "Deutschland", \
"EN" : "England"}
# check if element exists
if "EN" in countries:
print("Found %s!" % countries["EN"])
# map key "DE" to value 0
countries["DE"] = "Germany"
# delete key "DE"
del countries["DE"]
|
mit
|
Python
|
f98ef68949b8875daeb5b8346a65a842a682a5df
|
replace linebreaks in version strings
|
lilydjwg/nvchecker
|
nvchecker/get_version.py
|
nvchecker/get_version.py
|
# MIT licensed
# Copyright (c) 2013-2017 lilydjwg <[email protected]>, et al.
import logging
from importlib import import_module
logger = logging.getLogger(__name__)
handler_precedence = (
'github', 'aur', 'pypi', 'archpkg', 'debianpkg', 'ubuntupkg',
'gems', 'pacman',
'cmd', 'bitbucket', 'regex', 'manual', 'vcs',
'cratesio', 'npm', 'hackage', 'cpan', 'gitlab', 'packagist',
'anitya',
)
async def get_version(name, conf):
for key in handler_precedence:
if key in conf:
func = import_module('.source.' + key, __package__).get_version
version = await func(name, conf)
return version.replace('\n', ' ')
else:
logger.error('%s: no idea to get version info.', name)
|
# MIT licensed
# Copyright (c) 2013-2017 lilydjwg <[email protected]>, et al.
import logging
from importlib import import_module
logger = logging.getLogger(__name__)
handler_precedence = (
'github', 'aur', 'pypi', 'archpkg', 'debianpkg', 'ubuntupkg',
'gems', 'pacman',
'cmd', 'bitbucket', 'regex', 'manual', 'vcs',
'cratesio', 'npm', 'hackage', 'cpan', 'gitlab', 'packagist',
'anitya',
)
async def get_version(name, conf):
for key in handler_precedence:
if key in conf:
func = import_module('.source.' + key, __package__).get_version
return await func(name, conf)
else:
logger.error('%s: no idea to get version info.', name)
|
mit
|
Python
|
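One caveat the new code inherits: version.replace assumes the handler returned a string, so a handler yielding None on failure would now raise AttributeError where it previously returned None. A hypothetical defensive variant of the normalisation step:
def normalise(version):
    # Flatten line breaks, but tolerate a handler that returned nothing.
    return version.replace('\n', ' ') if version is not None else None

print(normalise('1.2.3\n(stable)'))   # 1.2.3 (stable)
print(normalise(None))                # None, instead of AttributeError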
11c81ff161cad04a24a8746b93d3afb64e6a5b49
|
fix typo
|
awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples
|
python/example_code/ec2/describe_subnets.py
|
python/example_code/ec2/describe_subnets.py
|
# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
ec2 = boto3.client('ec2')
response = ec2.describe_subnets()
print(response)
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[describe_subnets.py demonstrates how to describe one or more of your Subnets.]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-keyword:[Amazon EC2]
# snippet-service:[ec2]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2018-11-13]
# snippet-sourceauthor:[nprajilesh]
|
# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
ec2 = boto3.client('ec2')
response = ec2.describe_subnets()
print(response)
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[describe_subnets.py demonstrates how to describe describe one or more of your Subnets.]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-keyword:[Amazon EC2]
# snippet-service:[ec2]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2018-11-13]
# snippet-sourceauthor:[nprajilesh]
|
apache-2.0
|
Python
|
4c4e997767681e91f5d115e998cda22433eae7f6
|
allow to set a mode (list|map)
|
liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin
|
apps/mapideas/views.py
|
apps/mapideas/views.py
|
import django_filters
from django.contrib import messages
from django.utils.translation import ugettext as _
from adhocracy4.maps import mixins as map_mixins
from adhocracy4.modules import views as module_views
from apps.contrib import filters
from . import forms
from . import models
def get_ordering_choices(request):
choices = (('-created', _('Most recent')),)
if request.module.has_feature('rate', models.MapIdea):
choices += ('-positive_rating_count', _('Most popular')),
choices += ('-comment_count', _('Most commented')),
return choices
class MapIdeaFilterSet(django_filters.FilterSet):
category = filters.CategoryFilter()
ordering = filters.OrderingFilter(
choices=get_ordering_choices
)
@property
def qs(self):
return super().qs.filter(module=self.request.module) \
.annotate_positive_rating_count() \
.annotate_negative_rating_count() \
.annotate_comment_count()
class Meta:
model = models.MapIdea
fields = ['category']
class MapIdeaListView(map_mixins.MapItemListMixin, module_views.ItemListView):
model = models.MapIdea
filter_set = MapIdeaFilterSet
def dispatch(self, request, **kwargs):
self.mode = request.GET.get('mode', 'list')
return super().dispatch(request, **kwargs)
class MapIdeaDetailView(map_mixins.MapItemDetailMixin,
module_views.ItemDetailView):
model = models.MapIdea
queryset = models.MapIdea.objects.annotate_positive_rating_count()\
.annotate_negative_rating_count()
permission_required = 'meinberlin_mapideas.view_idea'
class MapIdeaCreateView(module_views.ItemCreateView):
model = models.MapIdea
form_class = forms.MapIdeaForm
permission_required = 'meinberlin_mapideas.propose_idea'
template_name = 'meinberlin_mapideas/mapidea_create_form.html'
class MapIdeaUpdateView(module_views.ItemUpdateView):
model = models.MapIdea
form_class = forms.MapIdeaForm
permission_required = 'meinberlin_mapideas.modify_idea'
template_name = 'meinberlin_mapideas/mapidea_update_form.html'
class MapIdeaDeleteView(module_views.ItemDeleteView):
model = models.MapIdea
success_message = _("Your Idea has been deleted")
permission_required = 'meinberlin_mapideas.modify_idea'
template_name = 'meinberlin_mapideas/mapidea_confirm_delete.html'
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super().delete(request, *args, **kwargs)
|
import django_filters
from django.contrib import messages
from django.utils.translation import ugettext as _
from adhocracy4.maps import mixins as map_mixins
from adhocracy4.modules import views as module_views
from apps.contrib import filters
from . import forms
from . import models
def get_ordering_choices(request):
choices = (('-created', _('Most recent')),)
if request.module.has_feature('rate', models.MapIdea):
choices += ('-positive_rating_count', _('Most popular')),
choices += ('-comment_count', _('Most commented')),
return choices
class MapIdeaFilterSet(django_filters.FilterSet):
category = filters.CategoryFilter()
ordering = filters.OrderingFilter(
choices=get_ordering_choices
)
@property
def qs(self):
return super().qs.filter(module=self.request.module) \
.annotate_positive_rating_count() \
.annotate_negative_rating_count() \
.annotate_comment_count()
class Meta:
model = models.MapIdea
fields = ['category']
class MapIdeaListView(map_mixins.MapItemListMixin, module_views.ItemListView):
model = models.MapIdea
filter_set = MapIdeaFilterSet
class MapIdeaDetailView(map_mixins.MapItemDetailMixin,
module_views.ItemDetailView):
model = models.MapIdea
queryset = models.MapIdea.objects.annotate_positive_rating_count()\
.annotate_negative_rating_count()
permission_required = 'meinberlin_mapideas.view_idea'
class MapIdeaCreateView(module_views.ItemCreateView):
model = models.MapIdea
form_class = forms.MapIdeaForm
permission_required = 'meinberlin_mapideas.propose_idea'
template_name = 'meinberlin_mapideas/mapidea_create_form.html'
class MapIdeaUpdateView(module_views.ItemUpdateView):
model = models.MapIdea
form_class = forms.MapIdeaForm
permission_required = 'meinberlin_mapideas.modify_idea'
template_name = 'meinberlin_mapideas/mapidea_update_form.html'
class MapIdeaDeleteView(module_views.ItemDeleteView):
model = models.MapIdea
success_message = _("Your Idea has been deleted")
permission_required = 'meinberlin_mapideas.modify_idea'
template_name = 'meinberlin_mapideas/mapidea_confirm_delete.html'
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super().delete(request, *args, **kwargs)
|
agpl-3.0
|
Python
|
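dispatch() runs before get()/post() in Django's class-based views, which makes it the conventional place to read request state once and stash it on the instance. A framework-free sketch of the same idea, where FakeRequest is a stand-in for django.http.HttpRequest:
class FakeRequest(object):
    # Stand-in exposing just the GET mapping used by the view
    def __init__(self, GET):
        self.GET = GET

class ListViewSketch(object):
    def dispatch(self, request, **kwargs):
        self.mode = request.GET.get('mode', 'list')   # 'list' unless ?mode=map
        return self.mode

view = ListViewSketch()
print(view.dispatch(FakeRequest({})))                 # list
print(view.dispatch(FakeRequest({'mode': 'map'})))    # map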
dac0afb3db74b1e8cd144993e662dc8ac0622cb9
|
Add missing import to FTP module
|
opensistemas-hub/osbrain
|
osbrain/ftp.py
|
osbrain/ftp.py
|
"""
Implementation of FTP-related features.
"""
import Pyro4
from .core import BaseAgent
from .common import address_to_host_port
class FTPAgent(BaseAgent):
"""
An agent that provides basic FTP functionality.
"""
def ftp_configure(self, addr, user, passwd, path, perm='elr'):
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
# Create authorizer
authorizer = DummyAuthorizer()
authorizer.add_user(user, passwd, path, perm=perm)
# Create handler
handler = FTPHandler
handler.authorizer = authorizer
# Create server
host, port = address_to_host_port(addr)
# TODO: is this necessary? Or would `None` be sufficient?
if port is None:
port = 0
self.ftp_server = FTPServer((host, port), handler)
return self.ftp_server.socket.getsockname()
@Pyro4.oneway
def ftp_run(self):
# Serve forever
self.ftp_server.serve_forever()
def ftp_addr(self):
return self.ftp_server.socket.getsockname()
def ftp_retrieve(self, addr, origin, destiny, user, passwd):
import ftplib
host, port = addr
ftp = ftplib.FTP()
ftp.connect(host, port)
ftp.login(user, passwd)
ftp.retrbinary('RETR %s' % origin, open(destiny, 'wb').write)
ftp.close()
return destiny
|
"""
Implementation of FTP-related features.
"""
from .core import BaseAgent
from .common import address_to_host_port
class FTPAgent(BaseAgent):
"""
An agent that provides basic FTP functionality.
"""
def ftp_configure(self, addr, user, passwd, path, perm='elr'):
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
# Create authorizer
authorizer = DummyAuthorizer()
authorizer.add_user(user, passwd, path, perm=perm)
# Create handler
handler = FTPHandler
handler.authorizer = authorizer
# Create server
host, port = address_to_host_port(addr)
# TODO: is this necessary? Or would `None` be sufficient?
if port is None:
port = 0
self.ftp_server = FTPServer((host, port), handler)
return self.ftp_server.socket.getsockname()
@Pyro4.oneway
def ftp_run(self):
# Serve forever
self.ftp_server.serve_forever()
def ftp_addr(self):
return self.ftp_server.socket.getsockname()
def ftp_retrieve(self, addr, origin, destiny, user, passwd):
import ftplib
host, port = addr
ftp = ftplib.FTP()
ftp.connect(host, port)
ftp.login(user, passwd)
ftp.retrbinary('RETR %s' % origin, open(destiny, 'wb').write)
ftp.close()
return destiny
|
apache-2.0
|
Python
|
c450ee554daf1b5c4143e33d5688df2fed776f99
|
fix bug: hide the owner field in the admin sidebar configuration, fixing the issue where sidebar content could not be saved when no author was filled in
|
boldmanQ/blogsys,boldmanQ/blogsys
|
blogsys/blogsys/adminx.py
|
blogsys/blogsys/adminx.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:zq time:2018/3/15
from __future__ import unicode_literals
import xadmin
from xadmin.views import CommAdminView
class BaseOwnerAdmin(object):
'''
1. Automatically fills in the owner field for the article, category, tag, sidebar and friend-link models
2. Filters the queryset down to the current user's data
'''
exclude = ('owner',)
def get_list_queryset(self):
request = self.request
queryset = super(BaseOwnerAdmin, self).get_list_queryset()
if request.user.is_superuser:
return queryset.all()
return queryset.filter(owner=request.user)
def save_models(self):
if not self.org_obj:
self.new_obj.owner = self.request.user
return super(BaseOwnerAdmin, self).save_models()
class XAdminGlobalSetting(object):
site_title = "車乞's blog admin"
site_footer = 'power by [email protected]'
xadmin.site.register(CommAdminView, XAdminGlobalSetting)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:zq time:2018/3/15
from __future__ import unicode_literals
import xadmin
from xadmin.views import CommAdminView
class BaseOwnerAdmin(object):
'''
1. Automatically fills in the owner field for the article, category, tag, sidebar and friend-link models
2. Filters the queryset down to the current user's data
'''
exclude = ('owner')
def get_list_queryset(self):
request = self.request
queryset = super(BaseOwnerAdmin, self).get_list_queryset()
if request.user.is_superuser:
return queryset.all()
return queryset.filter(owner=request.user)
def save_models(self):
if not self.org_obj:
self.new_obj.owner = self.request.user
return super(BaseOwnerAdmin, self).save_models()
class XAdminGlobalSetting(object):
site_title = "車乞's blog admin"
site_footer = 'power by [email protected]'
xadmin.site.register(CommAdminView, XAdminGlobalSetting)
|
mit
|
Python
|
a6128825eb7b4267a2a5a3116ec2625fdd8d3552
|
Add outsuffix to prottable-qvality driver
|
glormph/msstitch
|
app/drivers/prottable/qvality.py
|
app/drivers/prottable/qvality.py
|
from app.drivers.pycolator.qvality import QvalityDriver
from app.actions.prottable import qvality as preparation
from app.readers import tsv
class ProttableQvalityDriver(QvalityDriver):
"""Runs qvality on two protein tables"""
outsuffix = '_protqvality.txt'
def __init__(self, **kwargs):
super(ProttableQvalityDriver, self).__init__(**kwargs)
self.score_get_fun = preparation.prepare_qvality_input
if '***reverse' not in self.qvalityoptions:
self.qvalityoptions.extend(['***reverse'])
def set_features(self):
targetheader = tsv.get_tsv_header(self.fn)
self.target = tsv.generate_tsv_proteins(self.fn, targetheader)
decoyheader = tsv.get_tsv_header(self.decoy)
self.decoy = tsv.generate_tsv_proteins(self.decoy, decoyheader)
super().set_features()
|
from app.drivers.pycolator.qvality import QvalityDriver
from app.actions.prottable import qvality as preparation
from app.readers import tsv
class ProttableQvalityDriver(QvalityDriver):
def __init__(self, **kwargs):
super(ProttableQvalityDriver, self).__init__(**kwargs)
self.score_get_fun = preparation.prepare_qvality_input
if '***reverse' not in self.qvalityoptions:
self.qvalityoptions.extend(['***reverse'])
def set_features(self):
targetheader = tsv.get_tsv_header(self.fn)
self.target = tsv.generate_tsv_proteins(self.fn, targetheader)
decoyheader = tsv.get_tsv_header(self.decoy)
self.decoy = tsv.generate_tsv_proteins(self.decoy, decoyheader)
super().set_features()
|
mit
|
Python
|
3199b523a67f9c241950992a07fe38d2bbee07dc
|
Update migration file for name change
|
RockinRobin/seednetwork,RockinRobin/seednetwork,RockinRobin/seednetwork
|
seedlibrary/migrations/0003_extendedview_fix.py
|
seedlibrary/migrations/0003_extendedview_fix.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-02-21 02:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('seedlibrary', '0002_add_extendedview'),
]
operations = [
migrations.RenameField(
model_name='extendedview',
old_name='external_field',
new_name='external_url',
),
migrations.AddField(
model_name='extendedview',
name='grain_subcategory',
field=models.CharField(blank=True, max_length=50),
),
]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-02-21 02:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('seedlibrary', '0002_auto_20170219_2058'),
]
operations = [
migrations.RenameField(
model_name='extendedview',
old_name='external_field',
new_name='external_url',
),
migrations.AddField(
model_name='extendedview',
name='grain_subcategory',
field=models.CharField(blank=True, max_length=50),
),
]
|
mit
|
Python
|
17b0f5d7b718bc12755f7ddefdd76ee9312adf5f
|
Add content type text/html to response
|
sanchopanca/reader,sanchopanca/reader
|
books.py
|
books.py
|
import falcon
import template
def get_paragraphs(pathname: str) -> list:
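# Collect each non-blank line from the file, dropping its trailing newline.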
result = []
with open(pathname) as f:
for line in f.readlines():
if line != '\n':
result.append(line[:-1])
return result
class BooksResource:
def on_get(self, req, resp):
resp.status = falcon.HTTP_200
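# Falcon's default media type is application/json; set text/html so browsers render the page.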
resp.content_type = 'text/html'
paragraphs = get_paragraphs('/home/sanchopanca/Documents/thunder.txt')
resp.body = template.render_template('book.html', paragraphs=paragraphs)
app = falcon.API()
books = BooksResource()
app.add_route('/books', books)
if __name__ == '__main__':
paragraphs = get_paragraphs('/home/sanchopanca/Documents/thunder.txt')
print(paragraphs)
|
import falcon
import template
def get_paragraphs(pathname: str) -> list:
result = []
with open(pathname) as f:
for line in f.readlines():
if line != '\n':
result.append(line[:-1])
return result
class BooksResource:
def on_get(self, req, resp):
resp.status = falcon.HTTP_200
paragraphs = get_paragraphs('/home/sanchopanca/Documents/thunder.txt')
resp.body = template.render_template('book.html', paragraphs=paragraphs)
app = falcon.API()
books = BooksResource()
app.add_route('/books', books)
if __name__ == '__main__':
paragraphs = get_paragraphs('/home/sanchopanca/Documents/thunder.txt')
print(paragraphs)
|
agpl-3.0
|
Python
|
75b5f6acc441efe66c22b548ea21c5a16210af33
|
Update cuberun model with BN and input shape.
|
johnmartinsson/bird-species-classification,johnmartinsson/bird-species-classification
|
bird/models/cuberun.py
|
bird/models/cuberun.py
|
from keras.layers import Input
from keras.layers import Dense, Activation, Dropout, Flatten
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras import backend as K
def CubeRun(nb_classes):
""" Instantiate a CubeRun architecture
# Arguments
nb_classes: the number of classification classes
# Returns
A Keras model instance
"""
# adapt input shape to the used backend
if K.image_dim_ordering() == 'th':
input_shape=(1, 257, 624)
else:
input_shape=(257, 624, 1)
img_input = Input(shape=input_shape)
# adapt back normalization axis to the used backend
if K.image_dim_ordering() == 'th':
bn_axis = 1
else:
bn_axis = 3
x = ZeroPadding2D((2, 2))(img_input)
#x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
#x = Dropout(0.2)(x)
# conv (64 5x5 kernels, stride size 2x1)
x = Convolution2D(64, 5, 5, subsample=(2, 1))(x)
x = Activation('relu')(x)
# max pooling (2x2 kernels, stride size 2x2)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
# batch normalization
x = BatchNormalization(axis=bn_axis)(x)
# conv (64 5x5 kernels, stride size 1x1)
x = Convolution2D(64, 5, 5, subsample=(1, 1))(x)
x = Activation('relu')(x)
# max pooling (2x2 kernels, stride size 2x2)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
# batch normalization
x = BatchNormalization(axis=bn_axis)(x)
# conv (128 5x5 kernels, stride size 1x1)
x = Convolution2D(128, 5, 5, subsample=(1, 1))(x)
x = Activation('relu')(x)
# max pooling (2x2 kernels, stride size 2x2)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
# batch normalization
x = BatchNormalization(axis=bn_axis)(x)
# conv (256 5x5 kernels, stride size 1x1)
x = Convolution2D(256, 5, 5, activation="relu", subsample=(1, 1))(x)
x = Activation('relu')(x)
# max pooling (2x2 kernels, stride size 2x2)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
# batch normalization
x = BatchNormalization(axis=bn_axis)(x)
# conv (256 3x3 kernels, stride size 1x1)
x = Convolution2D(256, 3, 3, subsample=(1, 1))(x)
x = Activation('relu')(x)
# max pooling (2x2 kernels, stride size 2x2)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
# batch normalization
#x = BatchNormalization(axis=bn_axis)(x)
# flatten 3D feature maps to 1D feature vectors
x = Flatten()(x)
# dense layer
x = Dense(1024)(x)
x = Activation('relu')(x)
# dense layer dropout
x = Dropout(0.4)(x)
# soft max layer
x = Dense(nb_classes)(x)
x = Activation('softmax')(x)
# soft max layer dropout
#x = Dropout(0.4)(x)
model = Model(img_input, x)
return model
|
import json
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
model = Sequential()
# conv (64 5x5 kernels, stride size 2x1)
# TODO : 1 channel?
model.add(Convolution2D(64, 5, 5, input_shape=(3, 128, 256), activation="relu", subsample=(2, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (64 5x5 kernels, stride size 1x1)
model.add(Convolution2D(64, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (128 5x5 kernels, stride size 1x1)
model.add(Convolution2D(128, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (256 5x5 kernels, stride size 1x1)
model.add(Convolution2D(256, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (256 3x3 kernels, stride size 1x1)
model.add(Convolution2D(256, 3, 3, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# flatten 3D feature maps to 1D feature vectors
model.add(Flatten())
# dense (1024 units)
model.add(Dense(1024))
# soft max (19 units)
model.add(Dense(19, activation='softmax'))
with open('cuberun.json', 'w') as outfile:
json.dump(model.to_json(), outfile)
|
mit
|
Python
|
ced7ff090c44f706fa161b5e5bed8f36fb6570c9
|
Set default pool to 1 worker.
|
soasme/blackgate
|
blackgate/component.py
|
blackgate/component.py
|
# -*- coding: utf-8 -*-
from functools import partial
from blackgate.executor_pools import ExecutorPools
from blackgate.circuit_beaker import NoCircuitBeaker, InProcessCircuitBeaker, get_circuit_beaker
class Component(object):
def __init__(self):
self.pools = ExecutorPools()
self.circuit_beakers = {}
self.circuit_beaker_impl = NoCircuitBeaker
self.circuit_beaker_options = {}
self.get_circuit_beaker = partial(
get_circuit_beaker,
self.circuit_beakers,
)
self.configurations = {}
def set(self, key, value):
self.configurations[key] = value
def add(self, key, value):
self.configurations.setdefault(key, [])
if key in self.configurations:
assert isinstance(self.configurations[key], list)
self.configurations[key].append(value)
def delete(self, key):
del self.configurations[key]
def install(self):
self.install_executor_pool()
self.install_circuit_beaker()
def install_executor_pool(self):
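# Always register a fallback 'default' pool with a single worker before applying any configured pools.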
self.pools.register_pool('default', 1)
if 'executor_pool' in self.configurations:
for executor_pool in self.configurations['executor_pool']:
self.pools.register_pool(executor_pool['group_key'], executor_pool['max_workers'])
def install_circuit_beaker(self):
if 'circuit_beaker_enabled' in self.configurations:
self.circuit_beaker_impl = NoCircuitBeaker
self.circuit_beaker_options = {}
elif 'circuit_beaker_impl' not in self.configurations:
self.circuit_beaker_impl = InProcessCircuitBeaker
self.circuit_beaker_options = {'metrics': None} # FIXME
else:
# FIXME: add definition of import_string
self.circuit_beaker_impl = import_string(self.configurations['circuit_beaker_impl'])
self.circuit_beaker_options = self.configurations.get('circuit_beaker_options') or {}
|
# -*- coding: utf-8 -*-
from functools import partial
from blackgate.executor_pools import ExecutorPools
from blackgate.circuit_beaker import NoCircuitBeaker, InProcessCircuitBeaker, get_circuit_beaker
class Component(object):
def __init__(self):
self.pools = ExecutorPools()
self.circuit_beakers = {}
self.circuit_beaker_impl = NoCircuitBeaker
self.circuit_beaker_options = {}
self.get_circuit_beaker = partial(
get_circuit_beaker,
table=self.circuit_beakers,
)
self.configurations = {}
def set(self, key, value):
self.configurations[key] = value
def add(self, key, value):
self.configurations.setdefault(key, [])
if key in self.configurations:
assert isinstance(self.configurations[key], list)
self.configurations[key].append(value)
def delete(self, key):
del self.configurations[key]
def install(self):
self.install_executor_pool()
self.install_circuit_beaker()
def install_executor_pool(self):
if 'executor_pool' in self.configurations:
for executor_pool in self.configurations['executor_pool']:
self.pools.register_pool(executor_pool['group_key'], executor_pool['max_workers'])
def install_circuit_beaker(self):
if 'circuit_beaker_enabled' in self.configurations:
self.circuit_beaker_impl = NoCircuitBeaker
self.circuit_beaker_options = {}
elif 'circuit_beaker_impl' not in self.configurations:
self.circuit_beaker_impl = InProcessCircuitBeaker
self.circuit_beaker_options = {'metrics': None} # FIXME
else:
# FIXME: add definition of import_string
self.circuit_beaker_impl = import_string(self.configurations['circuit_beaker_impl'])
self.circuit_beaker_options = self.configurations.get('circuit_beaker_options') or {}
|
mit
|
Python
|
89237e9af27fa46c08ec90cab4029f41b335708f
|
fix pep8 violations
|
simphony/simphony-common
|
examples/plugin/simphony_example/__init__.py
|
examples/plugin/simphony_example/__init__.py
|
# Functions, classes and constants exported here will be available
# when the `example` module is imported.
__all__ = ['A', 'B']
from .code import A, B
|
# Functions, classes and constants exported here will be available
# when the `example` module is imported.
__all__ = ['A', 'B']
from .code import A, B
|
bsd-2-clause
|
Python
|
4a498d83c15f89e00c095659df1fc38377acc0a3
|
fix permissions for rewards
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
bluebottle/rewards/models.py
|
bluebottle/rewards/models.py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields import (CreationDateTimeField,
ModificationDateTimeField)
from bluebottle.utils.fields import MoneyField
from bluebottle.utils.utils import StatusDefinition
GROUP_PERMS = {
'Staff': {
'perms': (
'add_reward', 'change_reward', 'delete_reward',
)
},
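# Anonymous visitors may read rewards; authenticated users get full CRUD through the API.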
'Anonymous': {
'perms': ('api_read_reward',)
},
'Authenticated': {
'perms': (
'api_read_reward', 'api_add_reward', 'api_change_reward', 'api_delete_reward',
)
}
}
class Reward(models.Model):
"""
Rewards for donations
"""
amount = MoneyField(_('Amount'))
title = models.CharField(_('Title'), max_length=30)
description = models.CharField(_('Description'), max_length=200)
project = models.ForeignKey('projects.Project', verbose_name=_('Project'))
limit = models.IntegerField(_('Limit'), null=True, blank=True,
help_text=_('How many of this rewards are available'))
created = CreationDateTimeField(_('creation date'))
updated = ModificationDateTimeField(_('last modification'))
@property
def owner(self):
return self.project.owner
@property
def parent(self):
return self.project
@property
def count(self):
from bluebottle.donations.models import Donation
return Donation.objects \
.filter(project=self.project) \
.filter(reward=self) \
.filter(order__status__in=[StatusDefinition.PENDING, StatusDefinition.SUCCESS]) \
.count()
def __unicode__(self):
return self.title
class Meta:
ordering = ['-project__created', 'amount']
verbose_name = _("Gift")
verbose_name_plural = _("Gifts")
permissions = (
('api_read_reward', 'Can view reward through the API'),
('api_add_reward', 'Can add reward through the API'),
('api_change_reward', 'Can change reward through the API'),
('api_delete_reward', 'Can delete reward through the API'),
)
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields import (CreationDateTimeField,
ModificationDateTimeField)
from bluebottle.utils.fields import MoneyField
from bluebottle.utils.utils import StatusDefinition
GROUP_PERMS = {
'Staff': {
'perms': (
'add_reward', 'change_reward', 'delete_reward',
)
}
}
class Reward(models.Model):
"""
Rewards for donations
"""
amount = MoneyField(_('Amount'))
title = models.CharField(_('Title'), max_length=30)
description = models.CharField(_('Description'), max_length=200)
project = models.ForeignKey('projects.Project', verbose_name=_('Project'))
limit = models.IntegerField(_('Limit'), null=True, blank=True,
help_text=_('How many of this rewards are available'))
created = CreationDateTimeField(_('creation date'))
updated = ModificationDateTimeField(_('last modification'))
@property
def owner(self):
return self.project.owner
@property
def parent(self):
return self.project
@property
def count(self):
from bluebottle.donations.models import Donation
return Donation.objects \
.filter(project=self.project) \
.filter(reward=self) \
.filter(order__status__in=[StatusDefinition.PENDING, StatusDefinition.SUCCESS]) \
.count()
def __unicode__(self):
return self.title
class Meta:
ordering = ['-project__created', 'amount']
verbose_name = _("Gift")
verbose_name_plural = _("Gifts")
permissions = (
('api_read_reward', 'Can view reward through the API'),
('api_add_reward', 'Can add reward through the API'),
('api_change_reward', 'Can change reward through the API'),
('api_delete_reward', 'Can delete reward through the API'),
)
|
bsd-3-clause
|
Python
|
7851e867aec82f771683cc267ecb5989d2005aa1
|
add same features as program in 03-janus
|
pdebuyl-lab/RMPCDMD,laurensdeprez/RMPCDMD,pdebuyl-lab/RMPCDMD,pdebuyl/RMPCDMD,laurensdeprez/RMPCDMD,pdebuyl/RMPCDMD
|
experiments/01-single-dimer/plot_velocity.py
|
experiments/01-single-dimer/plot_velocity.py
|
#!/usr/bin/env python3
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str, help='H5MD datafile')
parser.add_argument('--directed', action='store_true')
parser.add_argument('--histogram', action='store_true')
args = parser.parse_args()
import numpy as np
import h5py
import matplotlib.pyplot as plt
with h5py.File(args.file, 'r') as f:
r = f['particles/dimer/position/value'][...]
r_dt = f['particles/dimer/position/time'][()]
im = f['particles/dimer/image/value'][...]
v = f['particles/dimer/velocity/value'][...]
v_dt = f['particles/dimer/velocity/time'][()]
edges = f['particles/dimer/box/edges'][:].reshape((1,-1))
r += edges*im
assert abs(r_dt-v_dt) < 1e-12
assert r.shape[1]==2
assert r.shape[2]==3
assert v.shape[1]==2
assert v.shape[2]==3
time = np.arange(r.shape[0])*r_dt
v_com = v.mean(axis=1)
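# --directed projects the centre-of-mass velocity onto the instantaneous dimer axis.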
if args.directed:
unit_z = r[:,1,:]-r[:,0,:]
unit_z /= np.sqrt(np.sum(unit_z**2, axis=1)).reshape((-1,1))
vz = np.sum(v_com*unit_z, axis=1)
if args.histogram:
plt.hist(vz, bins=20)
else:
plt.plot(time, vz)
else:
for i in range(3):
plt.subplot(3,1,i+1)
if args.histogram:
plt.hist(v_com[:,i])
plt.ylabel(r'$P(v_'+'xyz'[i]+')$')
else:
plt.plot(time, v_com[:,i])
plt.ylabel('xyz'[i])
plt.show()
|
#!/usr/bin/env python3
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str, help='H5MD datafile')
parser.add_argument('--directed', action='store_true')
args = parser.parse_args()
import numpy as np
import h5py
import matplotlib.pyplot as plt
with h5py.File(args.file, 'r') as f:
r = f['particles/dimer/position/value'][...]
r_dt = f['particles/dimer/position/time'][()]
im = f['particles/dimer/image/value'][...]
v = f['particles/dimer/velocity/value'][...]
v_dt = f['particles/dimer/velocity/time'][()]
edges = f['particles/dimer/box/edges'][:].reshape((1,-1))
r += edges*im
assert abs(r_dt-v_dt) < 1e-12
assert r.shape[1]==2
assert r.shape[2]==3
assert v.shape[1]==2
assert v.shape[2]==3
time = np.arange(r.shape[0])*r_dt
v_com = v.mean(axis=1)
if args.directed:
unit_z = r[:,1,:]-r[:,0,:]
unit_z /= np.sqrt(np.sum(unit_z**2, axis=1)).reshape((-1,1))
vz = np.sum(v_com*unit_z, axis=1)
plt.plot(time, vz)
else:
plt.plot(time, v_com)
plt.show()
|
bsd-3-clause
|
Python
|
aabe64773baf0516ecce2e96793221d5bfa91040
|
change to use environment variables
|
osbock/InternetOfSilly
|
scripts/sillyserver.py
|
scripts/sillyserver.py
|
# Example of using the MQTT client class to subscribe to a feed and print out
# any changes made to the feed. Edit the variables below to configure the key,
# username, and feed to subscribe to for changes.
# Import standard python modules.
import sys, os, serial
# Import Adafruit IO MQTT client.
from Adafruit_IO import MQTTClient
# Set to your Adafruit IO key & username below.
ADAFRUIT_IO_KEY = os.getenv('AIOKEY','nokey')
ADAFRUIT_IO_USERNAME = os.getenv('AIOUSER','nouser')
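# Abort early when the credentials were not provided through the environment.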
if (ADAFRUIT_IO_KEY == 'nokey' or ADAFRUIT_IO_USERNAME == 'nouser'):
print('no user or key environment variable')
sys.exit()
FEED_ID = 'SillyFire'
ser = serial.Serial('/dev/cu.usbmodem1411',115200)
# Define callback functions which will be called when certain events happen.
def connected(client):
# Connected function will be called when the client is connected to Adafruit IO.
# This is a good place to subscribe to feed changes. The client parameter
# passed to this function is the Adafruit IO MQTT client so you can make
# calls against it easily.
print 'Connected to Adafruit IO! Listening for {0} changes...'.format(FEED_ID)
# Subscribe to changes on a feed named DemoFeed.
client.subscribe(FEED_ID)
def disconnected(client):
# Disconnected function will be called when the client disconnects.
print 'Disconnected from Adafruit IO!'
client.connect()
def message(client, feed_id, payload):
# Message function will be called when a subscribed feed has a new value.
# The feed_id parameter identifies the feed, and the payload parameter has
# the new value.
print 'Feed {0} received new value: {1}'.format(feed_id, payload)
if int(payload) == 1:
print "Fire"
ser.write(b'F')
# Create an MQTT client instance.
client = MQTTClient(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)
# Setup the callback functions defined above.
client.on_connect = connected
client.on_disconnect = disconnected
client.on_message = message
# Connect to the Adafruit IO server.
print "try to connect"
client.connect()
# Start a message loop that blocks forever waiting for MQTT messages to be
# received. Note there are other options for running the event loop like doing
# so in a background thread--see the mqtt_client.py example to learn more.
client.loop_blocking()
|
# Example of using the MQTT client class to subscribe to a feed and print out
# any changes made to the feed. Edit the variables below to configure the key,
# username, and feed to subscribe to for changes.
# Import standard python modules.
import sys
import serial
# Import Adafruit IO MQTT client.
from Adafruit_IO import MQTTClient
# Set to your Adafruit IO key & username below.
ADAFRUIT_IO_KEY = "yourkey"
ADAFRUIT_IO_USERNAME = "yourusername"
FEED_ID = 'SillyFire'
ser = serial.Serial('/dev/cu.usbmodem1411',115200)
# Define callback functions which will be called when certain events happen.
def connected(client):
# Connected function will be called when the client is connected to Adafruit IO.
# This is a good place to subscribe to feed changes. The client parameter
# passed to this function is the Adafruit IO MQTT client so you can make
# calls against it easily.
print 'Connected to Adafruit IO! Listening for {0} changes...'.format(FEED_ID)
# Subscribe to changes on a feed named DemoFeed.
client.subscribe(FEED_ID)
def disconnected(client):
# Disconnected function will be called when the client disconnects.
print 'Disconnected from Adafruit IO!'
client.connect()
def message(client, feed_id, payload):
# Message function will be called when a subscribed feed has a new value.
# The feed_id parameter identifies the feed, and the payload parameter has
# the new value.
print 'Feed {0} received new value: {1}'.format(feed_id, payload)
if int(payload) == 1:
print "Fire"
ser.write(b'F')
# Create an MQTT client instance.
client = MQTTClient(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)
# Setup the callback functions defined above.
client.on_connect = connected
client.on_disconnect = disconnected
client.on_message = message
# Connect to the Adafruit IO server.
client.connect()
# Start a message loop that blocks forever waiting for MQTT messages to be
# received. Note there are other options for running the event loop like doing
# so in a background thread--see the mqtt_client.py example to learn more.
client.loop_blocking()
|
mit
|
Python
|
d4879a7640869b16e1ea50cdbff84f173a81b521
|
simplify variable
|
Fizzadar/pyinfra,Fizzadar/pyinfra
|
examples/apt.py
|
examples/apt.py
|
from pyinfra import host
from pyinfra.modules import apt
SUDO = True
code_name = host.fact.linux_distribution['release_meta'].get('DISTRIB_CODENAME')
print(host.fact.linux_name, code_name)
if host.fact.linux_name in ['Debian', 'Ubuntu']:
apt.packages(
{'Install some packages'},
['vim-addon-manager', 'vim', 'software-properties-common', 'wget'],
update=True,
)
apt.ppa(
{'Add the Bitcoin ppa'},
'ppa:bitcoin/bitcoin',
)
# typically after adding a ppk, you want to update
apt.update()
# but you could just include the update in the apt install step
# like this:
apt.packages(
{'Install Bitcoin'},
'bitcoin-qt',
update=True,
)
apt.deb(
{'Install Chrome via deb'},
'https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb',
)
apt.key(
{'Install VirtualBox key'},
'https://www.virtualbox.org/download/oracle_vbox_2016.asc',
)
apt.repo(
{'Install VirtualBox repo'},
'deb https://download.virtualbox.org/virtualbox/debian {} contrib'.format(code_name),
)
|
from pyinfra import host
from pyinfra.modules import apt
SUDO = True
# Note: Using linux_distribution fact so running from docker
# will show valid name since the lsb-release tool is not installed,
# otherwise could just use host.fact.linux_name
linux_name = host.fact.linux_distribution.get('name', '')
code_name = host.fact.linux_distribution['release_meta'].get('DISTRIB_CODENAME')
print(linux_name, code_name)
if linux_name in ['Debian', 'Ubuntu']:
apt.packages(
{'Install some packages'},
['vim-addon-manager', 'vim', 'software-properties-common', 'wget'],
update=True,
)
apt.ppa(
{'Add the Bitcoin ppa'},
'ppa:bitcoin/bitcoin',
)
# typically after adding a ppk, you want to update
apt.update()
# but you could just include the update in the apt install step
# like this:
apt.packages(
{'Install Bitcoin'},
'bitcoin-qt',
update=True,
)
apt.deb(
{'Install Chrome via deb'},
'https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb',
)
apt.key(
{'Install VirtualBox key'},
'https://www.virtualbox.org/download/oracle_vbox_2016.asc',
)
apt.repo(
{'Install VirtualBox repo'},
'deb https://download.virtualbox.org/virtualbox/debian {} contrib'.format(code_name),
)
|
mit
|
Python
|
630e362e727d4c7274b987008488d203e21f8ec6
|
Use default quality
|
JustinWingChungHui/electionleaflets,DemocracyClub/electionleaflets,DemocracyClub/electionleaflets,DemocracyClub/electionleaflets,JustinWingChungHui/electionleaflets,JustinWingChungHui/electionleaflets,JustinWingChungHui/electionleaflets
|
electionleaflets/apps/api/serializers.py
|
electionleaflets/apps/api/serializers.py
|
from rest_framework import serializers
from sorl.thumbnail import get_thumbnail
from leaflets.models import Leaflet, LeafletImage
from constituencies.models import Constituency
from uk_political_parties.models import Party
from people.models import Person
class ConstituencySerializer(serializers.ModelSerializer):
class Meta:
model = Constituency
fields = (
'pk',
'name',
'country_name',
'slug',
)
class PartySerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Party
fields = (
'pk',
'party_name',
'party_type',
'status',
)
class PersonSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Person
fields = (
'pk',
'name',
'remote_id',
'source_name',
'source_url',
)
class LeafletImageSerializer(serializers.ModelSerializer):
class Meta:
model = LeafletImage
fields = (
'image',
'image_text',
)
image = serializers.ImageField()
class LeafletSerializer(serializers.HyperlinkedModelSerializer):
images = LeafletImageSerializer(many=True, required=False)
constituency = ConstituencySerializer(required=False)
publisher_party = PartySerializer(required=False)
publisher_person = PersonSerializer(required=False)
first_page_thumb = serializers.SerializerMethodField()
def get_first_page_thumb(self, obj):
image = obj.get_first_image()
if image:
return get_thumbnail(image, '350').url
def validate(self, data):
if not data.get('status') or not data.get('images'):
data['status'] = 'draft'
return data
class Meta:
model = Leaflet
depth = 1
fields = (
'pk',
'title',
'description',
'publisher_party',
'publisher_person',
'constituency',
'images',
'first_page_thumb',
'date_uploaded',
'date_delivered',
'status',
)
|
from rest_framework import serializers
from sorl.thumbnail import get_thumbnail
from leaflets.models import Leaflet, LeafletImage
from constituencies.models import Constituency
from uk_political_parties.models import Party
from people.models import Person
class ConstituencySerializer(serializers.ModelSerializer):
class Meta:
model = Constituency
fields = (
'pk',
'name',
'country_name',
'slug',
)
class PartySerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Party
fields = (
'pk',
'party_name',
'party_type',
'status',
)
class PersonSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Person
fields = (
'pk',
'name',
'remote_id',
'source_name',
'source_url',
)
class LeafletImageSerializer(serializers.ModelSerializer):
class Meta:
model = LeafletImage
fields = (
'image',
'image_text',
)
image = serializers.ImageField()
class LeafletSerializer(serializers.HyperlinkedModelSerializer):
images = LeafletImageSerializer(many=True, required=False)
constituency = ConstituencySerializer(required=False)
publisher_party = PartySerializer(required=False)
publisher_person = PersonSerializer(required=False)
first_page_thumb = serializers.SerializerMethodField()
def get_first_page_thumb(self, obj):
image = obj.get_first_image()
if image:
return get_thumbnail(image, '350', quality=80).url
def validate(self, data):
if not data.get('status') or not data.get('images'):
data['status'] = 'draft'
return data
class Meta:
model = Leaflet
depth = 1
fields = (
'pk',
'title',
'description',
'publisher_party',
'publisher_person',
'constituency',
'images',
'first_page_thumb',
'date_uploaded',
'date_delivered',
'status',
)
|
mit
|
Python
|
4626a20b2d46a3a8ea17d265dff220c5a02700d8
|
Fix bug in clang-format's vim integration caused by r186789.
|
llvm-mirror/clang,llvm-mirror/clang,apple/swift-clang,apple/swift-clang,llvm-mirror/clang,llvm-mirror/clang,llvm-mirror/clang,llvm-mirror/clang,apple/swift-clang,llvm-mirror/clang,llvm-mirror/clang,apple/swift-clang,apple/swift-clang,llvm-mirror/clang,apple/swift-clang,llvm-mirror/clang,apple/swift-clang,apple/swift-clang,apple/swift-clang,apple/swift-clang
|
tools/clang-format/clang-format.py
|
tools/clang-format/clang-format.py
|
# This file is a minimal clang-format vim-integration. To install:
# - Change 'binary' if clang-format is not on the path (see below).
# - Add to your .vimrc:
#
# map <C-I> :pyf <path-to-this-file>/clang-format.py<CR>
# imap <C-I> <ESC>:pyf <path-to-this-file>/clang-format.py<CR>i
#
# The first line enables clang-format for NORMAL and VISUAL mode, the second
# line adds support for INSERT mode. Change "C-I" to another binding if you
# need clang-format on a different key (C-I stands for Ctrl+i).
#
# With this integration you can press the bound key and clang-format will
# format the current line in NORMAL and INSERT mode or the selected region in
# VISUAL mode. The line or region is extended to the next bigger syntactic
# entity.
#
# It operates on the current, potentially unsaved buffer and does not create
# or save any files. To revert a formatting, just undo.
import difflib
import json
import subprocess
import sys
import vim
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
# Change this to format according to other formatting styles (see
# clang-format -help)
style = 'LLVM'
# Get the current text.
buf = vim.current.buffer
text = '\n'.join(buf)
# Determine range to format.
cursor = int(vim.eval('line2byte(line("."))+col(".")')) - 2
lines = '%s:%s' % (vim.current.range.start + 1, vim.current.range.end + 1)
# Avoid flashing an ugly, ugly cmd prompt on Windows when invoking clang-format.
startupinfo = None
if sys.platform.startswith('win32'):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# Call formatter.
p = subprocess.Popen([binary, '-lines', lines, '-style', style,
'-cursor', str(cursor)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, startupinfo=startupinfo)
stdout, stderr = p.communicate(input=text)
# If successful, replace buffer contents.
if stderr:
message = stderr.splitlines()[0]
parts = message.split(' ', 2)
if len(parts) > 2:
message = parts[2]
print 'Formatting failed: %s (total %d warnings, %d errors)' % (
message, stderr.count('warning:'), stderr.count('error:'))
if not stdout:
print ('No output from clang-format (crashed?).\n' +
'Please report to bugs.llvm.org.')
else:
lines = stdout.split('\n')
output = json.loads(lines[0])
lines = lines[1:]
sequence = difflib.SequenceMatcher(None, vim.current.buffer, lines)
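# Apply the opcodes bottom-up so earlier line offsets stay valid while the buffer is edited in place.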
for op in reversed(sequence.get_opcodes()):
if op[0] != 'equal':
vim.current.buffer[op[1]:op[2]] = lines[op[3]:op[4]]
vim.command('goto %d' % (output['Cursor'] + 1))
|
# This file is a minimal clang-format vim-integration. To install:
# - Change 'binary' if clang-format is not on the path (see below).
# - Add to your .vimrc:
#
# map <C-I> :pyf <path-to-this-file>/clang-format.py<CR>
# imap <C-I> <ESC>:pyf <path-to-this-file>/clang-format.py<CR>i
#
# The first line enables clang-format for NORMAL and VISUAL mode, the second
# line adds support for INSERT mode. Change "C-I" to another binding if you
# need clang-format on a different key (C-I stands for Ctrl+i).
#
# With this integration you can press the bound key and clang-format will
# format the current line in NORMAL and INSERT mode or the selected region in
# VISUAL mode. The line or region is extended to the next bigger syntactic
# entity.
#
# It operates on the current, potentially unsaved buffer and does not create
# or save any files. To revert a formatting, just undo.
import difflib
import json
import subprocess
import sys
import vim
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
# Change this to format according to other formatting styles (see
# clang-format -help)
style = 'LLVM'
# Get the current text.
buf = vim.current.buffer
text = '\n'.join(buf)
# Determine range to format.
cursor = int(vim.eval('line2byte(line("."))+col(".")')) - 2
lines = '%s:%s' % (vim.current.range.start + 1, vim.current.range.end + 1)
# Avoid flashing an ugly, ugly cmd prompt on Windows when invoking clang-format.
startupinfo = None
if sys.platform.startswith('win32'):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# Call formatter.
p = subprocess.Popen([binary, '-lines', lines, '-style', style,
'-cursor', str(cursor)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, startupinfo=startupinfo)
stdout, stderr = p.communicate(input=text)
# If successful, replace buffer contents.
if stderr:
message = stderr.splitlines()[0]
parts = message.split(' ', 2)
if len(parts) > 2:
message = parts[2]
print 'Formatting failed: %s (total %d warnings, %d errors)' % (
message, stderr.count('warning:'), stderr.count('error:'))
if not stdout:
print ('No output from clang-format (crashed?).\n' +
'Please report to bugs.llvm.org.')
else:
lines = stdout.split('\n')
output = json.loads(lines[0])
lines = lines[1:]
sequence = difflib.SequenceMatcher(None, vim.current.buffer, lines)
for op in sequence.get_opcodes():
if op[0] != 'equal':
vim.current.buffer[op[1]:op[2]] = lines[op[3]:op[4]]
vim.command('goto %d' % (output['Cursor'] + 1))
|
apache-2.0
|
Python
|
f2de502608833dda82908a6bb4f639645f785c06
|
Change to support PEP variable naming scheme
|
jdgwartney/pulse-api-cli,jdgwartney/boundary-api-cli,boundary/boundary-api-cli,boundary/pulse-api-cli,jdgwartney/boundary-api-cli,boundary/boundary-api-cli,boundary/pulse-api-cli,jdgwartney/pulse-api-cli
|
boundary/hostgroup_update.py
|
boundary/hostgroup_update.py
|
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from boundary import HostgroupModify
class HostgroupUpdate(HostgroupModify):
def __init__(self):
HostgroupModify.__init__(self, True)
self.method = "PUT"
self.host_group_id = None
def add_arguments(self):
HostgroupModify.add_arguments(self)
self.parser.add_argument('-i', '--host-group-id', dest='host_group_id', action='store',
required=True, metavar='host_group_id', help='Host group id to update')
self.parser.add_argument('-n', '--host-group-name', dest='host_group_name', action='store', required=False,
metavar="host_group_name", help='Host group name')
self.parser.add_argument('-s', '--sources', dest='sources', action='store', required=True, metavar='sources',
help='Comma separated sources to add to the host group. If empty adds all hosts.')
def get_arguments(self):
"""
Extracts the specific arguments of this CLI
"""
HostgroupModify.get_arguments(self)
if self.args.host_group_id is not None:
self.host_group_id = self.args.host_group_id
self.path = "v1/hostgroup/" + str(self.host_group_id)
def get_description(self):
return 'Updates host group definition in a {0} account'.format(self.product_name)
|
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from boundary import HostgroupModify
class HostgroupUpdate(HostgroupModify):
def __init__(self):
HostgroupModify.__init__(self, True)
self.method = "PUT"
self.host_group_id = None
def add_arguments(self):
HostgroupModify.add_arguments(self)
self.parser.add_argument('-i', '--host-group-id', dest='host_group_id', action='store',
required=True, metavar='host_group_id', help='Host group id to update')
self.parser.add_argument('-n', '--host-group-name', dest='host_group_name', action='store', required=False,
metavar="host_group_name", help='Host group name')
self.parser.add_argument('-s', '--sources', dest='sources', action='store', required=True, metavar='sources',
help='Comma separated sources to add to the host group. If empty adds all hosts.')
def get_arguments(self):
"""
Extracts the specific arguments of this CLI
"""
HostgroupModify.get_arguments(self)
if self.args.hostGroupId is not None:
self.hostGroupId = self.args.hostGroupId
self.path = "v1/hostgroup/" + str(self.hostGroupId)
def get_description(self):
return 'Updates host group definition in a {0} account'.format(self.product_name)
|
apache-2.0
|
Python
|
9670454dbb7b2ec4ef61d60080a1bb798c3ace74
|
use integer amounts for payment capture
|
hasgeek/boxoffice,hasgeek/boxoffice,hasgeek/boxoffice,hasgeek/boxoffice
|
boxoffice/extapi/razorpay.py
|
boxoffice/extapi/razorpay.py
|
# -*- coding: utf-8 -*-
import requests
from coaster.utils import LabeledEnum
from baseframe import __
from boxoffice import app
# Don't use a trailing slash
base_url = 'https://api.razorpay.com/v1/payments'
__all__ = ['RAZORPAY_PAYMENT_STATUS', 'capture_payment']
class RAZORPAY_PAYMENT_STATUS(LabeledEnum):
"""
Reflects payment statuses as specified in
https://docs.razorpay.com/docs/return-objects
"""
CREATED = (0, __("Created"))
AUTHORIZED = (1, __("Authorized"))
CAPTURED = (2, __("Captured"))
# Only fully refunded payments.
REFUNDED = (3, __("Refunded"))
FAILED = (4, __("Failed"))
def capture_payment(paymentid, amount):
"""
Attempts to capture the payment, from Razorpay
"""
verify_https = False if app.config.get('VERIFY_RAZORPAY_HTTPS') is False else True
url = '{base_url}/{paymentid}/capture'.format(base_url=base_url, paymentid=paymentid)
# Razorpay requires the amount to be in paisa and of type integer
resp = requests.post(url, data={'amount': int(amount*100)},
auth=(app.config['RAZORPAY_KEY_ID'], app.config['RAZORPAY_KEY_SECRET']), verify=verify_https)
return resp
|
# -*- coding: utf-8 -*-
import requests
from coaster.utils import LabeledEnum
from baseframe import __
from boxoffice import app
# Don't use a trailing slash
base_url = 'https://api.razorpay.com/v1/payments'
__all__ = ['RAZORPAY_PAYMENT_STATUS', 'capture_payment']
class RAZORPAY_PAYMENT_STATUS(LabeledEnum):
"""
Reflects payment statuses as specified in
https://docs.razorpay.com/docs/return-objects
"""
CREATED = (0, __("Created"))
AUTHORIZED = (1, __("Authorized"))
CAPTURED = (2, __("Captured"))
# Only fully refunded payments.
REFUNDED = (3, __("Refunded"))
FAILED = (4, __("Failed"))
def capture_payment(paymentid, amount):
"""
Attempts to capture the payment, from Razorpay
"""
verify_https = False if app.config.get('VERIFY_RAZORPAY_HTTPS') is False else True
url = '{base_url}/{paymentid}/capture'.format(base_url=base_url, paymentid=paymentid)
# Razorpay requires the amount to be in paisa
resp = requests.post(url, data={'amount': amount*100},
auth=(app.config['RAZORPAY_KEY_ID'], app.config['RAZORPAY_KEY_SECRET']), verify=verify_https)
return resp
|
agpl-3.0
|
Python
|
f8944c0ac5a80d72852d9b2ea1dc1fc7d79a1891
|
Add test for deserialising
|
Ghostkeeper/Luna
|
plugins/data/enumerated/test/test_enumerated_type.py
|
plugins/data/enumerated/test/test_enumerated_type.py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Tests the enumerated type data type.
These tests are mostly interface-based, meaning that they will not test actual
output a lot, but tests the behaviour of the units instead.
"""
import enum #To define example enumerated types to test with.
import test.test_enum #Built-in enumerated types to test with.
import enumerated.enumerated_type #The module we're testing.
import luna.tests #For parametrised tests.
class Animal(enum.Enum):
"""
An example enumerated type to perform tests on.
"""
CAT = 0
DOG = 1
BIRD = 2
class EnumContainer:
"""
A class that contains a nested enum to test with.
"""
class Material(enum.Enum):
"""
A nested enumerated type inside another class.
We test with this because it has a different import path if it is
defined this way.
"""
IRON = 3
STONE = 4
WOOD = 5
class TestEnumeratedType(luna.tests.TestCase):
"""
Tests the behaviour of various functions belonging to the enumerated type.
In particular, it focuses on how these functions interact and integrate with
each other.
"""
@luna.tests.parametrise({
"custom": {
"serialised": b"enumerated.test.Animal.CAT"
},
"custom2": {
"serialised": b"enumerated.test.Animal.BIRD"
},
"builtins": {
"serialised": b"test.test_enum.Fruit.tomato"
},
"nested": {
"serialised": b"enumerated.test.EnumContainer.Material.STONE"
}
})
def test_deserialise(self, serialised):
"""
Tests whether we can deserialise enumerated types.
:param serialised: The serialised form of some enumerated type.
"""
result = enumerated.enumerated_type.deserialise(serialised)
self.assertIsInstance(result, enum.Enum)
@luna.tests.parametrise({
"module_local": {
"instance": Animal.CAT
},
"module_local2": { #Different module-local one that is not the first-defined entry.
"instance": Animal.BIRD
},
"builtins": {
"instance": test.test_enum.Fruit.tomato
},
"nested": {
"instance": EnumContainer.Material.STONE
}
})
def test_serialise(self, instance):
"""
Tests whether we can serialise enumerated types.
:param instance: The enumerated type instance to serialise.
"""
result = enumerated.enumerated_type.serialise(instance)
self.assertIsInstance(result, bytes, "The serialised enumerated type must be a byte sequence.")
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Tests the enumerated type data type.
These tests are mostly interface-based, meaning that they will not test actual
output a lot, but tests the behaviour of the units instead.
"""
import enum #To define example enumerated types to test with.
import test.test_enum #Built-in enumerated types to test with.
import enumerated.enumerated_type #The module we're testing.
import luna.tests #For parametrised tests.
class Animal(enum.Enum):
"""
An example enumerated type to perform tests on.
"""
CAT = 0
DOG = 1
BIRD = 2
class EnumContainer:
"""
A class that contains a nested enum to test with.
"""
class Material(enum.Enum):
"""
A nested enumerated type inside another class.
We test with this because it has a different import path if it is
defined this way.
"""
IRON = 3
STONE = 4
WOOD = 5
class TestEnumeratedType(luna.tests.TestCase):
"""
Tests the behaviour of various functions belonging to the enumerated type.
In particular, it focuses on how these functions interact and integrate with
each other.
"""
@luna.tests.parametrise({
"module_local": {
"instance": Animal.CAT
},
"module_local2": { #Different module-local one that is not the first-defined entry.
"instance": Animal.BIRD
},
"builtins": {
"instance": test.test_enum.Fruit.tomato
},
"nested": {
"instance": EnumContainer.Material.STONE
}
})
def test_serialise(self, instance):
"""
Tests whether we can serialise enumerated types.
:param instance: The enumerated type instance to serialise.
"""
result = enumerated.enumerated_type.serialise(instance)
self.assertIsInstance(result, bytes, "The serialised enumerated type must be a byte sequence.")
|
cc0-1.0
|
Python
|
2be23846aabae5307ef817561661783b44c43160
|
Move error-message logic into exception class
|
mlibrary/image-conversion-and-validation,mlibrary/image-conversion-and-validation
|
falcom/table.py
|
falcom/table.py
|
# Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.
class Table:
class InputStrContainsCarriageReturn (RuntimeError):
pass
class InconsistentColumnCounts (RuntimeError):
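# Stores the expected length and the offending row so __str__ can build the message.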
def __init__ (self, expected_len, row):
self.expected_len = expected_len
self.row = row
def __str__ (self):
return "Expected every row to have len={:d}: {}".format(
self.expected_len, repr(self.row))
def __init__ (self, tab_separated_text = None):
self.text = tab_separated_text
self.__raise_error_if_carriage_returns()
self.__create_internal_structure()
@property
def rows (self):
return len(self)
@property
def cols (self):
return len(self.__rows[0]) if self else 0
def __len__ (self):
return len(self.__rows)
def __iter__ (self):
return iter(self.__rows)
def __getitem__ (self, key):
return self.__rows[key]
def __repr__ (self):
return "<{} {}>".format(self.__class__.__name__,
repr(self.text))
def __raise_error_if_carriage_returns (self):
if self.text and "\r" in self.text:
raise self.InputStrContainsCarriageReturn
def __create_internal_structure (self):
if self.text:
self.__set_to_list_of_rows_from_text()
else:
self.__rows = []
def __set_to_list_of_rows_from_text (self):
self.__rows = [self.__split_row(r)
for r in self.__rows_from_text()]
self.__raise_error_unless_col_counts_are_consistent()
def __split_row (self, row_text):
return tuple(row_text.split("\t"))
def __rows_from_text (self):
return self.text.rstrip("\n").split("\n")
def __raise_error_unless_col_counts_are_consistent (self):
rows = iter(self.__rows)
expected_len = len(next(rows))
for row in rows:
if len(row) != expected_len:
raise Table.InconsistentColumnCounts(expected_len, row)
|
# Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.
class Table:
class InputStrContainsCarriageReturn (RuntimeError):
pass
class InconsistentColumnCounts (RuntimeError):
pass
def __init__ (self, tab_separated_text = None):
self.text = tab_separated_text
self.__raise_error_if_carriage_returns()
self.__create_internal_structure()
@property
def rows (self):
return len(self)
@property
def cols (self):
return len(self.__rows[0]) if self else 0
def __len__ (self):
return len(self.__rows)
def __iter__ (self):
return iter(self.__rows)
def __getitem__ (self, key):
return self.__rows[key]
def __repr__ (self):
return "<{} {}>".format(self.__class__.__name__,
repr(self.text))
def __raise_error_if_carriage_returns (self):
if self.text and "\r" in self.text:
raise self.InputStrContainsCarriageReturn
def __create_internal_structure (self):
if self.text:
self.__set_to_list_of_rows_from_text()
else:
self.__rows = []
def __set_to_list_of_rows_from_text (self):
self.__rows = [self.__split_row(r)
for r in self.__rows_from_text()]
self.__raise_error_unless_col_counts_are_consistent()
def __split_row (self, row_text):
return tuple(row_text.split("\t"))
def __rows_from_text (self):
return self.text.rstrip("\n").split("\n")
def __raise_error_unless_col_counts_are_consistent (self):
rows = iter(self.__rows)
expected_len = len(next(rows))
for row in rows:
if len(row) != expected_len:
raise Table.InconsistentColumnCounts(
"Expected every row to have len={:d}: {}".format(
expected_len, repr(row)))
|
bsd-3-clause
|
Python
|
4ce117b65d4a6f18d327f00866eaa4383f908094
|
Revert "meter import path."
|
gizmoguy/faucet,shivarammysore/faucet,Bairdo/faucet,wackerly/faucet,anarkiwi/faucet,gizmoguy/faucet,trentindav/faucet,faucetsdn/faucet,mwutzke/faucet,anarkiwi/faucet,trungdtbk/faucet,trentindav/faucet,Bairdo/faucet,byllyfish/faucet,REANNZ/faucet,byllyfish/faucet,REANNZ/faucet,wackerly/faucet,shivarammysore/faucet,faucetsdn/faucet,mwutzke/faucet,trungdtbk/faucet
|
faucet/meter.py
|
faucet/meter.py
|
"""Configure meters."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2017 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from conf import Conf
from valve_of import meteradd
except ImportError:
from faucet.conf import Conf
from valve_of import meteradd
class Meter(Conf):
"""Implement FAUCET configuration for an OpenFlow meter."""
name = None
entry_msg = None
defaults = {
'meter_id': None,
'entry': None,
}
defaults_type = {
'entry': dict,
'meter_id': int,
}
def __init__(self, _id, conf):
super(Meter, self).__init__(_id, conf)
assert conf['entry']
assert conf['entry']['flags']
assert conf['entry']['bands']
conf['entry']['meter_id'] = self.meter_id
self.entry_msg = meteradd(self.entry)
|
"""Configure meters."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2017 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from conf import Conf
from valve_of import meteradd
except ImportError:
from faucet.conf import Conf
from faucet.valve_of import meteradd
class Meter(Conf):
"""Implement FAUCET configuration for an OpenFlow meter."""
name = None
entry_msg = None
defaults = {
'meter_id': None,
'entry': None,
}
defaults_type = {
'entry': dict,
'meter_id': int,
}
def __init__(self, _id, conf):
super(Meter, self).__init__(_id, conf)
assert conf['entry']
assert conf['entry']['flags']
assert conf['entry']['bands']
conf['entry']['meter_id'] = self.meter_id
self.entry_msg = meteradd(self.entry)
|
apache-2.0
|
Python
|
db8b991600ab0a812e1d9af1a6e4bb7be25b5bd4
|
fix apply_tote_contents_hint
|
pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc
|
jsk_2016_01_baxter_apc/node_scripts/apply_tote_contents_hint.py
|
jsk_2016_01_baxter_apc/node_scripts/apply_tote_contents_hint.py
|
#!/usr/bin/env python
import numpy as np
from jsk_2015_05_baxter_apc.msg import ObjectRecognition
import jsk_apc2016_common
from jsk_recognition_msgs.msg import ClassificationResult
from jsk_topic_tools import ConnectionBasedTransport
from jsk_topic_tools.log_utils import jsk_logwarn
import rospy
class ApplyToteContentsHint(ConnectionBasedTransport):
"""Use tote contents info to improve object recognition"""
def __init__(self):
super(self.__class__, self).__init__()
json_file = rospy.get_param('~json')
self.tote_contents = jsk_apc2016_common.get_tote_contents(json_file)
self.pub = self.advertise('~output', ObjectRecognition, queue_size=1)
def subscribe(self):
self.sub = rospy.Subscriber('~input', ClassificationResult,
self._apply)
def unsubscribe(self):
self.sub.unregister()
def _apply(self, msg):
# get candidates probabilities
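# Build a fresh list so self.tote_contents is not mutated on every message.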
candidates = ['no_object'] + self.tote_contents
label_to_proba = dict(zip(msg.target_names, msg.probabilities))
candidates_proba = [label_to_proba[label] for label in candidates]
candidates_proba = np.array(candidates_proba)
candidates_proba = candidates_proba / candidates_proba.sum()
# compose output message
top_index = np.argmax(candidates_proba)
out_msg = ObjectRecognition(
header=msg.header,
matched=candidates[top_index],
probability=candidates_proba[top_index],
candidates=candidates,
probabilities=candidates_proba,
)
self.pub.publish(out_msg)
if __name__ == '__main__':
rospy.init_node('apply_tote_contents_hint')
app = ApplyToteContentsHint()
rospy.spin()
|
#!/usr/bin/env python
import numpy as np
from jsk_2015_05_baxter_apc.msg import ObjectRecognition
import jsk_apc2016_common
from jsk_recognition_msgs.msg import ClassificationResult
from jsk_topic_tools import ConnectionBasedTransport
from jsk_topic_tools.log_utils import jsk_logwarn
import rospy
class ApplyToteContentsHint(ConnectionBasedTransport):
"""Use tote contents info to improve object recognition"""
def __init__(self):
super(self.__class__, self).__init__()
json_file = rospy.get_param('~json')
self.tote_contents = jsk_apc2016_common.get_tote_contents(json_file)
self.pub = self.advertise('~output', ObjectRecognition, queue_size=1)
def subscribe(self):
self.sub = rospy.Subscriber('~input', ClassificationResult,
self._apply)
def unsubscribe(self):
self.sub.unregister()
def _apply(self, msg):
# get candidates probabilities
candidates = self.tote_contents
candidates.append('no_object')
label_to_proba = dict(zip(msg.target_names, msg.probabilities))
candidates_proba = [label_to_proba[label] for label in candidates]
candidates_proba = np.array(candidates_proba)
candidates_proba = candidates_proba / candidates_proba.sum()
# compose output message
top_index = np.argmax(candidates_proba)
out_msg = ObjectRecognition(
header=msg.header,
matched=candidates[top_index],
probability=candidates_proba[top_index],
candidates=candidates,
probabilities=candidates_proba,
)
self.pub.publish(out_msg)
if __name__ == '__main__':
rospy.init_node('apply_tote_contents_hint')
app = ApplyToteContentsHint()
rospy.spin()
|
bsd-3-clause
|
Python
|
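The behavioral bug fixed here is list aliasing: the old handler bound `candidates` to `self.tote_contents` and appended `'no_object'` to it, so the shared contents list grew by one entry per message; the new handler builds a fresh list with `['no_object'] + self.tote_contents`. A standalone illustration (plain Python, no ROS needed; the item names are made up):
contents = ['kleenex_tissue_box', 'crayola_24_ct']

def buggy(shared):
    candidates = shared               # alias: same list object
    candidates.append('no_object')    # mutates the caller's list
    return candidates

def fixed(shared):
    return ['no_object'] + shared     # fresh list; shared state untouched

buggy(contents); buggy(contents)
print(contents)   # grows: [..., 'no_object', 'no_object']
contents = ['kleenex_tissue_box', 'crayola_24_ct']
fixed(contents); fixed(contents)
print(contents)   # unchanged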
02efde47b5cf20b7385eacaa3f21454ffa636ad7
|
Update CodeStarConnections::Connection per 2020-07-23 update
|
cloudtools/troposphere,cloudtools/troposphere
|
troposphere/codestarconnections.py
|
troposphere/codestarconnections.py
|
# Copyright (c) 2012-2020, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, Tags
VALID_CONNECTION_PROVIDERTYPE = ('Bitbucket')
def validate_connection_providertype(connection_providertype):
"""Validate ProviderType for Connection"""
if connection_providertype not in VALID_CONNECTION_PROVIDERTYPE:
raise ValueError("Connection ProviderType must be one of: %s" %
", ".join(VALID_CONNECTION_PROVIDERTYPE))
return connection_providertype
class Connection(AWSObject):
resource_type = "AWS::CodeStarConnections::Connection"
props = {
'ConnectionName': (basestring, True),
'HostArn': (basestring, False),
'ProviderType': (validate_connection_providertype, True),
'Tags': (Tags, False),
}
|
# Copyright (c) 2012-2020, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, Tags
VALID_CONNECTION_PROVIDERTYPE = ('Bitbucket')
def validate_connection_providertype(connection_providertype):
"""Validate ProviderType for Connection"""
if connection_providertype not in VALID_CONNECTION_PROVIDERTYPE:
raise ValueError("Connection ProviderType must be one of: %s" %
", ".join(VALID_CONNECTION_PROVIDERTYPE))
return connection_providertype
class Connection(AWSObject):
resource_type = "AWS::CodeStarConnections::Connection"
props = {
'ConnectionName': (basestring, True),
'ProviderType': (validate_connection_providertype, True),
'Tags': (Tags, False),
}
|
bsd-2-clause
|
Python
|
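One caveat worth noting next to this record: `VALID_CONNECTION_PROVIDERTYPE = ('Bitbucket')` is a plain string, because parentheses without a trailing comma do not make a tuple, so the `in` test above is a substring check rather than a membership test. It happens to behave for the single full value, but the one-element tuple is the safer spelling:
VALID = ('Bitbucket')            # parentheses alone: this is the str 'Bitbucket'
print('Bit' in VALID)            # True, substring match
print('GitHub' not in VALID)     # True, but for the wrong reason

VALID_TUPLE = ('Bitbucket',)     # trailing comma makes a one-element tuple
print('Bit' in VALID_TUPLE)      # False, proper membership test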
c86d689e3593b6221b8b9120f6af16b32c2211d9
|
Add __all__ and __version__ to __init__.py.
|
kivhift/pu
|
src/pu/__init__.py
|
src/pu/__init__.py
|
__all__ = 'utils'.split()
__version__ = '1.0.0'
|
mit
|
Python
|
|
92469456222a8d5c00595ca34ea21c66042a5040
|
modify main loop
|
m4tx/techswarm-receiver
|
parser/main.py
|
parser/main.py
|
import config
import parse
pipeout = open(config.PIPE_NAME, 'r')
while True:
input_record = pipeout.readline()
if input_record.split(',')[0] == '$GYRO':
gyro = input_record
if input_record.split(',')[0] == '$ACCEL':
accel = pipeout.readline()
if input_record.split(',')[0] == '$MAGNET':
magnet = pipeout.readline()
if input_record.split(',')[0] == '$MBAR':
pressure = pipeout.readline()
if all([gyro, accel, magnet, pressure]):
parse.parse_IMU(gyro, accel, magnet, pressure)
gyro = accel = magnet = pressure = None
|
import config
import parse
pipeout = open(config.PIPE_NAME, 'r')
while True:
input_record = pipeout.readline()
if input_record.split(',')[0] == '$GYRO':
gyro = input_record
accel = pipeout.readline()
magnet = pipeout.readline()
pressure = pipeout.readline()
parse.parse_IMU(gyro, accel, magnet, pressure)
|
mit
|
Python
|
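Two fragile spots remain in the reworked loop: the four sentence slots are unbound until each type has been seen once, so the unconditional `all([gyro, accel, magnet, pressure])` raises NameError on the first iterations, and the `$ACCEL`/`$MAGNET`/`$MBAR` branches read the next line from the pipe instead of keeping `input_record` itself. A hedged sketch of a variant that initializes the slots and uses the current line:
# Sketch only: slots start as None so the completeness check is safe before
# every sentence type has arrived, and each branch keeps the current line.
gyro = accel = magnet = pressure = None

def handle(line):
    global gyro, accel, magnet, pressure
    kind = line.split(',')[0]
    if kind == '$GYRO':
        gyro = line
    elif kind == '$ACCEL':
        accel = line
    elif kind == '$MAGNET':
        magnet = line
    elif kind == '$MBAR':
        pressure = line
    if all([gyro, accel, magnet, pressure]):
        print('complete IMU sample:', gyro, accel, magnet, pressure)
        gyro = accel = magnet = pressure = None

for line in ['$GYRO,1,2,3', '$ACCEL,4,5,6', '$MAGNET,7,8,9', '$MBAR,1013']:
    handle(line)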
39e5defbb12da62fc132e89437b1ce408b85ec6b
|
Fix parsertests script exit code.
|
spookysys/systemverilog,vaisup/systemverilog-1,svstuff/systemverilog,svstuff/systemverilog,spookysys/systemverilog,vaisup/systemverilog-1,svstuff/systemverilog,vaisup/systemverilog-1,svstuff/systemverilog,spookysys/systemverilog,svstuff/systemverilog,vaisup/systemverilog-1,spookysys/systemverilog,spookysys/systemverilog,spookysys/systemverilog,svstuff/systemverilog,vaisup/systemverilog-1,vaisup/systemverilog-1,vaisup/systemverilog-1,svstuff/systemverilog,spookysys/systemverilog
|
parsertests.py
|
parsertests.py
|
#!/usr/bin/env python
import sys
import os
import os.path as path
import glob
import subprocess as sp
from collections import namedtuple
from multiprocessing import Pool
# TODO: remove this silly script and write the tests in scala/gradle.
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
statuscolor = {
'PASS': bcolors.OKGREEN,
'WARN': bcolors.WARNING,
'FAIL': bcolors.FAIL,
}
Result = namedtuple('Result', ['testname', 'testout', 'status'])
def run_test(test):
cmd = ['./build/install/svparse/bin/svparse', os.path.join(test, "project.xml")]
testenv = os.environ.copy()
testenv['SVPARSE_EXTRA'] = 'svparse_extra_test.xml'
pid = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.STDOUT, env=testenv)
rawout, _ = pid.communicate()
testdir, testname = os.path.split(test)
testout = os.path.join(testdir, '{}.log'.format(testname))
with open(testout, 'w') as f:
f.write(rawout)
if pid.returncode != 0:
return Result(test, testout, 'FAIL')
if detected_antlr_warnings(rawout):
return Result(test, testout, 'WARN')
return Result(test, testout, 'PASS')
def detected_antlr_warnings(testout):
return "reportAmbiguity" in testout
def main():
n_total = 0
n_pass = 0
n_fail = 0
p = Pool(4)
for result in p.imap_unordered(run_test, [f for f in glob.glob("parsertests/*") if os.path.isdir(f)]):
n_total += 1
status = statuscolor[result.status] + result.status + bcolors.ENDC
if result.status != 'PASS':
if result.status == 'FAIL':
n_fail += 1
print "{}: {} - {}".format(status, result.testname, result.testout)
else:
n_pass += 1
print "{}: {}".format(status, result.testname)
print "Summary:"
print "- PASS: {}".format(n_pass)
print "- FAIL: {}".format(n_fail)
print "- WARN: {}".format(n_total - n_fail - n_pass)
if n_fail == 0:
return 0
return 1
if __name__ == "__main__":
sys.exit(main())
|
#!/usr/bin/env python
import sys
import os
import os.path as path
import glob
import subprocess as sp
from collections import namedtuple
from multiprocessing import Pool
# TODO: remove this silly script and write the tests in scala/gradle.
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
statuscolor = {
'PASS': bcolors.OKGREEN,
'WARN': bcolors.WARNING,
'FAIL': bcolors.FAIL,
}
Result = namedtuple('Result', ['testname', 'testout', 'status'])
def run_test(test):
cmd = ['./build/install/svparse/bin/svparse', os.path.join(test, "project.xml")]
testenv = os.environ.copy()
testenv['SVPARSE_EXTRA'] = 'svparse_extra_test.xml'
pid = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.STDOUT, env=testenv)
rawout, _ = pid.communicate()
testdir, testname = os.path.split(test)
testout = os.path.join(testdir, '{}.log'.format(testname))
with open(testout, 'w') as f:
f.write(rawout)
if pid.returncode != 0:
return Result(test, testout, 'FAIL')
if detected_antlr_warnings(rawout):
return Result(test, testout, 'WARN')
return Result(test, testout, 'PASS')
def detected_antlr_warnings(testout):
return "reportAmbiguity" in testout
def main():
n_not_passing = 0
p = Pool(4)
for result in p.imap_unordered(run_test, [f for f in glob.glob("parsertests/*") if os.path.isdir(f)]):
status = statuscolor[result.status] + result.status + bcolors.ENDC
if result.status != 'PASS':
n_not_passing += 1
print "{}: {} - {}".format(status, result.testname, result.testout)
else:
print "{}: {}".format(status, result.testname)
print "Summary: {} tests did not pass cleanly".format(n_not_passing)
if __name__ == "__main__":
sys.exit(main())
|
mit
|
Python
|
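The actual fix is the return value of `main()`: the old version returned `None`, which `sys.exit()` treats as success, so failing parser runs still exited 0. The convention in miniature:
import sys

def main():
    failures = 0                     # imagine the test runs happened here
    return 1 if failures else 0      # explicit status instead of implicit None

if __name__ == '__main__':
    sys.exit(main())                 # sys.exit(None) would always exit 0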
a9b7f92edb7b3a73a2b38a45c5ad6a0deee18e19
|
Add GA tracking
|
keyan/blog
|
pelicanconf.py
|
pelicanconf.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'keyan'
SITENAME = u'keyan pishdadian'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/Detroit'
DEFAULT_LANG = u'en'
THEME = "themes/flasky"
# Site analytics
GOOGLE_ANALYTICS_ACCOUNT = "UA-93664476-1"
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Navigation sections and relative URL:
SECTIONS = [('blog', 'index.html'),
('archive', 'archives.html'),
('about', 'pages/about-me.html')]
DEFAULT_CATEGORY = 'Uncategorized'
DATE_FORMAT = {'en': '%m %d %Y'}
DEFAULT_DATE_FORMAT = '%m %d %Y'
DEFAULT_PAGINATION = False
PDF_GENERATOR = False
REVERSE_CATEGORY_ORDER = True
FEED_RSS = 'feeds/all.rss.xml'
CATEGORY_FEED_RSS = 'feeds/%s.rss.xml'
OUTPUT_PATH = 'output'
# static paths will be copied under the same name
STATIC_PATHS = ["images"]
# Optional social media links
# =============================
TWITTER_USERNAME = 'keyan__P'
LINKEDIN_URL = 'https://www.linkedin.com/in/keyanp'
GITHUB_URL = 'http://github.com/keyan'
MAIL_USERNAME = 'kpishdadian'
MAIL_HOST = 'gmail.com'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'keyan'
SITENAME = u'keyan pishdadian'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/Detroit'
DEFAULT_LANG = u'en'
THEME = "themes/flasky"
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Navigation sections and relative URL:
SECTIONS = [('blog', 'index.html'),
('archive', 'archives.html'),
('about', 'pages/about-me.html')]
DEFAULT_CATEGORY = 'Uncategorized'
DATE_FORMAT = {'en': '%m %d %Y'}
DEFAULT_DATE_FORMAT = '%m %d %Y'
DEFAULT_PAGINATION = False
PDF_GENERATOR = False
REVERSE_CATEGORY_ORDER = True
FEED_RSS = 'feeds/all.rss.xml'
CATEGORY_FEED_RSS = 'feeds/%s.rss.xml'
OUTPUT_PATH = 'output'
# static paths will be copied under the same name
STATIC_PATHS = ["images"]
# Optional social media links
# =============================
TWITTER_USERNAME = 'keyan__P'
LINKEDIN_URL = 'https://www.linkedin.com/in/keyanp'
GITHUB_URL = 'http://github.com/keyan'
MAIL_USERNAME = 'kpishdadian'
MAIL_HOST = 'gmail.com'
|
mit
|
Python
|
c8a1b3a7475d3e964814cb9be2a82d00bba745d0
|
Update settings
|
yannbaumgartner/histoires-de-briques,yannbaumgartner/histoires-de-briques
|
pelicanconf.py
|
pelicanconf.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'Yann Baumgartner'
SITENAME = u'Histoires de briques'
SITESUBTITLE = 'Un site dédié aux LEGO®'
SITEURL = ''
GITHUB_URL = 'https://github.com/yannbaumgartner/histoires-de-briques.git'
PATH = 'content'
PAGE_PATHS = ['pages']
ARTICLE_PATHS = ['articles']
STATIC_PATHS = ['images', 'extra/robots.txt', 'extra/favicon.png']
PLUGIN_PATHS = ['plugins']
EXTRA_PATH_METADATA = {
'extra/robots.txt': {'path': 'robots.txt'},
'extra/favicon.ico': {'path': 'favicon.png'}
}
PLUGINS = ['tag_cloud', 'tipue_search']
TIMEZONE = 'Europe/Paris'
DEFAULT_LANG = u'fr'
LOCALE = 'fr_FR.utf8'
DATE_FORMATS = {
'fr': '%-d %B %Y',
}
DEFAULT_PAGINATION = False
SUMMARY_MAX_LENGTH = None
THEME = 'themes/pelican-bootstrap3'
BOOTSTRAP_THEME = 'flatly'
DIRECT_TEMPLATES = (('index', 'tags', 'categories', 'authors', 'archives', 'search'))
DISPLAY_ARTICLE_INFO_ON_INDEX = True
DISPLAY_TAGS_ON_SIDEBAR = 'True'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'Yann Baumgartner'
SITENAME = u'Histoires de briques'
SITESUBTITLE = 'Un site dédié aux LEGO®'
SITEURL = ''
GITHUB_URL = 'https://github.com/yannbaumgartner/histoires-de-briques.git'
PATH = 'content'
PAGE_PATHS = ['pages']
ARTICLE_PATHS = ['articles']
STATIC_PATHS = ['images', 'extra/robots.txt', 'extra/favicon.png']
PLUGIN_PATHS = ['plugins']
EXTRA_PATH_METADATA = {
'extra/robots.txt': {'path': 'robots.txt'},
'extra/favicon.ico': {'path': 'favicon.png'}
}
PLUGINS = ['tag_cloud', 'tipue_search']
TIMEZONE = 'Europe/Paris'
DEFAULT_LANG = u'fr'
LOCALE = 'fr_FR.utf8'
DATE_FORMATS = {
'fr': '%-d %B %Y',
}
DEFAULT_PAGINATION = False
SUMMARY_MAX_LENGTH = None
THEME = 'themes/pelican-bootstrap3'
BOOTSTRAP_THEME = 'flatly'
DIRECT_TEMPLATES = (('index', 'tags', 'categories', 'authors', 'archives', 'search'))
DISPLAY_TAGS_ON_SIDEBAR = 'True'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),)
|
agpl-3.0
|
Python
|
0a6e486a27a48c59e48f458ce1217848ed73ff24
|
Fix time zone config
|
irskep/nestweb,irskep/nestweb
|
pelicanconf.py
|
pelicanconf.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'Eric, Etan, Fred, Genny, and Steve'
SITENAME = u'The Nest'
SITEURL = ''
TIMEZONE = 'PST'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'Eric, Etan, Fred, Genny, and Steve'
SITENAME = u'The Nest'
SITEURL = ''
TIMEZONE = 'Europe/Paris'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
|
bsd-3-clause
|
Python
|
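A caution on the new value itself: Pelican hands TIMEZONE to pytz, and plain 'PST' is not an IANA zone name; stock pytz raises UnknownTimeZoneError for it (the legacy POSIX spelling it does accept is 'PST8PDT'). If US Pacific time was the intent, the IANA name is the safe choice; assuming pytz is installed:
import pytz

print(pytz.timezone('America/Los_Angeles'))   # valid IANA zone for US Pacific
try:
    pytz.timezone('PST')
except pytz.exceptions.UnknownTimeZoneError:
    print("plain 'PST' is not in the tz database")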
c08e28a23778280e577793156ce5b455ba80f92f
|
Tag new release: 2.3.0
|
Floobits/floobits-sublime,Floobits/floobits-sublime
|
floo/version.py
|
floo/version.py
|
PLUGIN_VERSION = '2.3.0'
# The line above is auto-generated by tag_release.py. Do not change it manually.
try:
from .common import shared as G
assert G
except ImportError:
from common import shared as G
G.__VERSION__ = '0.03'
G.__PLUGIN_VERSION__ = PLUGIN_VERSION
|
PLUGIN_VERSION = '2.2.13'
# The line above is auto-generated by tag_release.py. Do not change it manually.
try:
from .common import shared as G
assert G
except ImportError:
from common import shared as G
G.__VERSION__ = '0.03'
G.__PLUGIN_VERSION__ = PLUGIN_VERSION
|
apache-2.0
|
Python
|
fcc77ac1557ab7f5c3d5605240ea505b8b61b321
|
Update utils.py
|
GuidoE/django-follow,GuidoE/django-follow
|
follow/utils.py
|
follow/utils.py
|
from django.core.urlresolvers import reverse
from django.db.models.fields.related import ManyToManyField, ForeignKey
from follow.models import Follow
from follow.registry import registry, model_map
from django import VERSION as DjangoVersion
if float('%s.%s' % DjangoVersion[:2]) >= 1.7:
module_name = 'model_name'
else:
module_name = 'module_name'
def get_followers_for_object(instance):
return Follow.objects.get_follows(instance)
def register(model, field_name=None, related_name=None, lookup_method_name='get_follows'):
"""
This registers any model class to be follow-able.
"""
if model in registry:
return
registry.append(model)
if not field_name:
field_name = 'target_%s' % model._meta.__getattribute__(module_name)
if not related_name:
related_name = 'follow_%s' % model._meta.__getattribute__(module_name)
field = ForeignKey(model, related_name=related_name, null=True,
blank=True, db_index=True)
field.contribute_to_class(Follow, field_name)
setattr(model, lookup_method_name, get_followers_for_object)
model_map[model] = [related_name, field_name]
def follow(user, obj):
""" Make a user follow an object """
follow, created = Follow.objects.get_or_create(user, obj)
return follow
def unfollow(user, obj):
""" Make a user unfollow an object """
try:
follow = Follow.objects.get_follows(obj).get(user=user)
follow.delete()
return follow
except Follow.DoesNotExist:
pass
def toggle(user, obj):
""" Toggles a follow status. Useful function if you don't want to perform follow
checks but just toggle it on / off. """
if Follow.objects.is_following(user, obj):
return unfollow(user, obj)
return follow(user, obj)
def follow_link(object):
return reverse('follow.views.toggle', args=[object._meta.app_label, object._meta.object_name.lower(), object.pk])
def unfollow_link(object):
return reverse('follow.views.toggle', args=[object._meta.app_label, object._meta.object_name.lower(), object.pk])
def toggle_link(object):
return reverse('follow.views.toggle', args=[object._meta.app_label, object._meta.object_name.lower(), object.pk])
def follow_url(user, obj):
""" Returns the right follow/unfollow url """
return toggle_link(obj)
|
from django.core.urlresolvers import reverse
from django.db.models.fields.related import ManyToManyField, ForeignKey
from follow.models import Follow
from follow.registry import registry, model_map
from django import VERSION as DjangoVersion
if float('%s.%s' % DjangoVersion[:2]) > 1.7:
module_name = 'model_name'
else:
module_name = 'module_name'
def get_followers_for_object(instance):
return Follow.objects.get_follows(instance)
def register(model, field_name=None, related_name=None, lookup_method_name='get_follows'):
"""
This registers any model class to be follow-able.
"""
if model in registry:
return
registry.append(model)
if not field_name:
field_name = 'target_%s' % model._meta.__getattribute__(module_name)
if not related_name:
related_name = 'follow_%s' % model._meta.__getattribute__(module_name)
field = ForeignKey(model, related_name=related_name, null=True,
blank=True, db_index=True)
field.contribute_to_class(Follow, field_name)
setattr(model, lookup_method_name, get_followers_for_object)
model_map[model] = [related_name, field_name]
def follow(user, obj):
""" Make a user follow an object """
follow, created = Follow.objects.get_or_create(user, obj)
return follow
def unfollow(user, obj):
""" Make a user unfollow an object """
try:
follow = Follow.objects.get_follows(obj).get(user=user)
follow.delete()
return follow
except Follow.DoesNotExist:
pass
def toggle(user, obj):
""" Toggles a follow status. Useful function if you don't want to perform follow
checks but just toggle it on / off. """
if Follow.objects.is_following(user, obj):
return unfollow(user, obj)
return follow(user, obj)
def follow_link(object):
return reverse('follow.views.toggle', args=[object._meta.app_label, object._meta.object_name.lower(), object.pk])
def unfollow_link(object):
return reverse('follow.views.toggle', args=[object._meta.app_label, object._meta.object_name.lower(), object.pk])
def toggle_link(object):
return reverse('follow.views.toggle', args=[object._meta.app_label, object._meta.object_name.lower(), object.pk])
def follow_url(user, obj):
""" Returns the right follow/unfollow url """
return toggle_link(obj)
|
mit
|
Python
|
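The one-character change (`>` to `>=`) makes Django 1.7 itself take the `model_name` branch, but comparing versions through `float('%s.%s' % DjangoVersion[:2])` stays fragile: Django 1.10 turns into the float 1.1 and compares below 1.7. Element-wise tuple comparison avoids the trap:
# (1, 10) > (1, 7) holds element-wise, while float('1.10') == 1.1 sorts below 1.7.
for version in [(1, 6, 0), (1, 7, 2), (1, 10, 1)]:
    as_float = float('%s.%s' % version[:2])
    print(version, 'float:', as_float >= 1.7, 'tuple:', version[:2] >= (1, 7))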
2191f877270fc984d5a8e7cc2ffe9ab8c1630101
|
fix style
|
fgirault/smeuhsocial,amarandon/smeuhsocial,fgirault/smeuhsocial,fgirault/smeuhsocial,amarandon/smeuhsocial,amarandon/smeuhsocial
|
apps/smeuhoverride/views.py
|
apps/smeuhoverride/views.py
|
# Create your views here.
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.auth.models import User
from django.http import Http404, HttpResponse
from django.contrib.auth.decorators import login_required
from tagging.models import Tag
from tagging.utils import calculate_cloud, LOGARITHMIC
from pinax.apps.blog.models import Post
from photos.models import Image
class TagInTheCloud:
"""
a fake Tag model to feed the cloud
"""
def __init__(self, name, count, *args):
self.name = name
self.count = count
def tag_index(request, template_name="tagging_ext/index.html", *args, **kw):
query = """
SELECT tag.name as name, COUNT(tag_item.tag_id) as counter, tag_item.tag_id as tag_id
FROM tagging_taggeditem as tag_item
INNER JOIN tagging_tag as tag ON (tag.id = tag_item.tag_id)
GROUP BY tag.name, tag_id
ORDER BY tag.name
"""
cursor = connection.cursor()
cursor.execute(query)
tags = calculate_cloud(
[ TagInTheCloud(*row) for row in cursor ],
steps=5,
distribution=LOGARITHMIC
)
return render_to_response(template_name, {'tags': tags},
context_instance=RequestContext(request))
def user_blog_index(request, username, template_name="blog/user_blog.html"):
blogs = Post.objects.filter(status=2).select_related(depth=1).order_by("-publish")
if username is not None:
user = get_object_or_404(User, username=username.lower())
blogs = blogs.filter(author=user)
return render_to_response(template_name, {
"blogs": blogs,
"username": username,
}, context_instance=RequestContext(request))
def blog_post_source(request, username, slug):
post = get_object_or_404(Post, slug=slug,
author__username=username)
if post.status == 1 and post.author != request.user:
raise Http404
return HttpResponse(post.body, mimetype="text/plain; charset=utf-8")
|
# Create your views here.
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.auth.models import User
from django.http import Http404, HttpResponse
from django.contrib.auth.decorators import login_required
from tagging.models import Tag
from tagging.utils import calculate_cloud, LOGARITHMIC
from pinax.apps.blog.models import Post
from photos.models import Image
class TagInTheCloud:
"""
a fake Tag model to feed the cloud
"""
def __init__(self, name, count, _):
self.name = name
self.count = count
def tag_index(request, template_name="tagging_ext/index.html", min_size=0, limit=1000):
query = """
SELECT tag.name as name, COUNT(tag_item.tag_id) as counter, tag_item.tag_id as tag_id
FROM tagging_taggeditem as tag_item
INNER JOIN tagging_tag as tag ON (tag.id = tag_item.tag_id)
GROUP BY tag.name, tag_id
ORDER BY tag.name
"""
cursor = connection.cursor()
cursor.execute(query)
tags = calculate_cloud(
[ TagInTheCloud(*row) for row in cursor ],
steps=5,
distribution=LOGARITHMIC
)
return render_to_response(template_name, {'tags': tags},
context_instance=RequestContext(request))
def user_blog_index(request, username, template_name="blog/user_blog.html"):
blogs = Post.objects.filter(status=2).select_related(depth=1).order_by("-publish")
if username is not None:
user = get_object_or_404(User, username=username.lower())
blogs = blogs.filter(author=user)
return render_to_response(template_name, {
"blogs": blogs,
"username": username,
}, context_instance=RequestContext(request))
def blog_post_source(request, username, slug):
post = get_object_or_404(Post, slug=slug,
author__username=username)
if post.status == 1 and post.author != request.user:
raise Http404
return HttpResponse(post.body, mimetype="text/plain; charset=utf-8")
|
mit
|
Python
|
1ca331975ab91cf619c3c785ead5b352af47963a
|
Use local thread storage.
|
modoboa/modoboa,bearstech/modoboa,tonioo/modoboa,modoboa/modoboa,bearstech/modoboa,bearstech/modoboa,modoboa/modoboa,bearstech/modoboa,tonioo/modoboa,tonioo/modoboa,modoboa/modoboa
|
modoboa/lib/middleware.py
|
modoboa/lib/middleware.py
|
# coding: utf-8
"""Custom middlewares."""
from threading import local
from django.http import HttpResponseRedirect
from modoboa.lib.exceptions import ModoboaException
from modoboa.lib.signals import request_accessor
from modoboa.lib.web_utils import (
_render_error, ajax_response, render_to_json_response
)
_local_store = local()
class AjaxLoginRedirect(object):
def process_response(self, request, response):
if request.is_ajax():
if type(response) == HttpResponseRedirect:
response.status_code = 278
return response
class CommonExceptionCatcher(object):
"""Modoboa exceptions catcher."""
def process_exception(self, request, exception):
if not isinstance(exception, ModoboaException):
return None
if request.is_ajax() or "/api/" in request.path:
if exception.http_code is None:
return ajax_response(
request, status="ko", respmsg=unicode(exception),
norefresh=True
)
return render_to_json_response(
unicode(exception), status=exception.http_code
)
return _render_error(
request, user_context=dict(error=str(exception))
)
class RequestCatcherMiddleware(object):
"""Simple middleware to store the current request."""
def __init__(self):
_local_store.request = None
request_accessor.connect(self)
def process_request(self, request):
_local_store.request = request
def process_response(self, request, response):
"""Empty store."""
_local_store.request = None
return response
def __call__(self, **kwargs):
return _local_store.request
|
# coding: utf-8
"""Custom middlewares."""
from django.http import HttpResponseRedirect
from modoboa.lib.exceptions import ModoboaException
from modoboa.lib.signals import request_accessor
from modoboa.lib.web_utils import (
_render_error, ajax_response, render_to_json_response
)
from . import singleton
class AjaxLoginRedirect(object):
def process_response(self, request, response):
if request.is_ajax():
if type(response) == HttpResponseRedirect:
response.status_code = 278
return response
class CommonExceptionCatcher(object):
"""Modoboa exceptions catcher."""
def process_exception(self, request, exception):
if not isinstance(exception, ModoboaException):
return None
if request.is_ajax() or "/api/" in request.path:
if exception.http_code is None:
return ajax_response(
request, status="ko", respmsg=unicode(exception),
norefresh=True
)
return render_to_json_response(
unicode(exception), status=exception.http_code
)
return _render_error(
request, user_context=dict(error=str(exception))
)
class RequestCatcherMiddleware(singleton.Singleton):
"""Simple middleware to store the current request.
FIXME: the Singleton hack is used to make tests work. I don't know
why but middlewares are not dropped between test case runs so more
than one instance can be listening to the request_accessor signal
and we don't want that!
"""
def __init__(self):
self._request = None
request_accessor.connect(self)
def process_request(self, request):
self._request = request
def process_response(self, request, response):
"""Empty self._request."""
self._request = None
return response
def __call__(self, **kwargs):
return self._request
|
isc
|
Python
|
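The replacement for the singleton hack is `threading.local`: each thread that touches `_local_store` gets its own `request` slot, so concurrent requests can no longer clobber one another through module-level state. A self-contained sketch of the mechanism:
import threading

store = threading.local()            # one independent namespace per thread

def handle(request_id):
    store.request = request_id       # visible only to the current thread
    # ... request processing would happen here ...
    assert store.request == request_id   # no other thread can overwrite it
    store.request = None             # emptied on the way out, as in the middleware

threads = [threading.Thread(target=handle, args=(i,)) for i in range(8)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print('each thread saw only its own request')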
70ce7627b11bf804660bc66a60910f66f8f106bd
|
Reformat code
|
jdgwartney/boundary-api-cli,jdgwartney/pulse-api-cli,jdgwartney/boundary-api-cli,boundary/boundary-api-cli,jdgwartney/pulse-api-cli,wcainboundary/boundary-api-cli,wcainboundary/boundary-api-cli,boundary/pulse-api-cli,boundary/pulse-api-cli,boundary/boundary-api-cli
|
boundary/metric_delete.py
|
boundary/metric_delete.py
|
#
# Copyright 2014-2015 Boundary, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implements command to remove a metric definition from a Boundary account.
"""
from boundary import ApiCli
"""
Uses the following Boundary API:
http://premium-documentation.boundary.com/v1/delete/metrics/:metric
"""
class MetricDelete(ApiCli):
def __init__(self):
ApiCli.__init__(self)
self.method = "DELETE"
self.metricName = None
def addArguments(self):
ApiCli.addArguments(self)
self.parser.add_argument('-n', '--metric-name', dest='metricName', action='store', required=True,
metavar='metric_name', help='Metric identifier')
def getArguments(self):
'''
Extracts the specific arguments of this CLI
'''
ApiCli.getArguments(self)
if self.args.metricName != None:
self.metricName = self.args.metricName
self.path = "v1/metrics/{0}".format(self.metricName)
def validateArguments(self):
return ApiCli.validateArguments(self)
def getDescription(self):
return "Deletes a metric definition from a Boundary account"
|
#
# Copyright 2014-2015 Boundary, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implements command to remove a metric definition from a Boundary account.
"""
from boundary import ApiCli
"""
Uses the following Boundary API:
http://premium-documentation.boundary.com/v1/delete/metrics/:metric
"""
class MetricDelete (ApiCli):
def __init__(self):
ApiCli.__init__(self)
self.method = "DELETE"
self.metricName = None
def addArguments(self):
ApiCli.addArguments(self)
self.parser.add_argument('-n', '--metric-name', dest='metricName',action='store',required=True,metavar='metric_name',help='Metric identifier')
def getArguments(self):
'''
Extracts the specific arguments of this CLI
'''
ApiCli.getArguments(self)
if self.args.metricName != None:
self.metricName = self.args.metricName
self.path = "v1/metrics/{0}".format(self.metricName)
def validateArguments(self):
return ApiCli.validateArguments(self)
def getDescription(self):
return "Deletes a metric definition from a Boundary account"
|
apache-2.0
|
Python
|
b628e466f86bc27cbe45ec27a02d4774a0efd3bb
|
Clean out dist and build before building
|
relekang/python-semantic-release,relekang/python-semantic-release
|
semantic_release/pypi.py
|
semantic_release/pypi.py
|
"""PyPI
"""
from invoke import run
from semantic_release import ImproperConfigurationError
def upload_to_pypi(
dists: str = 'sdist bdist_wheel',
username: str = None,
password: str = None,
skip_existing: bool = False
):
"""Creates the wheel and uploads to pypi with twine.
:param dists: The dists string passed to setup.py. Default: 'bdist_wheel'
:param username: PyPI account username string
:param password: PyPI account password string
:param skip_existing: Continue uploading files if one already exists. (Only valid when
uploading to PyPI. Other implementations may not support this.)
"""
if username is None or password is None or username == "" or password == "":
raise ImproperConfigurationError('Missing credentials for uploading')
run('rm -rf build dist')
run('python setup.py {}'.format(dists))
run(
'twine upload -u {} -p {} {} {}'.format(
username,
password,
'--skip-existing' if skip_existing else '',
'dist/*'
)
)
run('rm -rf build dist')
|
"""PyPI
"""
from invoke import run
from semantic_release import ImproperConfigurationError
def upload_to_pypi(
dists: str = 'sdist bdist_wheel',
username: str = None,
password: str = None,
skip_existing: bool = False
):
"""Creates the wheel and uploads to pypi with twine.
:param dists: The dists string passed to setup.py. Default: 'bdist_wheel'
:param username: PyPI account username string
:param password: PyPI account password string
:param skip_existing: Continue uploading files if one already exists. (Only valid when
uploading to PyPI. Other implementations may not support this.)
"""
if username is None or password is None or username == "" or password == "":
raise ImproperConfigurationError('Missing credentials for uploading')
run('python setup.py {}'.format(dists))
run(
'twine upload -u {} -p {} {} {}'.format(
username,
password,
'--skip-existing' if skip_existing else '',
'dist/*'
)
)
run('rm -rf build dist')
|
mit
|
Python
|
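Wiping `build/` and `dist/` before calling setup.py matters because `twine upload dist/*` pushes every artifact in the directory, including wheels left over from earlier versions. The added `run('rm -rf build dist')` does this through invoke; a portable standard-library equivalent, sketched:
import shutil

# Remove stale artifacts so 'dist/*' matches only the release being cut.
for stale in ('build', 'dist'):
    shutil.rmtree(stale, ignore_errors=True)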
f6cad1777023ceb53db8599bc2e74bf0ab2aa0a7
|
Prepare for next dev release
|
totem/cluster-orchestrator,totem/cluster-orchestrator,totem/cluster-orchestrator
|
orchestrator/__init__.py
|
orchestrator/__init__.py
|
from __future__ import absolute_import
from celery.signals import setup_logging
import orchestrator.logger
__version__ = '0.5.2'
__author__ = 'sukrit'
orchestrator.logger.init_logging()
setup_logging.connect(orchestrator.logger.init_celery_logging)
|
from __future__ import absolute_import
from celery.signals import setup_logging
import orchestrator.logger
__version__ = '0.5.1'
__author__ = 'sukrit'
orchestrator.logger.init_logging()
setup_logging.connect(orchestrator.logger.init_celery_logging)
|
mit
|
Python
|
569c056e016131ec4325185ee9fe814018d5e1fe
|
Fix problem on no-longer existing bands that are still as logged in session available
|
dennisausbremen/tunefish,dennisausbremen/tunefish,dennisausbremen/tunefish
|
server/bands/__init__.py
|
server/bands/__init__.py
|
from flask import session, redirect, url_for, g, jsonify, Response
from flask.views import MethodView
from server.models import Band
class RestrictedBandPage(MethodView):
def dispatch_request(self, *args, **kwargs):
if not 'bandId' in session:
return redirect(url_for('bands.session.index'))
else:
self.band = Band.query.get(session['bandId'])
if not self.band:
del session['bandId']
return redirect(url_for('bands.session.index'))
else:
g.band = self.band
return super(RestrictedBandPage, self).dispatch_request(*args, **kwargs)
class AjaxException(Exception):
errors = []
def __init__(self, *args):
super(Exception, self).__init__()
self.errors = args
AJAX_SUCCESS = Response(200)
class AjaxForm(MethodView):
def post(self):
if self.form.validate_on_submit():
try:
result = self.on_submit()
if type(result) is Response:
return result
else:
return jsonify(result)
except AjaxException as e:
errors = self.form.errors
if len(e.errors) > 0:
errors['general'] = e.errors
return jsonify(errors=errors), 400
else:
return jsonify(errors=self.form.errors), 400
|
from flask import session, redirect, url_for, g, jsonify, Response
from flask.views import MethodView
from server.models import Band
class RestrictedBandPage(MethodView):
def dispatch_request(self, *args, **kwargs):
if not 'bandId' in session:
return redirect(url_for('bands.session.index'))
else:
self.band = Band.query.get(session['bandId'])
if not self.band:
return redirect(url_for('bands.session.index'))
else:
g.band = self.band
return super(RestrictedBandPage, self).dispatch_request(*args, **kwargs)
class AjaxException(Exception):
errors = []
def __init__(self, *args):
super(Exception, self).__init__()
self.errors = args
AJAX_SUCCESS = Response(200)
class AjaxForm(MethodView):
def post(self):
if self.form.validate_on_submit():
try:
result = self.on_submit()
if type(result) is Response:
return result
else:
return jsonify(result)
except AjaxException as e:
errors = self.form.errors
if len(e.errors) > 0:
errors['general'] = e.errors
return jsonify(errors=errors), 400
else:
return jsonify(errors=self.form.errors), 400
|
apache-2.0
|
Python
|
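Two notes on this handler. The fix deletes the stale `bandId` key when the `Band` row no longer exists, so the session stops carrying a dead id into every dispatch. Separately, `Response(200)` puts 200 into Flask's first positional argument, the response body, not the status code; `Response(status=...)` is the spelling that sets the status. A small illustration, assuming Flask is installed:
from flask import Response

r = Response(200)            # 200 lands in the body slot; status stays the default
print(r.status_code)         # 200, but only because 200 is the default status
r = Response(status=204)     # the keyword is what actually sets the status code
print(r.status_code)         # 204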
08c25e4ff96765c057397582327a36a6a1d3b7cb
|
fix caching unicode error
|
c4fcm/our-cup,c4fcm/our-cup,c4fcm/our-cup
|
ourcup/util/filecache.py
|
ourcup/util/filecache.py
|
import hashlib, os, codecs, logging
'''
Super basic file-based cache (utf-8 friendly). Helpful if you're developing a
webpage scraper and want to be a bit more polite to the server you're scraping
while developing. The idea is that it caches content in files, each named by the
key you pass in (use the md5_key helper to generate keys and make this super easy).
'''
DEFAULT_DIR = "cache"
cache_dir = DEFAULT_DIR
logger = logging.getLogger(__name__)
def md5_key(string):
'''
Use this to generate filename keys
'''
m = hashlib.md5()
m.update(string.encode('utf-8'))
return m.hexdigest()
def set_dir(new_dir = DEFAULT_DIR):
'''
Don't need to call this, unless you want to override the default location
'''
global cache_dir
cache_dir = new_dir
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
logger.info("Caching files to {}".format(cache_dir))
def contains(key):
'''
Returns true if a file named by key is in the cache dir
'''
global cache_dir
return os.path.isfile(os.path.join(cache_dir,key))
def get(key):
'''
Returns the contents of the file named by key from the cache dir.
Returns None if file doesn't exist
'''
global cache_dir
if os.path.isfile(os.path.join(cache_dir,key)):
with codecs.open(os.path.join(cache_dir,key), mode="r",encoding='utf-8') as myfile:
return myfile.read()
return None
def put(key,content):
'''
Creates a file in the cache dir named by key, with the content in it
'''
global cache_dir
logger.debug("caching "+str(key)+" in "+cache_dir)
text_file = codecs.open(os.path.join(cache_dir,key), encoding='utf-8', mode="w")
text_file.write(content.decode('utf-8'))
text_file.close()
|
import hashlib, os, codecs, logging
'''
Super basic file-based cache (utf-8 friendly). Helpful if you're developing a
webpage scraper and want to be a bit more polite to the server you're scraping
while developing. The idea is that it caches content in files, each named by the
key you pass in (use the md5_key helper to generate keys and make this super easy).
'''
DEFAULT_DIR = "cache"
cache_dir = DEFAULT_DIR
logger = logging.getLogger(__name__)
def md5_key(string):
'''
Use this to generate filename keys
'''
m = hashlib.md5()
m.update(string)
return m.hexdigest()
def set_dir(new_dir = DEFAULT_DIR):
'''
Don't need to call this, unless you want to override the default location
'''
global cache_dir
cache_dir = new_dir
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
logger.info("Caching files to {}".format(cache_dir))
def contains(key):
'''
Returns true if a file named by key is in the cache dir
'''
global cache_dir
return os.path.isfile(os.path.join(cache_dir,key))
def get(key):
'''
Returns the contents of the file named by key from the cache dir.
Returns None if file doesn't exist
'''
global cache_dir
if os.path.isfile(os.path.join(cache_dir,key)):
with codecs.open(os.path.join(cache_dir,key), mode="r",encoding='utf-8') as myfile:
return myfile.read()
return None
def put(key,content):
'''
Creates a file in the cache dir named by key, with the content in it
'''
global cache_dir
logger.debug("caching "+str(key)+" in "+cache_dir)
text_file = codecs.open(os.path.join(cache_dir,key), encoding='utf-8', mode="w")
text_file.write(content.decode('utf-8'))
text_file.close()
|
mit
|
Python
|
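The unicode error comes down to hashlib operating on bytes: calling `m.update()` with text blows up on non-ASCII input (UnicodeEncodeError in Python 2, TypeError in Python 3), hence the explicit `.encode('utf-8')` in the key helper. In isolation:
import hashlib

def md5_key(text):
    m = hashlib.md5()
    m.update(text.encode('utf-8'))   # hashlib digests bytes, not text
    return m.hexdigest()

print(md5_key(u'http://example.com/caf\u00e9'))   # safe for non-ASCII URLs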
a3353e1b4c3e181958a877c8e2485c2c7eed4201
|
Update pd_utils for python3
|
ricardog/raster-project,ricardog/raster-project,ricardog/raster-project,ricardog/raster-project,ricardog/raster-project
|
projections/pd_utils.py
|
projections/pd_utils.py
|
import os   # needed for os.SEEK_END below
import numpy.lib
import numpy as np
import pandas as pd
import _pickle as pickle
def save_pandas(fname, data):
'''Save DataFrame or Series
Parameters
----------
fname : str
filename to use
data: Pandas DataFrame or Series
'''
np.save(open(fname, 'wb'), data)
if len(data.shape) == 2:
meta = data.index, data.columns
elif len(data.shape) == 1:
meta = (data.index,)
else:
raise ValueError('save_pandas: Cannot save this type')
s = pickle.dumps(meta)
import pdb; pdb.set_trace()
#s = s.encode('string_escape')
with open(fname, 'ab') as f:
f.seek(0, os.SEEK_END)
f.write(s)
def load_pandas(fname, mmap_mode='r'):
'''Load DataFrame or Series
Parameters
----------
fname : str
filename
mmap_mode : str, optional
Same as numpy.load option
'''
values = np.load(fname, mmap_mode=mmap_mode)
with open(fname, 'rb') as f:
numpy.lib.format.read_magic(f)
numpy.lib.format.read_array_header_1_0(f)
f.seek(values.dtype.alignment * values.size, 1)
data = f.readline()
#meta = pickle.loads(data.decode('string_escape'))
import pdb; pdb.set_trace()
meta = pickle.loads(data)
if len(meta) == 2:
return pd.DataFrame(values, index=meta[0], columns=meta[1])
elif len(meta) == 1:
return pd.Series(values, index=meta[0])
|
import numpy.lib
import numpy as np
import pandas as pd
import _pickle as pickle
def save_pandas(fname, data):
'''Save DataFrame or Series
Parameters
----------
fname : str
filename to use
data: Pandas DataFrame or Series
'''
np.save(open(fname, 'w'), data)
if len(data.shape) == 2:
meta = data.index,data.columns
elif len(data.shape) == 1:
meta = (data.index,)
else:
raise ValueError('save_pandas: Cannot save this type')
s = pickle.dumps(meta)
s = s.encode('string_escape')
with open(fname, 'a') as f:
f.seek(0, 2)
f.write(s)
def load_pandas(fname, mmap_mode='r'):
'''Load DataFrame or Series
Parameters
----------
fname : str
filename
mmap_mode : str, optional
Same as numpy.load option
'''
values = np.load(fname, mmap_mode=mmap_mode)
with open(fname) as f:
numpy.lib.format.read_magic(f)
numpy.lib.format.read_array_header_1_0(f)
f.seek(values.dtype.alignment*values.size, 1)
meta = pickle.loads(f.readline().decode('string_escape'))
if len(meta) == 2:
return pd.DataFrame(values, index=meta[0], columns=meta[1])
elif len(meta) == 1:
return pd.Series(values, index=meta[0])
|
apache-2.0
|
Python
|
006eff6a9376c65e0632efe79ec6d39cbc50f80b
|
remove UI element namespaces
|
tim-janik/rapicorn,tim-janik/rapicorn,tim-janik/rapicorn,tim-janik/rapicorn,tim-janik/rapicorn
|
docs/tutorial/tuthello.py
|
docs/tutorial/tuthello.py
|
# Licensed CC0 Public Domain: http://creativecommons.org/publicdomain/zero/1.0
# [HelloRapicorn-EXAMPLE]
# Load and import a versioned Rapicorn module into the 'Rapicorn' namespace
import Rapicorn1307 as Rapicorn
# Set up the application object, using a unique application name.
app = Rapicorn.app_init ("Hello Rapicorn")
# Define the elements of the dialog window to be displayed.
hello_window = """
<tmpl:define id="hello-window" inherit="Window">
<Alignment padding="15">
<VBox spacing="30">
<Label markup-text="Hello World!"/>
<Button on-click="CLICK">
<Label markup-text="Close" />
</Button>
</VBox>
</Alignment>
</tmpl:define>
"""
# Register the 'hello-window' definition for later use, for this we need
# a unique domain string, it's easiest to reuse the application name.
app.load_string (hello_window)
# The above is all that is needed to allow us to create the window object.
window = app.create_window ("hello-window")
# This function is called to handle the command we use for button clicks.
def command_handler (command_name, args):
# When we see the 'CLICK' command, close down the Application
if command_name == "CLICK":
app.close_all();
# Call the handler when the Window::commands signal is emitted.
window.sig_commands_connect (command_handler)
# Preparations done, now it's time to show the window on the screen.
window.show()
# Pass control to the event loop, to wait and handle user commands.
app.loop()
# [HelloRapicorn-EXAMPLE]
|
# Licensed CC0 Public Domain: http://creativecommons.org/publicdomain/zero/1.0
# [HelloRapicorn-EXAMPLE]
# Load and import a versioned Rapicorn module into the 'Rapicorn' namespace
import Rapicorn1307 as Rapicorn
# Set up the application object, using a unique application name.
app = Rapicorn.app_init ("Hello Rapicorn")
# Define the elements of the dialog window to be displayed.
hello_window = """
<tmpl:define id="hello-window" inherit="Window">
<Alignment padding="15">
<VBox spacing="30">
<Label markup-text="Hello World!"/>
<Button on-click="CLICK">
<Label markup-text="Close" />
</Button>
</VBox>
</Alignment>
</tmpl:define>
"""
# Register the 'hello-window' definition for later use, for this we need
# a unique domain string, it's easiest to reuse the application name.
app.load_string ("HelloRapicorn", hello_window)
# The above is all that is needed to allow us to create the window object.
window = app.create_window ("HelloRapicorn:hello-window")
# This function is called to handle the command we use for button clicks.
def command_handler (command_name, args):
# When we see the 'CLICK' command, close down the Application
if command_name == "CLICK":
app.close_all();
# Call the handler when the Window::commands signal is emitted.
window.sig_commands_connect (command_handler)
# Preparations done, now it's time to show the window on the screen.
window.show()
# Pass control to the event loop, to wait and handle user commands.
app.loop()
# [HelloRapicorn-EXAMPLE]
|
mpl-2.0
|
Python
|
ec2456eac36a96c9819920bf8b4176e6a37ad9a5
|
Rename productclass made during migration
|
KenMutemi/saleor,HyperManTT/ECommerceSaleor,mociepka/saleor,maferelo/saleor,maferelo/saleor,tfroehlich82/saleor,tfroehlich82/saleor,car3oon/saleor,itbabu/saleor,KenMutemi/saleor,KenMutemi/saleor,UITools/saleor,UITools/saleor,jreigel/saleor,mociepka/saleor,tfroehlich82/saleor,itbabu/saleor,car3oon/saleor,UITools/saleor,HyperManTT/ECommerceSaleor,mociepka/saleor,maferelo/saleor,UITools/saleor,itbabu/saleor,HyperManTT/ECommerceSaleor,jreigel/saleor,jreigel/saleor,UITools/saleor,car3oon/saleor
|
saleor/product/migrations/0020_attribute_data_to_class.py
|
saleor/product/migrations/0020_attribute_data_to_class.py
|
from __future__ import unicode_literals
from django.db import migrations, models
def move_data(apps, schema_editor):
Product = apps.get_model('product', 'Product')
ProductClass = apps.get_model('product', 'ProductClass')
for product in Product.objects.all():
attributes = product.attributes.all()
product_class = ProductClass.objects.all()
for attribute in attributes:
product_class = product_class.filter(
variant_attributes__in=[attribute])
product_class = product_class.first()
if product_class is None:
product_class = ProductClass.objects.create(
name='Unnamed product type',
has_variants=True)
product_class.variant_attributes = attributes
product_class.save()
product.product_class = product_class
product.save()
class Migration(migrations.Migration):
dependencies = [
('product', '0019_auto_20161212_0230'),
]
operations = [
migrations.RunPython(move_data),
]
|
from __future__ import unicode_literals
from django.db import migrations, models
def move_data(apps, schema_editor):
Product = apps.get_model('product', 'Product')
ProductClass = apps.get_model('product', 'ProductClass')
for product in Product.objects.all():
attributes = product.attributes.all()
product_class = ProductClass.objects.all()
for attribute in attributes:
product_class = product_class.filter(
variant_attributes__in=[attribute])
product_class = product_class.first()
if product_class is None:
product_class = ProductClass.objects.create(
name='Migrated Product Class',
has_variants=True)
product_class.variant_attributes = attributes
product_class.save()
product.product_class = product_class
product.save()
class Migration(migrations.Migration):
dependencies = [
('product', '0019_auto_20161212_0230'),
]
operations = [
migrations.RunPython(move_data),
]
|
bsd-3-clause
|
Python
|
f31e8215838e40960abff6c86be8c66cbf113c95
|
Make the endpoint return geojson as opposed to wkt geometry
|
Kitware/minerva,Kitware/minerva,Kitware/minerva
|
server/rest/twofishes.py
|
server/rest/twofishes.py
|
import requests
from shapely.wkt import loads
from shapely.geometry import mapping
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource
class TwoFishes(Resource):
def __init__(self):
self.resourceName = 'minerva_geocoder'
self.route('GET', (), self.geocode)
self.route('GET', ('autocomplete',), self.autocomplete)
@access.public
def geocode(self, params):
r = requests.get(params['twofishes'],
params={'query': params['location'],
'responseIncludes': 'WKT_GEOMETRY'})
wkt = r.json()['interpretations'][0]['feature']['geometry']['wktGeometry']
return mapping(loads(wkt))
geocode.description = (
Description('Get geojson for a given location name')
.param('twofishes', 'Twofishes url')
.param('location', 'Location name to get a geojson')
)
@access.public
def autocomplete(self, params):
r = requests.get(params['twofishes'],
params={'autocomplete': True,
'query': params['location'],
'maxInterpretations': 10,
'autocompleteBias': None})
return [i['feature']['matchedName'] for i in r.json()['interpretations']]
autocomplete.description = (
Description('Autocomplete result for a given location name')
.param('twofishes', 'Twofishes url')
.param('location', 'Location name to autocomplete')
)
|
import requests
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource
class TwoFishes(Resource):
def __init__(self):
self.resourceName = 'minerva_geocoder'
self.route('GET', (), self.geocode)
self.route('GET', ('autocomplete',), self.autocomplete)
@access.public
def geocode(self, params):
r = requests.get(params['twofishes'],
params={'query': params['location'],
'responseIncludes': 'WKT_GEOMETRY'})
return r.json()
geocode.description = (
Description('Get geojson for a given location name')
.param('twofishes', 'Twofishes url')
.param('location', 'Location name to get a geojson')
)
@access.public
def autocomplete(self, params):
r = requests.get(params['twofishes'],
params={'autocomplete': True,
'query': params['location'],
'maxInterpretations': 10,
'autocompleteBias': None})
return [i['feature']['matchedName'] for i in r.json()['interpretations']]
autocomplete.description = (
Description('Autocomplete result for a given location name')
.param('twofishes', 'Twofishes url')
.param('location', 'Location name to autocomplete')
)
|
apache-2.0
|
Python
|
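The conversion added here is two shapely calls: `shapely.wkt.loads` parses the WKT string into a geometry object and `shapely.geometry.mapping` renders it as a GeoJSON-style dict. A standalone round trip, assuming shapely is installed:
from shapely.wkt import loads
from shapely.geometry import mapping

wkt = 'POINT (30 10)'
print(mapping(loads(wkt)))   # {'type': 'Point', 'coordinates': (30.0, 10.0)}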
9ece50e71d2c5eab7b97edc5b8bbdfb410ce64bf
|
edit admin.py
|
dresl/django_choice_and_question,dresl/django_choice_and_question
|
polls/admin.py
|
polls/admin.py
|
from django.contrib import admin
from polls.models import Choice, Question
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
class QuestionAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['question_text']}),
('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
]
inlines = [ChoiceInline]
list_display = ('question_text', 'pub_date')
admin.site.register(Question, QuestionAdmin)
|
from django.contrib import admin
from polls.models import Choice, Question
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
class QuestionAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['question_text']}),
('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
]
inlines = [ChoiceInline]
admin.site.register(Question, QuestionAdmin)
|
apache-2.0
|
Python
|
5b63950cb3fa018b63ccb0be3faeceef684f9299
|
Add persistent storage
|
felixbade/webanalytics
|
ipinfo.py
|
ipinfo.py
|
from ipaddress import ip_address, ip_network
import shelve
from ipwhois import IPWhois
ip_whois_shelve_filename = 'ip_whois'
known_networks = shelve.open(ip_whois_shelve_filename)
def updateIpInfo(ip):
info = IPWhois(ip).lookup()
# these two lines might break on some input
net = info['nets'][0]
networks = net['cidr'].split(', ')
for network in networks:
known_networks.update({network: net})
return net
def getIpInfo(ip):
ip = ip_address(ip)
for network in known_networks:
if ip in ip_network(network):
info = known_networks[network]
return info
info = updateIpInfo(ip)
return info
def getISP(ip):
net = getIpInfo(ip)
return net['description']
def getCountry(ip):
net = getIpInfo(ip)
return net['country']
|
from ipaddress import ip_address, ip_network
from ipwhois import IPWhois
known_networks = {}
def updateIpInfo(ip):
info = IPWhois(ip).lookup()
# these two lines might break on some input
net = info['nets'][0]
networks = net['cidr'].split(', ')
for network in networks:
network = ip_network(network)
known_networks.update({network: net})
return net
def getIpInfo(ip):
ip = ip_address(ip)
for network in known_networks:
if ip in network:
info = known_networks[network]
return info
info = updateIpInfo(ip)
return info
def getISP(ip):
net = getIpInfo(ip)
return net['description']
def getCountry(ip):
net = getIpInfo(ip)
return net['country']
|
mit
|
Python
|
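Switching the cache to `shelve` is what makes lookups survive restarts, and it also explains why the keys became plain CIDR strings: shelve keys must be str, so the network text is stored as-is and re-parsed with `ip_network()` at lookup time. A minimal sketch against a temporary file:
import os
import shelve
import tempfile
from ipaddress import ip_address, ip_network

path = os.path.join(tempfile.mkdtemp(), 'ip_whois')
db = shelve.open(path)
db['8.8.8.0/24'] = {'description': 'example network'}   # shelve keys must be str
for cidr in db:
    if ip_address('8.8.8.8') in ip_network(cidr):       # parse back at lookup time
        print(db[cidr]['description'])
db.close()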
f63ddd4c4e98322fcff651aefb298f7724dc9bff
|
Add help text, rename 'history' to graph
|
flukiluke/eris
|
basil.py
|
basil.py
|
import subprocess
import time
import tempfile
last_watered = 0
COOLDOWN = 60
WATER_MAX_SECS = 60
HELPTEXT = {}
def basilcmd(cmds):
output = subprocess.check_output(['ssh', 'rrpi', './basilbot/cli.py', *cmds], stderr=subprocess.STDOUT)
return output
HELPTEXT['moisture'] = 'Check the instantaneous moisture of the pot'
def moisture():
output = basilcmd(['moisture'])
return 'Soil moisture content: ' + output.decode().strip() + '%'
HELPTEXT['history [N]'] = 'Print [N] of the automatic hourly moisture measurements.'
def history(num=12):
output = basilcmd(['history', num])
return '```' + output.decode().strip() + '```'
HELPTEXT['water [time]'] = 'Dispense water for [time] seconds'
def water(runtime):
global last_watered, COOLDOWN, WATER_MAX_SECS
dt = time.time() - last_watered
if runtime <= 0:
return "Nice try, you won't fool me with that one again."
if runtime > WATER_MAX_SECS:
return "Please only water me between 0 and %d seconds." % WATER_MAX_SECS
if dt < COOLDOWN:
return "I was watered %d second(s) ago, but you may tend to me again in a mere %d second(s)" % (int(dt), int(COOLDOWN - dt))
else:
output = basilcmd(['water', str(runtime)])
if output.decode().strip() == 'OK':
last_watered = time.time()
return str(runtime) + " seconds of S i p p"
else:
return "Hydration subsystem reported error: " + output.decode().strip()
HELPTEXT['graph [N]'] = 'Graph [N] of the automatic hourly moisture measurements.'
def graph(samples):
data = basilcmd(['raw_history', str(samples)])
image = tempfile.NamedTemporaryFile(delete=False)
subprocess.run(['gnuplot', 'basil_history.gnuplot'], stdout=image, input=data)
image.close()
return image.name
HELPTEXT['help [command]'] = 'Get detailed help for [command]'
def help(cmd):
str = ''
try:
str += '!basil %s: %s\n' % (cmd, HELPTEXT[cmd])
except KeyError:
str += 'Basil commands:\n'
for text in HELPTEXT:
str += '!basil %s\n' % text
return str
|
import subprocess
import time
import tempfile
last_watered = 0
COOLDOWN = 60
def basilcmd(cmds):
output = subprocess.check_output(['ssh', 'rrpi', './basilbot/cli.py', *cmds], stderr=subprocess.STDOUT)
return output
def moisture():
output = basilcmd(['moisture'])
return 'Soil moisture content: ' + output.decode().strip() + '%'
def history(num=12):
output = basilcmd(['history', num])
return '```' + output.decode().strip() + '```'
def water(runtime):
global last_watered, COOLDOWN
dt = time.time() - last_watered
if runtime <= 0:
return "Nice try, you won't fool me with that one again."
if runtime > 60:
return 'Please only water me between 0 and 60 seconds.'
if dt < COOLDOWN:
return 'I was watered %d second(s) ago, but you may tend to me again in a mere %d second(s)' % (int(dt), int(COOLDOWN - dt))
else:
output = basilcmd(['water', str(runtime)])
if output.decode().strip() == 'OK':
last_watered = time.time()
return str(runtime) + " seconds of S i p p"
else:
return "Hydration subsystem reported error: " + output.decode().strip()
def ghistory(samples):
data = basilcmd(['raw_history', str(samples)])
image = tempfile.NamedTemporaryFile(delete=False)
subprocess.run(['gnuplot', 'basil_history.gnuplot'], stdout=image, input=data)
image.close()
return image.name
|
mit
|
Python
|
6c3ba0617575d2d178c2f1a2632cea5f5ba09d5f
|
Revert "Updated License"
|
DatiData/grabLocation_py
|
grabLocation.py
|
grabLocation.py
|
from geopy.geocoders import GoogleV3
from geopy.exc import GeocoderTimedOut
import time # For rate limiting purposes
# Using Geopy copyright 2006-2016 geopy authors
# Geopy available at https://github.com/geopy/geopy
# This program is copyright 2017 Joseph Johaneman
# And is released under the 3 Clause BSD License
# What this does: this program reads in a list of school names and
# prints out a tab-separated list of school name, latitude and
# longitude. Obviously, the output can be redirected to a text file
# by redirecting standard output.
# Create the Google Maps API Object. Note you need an API Key
googleLocator=GoogleV3(api_key='<Your Google Map Geoencode API Key>')
# First we need the list of schools
filename="SchoolList.txt"
# Create a list to store the school names loaded from a file
with open(filename) as f:
schools=f.read().splitlines()
# print header
print "School Name\tLatitude\tLongitude"
# Loop through the school names and get locations
for i in schools:
try: #Exception handling is important!
location=googleLocator.geocode(i, exactly_one=True)
except GeocoderTimedOut: # in case we time out:
print i, "\t0\t-1" # print 0, -1. We'll check for it later
else: # Okay we didn't time out
        if location is not None: # if we find something
print i, "\t", location.latitude, "\t", location.longitude #print it
else: # Didn't find it. Print zeroes
print i, "\t0\t0" # otherwise print 0s. We'll check for it later
time.sleep(.3) # This waits 300 milliseconds between requests to be nice
# Note: I chose to print 0, -1 for timeouts and 0, 0 for not found so I'd know
# how many exceptions were thrown.
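# --- Hedged alternative sketch (editor's addition): newer geopy releases
# ship a RateLimiter helper that can replace the manual time.sleep(.3)
# above; min_delay_seconds mirrors the 300 ms pause. Left commented out
# because it assumes a geopy version that provides geopy.extra. ---
# from geopy.extra.rate_limiter import RateLimiter
# geocode = RateLimiter(googleLocator.geocode, min_delay_seconds=0.3)
# location = geocode(i, exactly_one=True)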
|
from geopy.geocoders import GoogleV3
from geopy.exc import GeocoderTimedOut
import time # For rate limiting purposes
# Using Geopy copyright 2006-2016 geopy authors
# Geopy available at https://github.com/geopy/geopy
# This program is copyright 2017 Joseph Johaneman
# And is released under the MIT License
# What this does: this program reads in a list of school names and
# prints out a tab-separated list of school name, latitude and
# longitude. Obviously, the output can be redirected to a text file
# by redirecting standard output.
# Create the Google Maps API Object. Note you need an API Key
googleLocator=GoogleV3(api_key='<Your Google Map Geoencode API Key>')
# First we need the list of schools
filename="SchoolList.txt"
# Create a list to store the school names loaded from a file
with open(filename) as f:
schools=f.read().splitlines()
# print header
print "School Name\tLatitude\tLongitude"
# Loop through the school names and get locations
for i in schools:
try: #Exception handling is important!
location=googleLocator.geocode(i, exactly_one=True)
except GeocoderTimedOut: # in case we time out:
print i, "\t0\t-1" # print 0, -1. We'll check for it later
else: # Okay we didn't time out
        if location is not None: # if we find something
print i, "\t", location.latitude, "\t", location.longitude #print it
else: # Didn't find it. Print zeroes
print i, "\t0\t0" # otherwise print 0s. We'll check for it later
time.sleep(.3) # This waits 300 milliseconds between requests to be nice
# Note: I chose to print 0, -1 for timeouts and 0, 0 for not found so I'd know
# how many exceptions were thrown.
|
mit
|
Python
|