commit (stringlengths 40-40) | subject (stringlengths 4-1.73k) | repos (stringlengths 5-127k) | old_file (stringlengths 2-751) | new_file (stringlengths 2-751) | new_contents (stringlengths 1-8.98k) | old_contents (stringlengths 0-6.59k) | license (stringclasses, 13 values) | lang (stringclasses, 23 values) |
---|---|---|---|---|---|---|---|---|
be530dc2e18ccbeeb3e4396f47d2a527364e6ab1 | Add migration for ADS.added_via | openmaraude/APITaxi,openmaraude/APITaxi | migrations/versions/f8c0bde5d368_match_sqlalchemy_defintion_and_actual_.py | migrations/versions/f8c0bde5d368_match_sqlalchemy_defintion_and_actual_.py | """Match sqlalchemy defintion and actual schema
Revision ID: f8c0bde5d368
Revises: ae904ac154cf
Create Date: 2019-11-19 11:24:40.555110
"""
# revision identifiers, used by Alembic.
revision = 'f8c0bde5d368'
down_revision = 'ae904ac154cf'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.alter_column('ADS', 'added_via',
existing_type=postgresql.ENUM('form', 'api', name='via'),
nullable=False)
def downgrade():
op.alter_column('ADS', 'added_via',
existing_type=postgresql.ENUM('form', 'api', name='via'),
nullable=True)
| agpl-3.0 | Python |
|
50415300e3ce1e7cc10782aa4661da14d900d6de | Add code generation tests | opesci/devito,opesci/devito | benchmarks/regression/benchmarks/codegen.py | benchmarks/regression/benchmarks/codegen.py | from examples.seismic.tti.tti_example import tti_setup
repeat = 3
class TTI(object):
space_order = 12
def setup(self):
self.solver = tti_setup(space_order=TTI.space_order)
def time_forward(self):
self.solver.op_fwd()
def time_adjoint(self):
self.solver.op_adj()
| mit | Python |
|
5dd3424e9d95c12c2fb4c770f527b85b928da705 | create a separate module for decoration/coloring | jeffersonmourak/pyTerm,jeffersonmourak/pyTerm,usmanayubsh/pyTerm,usmanayubsh/pyTerm | decorate.py | decorate.py | class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m' | mit | Python |
|
89d08498f7f7e12fa5486eb88f64829621aa27f9 | Add missing migration | opennode/nodeconductor-saltstack | src/nodeconductor_saltstack/saltstack/migrations/0005_label_change.py | src/nodeconductor_saltstack/saltstack/migrations/0005_label_change.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('saltstack', '0004_remove_spl_state'),
]
operations = [
migrations.AlterModelOptions(
name='saltstackservice',
options={'verbose_name': 'SaltStack service', 'verbose_name_plural': 'SaltStack service'},
),
]
| mit | Python |
|
8cde7867eb98cc56533ab0156768ad2409e8c65e | Fix bug | madflow/seahub,Chilledheart/seahub,Chilledheart/seahub,Chilledheart/seahub,cloudcopy/seahub,cloudcopy/seahub,cloudcopy/seahub,madflow/seahub,miurahr/seahub,madflow/seahub,madflow/seahub,madflow/seahub,miurahr/seahub,Chilledheart/seahub,cloudcopy/seahub,Chilledheart/seahub,miurahr/seahub,miurahr/seahub | user_notification.py | user_notification.py | #!/usr/bin/python
# encoding: utf-8
from datetime import datetime
import string
from django.core.mail import send_mail
from notifications.models import UserNotification
import settings
email_template = u'''${username}您好:
您有${cnt}条新消息,请点击下面的链接查看:
${msg_url}
感谢使用我们的网站!
${site_name}团队
'''
today = datetime.now()
site_name = settings.SITE_NAME
subject = u'%s:新消息' % site_name
site_base = settings.SITE_BASE
if site_base[-1] != '/':
site_base += '/'
site_root = settings.SITE_ROOT
if site_root[-1] != '/':
site_root += '/'
url = site_base + site_root + 'home/my/'
notifications = UserNotification.objects.all()
d = {}
for e in notifications:
if today.year != e.timestamp.year or today.month != e.timestamp.month or \
today.day != e.timestamp.day:
continue
if d.has_key(e.to_user):
d[e.to_user] += 1
else:
d[e.to_user] = 1
for k in d.keys():
to_user = k
cnt = d[k]
template = string.Template(email_template)
content = template.substitute(username=to_user, cnt=cnt, msg_url=url, \
site_name=site_name)
send_mail(subject, content, settings.DEFAULT_FROM_EMAIL, [to_user], \
fail_silently=False)
| #!/usr/bin/python
# encoding: utf-8
from datetime import datetime
import string
from django.core.mail import send_mail
from notifications.models import UserNotification
import settings
email_template = u'''${username}您好:
您有${cnt}条新消息,请点击下面的链接查看:
${msg_url}
感谢使用我们的网站!
${site_name}团队
'''
today = datetime.now()
site_name = settings.SITE_NAME
subject = u'%s:新消息' % site_name
site_base = settings.SITE_BASE
if site_base[-1] != '/':
site_base += '/'
url = site_base + 'home/my/'
notifications = UserNotification.objects.all()
d = {}
for e in notifications:
if today.year != e.timestamp.year or today.month != e.timestamp.month or \
today.day != e.timestamp.day:
continue
if d.has_key(e.to_user):
d[e.to_user] += 1
else:
d[e.to_user] = 1
for k in d.keys():
to_user = k
cnt = d[k]
template = string.Template(email_template)
content = template.substitute(username=to_user, cnt=cnt, msg_url=url, \
site_name=site_name)
send_mail(subject, content, settings.DEFAULT_FROM_EMAIL, [to_user], \
fail_silently=False)
| apache-2.0 | Python |
6d8fb7d052dc7341ecd9fb3388b804b82f77fa0f | add example usage | pennlabs/penncoursereview-sdk-python | examples/scores.py | examples/scores.py | """Get a list of average scores for each professor in a department."""
import sys
from collections import defaultdict
import penncoursereview as pcr
def prof_scores(dept):
professor_scores = defaultdict(list)
dept = pcr.Department(dept)
for review in dept.reviews.values:
instructor = review.instructor
rating = review.ratings.rInstructorQuality
professor_scores[instructor.name].append(float(rating))
return professor_scores
def averages(dept):
professor_scores = prof_scores(dept)
for prof, scores in professor_scores.iteritems():
score = sum(scores) / len(scores)
yield prof, score
def main(dept):
for prof, avg in sorted(averages(dept), key=lambda x: x[1]):
print "%s %.2f" % (prof, avg)
if __name__ == "__main__":
if (len(sys.argv) < 2):
print "usage: scores.py <department>"
else:
main(sys.argv[1])
| mit | Python |
|
7dbc289897ecf35f0b709177ac3feacffd8691ca | add a test file | pypot/ek_book | ch_04/testfile.py | ch_04/testfile.py | #this is a test file for eclipse | mit | Python |
|
50194e14a75c3300996f64c415a8593b1243af9f | Add api_helper for testing | j0gurt/ggrc-core,josthkko/ggrc-core,prasannav7/ggrc-core,vladan-m/ggrc-core,plamut/ggrc-core,uskudnik/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,uskudnik/ggrc-core,jmakov/ggrc-core,VinnieJohns/ggrc-core,josthkko/ggrc-core,hasanalom/ggrc-core,NejcZupec/ggrc-core,hyperNURb/ggrc-core,j0gurt/ggrc-core,hasanalom/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,hasanalom/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,vladan-m/ggrc-core,uskudnik/ggrc-core,kr41/ggrc-core,jmakov/ggrc-core,prasannav7/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,jmakov/ggrc-core,VinnieJohns/ggrc-core,vladan-m/ggrc-core,jmakov/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,kr41/ggrc-core,hyperNURb/ggrc-core,selahssea/ggrc-core,uskudnik/ggrc-core,prasannav7/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,uskudnik/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,vladan-m/ggrc-core,andrei-karalionak/ggrc-core,vladan-m/ggrc-core,hyperNURb/ggrc-core,hasanalom/ggrc-core,selahssea/ggrc-core,hyperNURb/ggrc-core,edofic/ggrc-core,hasanalom/ggrc-core,hyperNURb/ggrc-core | src/tests/ggrc/api_helper.py | src/tests/ggrc/api_helper.py | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from ggrc.app import app
from ggrc.services.common import Resource
from ggrc import services
import inspect
import flask
import logging
from sqlalchemy.orm.collections import InstrumentedList
# style: should the class name be all capitals?
class Api():
def __init__(self):
self.tc = app.test_client()
self.tc.get("/login")
self.resource = Resource()
self.service_dict = {s.model_class.__name__: s.name
for s in services.all_services()}
self.headers = {'Content-Type': 'application/json',
"X-Requested-By": "gGRC"
}
self.user_headers = {}
def set_user(self, person=None):
if person:
self.user_headers = {
"X-ggrc-user": self.resource.as_json({
"name": person.name,
"email": person.email,
})
}
else:
self.user_headers = {}
self.tc.get("/logout")
self.tc.get("/login", headers=self.user_headers)
def get_service(self, obj):
if inspect.isclass(obj):
return self.service_dict[obj.__name__]
else:
return self.service_dict[obj.__class__.__name__]
def api_link(self, obj, obj_id=None):
obj_id = "" if obj_id is None else "/" + str(obj_id)
return "/api/%s%s" % (self.get_service(obj), obj_id)
def data_to_json(self, response):
""" add docoded json to response object """
try:
response.json = flask.json.loads(response.data)
except:
response.json = None
return response
def send_request(self, request, obj, data, headers={}, api_link=None):
if api_link is None:
api_link = self.api_link(obj)
headers.update(self.headers)
headers.update(self.user_headers)
json_data = self.resource.as_json(data)
logging.info("request json" + json_data)
response = request(api_link, data=json_data, headers=headers.items())
return self.data_to_json(response)
def put(self, obj, data):
response = self.get(obj, obj.id)
headers = {
"If-Match": response.headers.get("Etag"),
"If-Unmodified-Since": response.headers.get("Last-Modified")
}
api_link = self.api_link(obj, obj.id)
return self.send_request(self.tc.put , obj, data, headers=headers, api_link=api_link)
def post(self, obj, data):
return self.send_request(self.tc.post, obj, data)
def get(self, obj, id):
return self.data_to_json(self.tc.get(self.api_link(obj, id)))
def delete(self, obj, id):
response = self.get(obj, obj.id)
headers = {
"If-Match": response.headers.get("Etag"),
"If-Unmodified-Since": response.headers.get("Last-Modified")
}
headers.update(self.headers)
api_link = self.api_link(obj, obj.id)
return self.tc.delete(api_link, headers=headers)
| apache-2.0 | Python |
|
2b1b1e1d5db7edf4350239b712d2e872e7769d84 | add problem 24 | branning/euler,branning/euler | euler024.py | euler024.py | #!/usr/bin/env python
def nextperm(s):
'''
http://en.wikipedia.org/wiki/Permutation#Generation_in_lexicographic_order
'''
k = None
for i in range(len(s)-1):
if s[i] < s[i+1]:
k = i
if k is None:
# sequence in descending order, last permutation
return None
l = None
for i in range(k+1, len(s)):
if s[i] > s[k]:
l = i
hold = s[l]
s[l] = s[k]
s[k] = hold
# reverse s from k+1 to the end
t = s[k+1:]
t.reverse()
s[k+1:] = t
return s
if __name__=="__main__":
debugging = False
s = range(10)
permutations = 10**6-1
for perm in xrange(permutations):
nextperm(s)
if debugging:
print s
print s
| mit | Python |
|
03279bbc6193d3944dcd2542daa65701a1e0eded | Add solution for problem 26 | cifvts/PyEuler | euler026.py | euler026.py | #!/usr/bin/python
"""
To solve this, we have to find the maximum
Full Reptend Prime in the given limit. To do that, we need
to check if 10 is a primitive root of p.
See http://mathworld.wolfram.com/FullReptendPrime.html for details
"""
from sys import exit
for p in range(999, 7, -2):
for k in range(1, p):
if (10 ** k) % p == 1:
if k != p - 1:
break
else:
print(p)
exit(0)
| mit | Python |
|
8373611a9c5b035953aee208bc65f4be92890314 | add the conversion script | idaholab/raven,joshua-cogliati-inl/raven,joshua-cogliati-inl/raven,joshua-cogliati-inl/raven,idaholab/raven,joshua-cogliati-inl/raven,idaholab/raven,idaholab/raven,idaholab/raven,joshua-cogliati-inl/raven,idaholab/raven,joshua-cogliati-inl/raven,idaholab/raven,joshua-cogliati-inl/raven | scripts/conversionScripts/toTransformationNode.py | scripts/conversionScripts/toTransformationNode.py | import xml.etree.ElementTree as ET
import xml.dom.minidom as pxml
import os
def convert(tree,fileName=None):
"""
Converts input files to be compatible with merge request ....
change the attribute of node <variablesTransformation> from 'model' to 'distribution'
@ In, tree, xml.etree.ElementTree.ElementTree object, the contents of a RAVEN input file
@ Out, tree, xml.etree.ElementTree.ElementTree object, the modified RAVEN input file
"""
simulation = tree.getroot()
if simulation.tag!='Simulation': return tree #this isn't an input file
for distNode in simulation.iter('MultivariateNormal'):
distName = distNode.get('name')
break
for vtNode in simulation.iter('variablesTransformation'):
vtNode.set('distribution', distName)
modelName = vtNode.get('model')
if modelName != None:
del vtNode.attrib['model']
return tree
if __name__=='__main__':
import convert_utils
import sys
convert_utils.standardMain(sys.argv,convert)
| apache-2.0 | Python |
|
55a35c642b64a6bdb8314b9470c1f7fedb16478f | print results | annarev/tensorflow,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,gautam1858/tensorflow,renyi533/tensorflow,renyi533/tensorflow,petewarden/tensorflow,gunan/tensorflow,karllessard/tensorflow,freedomtan/tensorflow,renyi533/tensorflow,gunan/tensorflow,paolodedios/tensorflow,gunan/tensorflow,xzturn/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,yongtang/tensorflow,sarvex/tensorflow,yongtang/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,davidzchen/tensorflow,xzturn/tensorflow,aam-at/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,renyi533/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,freedomtan/tensorflow,tensorflow/tensorflow,annarev/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,gunan/tensorflow,sarvex/tensorflow,gunan/tensorflow,paolodedios/tensorflow,petewarden/tensorflow,freedomtan/tensorflow,freedomtan/tensorflow,aam-at/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,petewarden/tensorflow,Intel-tensorflow/tensorflow,cxxgtxy/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,xzturn/tensorflow,yongtang/tensorflow,renyi533/tensorflow,frreiss/tensorflow-fred,jhseu/tensorflow,gautam1858/tensorflow,cxxgtxy/tensorflow,paolodedios/tensorflow,Intel-Corporation/tensorflow,freedomtan/tensorflow,gunan/tensorflow,jhseu/tensorflow,gunan/tensorflow,gautam1858/tensorflow,xzturn/tensorflow,renyi533/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow,paolodedios/tensorflow,cxxgtxy/tensorflow,yongtang/tensorflow,petewarden/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,freedomtan/tensorflow,xzturn/tensorflow,aldian/tensorflow,aam-at/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,davidzchen/tensorflow,Intel-tensorflow/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,jhseu/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow-pywrap_saved_model,cxxgtxy/tensorflow,tensorflow/tensorflow-pywrap_saved_model,aldian/tensorflow,renyi533/tensorflow,aam-at/tensorflow,xzturn/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,freedomtan/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,aam-at/tensorflow,jhseu/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,aldian/tensorflow,yongtang/tensorflow,jhseu/tensorflow,annarev/tensorflow,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,sarvex/tensorflow,xzturn/tensorflow,Intel-tensorflow/tensorflow,gautam1858/tensorflow,aam-at/tensorflow,sarvex/tensorflow,freedomtan/tensorflow,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,jhseu/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,annarev/tensorflow,jhseu/tensorflow,aam-at/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,aam-at/ten
sorflow,frreiss/tensorflow-fred,petewarden/tensorflow,sarvex/tensorflow,aldian/tensorflow,yongtang/tensorflow,karllessard/tensorflow,cxxgtxy/tensorflow,davidzchen/tensorflow,gautam1858/tensorflow,aldian/tensorflow,gunan/tensorflow,annarev/tensorflow,gunan/tensorflow,jhseu/tensorflow,davidzchen/tensorflow,sarvex/tensorflow,renyi533/tensorflow,tensorflow/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,xzturn/tensorflow,davidzchen/tensorflow,karllessard/tensorflow,petewarden/tensorflow,jhseu/tensorflow,annarev/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,aam-at/tensorflow,gunan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,annarev/tensorflow,freedomtan/tensorflow,karllessard/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,davidzchen/tensorflow,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,yongtang/tensorflow,xzturn/tensorflow,jhseu/tensorflow,petewarden/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,petewarden/tensorflow,xzturn/tensorflow,paolodedios/tensorflow,davidzchen/tensorflow,Intel-tensorflow/tensorflow,renyi533/tensorflow,aldian/tensorflow,sarvex/tensorflow,jhseu/tensorflow,gautam1858/tensorflow,annarev/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,frreiss/tensorflow-fred,Intel-tensorflow/tensorflow,cxxgtxy/tensorflow,yongtang/tensorflow,aam-at/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,gunan/tensorflow,renyi533/tensorflow,cxxgtxy/tensorflow,petewarden/tensorflow,aldian/tensorflow,sarvex/tensorflow,Intel-Corporation/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,petewarden/tensorflow,freedomtan/tensorflow,annarev/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,aldian/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,annarev/tensorflow,aam-at/tensorflow,annarev/tensorflow,renyi533/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,xzturn/tensorflow,yongtang/tensorflow,davidzchen/tensorflow,Intel-tensorflow/tensorflow,jhseu/tensorflow,freedomtan/tensorflow,xzturn/tensorflow,freedomtan/tensorflow,renyi533/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,aam-at/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gunan/tensorflow,davidzchen/tensorflow,karllessard/tensorflow,Intel-Corporation/tensorflow | tensorflow/python/keras/utils/np_utils.py | tensorflow/python/keras/utils/np_utils.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Numpy-related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.utils.to_categorical')
def to_categorical(y, num_classes=None, dtype='float32'):
"""Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
Usage Example:
>>> y = [0, 1, 2, 3]
>>> tf.keras.utils.to_categorical(y, num_classes=4)
array([[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]], dtype=float32)
Arguments:
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
dtype: The data type expected by the input. Default: `'float32'`.
Returns:
A binary matrix representation of the input. The classes axis is placed
last.
"""
y = np.array(y, dtype='int')
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), dtype=dtype)
categorical[np.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
@keras_export('keras.utils.normalize')
def normalize(x, axis=-1, order=2):
"""Normalizes a Numpy array.
Arguments:
x: Numpy array to normalize.
axis: axis along which to normalize.
order: Normalization order (e.g. 2 for L2 norm).
Returns:
A normalized copy of the array.
"""
l2 = np.atleast_1d(np.linalg.norm(x, order, axis))
l2[l2 == 0] = 1
return x / np.expand_dims(l2, axis)
| # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Numpy-related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.utils.to_categorical')
def to_categorical(y, num_classes=None, dtype='float32'):
"""Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
Usage Example:
>>> y = [0, 1, 2, 3]
>>> tf.keras.utils.to_categorical(y, num_classes=4)
array([...], dtype=float32)
Arguments:
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
dtype: The data type expected by the input. Default: `'float32'`.
Returns:
A binary matrix representation of the input. The classes axis is placed
last.
"""
y = np.array(y, dtype='int')
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), dtype=dtype)
categorical[np.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
@keras_export('keras.utils.normalize')
def normalize(x, axis=-1, order=2):
"""Normalizes a Numpy array.
Arguments:
x: Numpy array to normalize.
axis: axis along which to normalize.
order: Normalization order (e.g. 2 for L2 norm).
Returns:
A normalized copy of the array.
"""
l2 = np.atleast_1d(np.linalg.norm(x, order, axis))
l2[l2 == 0] = 1
return x / np.expand_dims(l2, axis)
| apache-2.0 | Python |
ae972cd7fe6856a1265981810ea1d03fc5efcf54 | write test for django admin | liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin | tests/organisations/test_admin.py | tests/organisations/test_admin.py | import pytest
from django.urls import reverse
@pytest.mark.django_db
def test_organisation_admin_form(client, organisation,
admin, user_factory,
group_factory):
client.login(username=admin, password='password')
url = reverse('admin:meinberlin_organisations_organisation_add')
response = client.get(url)
assert response.status_code == 200
data = {'name': 'My Organisation'}
response = client.post(url, data)
assert 1 == 2
| agpl-3.0 | Python |
|
cce3b017f36de8fb8682971e13201c0143c524cf | add indexes to make deleting faster | openstack/aeromancer,stackforge/aeromancer,dhellmann/aeromancer | aeromancer/db/alembic/versions/a3d002d161a_add_indexes.py | aeromancer/db/alembic/versions/a3d002d161a_add_indexes.py | """add indexes
Revision ID: a3d002d161a
Revises: 22e0aa22ab8e
Create Date: 2014-11-24 14:24:29.824147
"""
# revision identifiers, used by Alembic.
revision = 'a3d002d161a'
down_revision = '22e0aa22ab8e'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_index('file_project_idx', 'file', ['project_id'])
op.create_index('line_file_idx', 'line', ['file_id'])
def downgrade():
op.drop_index('line_file_idx', 'line')
op.drop_index('file_project_idx', 'file')
| apache-2.0 | Python |
|
234c03381209d860d7b6ff29263f927736822c1e | Add shellFlags.py (not yet integrated) | nth10sd/funfuzz,nth10sd/funfuzz,MozillaSecurity/funfuzz,MozillaSecurity/funfuzz,MozillaSecurity/funfuzz,nth10sd/funfuzz | js/shellFlags.py | js/shellFlags.py | import random
import os
import subprocess
def memoize(f, cache={}):
'''Function decorator that caches function results.'''
# From http://code.activestate.com/recipes/325205-cache-decorator-in-python-24/#c9
def g(*args, **kwargs):
key = ( f, tuple(args), frozenset(kwargs.items()) )
if key not in cache:
cache[key] = f(*args, **kwargs)
return cache[key]
return g
# This (or something like it) could move to inspectShell.py, where
# it would replace exitCodeDbgOptOrJsShellXpcshell.
@memoize
def shellSupportsFlag(shell, flag):
with open(os.devnull, 'w') as devnull:
retCode = subprocess.call([shell, flag, "-e", "42"], stdout=devnull, stderr=devnull)
assert 0 <= retCode <= 3
return (retCode == 0)
def chance(p):
return random.random() < p
def randomFlagSet(shell):
'''
Returns a random list of command-line flags appropriate for the given shell.
Only works for spidermonkey js shell. Does not work for xpcshell.
'''
args = []
jaeger = chance(.7)
ion = shellSupportsFlag(shell, "--ion") and chance(.7)
infer = chance(.7)
if shellSupportsFlag(shell, "--no-ion"):
# New js shell defaults jaeger, ion, and infer to on! See bug 724751.
if not jaeger:
args.append("--no-jm")
if not ion:
args.append("--no-ion")
if not infer:
args.append("--no-ti")
else:
# Old shells (and xpcshell?) default jaeger, ion, and infer to off.
if jaeger:
args.append("-m")
if ion:
args.append("--ion")
if infer:
args.append("-n")
if jaeger:
if chance(.4):
args.append("--always-mjit") # aka -a
if chance(.2):
args.append("--debugjit") # aka -d
if chance(.2):
args.append("--execute=mjitChunkLimit(" + str(random.randint(5, 100)) + ")")
if ion:
if chance(.4):
args.append("--ion-eager")
if chance(.2):
args.append("--ion-gvn=" + random.choice(["off", "pessimistic", "optimistic"]))
if chance(.2):
args.append("--ion-regalloc=" + random.choice(["greedy", "lsra"]))
if chance(.2):
args.append("--ion-licm=off")
if chance(.2):
args.append("--ion-range-analysis=off")
if chance(.2):
args.append("--ion-inlining=off")
if chance(.2):
args.append("--ion-osr=off")
#if chance(.05):
# args.append("--execute=verifybarriers()")
if chance(.05):
args.append("--dump-bytecode") # aka -D
if shellSupportsFlag(shell, "--gc-zeal=0") and chance(.05):
args.append("--gc-zeal=" + random.choice(["1", "2"]) + "," + str(random.randint(1, 100)) + "," + random.choice(["0", "1"]))
return args
def basicFlagSets(shell):
if shellSupportsFlag(shell, "--no-ion"):
# From https://bugzilla.mozilla.org/attachment.cgi?id=616725
return [
[],
['--no-jm'],
['--ion-gvn=off', '--ion-licm=off'],
['--no-ion', '--no-jm', '--no-ti'],
['--no-ion', '--no-ti'],
['--no-ion', '--no-ti', '-a', '-d'],
['--no-ion', '--no-jm'],
['--no-ion'],
['--no-ion', '-a'],
['--no-ion', '-a', '-d'],
['--no-ion', '-d']
]
else:
return [
# ,m,am,amd,n,mn,amn,amdn,mdn
[],
['-m'],
['-m', 'a'],
['-m', 'a', 'd'],
['-n'],
['-m', '-n'],
['-m', '-n', 'a'],
['-m', '-n', 'a', 'd'],
['-m', '-n', 'd']
]
# Consider adding a function (for compareJIT reduction) that takes a flag set
# and returns all its (meaningful) subsets.
def testRandomFlags():
import sys
for i in range(100):
print ' '.join(randomFlagSet(sys.argv[1]))
if __name__ == "__main__":
testRandomFlags()
| mpl-2.0 | Python |
|
61c2823b5da460001ca02db6b028fc770d204e32 | Add initial test case | tobbez/lys-reader | api_tests.py | api_tests.py | from flask import Flask, g
import unittest
import json
from simplekv.fs import FilesystemStore
from flaskext.kvsession import KVSessionExtension
from api import app, db
from common.database import Database
class APITest(unittest.TestCase):
def setUp(self):
global db
store = FilesystemStore('session')
KVSessionExtension(store, app)
# Load the debug config
app.config.from_pyfile('../config.defaults.py')
app.config.from_pyfile('../config_debug.py')
app.secret_key = app.config['SECRET_KEY']
db = Database(app.config)
self._setup_database()
app.testing = True
self.app = app.test_client(use_cookies=True)
self.csrf = ''
"""Setup the database
by clearing it and loading the schema"""
def _setup_database(self):
con = db.get_connection()
cur = con.cursor()
cur.execute(open('schema.sql', 'r').read())
con.commit()
db.put_away_connection(con)
def test_1_api_base(self):
rv = self.app.get('/api/')
data = json.loads(rv.data)
assert data['status']['code'] is 0
assert data['csrf_token']
self.csrf = data['csrf_token']
if __name__ == '__main__':
unittest.main()
| isc | Python |
|
72db299a3974b05f511420da5e5861f3bead0065 | Create solution_1.py | DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler | problem301/Python/solution_1.py | problem301/Python/solution_1.py | #!/usr/bin/env python
# coding=utf-8
def nim():
binary_map = [0,1]
total = 3
for k in range(28):
binary_map_new = []
for i in range(0, len(binary_map), 2):
if binary_map[i:i+2] == [0,0]:
binary_map_new.extend([0,0,0,1])
total += 3
elif binary_map[i:i+2] == [0,1]:
binary_map_new.extend([0,0])
total += 2
binary_map = binary_map_new
return total
if __name__ == "__main__":
print(nim())
| mit | Python |
|
53bf5c12b77e19d54e3ab50ade8840843cca9649 | add sql group_by unit tests | machow/siuba | siuba/tests/test_verb_group_by.py | siuba/tests/test_verb_group_by.py | """
Note: this test file was heavily influenced by its dbplyr counterpart.
https://github.com/tidyverse/dbplyr/blob/master/tests/testthat/test-verb-group_by.R
"""
from siuba import _, group_by, ungroup, summarize
from siuba.dply.vector import row_number, n
import pytest
from .helpers import assert_equal_query, data_frame, backend_notimpl
from string import ascii_lowercase
DATA = data_frame(x = [1,2,3], y = [9,8,7], g = ['a', 'a', 'b'])
@pytest.fixture(scope = "module")
def df(backend):
return backend.load_df(DATA)
def test_group_by_no_add(df):
gdf = group_by(df, _.x, _.y)
assert gdf.group_by == ("x", "y")
def test_group_by_override(df):
gdf = df >> group_by(_.x, _.y) >> group_by(_.g)
assert gdf.group_by == ("g",)
def test_group_by_add(df):
gdf = group_by(df, _.x) >> group_by(_.y, add = True)
assert gdf.group_by == ("x", "y")
def test_group_by_ungroup(df):
q1 = df >> group_by(_.g)
assert q1.group_by == ("g",)
q2 = q1 >> ungroup()
assert q2.group_by == tuple()
@pytest.mark.skip("TODO: need to test / validate joins first")
def test_group_by_before_joins(df):
assert False
@pytest.mark.skip("TODO: (#52)")
def test_group_by_performs_mutate(df):
assert_equal_query(
df,
group_by(z = _.x + _.y) >> summarize(n = n(_)),
data_frame(z = 10, n = 4)
)
| mit | Python |
|
c0d0496eb2675ba2dbd5dbaa9d4b4c701409308f | Allow IHaskellPrelude.hs to not be formatting checked | wyager/IHaskell,kfiz/IHaskell,kfiz/IHaskell,franklx/IHaskell,qzchenwl/IHaskell,wyager/IHaskell,wyager/IHaskell,gibiansky/IHaskell,franklx/IHaskell,beni55/IHaskell,gibiansky/IHaskell,sumitsahrawat/IHaskell,thomasjm/IHaskell,qzchenwl/IHaskell,artuuge/IHaskell,sumitsahrawat/IHaskell,artuuge/IHaskell,wyager/IHaskell,FranklinChen/IHaskell,thomasjm/IHaskell,thomasjm/IHaskell,artuuge/IHaskell,artuuge/IHaskell,thomasjm/IHaskell,FranklinChen/IHaskell,beni55/IHaskell,FranklinChen/IHaskell,qzchenwl/IHaskell,franklx/IHaskell,FranklinChen/IHaskell,gibiansky/IHaskell,franklx/IHaskell,sumitsahrawat/IHaskell,qzchenwl/IHaskell,beni55/IHaskell,kfiz/IHaskell,kfiz/IHaskell,beni55/IHaskell | verify_formatting.py | verify_formatting.py | #!/usr/bin/env python3
from __future__ import print_function
import sys
import os
import subprocess
def hindent(contents):
with open(".tmp3", "w") as f:
f.write(contents)
with open(".tmp3", "r") as f:
output = subprocess.check_output(["hindent", "--style", "gibiansky"],
stdin=f)
return output.decode('utf-8')
def diff(src1, src2):
# Ignore trailing newlines
if src1[-1] == "\n":
src1 = src1[:-1]
if src2[-1] == "\n":
src2 = src2[:-1]
with open(".tmp1", "w") as f1:
f1.write(src1)
with open(".tmp2", "w") as f2:
f2.write(src2)
try:
output = subprocess.check_output(["diff", ".tmp1", ".tmp2"])
return output.decode('utf-8')
except subprocess.CalledProcessError as e:
return e.output.decode('utf-8')
# Verify that we're in the right directory
try:
open("ihaskell.cabal", "r").close()
except:
print(sys.argv[0], "must be run from the ihaskell directory",
file=sys.stderr)
# Find all the source files
sources = []
for source_dir in ["src", "ipython-kernel", "ihaskell-display"]:
for root, dirnames, filenames in os.walk(source_dir):
# Skip cabal dist directories
if "dist" in root:
continue
for filename in filenames:
# Take Haskell files, but ignore the Cabal Setup.hs
# Also ignore IHaskellPrelude.hs, it uses CPP in weird places
ignored_files = ["Setup.hs", "IHaskellPrelude.hs"]
if filename.endswith(".hs") and filename not in ignored_files:
sources.append(os.path.join(root, filename))
hindent_outputs = {}
for source_file in sources:
print("Formatting file", source_file)
with open(source_file, "r") as f:
original_source = f.read()
formatted_source = hindent(original_source)
hindent_outputs[source_file] = (original_source, formatted_source)
diffs = {filename: diff(original, formatted)
for (filename, (original, formatted)) in hindent_outputs.items()}
incorrect_formatting = False
for filename, diff in diffs.items():
if diff:
incorrect_formatting = True
print('Incorrect formatting in', filename)
print('=' * 10)
print(diff)
if incorrect_formatting:
sys.exit(1)
| #!/usr/bin/env python3
from __future__ import print_function
import sys
import os
import subprocess
def hindent(contents):
with open(".tmp3", "w") as f:
f.write(contents)
with open(".tmp3", "r") as f:
output = subprocess.check_output(["hindent", "--style", "gibiansky"],
stdin=f)
return output.decode('utf-8')
def diff(src1, src2):
# Ignore trailing newlines
if src1[-1] == "\n":
src1 = src1[:-1]
if src2[-1] == "\n":
src2 = src2[:-1]
with open(".tmp1", "w") as f1:
f1.write(src1)
with open(".tmp2", "w") as f2:
f2.write(src2)
try:
output = subprocess.check_output(["diff", ".tmp1", ".tmp2"])
return output.decode('utf-8')
except subprocess.CalledProcessError as e:
return e.output.decode('utf-8')
# Verify that we're in the right directory
try:
open("ihaskell.cabal", "r").close()
except:
print(sys.argv[0], "must be run from the ihaskell directory",
file=sys.stderr)
# Find all the source files
sources = []
for source_dir in ["src", "ipython-kernel", "ihaskell-display"]:
for root, dirnames, filenames in os.walk(source_dir):
# Skip cabal dist directories
if "dist" in root:
continue
for filename in filenames:
# Take Haskell files, but ignore the Cabal Setup.hs
if filename.endswith(".hs") and filename != "Setup.hs":
sources.append(os.path.join(root, filename))
hindent_outputs = {}
for source_file in sources:
print("Formatting file", source_file)
with open(source_file, "r") as f:
original_source = f.read()
formatted_source = hindent(original_source)
hindent_outputs[source_file] = (original_source, formatted_source)
diffs = {filename: diff(original, formatted)
for (filename, (original, formatted)) in hindent_outputs.items()}
incorrect_formatting = False
for filename, diff in diffs.items():
if diff:
incorrect_formatting = True
print('Incorrect formatting in', filename)
print('=' * 10)
print(diff)
if incorrect_formatting:
sys.exit(1)
| mit | Python |
930a8b1a7c980183df5469627a734033ca39a444 | Add functional tests for create_image | openstack-infra/shade,jsmartin/shade,dtroyer/python-openstacksdk,openstack/python-openstacksdk,stackforge/python-openstacksdk,openstack-infra/shade,dtroyer/python-openstacksdk,stackforge/python-openstacksdk,jsmartin/shade,openstack/python-openstacksdk | shade/tests/functional/test_image.py | shade/tests/functional/test_image.py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_image
----------------------------------
Functional tests for `shade` image methods.
"""
import tempfile
import uuid
from shade import openstack_cloud
from shade.tests import base
from shade.tests.functional.util import pick_image
class TestCompute(base.TestCase):
def setUp(self):
super(TestCompute, self).setUp()
# Shell should have OS-* envvars from openrc, typically loaded by job
self.cloud = openstack_cloud()
self.image = pick_image(self.cloud.nova_client.images.list())
def test_create_image(self):
test_image = tempfile.NamedTemporaryFile(delete=False)
test_image.write('\0' * 1024 * 1024)
test_image.close()
image_name = 'test-image-%s' % uuid.uuid4()
try:
self.cloud.create_image(name=image_name,
filename=test_image.name,
disk_format='raw',
container_format='bare',
wait=True)
finally:
self.cloud.delete_image(image_name, wait=True)
| apache-2.0 | Python |
|
59ef02377c41041fd8010231f2c86d1aba072c0f | Complete recur sol | bowen0701/algorithms_data_structures | lc0105_construct_binary_tree_from_preorder_and_inorder_traversal.py | lc0105_construct_binary_tree_from_preorder_and_inorder_traversal.py | """Leetcode 105. Construct Binary Tree from Preorder and Inorder Traversal
Medium
URL: https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/
Given preorder and inorder traversal of a tree, construct the binary tree.
Note: You may assume that duplicates do not exist in the tree.
For example, given
preorder = [3,9,20,15,7]
inorder = [9,3,15,20,7]
Return the following binary tree:
3
/ \
9 20
/ \
15 7
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class SolutionRecur(object):
def _build(self, pre_start, pre_end, in_start, in_end,
inorder_d, preorder, inorder):
if pre_start > pre_end or in_start > in_end:
return None
# Preorder's first is root.
root = TreeNode(preorder[pre_start])
# Get root's pos in inorder.
in_root_pos = inorder_d[root.val]
# Compute the number of left from root.
n_left = in_root_pos - in_start
# Build binary trees for root's left and right.
root.left = self._build(pre_start + 1, pre_start + n_left,
in_start, in_root_pos - 1,
inorder_d, preorder, inorder)
root.right = self._build(pre_start + n_left + 1, pre_end,
in_root_pos + 1, in_end,
inorder_d, preorder, inorder)
return root
def buildTree(self, preorder, inorder):
"""
:type preorder: List[int]
:type inorder: List[int]
:rtype: TreeNode
"""
# Create dict for inorder value->index.
inorder_d = {v: i for (i, v) in enumerate(inorder)}
# Build binary tree by recursion.
return self._build(0, len(preorder) - 1, 0, len(inorder) - 1,
inorder_d, preorder, inorder)
def main():
# Ans:
# 3
# / \
# 9 20
# / \
# 15 7
preorder = [3, 9, 20, 15, 7]
inorder = [9, 3, 15, 20, 7]
root = SolutionRecur().buildTree(preorder, inorder)
print root.val
print root.left.val
print root.right.val
print root.right.left.val
print root.right.right.val
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
|
9c045f7667e1bdc6c9137c3877292907f4623774 | Add a management command to check if URNs are present in the database | ministryofjustice/manchester_traffic_offences_pleas,ministryofjustice/manchester_traffic_offences_pleas,ministryofjustice/manchester_traffic_offences_pleas,ministryofjustice/manchester_traffic_offences_pleas | make_a_plea/management/commands/check_urns_in_db.py | make_a_plea/management/commands/check_urns_in_db.py | import csv
from django.core.management.base import BaseCommand
from apps.plea.models import DataValidation, Case
from apps.plea.standardisers import standardise_urn, format_for_region
class Command(BaseCommand):
help = "Build weekly aggregate stats"
def add_arguments(self, parser):
parser.add_argument('csv_file', nargs='+')
def handle(self, *args, **options):
with open(options['csv_file'][0]) as csvfile:
total_matched, total_missed, matched, missed = 0, 0, 0, 0
for row in csvfile.readlines():
if not row.strip():
continue
elif row.startswith("#"):
if matched > 0 or missed > 0:
print "----------------\nMatched {}\nMissed {}\n\n".format(matched, missed)
total_matched += matched
total_missed += missed
matched = 0
missed = 0
print row
else:
urn = standardise_urn(row)
if Case.objects.filter(urn__iexact=urn).exists():
matched += 1
else:
missed += 1
print "{} - failed".format(urn)
print "----------------\nTotal:\nMatched {}\nMissed {}".format(total_matched, total_missed) | mit | Python |
|
4759cf1b058d1a1b5999882a8b44f84ad89a8a9a | Add tests file | saeschdivara/ArangoPy,saeschdivara/ArangoPy | arangodb/tests.py | arangodb/tests.py | # -*- coding: utf-8 -*-
| mit | Python |
|
64b572a4e1e8359d781591e22439fb432c5860b6 | Create click_location.py | li8bot/OpenCV,li8bot/OpenCV | click_location.py | click_location.py | from PIL import Image
from pylab import *
im = array(Image.open('img.jpg'))
show()
while(1):
imshow(im)
print "Please click 3 points"
x = ginput(1)
print 'you clicked:',x
| mit | Python |
|
607a73317e0497ee206bf8381f7cfa9fe46a1609 | add xml-row-to-vector script | AlexLamson/safebooru-predict | src/4_train_models/vectorize_data.py | src/4_train_models/vectorize_data.py | #!/usr/bin/python3
from bs4 import BeautifulSoup
from tqdm import tqdm
import numpy as np
import pickle
#http://stackoverflow.com/a/27518377/2230446
def get_num_lines(filename):
f = open(filename, "rb")
num_lines = 0
buf_size = 1024 * 1024
read_f = f.raw.read
buf = read_f(buf_size)
while buf:
num_lines += buf.count(b"\n")
buf = read_f(buf_size)
return num_lines
def load_tag_index_map(filename):
print("loading tag index map")
tag_index_map = pickle.load(open(filename, "rb"))
return tag_index_map
# map list of tags to boolean vector
def vectorize(tags, tag_index_map):
vector = np.zeros(len(tag_index_map))
for tag in tags:
if tag in tag_index_map.keys():
index = tag_index_map[tag]
vector[index] = True
return vector
# convert a single line of the xml file to an input vector and output value
def line_to_x_y(line, tag_index_map):
soup = BeautifulSoup(line, "lxml")
post = soup.find("post")
if post is not None:
tags = post["tags"].strip().split(" ")
# print(tags)
x = vectorize(tags, tag_index_map)
y = score = int(post["score"])
return x, y
print("~~~ERROR~~~")
print("line:", line)
print("~~~ERROR~~~")
# convert entire xml file into list of input vectors and list of output values
def file_to_xs_ys(filename, tag_index_map):
num_lines = get_num_lines(filename)
num_dimensions = len(tag_index_map)
xs = np.zeros((num_lines, num_dimensions), dtype=bool)
ys = np.zeros((num_lines,1))
with open(filename, "r") as f:
for i, line in tqdm(enumerate(f), total=num_lines):
x, y = line_to_x_y(line, tag_index_map)
xs[i] = x
ys[i] = y
return xs, ys
def main():
tag_index_map = load_tag_index_map("../../res/tag_index_map.p")
# print(tag_index_map)
filename = "../../res/head_safebooru.xml"
# filename = "../../res/sample_safebooru.xml"
xs, ys = file_to_xs_ys(filename, tag_index_map)
print(xs[0], ys[0])
print(xs[1], ys[1])
if __name__ == "__main__":
main()
| mit | Python |
|
d6e9971ceefc69f0eefc7440cc5e7035e7dcc05d | Add the middleware for reporting errors to gcloud. | fle-internal/content-curation,jonboiser/content-curation,jonboiser/content-curation,jayoshih/content-curation,aronasorman/content-curation,jayoshih/content-curation,fle-internal/content-curation,aronasorman/content-curation,jayoshih/content-curation,DXCanas/content-curation,jonboiser/content-curation,fle-internal/content-curation,DXCanas/content-curation,jonboiser/content-curation,fle-internal/content-curation,DXCanas/content-curation,DXCanas/content-curation,aronasorman/content-curation,jayoshih/content-curation | contentcuration/contentcuration/middleware/ErrorReportingMiddleware.py | contentcuration/contentcuration/middleware/ErrorReportingMiddleware.py | from google.cloud import error_reporting
class ErrorReportingMiddleware(object):
def __init__(self, *args, **kwargs):
self.client = error_reporting.Client()
def process_exception(self, request, exception):
self.client.report_exception()
| mit | Python |
|
e3ab7c126f808864f0458b52f36518e485f546ca | Add a session class to help tie everything together in a convenient way. | 4degrees/harmony | source/harmony/session.py | source/harmony/session.py | # :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import os
from harmony.schema.collection import Collection
from harmony.schema.collector import FilesystemCollector
from harmony.schema.processor import MixinProcessor, ValidateProcessor
from harmony.schema.validator import Validator
class Session(object):
'''A configuration of the various components in a standard way.'''
def __init__(self, collector=None, processors=None, validator_class=None):
'''Initialise session.
*collector* is used to collect schemas for use in the session and
should conform to the :py:class:`~harmony.schema.collector.Collector`
interface. Defaults to a
:py:class:`~harmony.schema.collector.FileSystemCollector` using the
environment variable :envvar:`HARMONY_SCHEMA_PATH` to discover schemas.
*processors* specifies a list of
:py:class:`~harmony.schema.processor.Processor` instances that will
post-process any discovered schemas. If not specified will default to
[:py:class:`~harmony.schema.processor.ValidateProcessor`,
:py:class:`~harmony.schema.processor.MixinProcessor`].
*validator_class* should be the class to use for validation of schemas
and instances. Defaults to
:py:class:`harmony.schema.validator.Validator`.
'''
self.schemas = Collection()
self.collector = collector
if self.collector is None:
paths = os.environ.get('HARMONY_SCHEMA_PATH', '').split(os.pathsep)
self.collector = FilesystemCollector(paths)
self.validator_class = validator_class
if self.validator_class is None:
self.validator_class = Validator
self.processors = processors
if self.processors is None:
self.processors = [
ValidateProcessor(self.validator_class), MixinProcessor()
]
self.refresh()
def refresh(self):
'''Discover schemas and add to local collection.
.. note::
Collection will be processed with self.processors.
'''
self.schemas.clear()
for schema in self.collector.collect():
self.schemas.add(schema)
for processor in self.processors:
processor.process(self.schemas)
| apache-2.0 | Python |
|
c240f7bcd94b2fe6ead8568f6f6f5a69c1853b3a | Add a shelve/unshelve scenario | yamt/tempest,afaheem88/tempest,afaheem88/tempest_neutron,dkalashnik/tempest,nunogt/tempest,pandeyop/tempest,rzarzynski/tempest,vedujoshi/tempest,Vaidyanath/tempest,cisco-openstack/tempest,rakeshmi/tempest,hpcloud-mon/tempest,manasi24/jiocloud-tempest-qatempest,neerja28/Tempest,Vaidyanath/tempest,roopali8/tempest,tudorvio/tempest,neerja28/Tempest,nunogt/tempest,xbezdick/tempest,akash1808/tempest,izadorozhna/tempest,ebagdasa/tempest,Tesora/tesora-tempest,JioCloud/tempest,openstack/tempest,akash1808/tempest,vedujoshi/tempest,manasi24/jiocloud-tempest-qatempest,eggmaster/tempest,CiscoSystems/tempest,masayukig/tempest,rakeshmi/tempest,jaspreetw/tempest,izadorozhna/tempest,NexusIS/tempest,Juniper/tempest,hayderimran7/tempest,pczerkas/tempest,xbezdick/tempest,Juraci/tempest,LIS/lis-tempest,Lilywei123/tempest,afaheem88/tempest,manasi24/tempest,tudorvio/tempest,pandeyop/tempest,tonyli71/tempest,NexusIS/tempest,jamielennox/tempest,jamielennox/tempest,pczerkas/tempest,manasi24/tempest,redhat-cip/tempest,cisco-openstack/tempest,roopali8/tempest,alinbalutoiu/tempest,ebagdasa/tempest,rzarzynski/tempest,masayukig/tempest,flyingfish007/tempest,FujitsuEnablingSoftwareTechnologyGmbH/tempest,bigswitch/tempest,sebrandon1/tempest,hpcloud-mon/tempest,Juraci/tempest,alinbalutoiu/tempest,eggmaster/tempest,varunarya10/tempest,FujitsuEnablingSoftwareTechnologyGmbH/tempest,redhat-cip/tempest,Juniper/tempest,tonyli71/tempest,Lilywei123/tempest,afaheem88/tempest_neutron,varunarya10/tempest,hayderimran7/tempest,zsoltdudas/lis-tempest,dkalashnik/tempest,danielmellado/tempest,CiscoSystems/tempest,bigswitch/tempest,openstack/tempest,flyingfish007/tempest,danielmellado/tempest,Tesora/tesora-tempest,jaspreetw/tempest,zsoltdudas/lis-tempest,JioCloud/tempest,LIS/lis-tempest,sebrandon1/tempest,yamt/tempest | tempest/scenario/test_shelve_instance.py | tempest/scenario/test_shelve_instance.py | # Copyright 2014 Scality
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest import config
from tempest.openstack.common import log
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = log.getLogger(__name__)
class TestShelveInstance(manager.ScenarioTest):
"""
This test shelves then unshelves a Nova instance
The following is the scenario outline:
* boot a instance and create a timestamp file in it
* shelve the instance
* unshelve the instance
* check the existence of the timestamp file in the unshelved instance
"""
def _write_timestamp(self, server_or_ip):
ssh_client = self.get_remote_client(server_or_ip)
ssh_client.exec_command('date > /tmp/timestamp; sync')
self.timestamp = ssh_client.exec_command('cat /tmp/timestamp')
def _check_timestamp(self, server_or_ip):
ssh_client = self.get_remote_client(server_or_ip)
got_timestamp = ssh_client.exec_command('cat /tmp/timestamp')
self.assertEqual(self.timestamp, got_timestamp)
def _shelve_then_unshelve_server(self, server):
self.servers_client.shelve_server(server['id'])
offload_time = CONF.compute.shelved_offload_time
if offload_time >= 0:
self.servers_client.wait_for_server_status(
server['id'], 'SHELVED_OFFLOADED', extra_timeout=offload_time)
else:
self.servers_client.wait_for_server_status(server['id'], 'SHELVED')
self.servers_client.shelve_offload_server(server['id'])
self.servers_client.wait_for_server_status(server['id'],
'SHELVED_OFFLOADED')
self.servers_client.unshelve_server(server['id'])
self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.services('compute', 'network', 'image')
def test_shelve_instance(self):
self.keypair = self.create_keypair()
self.security_group = self._create_security_group()
create_kwargs = {
'key_name': self.keypair['name'],
'security_groups': [self.security_group]
}
server = self.create_server(image=CONF.compute.image_ref,
create_kwargs=create_kwargs)
if CONF.compute.use_floatingip_for_ssh:
_, floating_ip = self.floating_ips_client.create_floating_ip()
self.addCleanup(self.delete_wrapper,
self.floating_ips_client.delete_floating_ip,
floating_ip['id'])
self.floating_ips_client.associate_floating_ip_to_server(
floating_ip['ip'], server['id'])
self._write_timestamp(floating_ip['ip'])
else:
self._write_timestamp(server)
# Prevent bug #1257594 from coming back
# Unshelve used to boot the instance with the original image, not
# with the instance snapshot
self._shelve_then_unshelve_server(server)
if CONF.compute.use_floatingip_for_ssh:
self._check_timestamp(floating_ip['ip'])
else:
self._check_timestamp(server)
| apache-2.0 | Python |
|
c2036cd7629b93bfc12069eaf174f2427d47e769 | add another test | somic/paasta,Yelp/paasta,somic/paasta,Yelp/paasta | tests/monitoring/test_check_mesos_duplicate_frameworks.py | tests/monitoring/test_check_mesos_duplicate_frameworks.py | # Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pytest
from paasta_tools.monitoring.check_mesos_duplicate_frameworks import check_mesos_no_duplicate_frameworks
def test_check_mesos_no_duplicate_frameworks_ok(capfd):
with mock.patch(
'paasta_tools.monitoring.check_mesos_duplicate_frameworks.parse_args', autospec=True,
) as mock_parse_args, mock.patch(
'paasta_tools.monitoring.check_mesos_duplicate_frameworks.get_mesos_master', autospec=True,
) as mock_get_mesos_master:
mock_opts = mock.MagicMock()
mock_opts.check = 'marathon,chronos'
mock_parse_args.return_value = mock_opts
mock_master = mock.MagicMock()
mock_master.state = {
'frameworks': [
{'name': 'marathon'},
{'name': 'chronos'},
{'name': 'foobar'},
{'name': 'foobar'},
],
}
mock_get_mesos_master.return_value = mock_master
with pytest.raises(SystemExit) as error:
check_mesos_no_duplicate_frameworks()
out, err = capfd.readouterr()
assert "OK" in out
assert "marathon" in out
assert "chronos" in out
assert "foobar" not in out
assert error.value.code == 0
def test_check_mesos_no_duplicate_frameworks_critical(capfd):
with mock.patch(
'paasta_tools.monitoring.check_mesos_duplicate_frameworks.parse_args', autospec=True,
) as mock_parse_args, mock.patch(
'paasta_tools.monitoring.check_mesos_duplicate_frameworks.get_mesos_master', autospec=True,
) as mock_get_mesos_master:
mock_opts = mock.MagicMock()
mock_opts.check = 'marathon,chronos'
mock_parse_args.return_value = mock_opts
mock_master = mock.MagicMock()
mock_master.state = {
'frameworks': [
{'name': 'marathon'},
{'name': 'marathon'},
{'name': 'chronos'},
{'name': 'foobar'},
{'name': 'foobar'},
],
}
mock_get_mesos_master.return_value = mock_master
with pytest.raises(SystemExit) as error:
check_mesos_no_duplicate_frameworks()
out, err = capfd.readouterr()
assert "CRITICAL" in out
assert "marathon" in out
assert "chronos" in out
assert "foobar" not in out
assert error.value.code == 2
| apache-2.0 | Python |
|
0b81997dd12f775fc9f814c19fb62ef35bde998e | Add ceres library | tuttleofx/sconsProject | autoconf/ceres.py | autoconf/ceres.py | from _external import *
from pthread import *
from amd import *
from gomp import *
from lapack import *
from suitesparse import *
from glog import *
ceres = LibWithHeaderChecker('ceres', 'ceres/ceres.h', 'c++', name='ceres', dependencies = [gomp,lapack,suitesparse,amd,pthread,glog],)
| mit | Python |
|
681f73490fd7d333883134a417477492744ce22a | Add project permissions | avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf | src/python/expedient/clearinghouse/project/permissions.py | src/python/expedient/clearinghouse/project/permissions.py | '''
Created on Aug 3, 2010
@author: jnaous
'''
from expedient.common.permissions.shortcuts import create_permission
from expedient.clearinghouse.permissionmgmt.utils import \
request_permission_wrapper
create_permission(
"can_create_project",
description=\
"Owners of this permission can create projects in Expedient.",
view=request_permission_wrapper,
)
create_permission(
"can_edit_project",
description=\
"Owners of this permission can edit basic project properties.",
view=request_permission_wrapper,
)
create_permission(
"can_delete_project",
description=\
"Owners of this permission can edit basic project properties.",
view=request_permission_wrapper,
)
create_permission(
"can_view_project",
description=\
"Owners of this permission can view the project. Without "
"other permissions, they are non-functional members.",
view=request_permission_wrapper,
)
create_permission(
"can_add_members",
description=\
"Owners of this permission can add members to "
"the project and assign to them roles.",
view=request_permission_wrapper,
)
create_permission(
"can_remove_members",
description=\
"Owners of this permission can remove members from "
"the project.",
view=request_permission_wrapper,
)
create_permission(
"can_create_slices",
description=\
"Owners of this permission can create new slices.",
view=request_permission_wrapper,
)
create_permission(
"can_add_aggregates",
description=\
"Owners of this permission can add aggregates "
"to the project.",
view=request_permission_wrapper,
)
create_permission(
"can_remove_aggregates",
description=\
"Owners of this permission can remove aggregates "
"from the project.",
view=request_permission_wrapper,
)
| bsd-3-clause | Python |
|
bd9fce88c235ea6be032a1d15a31bf41df14a444 | Fix missing migration | nephila/djangocms-blog,nephila/djangocms-blog,skirsdeda/djangocms-blog,skirsdeda/djangocms-blog,nephila/djangocms-blog,skirsdeda/djangocms-blog | djangocms_blog/migrations/0033_auto_20180226_1410.py | djangocms_blog/migrations/0033_auto_20180226_1410.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djangocms_blog', '0032_auto_20180109_0023'),
]
operations = [
migrations.AlterField(
model_name='blogcategorytranslation',
name='meta_description',
field=models.TextField(blank=True, verbose_name='category meta description', default=''),
),
]
| bsd-3-clause | Python |
|
7a0bbdb2395ca1e8579e0f2cc6ccd43807c51161 | Create 6kyu_alpha_to_numeric_and_numeric_to_alpha.py | Orange9000/Codewars,Orange9000/Codewars | Solutions/6kyu/6kyu_alpha_to_numeric_and_numeric_to_alpha.py | Solutions/6kyu/6kyu_alpha_to_numeric_and_numeric_to_alpha.py | import re
def AlphaNum_NumAlpha(string):
return ''.join(swap(s) for s in re.findall('\d{1,2}|[a-z]', string))
def swap(s):
return chr(int(s)+96) if s.isdigit() else str(ord(s)-96)
| mit | Python |
|
20c51dbcd2d90bfa234efa5027254a4915995edd | add nmap_hosts migration | asrozar/perception | alembic/versions/13b7c3d4c802_create_nmap_hosts_table.py | alembic/versions/13b7c3d4c802_create_nmap_hosts_table.py | """create nmap_hosts table
Revision ID: 13b7c3d4c802
Revises: ecd5f49567a6
Create Date: 2017-07-21 08:19:17.849112
"""
from sqlalchemy.dialects import postgresql
from alembic import op
import sqlalchemy as sa
import datetime
def _get_date():
return datetime.datetime.now()
# revision identifiers, used by Alembic.
revision = '13b7c3d4c802'
down_revision = 'ecd5f49567a6'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('nmap_hosts',
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('ip_addr', postgresql.INET, unique=True, nullable=False),
sa.Column('created_at', sa.TIMESTAMP(timezone=False), default=_get_date))
def downgrade():
op.drop_table('nmap_hosts')
| mit | Python |
|
941985a561d0bdce1a8aba2e57fc60f90b6164fb | Add jrun main module so "python jrun" works | ctrueden/jrun,ctrueden/jrun | jrun/__main__.py | jrun/__main__.py | import jrun
if __name__ == '__main__':
jrun.jrun_main()
| unlicense | Python |
|
b27b3089f393a84c3d004e8d89be43165862be1d | add match matrix | tingyuchang/my-algorithm | matrix_match.py | matrix_match.py | import random
w1 = int(raw_input(">>> w1: "))
h1 = int(raw_input(">>> h1: "))
w2 = int(raw_input(">>> w2: "))
h2 = int(raw_input(">>> h2: "))
r1 = int(raw_input(">>> r1: "))
r2 = int(raw_input(">>> r2: "))
# w1 = 20
# h1 = 20
# w2 = 3
# h2 = 3
matrix = [[random.randint(r1, r2) for x in range(w1)] for x in range(h1)]
pattern = [[random.randint(r1, r2) for x in range(w2)] for x in range(h2)]
def matchMatrix(matrix1, matrix2):
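    # Slide the pattern (matrix2) over the big matrix and record the top-left
    # (row, column) index of every position where all pattern rows match.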
print 'Match Matrix start:\n '
results = []
temp = []
for x in matrix2:
for y in x:
temp.append(y)
indexOfX = 0
for x in matrix1:
if indexOfX >= (h1-h2+1):
break
indexOfY = 0
for y in x:
if indexOfY >= (w1-w2+1):
break
count = 0
for z in matrix2:
                subMatrix = matrix1[indexOfX+count]
count+=1
size = len(z)
subX = subMatrix[indexOfY:indexOfY+size]
if z != subX:
break
if count == h2:
results.append((indexOfX, indexOfY))
indexOfY+=1
indexOfX+=1
return results
for x in pattern:
print x
for x in matrix:
print x
print 'Ans:\n%s' % (matchMatrix(matrix, pattern)) | mit | Python |
|
40f92e6293bb13ee1462b932be15f5f11ceeee74 | Add initial implementation of TempType. | Renelvon/llama,Renelvon/llama | compiler/infer.py | compiler/infer.py | """
# ----------------------------------------------------------------------
# infer.py
#
# Type inference for Llama
# http://courses.softlab.ntua.gr/compilers/2012a/llama2012.pdf
#
# Authors: Nick Korasidis <[email protected]>
# Dimitris Koutsoukos <[email protected]>
# ----------------------------------------------------------------------
"""
class TempType:
"""A temporary type used during inference."""
_next_free = 1 # Next free papaki.
@classmethod
def _get_next_tag(cls):
cls._next_free += 1
return cls._next_free
def __init__(self, node, spec_type=None):
"""
Construct a new temporary type for node `node`.
The user may optionally supply a type for this node;
such a specification is not binding but will improve
error reporting.
"""
self._node = node
self._spec_type = spec_type
self._inferred_type = None
self._tag = self._get_next_tag()
def write_back(self):
self._node.type = self._inferred_type
# TODO: Validate the type before returning.
| mit | Python |
|
a2e27feff324d5aed7220a520df651f688cd1829 | Add migration | onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle | bluebottle/assignments/migrations/0002_auto_20190529_1755.py | bluebottle/assignments/migrations/0002_auto_20190529_1755.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-05-29 15:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('assignments', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='assignment',
old_name='end',
new_name='end_time',
),
]
| bsd-3-clause | Python |
|
9ca926d052edc754ca3b6f3663b1c00887b2965a | add migration with blank projects.Tag | brasilcomvc/brasilcomvc,brasilcomvc/brasilcomvc,brasilcomvc/brasilcomvc | brasilcomvc/projects/migrations/0004_tag_may_be_blank.py | brasilcomvc/projects/migrations/0004_tag_may_be_blank.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0003_project_tags'),
]
operations = [
migrations.AlterField(
model_name='project',
name='tags',
field=models.ManyToManyField(to='projects.Tag', blank=True),
preserve_default=True,
),
]
| apache-2.0 | Python |
|
77ce1e2606132a5a04bc8c1b86c14f6f590e458d | test script added for assessment_lookups | gnowledge/gstudio,gnowledge/gstudio,gnowledge/gstudio,gnowledge/gstudio,gnowledge/gstudio | doc/dlkit-gstudio-impl/assessment_lookup.py | doc/dlkit-gstudio-impl/assessment_lookup.py | from dlkit_runtime import PROXY_SESSION, RUNTIME
from dlkit_gstudio.gstudio_user_proxy import GStudioRequest
req_obj = GStudioRequest(id=1)
condition = PROXY_SESSION.get_proxy_condition()
condition.set_http_request(req_obj)
proxy = PROXY_SESSION.get_proxy(condition)
assessment_service_mgr = RUNTIME.get_service_manager('ASSESSMENT', proxy=proxy)
all_banks = assessment_service_mgr.get_banks()
all_banks.available()
# ======
from dlkit_runtime import PROXY_SESSION, RUNTIME
from dlkit_gstudio.gstudio_user_proxy import GStudioRequest
condition = PROXY_SESSION.get_proxy_condition()
proxy = PROXY_SESSION.get_proxy(condition)
assessment_service_mgr = RUNTIME.get_service_manager('ASSESSMENT', proxy=proxy)
all_banks = assessment_service_mgr.get_banks()
all_banks.available()
from dlkit.primordium.id.primitives import Id
bank = assessment_service_mgr.get_bank(Id('assessment.Bank%3A57c00fbded849b11f52fc8ec%40ODL.MIT.EDU'))
bank.get_display_name().text
# bank = all_banks.next()
assessment_items = bank.get_assessments()
assessment_items.available()
a = assessment_items.next()
offerings = bank.get_assessments_offered_for_assessment(a.get_id())
# Error:
# /home/docker/code/gstudio/gnowsys-ndf/qbank_lite/dlkit/mongo/assessment/objects.pyc in next(self)
# 1190
# 1191 def next(self):
# -> 1192 return self._get_next_object(Assessment)
# 1193
# 1194 next_assessment = property(fget=get_next_assessment)
# /home/docker/code/gstudio/gnowsys-ndf/qbank_lite/dlkit/mongo/osid/objects.pyc in _get_next_object(self, object_class)
# 2454 raise
# 2455 if isinstance(next_object, dict):
# -> 2456 next_object = object_class(osid_object_map=next_object, runtime=self._runtime, proxy=self._proxy)
# 2457 elif isinstance(next_object, basestring) and object_class == Id:
# 2458 next_object = Id(next_object)
# /home/docker/code/gstudio/gnowsys-ndf/qbank_lite/dlkit/mongo/assessment/objects.pyc in __init__(self, **kwargs)
# 827
# 828 def __init__(self, **kwargs):
# --> 829 osid_objects.OsidObject.__init__(self, object_name='ASSESSMENT', **kwargs)
# 830 self._catalog_name = 'bank'
# 831
# /home/docker/code/gstudio/gnowsys-ndf/qbank_lite/dlkit/mongo/osid/objects.pyc in __init__(self, osid_object_map, runtime, **kwargs)
# 114 osid_markers.Extensible.__init__(self, runtime=runtime, **kwargs)
# 115 self._my_map = osid_object_map
# --> 116 self._load_records(osid_object_map['recordTypeIds'])
# 117
# 118 def get_object_map(self, obj_map=None):
# /home/docker/code/gstudio/gnowsys-ndf/qbank_lite/dlkit/mongo/osid/markers.pyc in _load_records(self, record_type_idstrs)
# 174 """Load all records from given record_type_idstrs."""
# 175 for record_type_idstr in record_type_idstrs:
# --> 176 self._init_record(record_type_idstr)
# 177
# 178 def _init_records(self, record_types):
# /home/docker/code/gstudio/gnowsys-ndf/qbank_lite/dlkit/mongo/osid/markers.pyc in _init_record(self, record_type_idstr)
# 189 import importlib
# 190 record_type_data = self._record_type_data_sets[Id(record_type_idstr).get_identifier()]
# --> 191 module = importlib.import_module(record_type_data['module_path'])
# 192 record = getattr(module, record_type_data['object_record_class_name'], None)
# 193 # only add recognized records ... so apps don't break
# /usr/lib/python2.7/importlib/__init__.pyc in import_module(name, package)
# 35 level += 1
# 36 name = _resolve_name(name[level:], package, level)
# ---> 37 __import__(name)
# 38 return sys.modules[name]
# ImportError: No module named records.osid.object_records
| agpl-3.0 | Python |
|
a6d3ae8b27f6e97e7e5b4388a20836f25953c26d | Add example config file | mgunyho/kiltiskahvi | config-example.py | config-example.py | """
Minimal config file for kahvibot. Just define values as normal Python code.
"""
# put your bot token here as a string
bot_token = ""
# the tg username of the bot's admin.
admin_username = ""
# if a message contains any of these words, the bot responds
trigger_words = [
"kahvi",
"\u2615", # coffee emoji
"tsufe",
"kahavi",
#"sima", # wappu mode
]
| mit | Python |
|
bcda14f8258daaf3475dd9d3ca3eb7b25aa0496c | Add py-voluptuous (#13457) | LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack | var/spack/repos/builtin/packages/py-voluptuous/package.py | var/spack/repos/builtin/packages/py-voluptuous/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyVoluptuous(PythonPackage):
"""Voluptous, despite the name, is a Python data validation library."""
homepage = "https://github.com/alecthomas/voluptuous"
url = "https://github.com/alecthomas/voluptuous/archive/0.11.5.tar.gz"
version('0.11.5', sha256='01adf0b6c6f61bd11af6e10ca52b7d4057dd0be0343eb9283c878cf3af56aee4')
depends_on('py-setuptools', type='build')
| lgpl-2.1 | Python |
|
93000ab88c489f720d0f7e6a8921dc69342d61f1 | Add migration | OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/PolicyBrain,OpenSourcePolicyCenter/PolicyBrain,OpenSourcePolicyCenter/PolicyBrain,OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/PolicyBrain | webapp/apps/dynamic/migrations/0012_auto_20160616_1908.py | webapp/apps/dynamic/migrations/0012_auto_20160616_1908.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dynamic', '0011_auto_20160614_1902'),
]
operations = [
migrations.RenameField(
model_name='dynamicbehaviorsaveinputs',
old_name='BE_CG_per',
new_name='BE_cg',
),
]
| mit | Python |
|
662ad845a0ce729d8d8b72121a4c7c6f22e3eaa2 | support for phonetic similarity added | anoopkunchukuttan/indic_nlp_library | src/indicnlp/script/phonetic_sim.py | src/indicnlp/script/phonetic_sim.py | # Copyright Anoop Kunchukuttan 2014 - present
#
# This file is part of Indic NLP Library.
#
# Indic NLP Library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Indic NLP Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indic NLP Library. If not, see <http://www.gnu.org/licenses/>.
#
from indicnlp import loader
from indicnlp import langinfo
from indicnlp.script.indic_scripts import *
import numpy as np
import gzip
import pandas as pd
import codecs,sys
def equal(v1,v2):
return 0.0 if np.sum( xor_vectors(v1, v2)) > 0 else 1.0
def dice(v1,v2):
dotprod=2*float(np.dot( v1, v2.T ))
return dotprod/float(len(v1)+len(v2))
def jaccard(v1,v2):
dotprod=float(np.dot( v1, v2.T ))
return dotprod/float(len(v1)+len(v2)-dotprod)
def cosine(v1,v2):
dotprod=float(np.dot( v1, v2.T ))
norm1=float(np.dot( v1, v1.T ))
norm2=float(np.dot( v2, v2.T ))
return ((dotprod)/(np.sqrt(norm1*norm2)+0.00001))
def dotprod(v1,v2):
return float(np.dot( v1, v2.T ))
def sim1(v1,v2,base=5.0):
return np.power(base,dotprod(v1,v2))
def softmax(v1,v2):
return sim1(v1,v2,np.e)
def create_similarity_matrix(sim_func,slang,tlang,normalize=True):
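    # Build a character-to-character similarity matrix over the coordinated
    # Unicode range of the source and target scripts by applying sim_func to
    # the phonetic feature vectors of every character pair; optionally
    # row-normalize it so that each row sums to one.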
dim=langinfo.COORDINATED_RANGE_END_INCLUSIVE-langinfo.COORDINATED_RANGE_START_INCLUSIVE+1
sim_mat=np.zeros((dim,dim))
for offset1 in xrange(langinfo.COORDINATED_RANGE_START_INCLUSIVE, langinfo.COORDINATED_RANGE_END_INCLUSIVE+1):
v1=get_phonetic_feature_vector(offset_to_char(offset1,slang),slang)
for offset2 in xrange(langinfo.COORDINATED_RANGE_START_INCLUSIVE, langinfo.COORDINATED_RANGE_END_INCLUSIVE+1):
v2=get_phonetic_feature_vector(offset_to_char(offset2,tlang),tlang)
sim_mat[offset1,offset2]=sim_func(v1,v2)
if normalize:
sums=np.sum(sim_mat, axis=1)
sim_mat=(sim_mat.transpose()/sums).transpose()
return sim_mat
| mit | Python |
|
30ea7b5c77acc0af8826e3aef6155f9d329ed419 | Create getCpuUsage2.py | david618/dcosadmin,david618/dcosadmin | mesosmetrics/getCpuUsage2.py | mesosmetrics/getCpuUsage2.py | import urllib
import json
import time
import sys
if __name__ == '__main__':
agent=sys.argv[1]
url = "http://" + agent + ":5051/monitor/statistics"
executors = {}
response = urllib.urlopen(url)
data = json.loads(response.read())
for itm in data:
executor = {}
id = itm["executor_id"]
executor["name"] = itm["executor_name"]
a = {}
a["cpu_system"] = itm["statistics"]["cpus_system_time_secs"]
a["cpu_user"] = itm["statistics"]["cpus_user_time_secs"]
a["ts"] = itm["statistics"]["timestamp"]
executor["a"] = a
executors[id] = executor
time.sleep(5)
response = urllib.urlopen(url)
data = json.loads(response.read())
for itm in data:
id = itm["executor_id"]
b = {}
b["cpu_system"] = itm["statistics"]["cpus_system_time_secs"]
b["cpu_user"] = itm["statistics"]["cpus_user_time_secs"]
b["ts"] = itm["statistics"]["timestamp"]
executors[id]["b"] = b
for id,itm in executors.items():
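        # CPU usage = (delta system time + delta user time) / time elapsed between the two samples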
cpus_total_usage = ((itm["b"]["cpu_system"]-itm["a"]["cpu_system"]) + \
(itm["b"]["cpu_user"]-itm["a"]["cpu_user"])) / \
(itm["b"]["ts"]-itm["a"]["ts"])
print(str(id) + " : " + str(cpus_total_usage))
| apache-2.0 | Python |
|
93a41a7d406e5f7c264865d96c0f85b1181e5cb0 | add basic test | yuyu2172/chainercv,chainer/chainercv,pfnet/chainercv,chainer/chainercv,yuyu2172/chainercv | tests/utils_tests/extension_tests/test_forward.py | tests/utils_tests/extension_tests/test_forward.py | import mock
import numpy as np
import unittest
import chainer
from chainer import testing
from chainercv.utils import forward
@testing.parameterize(*testing.product({
'in_shapes': [((3, 4),), ((3, 4), (5,))],
'out_shapes': [((3, 4),), ((3, 4), (5,))],
'variable': [True, False],
}))
class TestForward(unittest.TestCase):
def setUp(self):
self.xp = np
self.mocked_model = mock.MagicMock()
self.mocked_model.xp = self.xp
self.inputs = tuple(np.empty(shape) for shape in self.in_shapes)
if len(self.inputs) == 1:
self.inputs = self.inputs[0]
self.outputs = tuple(
self.xp.array(np.empty(shape)) for shape in self.out_shapes)
if self.variable:
self.outputs = tuple(
chainer.Variable(output) for output in self.outputs)
if len(self.outputs) == 1:
self.outputs = self.outputs[0]
def _check_inputs(self, inputs):
if isinstance(self.inputs, tuple):
orig_inputs = self.inputs
else:
orig_inputs = self.inputs,
for orig, in_ in zip(orig_inputs, inputs):
self.assertIsInstance(in_, chainer.Variable)
self.assertEqual(chainer.cuda.get_array_module(in_.data), self.xp)
in_ = chainer.cuda.to_cpu(in_.data)
np.testing.assert_equal(in_, orig)
def _check_outputs(self, outputs):
if len(outputs) == 1:
outputs = outputs,
for orig, out in zip(self.outputs, outputs):
self.assertIsInstance(out, np.ndarray)
if self.variable:
orig = orig.data
orig = chainer.cuda.to_cpu(orig)
np.testing.assert_equal(out, orig)
def test_forward(self):
def _call(*inputs):
self._check_inputs(inputs)
return self.outputs
self.mocked_model.side_effect = _call
outputs = forward(self.mocked_model, self.inputs)
self._check_outputs(outputs)
testing.run_module(__name__, __file__)
| mit | Python |
|
4802b8fe149ed72303bbb0f1f924275dbc004b5a | Use the handy interruptible pool from emcee | e-koch/TurbuStat,Astroua/TurbuStat | Examples/interruptible_pool.py | Examples/interruptible_pool.py | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2010-2013 Daniel Foreman-Mackey & contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Python's multiprocessing.Pool class doesn't interact well with
``KeyboardInterrupt`` signals, as documented in places such as:
* `<http://stackoverflow.com/questions/1408356/>`_
* `<http://stackoverflow.com/questions/11312525/>`_
* `<http://noswap.com/blog/python-multiprocessing-keyboardinterrupt>`_
Various workarounds have been shared. Here, we adapt the one proposed in the
last link above, by John Reese, and shared as
* `<https://github.com/jreese/multiprocessing-keyboardinterrupt/>`_
Our version is a drop-in replacement for multiprocessing.Pool ... as long as
the map() method is the only one that needs to be interrupt-friendly.
Contributed by Peter K. G. Williams <[email protected]>.
*Added in version 2.1.0*
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["InterruptiblePool"]
import signal
import functools
from multiprocessing.pool import Pool
from multiprocessing import TimeoutError
def _initializer_wrapper(actual_initializer, *rest):
"""
We ignore SIGINT. It's up to our parent to kill us in the typical
condition of this arising from ``^C`` on a terminal. If someone is
manually killing us with that signal, well... nothing will happen.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
if actual_initializer is not None:
actual_initializer(*rest)
class InterruptiblePool(Pool):
"""
A modified version of :class:`multiprocessing.pool.Pool` that has better
behavior with regard to ``KeyboardInterrupts`` in the :func:`map` method.
:param processes: (optional)
The number of worker processes to use; defaults to the number of CPUs.
:param initializer: (optional)
Either ``None``, or a callable that will be invoked by each worker
process when it starts.
:param initargs: (optional)
Arguments for *initializer*; it will be called as
``initializer(*initargs)``.
:param kwargs: (optional)
Extra arguments. Python 2.7 supports a ``maxtasksperchild`` parameter.
"""
wait_timeout = 3600
def __init__(self, processes=None, initializer=None, initargs=(),
**kwargs):
new_initializer = functools.partial(_initializer_wrapper, initializer)
super(InterruptiblePool, self).__init__(processes, new_initializer,
initargs, **kwargs)
def map(self, func, iterable, chunksize=None):
"""
Equivalent of ``map()`` built-in, without swallowing
``KeyboardInterrupt``.
:param func:
The function to apply to the items.
:param iterable:
An iterable of items that will have `func` applied to them.
"""
# The key magic is that we must call r.get() with a timeout, because
# a Condition.wait() without a timeout swallows KeyboardInterrupts.
r = self.map_async(func, iterable, chunksize)
while True:
try:
return r.get(self.wait_timeout)
except TimeoutError:
pass
except KeyboardInterrupt:
self.terminate()
self.join()
raise
# Other exceptions propagate up.
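# Illustrative usage sketch (not part of the original emcee module):
#
#   pool = InterruptiblePool(processes=4)
#   try:
#       results = pool.map(some_function, inputs)
#   finally:
#       pool.close()
#       pool.join()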
| mit | Python |
|
133da92ed69aafc6c0a8d4466cf3b0266c5edc68 | Add migration for change in profile model. | hackerspace-ntnu/website,hackerspace-ntnu/website,hackerspace-ntnu/website | userprofile/migrations/0006_auto_20180309_2215.py | userprofile/migrations/0006_auto_20180309_2215.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-09 22:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0005_auto_20171121_1923'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(default=None, upload_to='profilepictures'),
),
]
| mit | Python |
|
4bce7685c39e7efbb674407184d0bf436cbdaec0 | Create ftxproxy.py | puluto/ftxproxy | ftxproxy.py | ftxproxy.py | #!/usr/bin/python
# This is a simple port-forward / proxy, written using only the default python
# library. If you want to make a suggestion or fix something you can contact me
# at voorloop_at_gmail.com
# Distributed over IDC(I Don't Care) license
import socket
import select
import time
import sys
# Changing the buffer_size and delay, you can improve the speed and bandwidth.
# But if the buffer gets too big or the delay too low, you can break things.
buffer_size = 4096
delay = 0.0001
forward_to = ('10.11.10.18', 8989)
class Forward:
def __init__(self):
self.forward = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def start(self, host, port):
try:
self.forward.connect((host, port))
return self.forward
except Exception, e:
print e
return False
class TheServer:
input_list = []
channel = {}
def __init__(self, host, port):
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind((host, port))
self.server.listen(200)
def main_loop(self):
self.input_list.append(self.server)
while 1:
time.sleep(delay)
ss = select.select
inputready, outputready, exceptready = ss(self.input_list, [], [])
for self.s in inputready:
if self.s == self.server:
self.on_accept()
break
self.data = self.s.recv(buffer_size)
if len(self.data) == 0:
self.on_close()
break
else:
self.on_recv()
def on_accept(self):
forward = Forward().start(forward_to[0], forward_to[1])
clientsock, clientaddr = self.server.accept()
if forward:
print clientaddr, "has connected"
self.input_list.append(clientsock)
self.input_list.append(forward)
self.channel[clientsock] = forward
self.channel[forward] = clientsock
else:
print "Can't establish connection with remote server.",
print "Closing connection with client side", clientaddr
clientsock.close()
def on_close(self):
print self.s.getpeername(), "has disconnected"
#remove objects from input_list
self.input_list.remove(self.s)
self.input_list.remove(self.channel[self.s])
out = self.channel[self.s]
# close the connection with client
self.channel[out].close() # equivalent to do self.s.close()
# close the connection with remote server
self.channel[self.s].close()
# delete both objects from channel dict
del self.channel[out]
del self.channel[self.s]
def on_recv(self):
data = self.data
# here we can parse and/or modify the data before send forward
print data
self.channel[self.s].send(data)
if __name__ == '__main__':
server = TheServer('', 8002)
try:
server.main_loop()
except KeyboardInterrupt:
print "Ctrl C - Stopping server"
sys.exit(1)
| bsd-2-clause | Python |
|
350f7056e895dd8ddee756779ae50522f099f998 | Add tests for the oauth2 decorators | lorenzogil/yith-library-server,lorenzogil/yith-library-server,lorenzogil/yith-library-server | yithlibraryserver/oauth2/tests/test_decorators.py | yithlibraryserver/oauth2/tests/test_decorators.py | # Yith Library Server is a password storage server.
# Copyright (C) 2014 Lorenzo Gil Sanchez <[email protected]>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import datetime
import os
from bson.tz_util import utc
from pyramid.httpexceptions import HTTPUnauthorized
from yithlibraryserver import testing
from yithlibraryserver.oauth2.decorators import (
protected,
protected_method,
)
@protected(['scope1'])
def view_function(request):
return 'response'
class ViewClass(object):
def __init__(self, request):
self.request = request
@protected_method(['scope1'])
def view_method(self):
return 'response'
class DecoratorsTests(testing.TestCase):
clean_collections = ('access_codes', 'users')
def setUp(self):
super(DecoratorsTests, self).setUp()
os.environ['YITH_FAKE_DATETIME'] = '2014-2-23-08-00-00'
self.user_id = self.db.users.insert({
'username': 'user1',
})
def _create_access_code(self, scope):
expiration = datetime.datetime(2014, 2, 23, 9, 0, tzinfo=utc)
self.db.access_codes.insert({
'access_token': '1234',
'type': 'Bearer',
'expiration': expiration,
'user_id': self.user_id,
'scope': scope,
'client_id': 'client1',
})
def tearDown(self):
del os.environ['YITH_FAKE_DATETIME']
super(DecoratorsTests, self).tearDown()
def test_protected_bad_scope(self):
self._create_access_code('scope2')
request = testing.FakeRequest(headers={
'Authorization': 'Bearer 1234',
}, db=self.db)
self.assertRaises(HTTPUnauthorized, view_function, request)
def test_protected(self):
self._create_access_code('scope1')
request = testing.FakeRequest(headers={
'Authorization': 'Bearer 1234',
}, db=self.db)
self.assertEqual(view_function(request), 'response')
self.assertEqual(request.user['username'], 'user1')
def test_protected_method_bad_scope(self):
self._create_access_code('scope2')
request = testing.FakeRequest(headers={
'Authorization': 'Bearer 1234',
}, db=self.db)
view_object = ViewClass(request)
self.assertRaises(HTTPUnauthorized, view_object.view_method)
def test_protected_method(self):
self._create_access_code('scope1')
request = testing.FakeRequest(headers={
'Authorization': 'Bearer 1234',
}, db=self.db)
view_object = ViewClass(request)
self.assertEqual(view_object.view_method(), 'response')
self.assertEqual(request.user['username'], 'user1')
| agpl-3.0 | Python |
|
f93ae9f59dcbb834b93fa3a57d89d84c4520baa0 | Create collector.py | aiskov/cdn-collector | collector.py | collector.py | import os
from subprocess import Popen, PIPE
import tarfile
import json
import shutil
import re
def move(file, origin, destination):
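    # Move a single file (or, recursively, each file in a list) from the
    # extracted archive into the destination directory, stripping ./, bin/
    # and dist/ prefixes from the destination file name.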
if not file:
return
if not isinstance(file, basestring):
for entry in file:
move(entry, origin, destination)
return
destination_path = '%s/%s' % (destination, re.sub('(\./|bin/|dist/)', '', file))
origin_path = '%s/%s' % (origin, re.sub('(\./)', '', file))
shutil.move(origin_path, destination_path)
def load_json(conf):
json_data = open(conf)
data = json.load(json_data)
json_data.close()
return data
def check_skip(text, skips):
for word in skips:
if word in text:
return True
return False
config = load_json('cdn-config.json')
print 'Rebuild CDN collection.'
for target in config['targets']:
print 'Collect %s libraries.' % target
proc = Popen(["bower info %s" % target], stdout=PIPE, shell=True)
start = False
for line in proc.communicate()[0].splitlines():
if not start:
if 'Available versions:' in line:
start = True
continue
if 'You can request' in line:
break
if check_skip(line, config['skipWords']):
continue
version = line.strip()[2:]
print 'Version found %s - %s.' % (target, version)
if not os.path.isdir(target):
os.mkdir(target)
directory = "%s/%s" % (target, version)
if os.path.isdir(directory):
if os.listdir(directory):
print 'Skip version, directory already exists %s/%s' % (target, version)
continue
else:
os.mkdir("%s/%s" % (target, version))
proc_info = Popen(["bower info %s#%s" % (target, version)], stdout=PIPE, shell=True)
link = None
info = proc_info.communicate()[0]
info = info[info.find('{'):info.rfind('}') + 1].replace(': ', '": ')
for i, match in enumerate(re.finditer('( [A-za-z]+":)', info)):
pos = match.start() + 1 + i
info = info[:pos] + '"' + info[pos:]
info = info.replace('\'', '"')
info = json.loads(info)
if info['homepage']:
wget_cmd = 'wget --directory-prefix="%(target)s/%(version)s" "%(link)s/archive/%(version)s.tar.gz"' % {
'target': target,
'version': version,
'link': info['homepage']
}
print wget_cmd
proc_download = Popen([wget_cmd], stdout=PIPE, shell=True)
print proc_download.communicate()[0]
archive = "%s/%s" % (directory, os.listdir(directory)[0])
tfile = tarfile.open(archive, 'r:gz')
tfile.extractall(directory)
os.remove(archive)
location = "%s/%s" % (directory, os.listdir(directory)[0])
move(info.get('main', info.get('scripts')), location, directory)
shutil.rmtree(location)
else:
print 'Download link for version not found.'
print info
| apache-2.0 | Python |
|
0f1475eddf3f9237a1f746784b090a4f65d96226 | add import script for Swindon | chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_swindon.py | polling_stations/apps/data_collection/management/commands/import_swindon.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E06000030'
addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017 (11).tsv'
stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017 (11).tsv'
elections = ['parl.2017-06-08']
csv_delimiter = '\t'
| bsd-3-clause | Python |
|
9366fe261b2f13f81678851fd5ae4a0035a811c7 | Add new package: py-walinuxagent (#18961) | iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/py-walinuxagent/package.py | var/spack/repos/builtin/packages/py-walinuxagent/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyWalinuxagent(PythonPackage):
"""Microsoft Azure Linux Guest Agent."""
homepage = "https://github.com/Azure/WALinuxAgent"
url = "https://github.com/Azure/WALinuxAgent/archive/pre-v2.2.52.tar.gz"
version('2.2.52', sha256='02c26af75827bd7042aa2285c78dee86ddb25a6a8f6bb0a85679a2df9ba56a3a')
version('2.2.50', sha256='3b2b99552e3b35dfcbb4cabf476d0113d701eb23d2e0e61f35f0fa33cabde0a1')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-pyasn1', type=('build', 'run'))
depends_on('py-distro', type=('build', 'run'), when='^[email protected]:')
| lgpl-2.1 | Python |
|
ce647d22a2a65ea40d259b064a6b4f611ca669af | Add test codes for wheels | peppaseven/iPeppaCar | wheels.py | wheels.py | #!/usr/bin/python2
#coding=utf-8
import RPi.GPIO as GPIO
import time
'''
2 L298N control 4 Motors.
SOC Control GPIO
Front Motor: (Left) 15-ENDA, 31-forward,33-backward
(Right)29-ENDB, 35-forward,37-backward
Rear Motor: (Left) 18-ENDB, 38-forward,40-backward
(Right)25-ENDA, 36-forward,32-backward
This is temporary test code; a proper wheels class still needs to be defined.
'''
# GPIO pin definitions should be moved to a common file.
def init():
GPIO.setmode(GPIO.BOARD)
# front motor
GPIO.setup(15, GPIO.OUT)
GPIO.setup(31, GPIO.OUT)
GPIO.setup(33, GPIO.OUT)
GPIO.setup(29, GPIO.OUT)
GPIO.setup(35, GPIO.OUT)
GPIO.setup(37, GPIO.OUT)
GPIO.setup(18, GPIO.OUT)
GPIO.setup(38, GPIO.OUT)
GPIO.setup(40, GPIO.OUT)
GPIO.setup(25, GPIO.OUT)
GPIO.setup(36, GPIO.OUT)
GPIO.setup(32, GPIO.OUT)
def reset():
GPIO.output(15, GPIO.LOW)
GPIO.output(31, GPIO.LOW)
GPIO.output(33, GPIO.LOW)
GPIO.output(29, GPIO.LOW)
GPIO.output(35, GPIO.LOW)
GPIO.output(37, GPIO.LOW)
GPIO.output(18, GPIO.LOW)
GPIO.output(38, GPIO.LOW)
GPIO.output(40, GPIO.LOW)
GPIO.output(25, GPIO.LOW)
GPIO.output(36, GPIO.LOW)
GPIO.output(32, GPIO.LOW)
# front left forward
def front_left_forward():
GPIO.output(15, GPIO.HIGH)
GPIO.output(31, GPIO.HIGH)
GPIO.output(33, GPIO.LOW)
# front right forward
def front_right_forward():
GPIO.output(29, GPIO.HIGH)
GPIO.output(35, GPIO.HIGH)
GPIO.output(37, GPIO.LOW)
# rear left forward
def rear_left_forward():
GPIO.output(18, GPIO.HIGH)
GPIO.output(38, GPIO.HIGH)
GPIO.output(40, GPIO.LOW)
# rear right forward
def rear_right_forward():
GPIO.output(25, GPIO.HIGH)
GPIO.output(36, GPIO.HIGH)
GPIO.output(32, GPIO.LOW)
def front_left_back():
GPIO.output(15, GPIO.HIGH)
GPIO.output(31, GPIO.LOW)
GPIO.output(33, GPIO.HIGH)
def front_right_back():
GPIO.output(29, GPIO.HIGH)
GPIO.output(35, GPIO.LOW)
GPIO.output(37, GPIO.HIGH)
def rear_left_back():
GPIO.output(18, GPIO.HIGH)
GPIO.output(38, GPIO.LOW)
GPIO.output(40, GPIO.HIGH)
def rear_right_back():
GPIO.output(25, GPIO.HIGH)
GPIO.output(36, GPIO.LOW)
GPIO.output(32, GPIO.HIGH)
# forward
def forward():
reset()
front_left_forward()
front_right_forward()
rear_left_forward()
rear_right_forward()
# backward
def back():
reset()
front_left_back()
front_right_back()
rear_left_back()
rear_right_back()
# move forward with left
def front_left_turn():
reset()
front_right_forward()
rear_right_forward()
time.sleep(0.3)
reset()
# move forward with right
def front_right_turn():
reset()
front_left_forward()
rear_left_forward()
time.sleep(0.3)
reset()
# move backward with left
def rear_left_turn():
reset()
rear_left_back()
front_left_back()
time.sleep(0.3)
reset()
# move backward with right
def rear_right_turn():
reset()
rear_right_back()
front_right_back()
time.sleep(0.3)
reset()
# stop motor
def stop():
reset()
if __name__ == "__main__":
init()
reset()
forward()
time.sleep(2)
#back()
#time.sleep(2)
#front_left_turn()
#time.sleep(1)
#front_right_turn()
#time.sleep(1)
#rear_left_turn()
#time.sleep(1)
#rear_right_turn()
stop()
#must call this when exit
GPIO.cleanup() | apache-2.0 | Python |
|
f3db6608c2b4afeb214c3f1b94e0175609ad0b88 | Add migration file for event slug changes | uccser/cs4teachers,uccser/cs4teachers,uccser/cs4teachers,uccser/cs4teachers | cs4teachers/events/migrations/0018_auto_20170706_0803.py | cs4teachers/events/migrations/0018_auto_20170706_0803.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-06 08:03
from __future__ import unicode_literals
import autoslug.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0017_auto_20170705_0952'),
]
operations = [
migrations.AlterField(
model_name='event',
name='slug',
field=autoslug.fields.AutoSlugField(editable=False, populate_from='name'),
),
migrations.AlterField(
model_name='location',
name='slug',
field=autoslug.fields.AutoSlugField(editable=False, populate_from='name'),
),
migrations.AlterField(
model_name='resource',
name='slug',
field=autoslug.fields.AutoSlugField(editable=False, populate_from='name'),
),
migrations.AlterField(
model_name='session',
name='slug',
field=autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique_with=['event__slug']),
),
migrations.AlterField(
model_name='thirdpartyevent',
name='slug',
field=autoslug.fields.AutoSlugField(editable=False, populate_from='name'),
),
]
| mit | Python |
|
7d10648275e991fda42c6dccbce340d37d442115 | fix test on test_cell | fishstamp82/moltools,fishstamp82/moltools | src/test/test_cell.py | src/test/test_cell.py | import unittest, os
import numpy as np
from particles import PointDipoleList
from molecules import Cluster, Atom
from use_generator import Generator
import dstruct
FILE_XYZ = os.path.join( os.path.dirname(__file__), 'pna_waters.xyz' )
FILE_MOL = os.path.join( os.path.dirname(__file__), 'tip3p44_10qm.mol' )
FILE_PDB = os.path.join( os.path.dirname(__file__), 'tip3p0.pdb' )
POTSTRING = """AU
6 1 22 1
1 0.000000 0.000000 0.000000 -0.66229 0.00000 0.00000 0.34276 4.10574 0.00000 0.00000 4.79229 0.00000 4.01912 0.00000 0.00000 -3.33162 0.00000 0.00000 0.00000 0.00000 -0.32216 0.00000 0.79137
1 1.430429 0.000000 1.107157 0.33114 -0.16617 0.00000 -0.11629 1.53802 0.00000 1.19765 0.90661 0.00000 1.37138 -4.52137 0.00000 -5.08061 -1.35494 0.00000 -4.83365 0.00000 -0.46317 0.00000 -3.47921
1 -1.430429 0.000000 1.107157 0.33114 0.16617 0.00000 -0.11629 1.53802 0.00000 -1.19765 0.90661 0.00000 1.37138 4.52137 0.00000 -5.08061 1.35494 0.00000 4.83365 0.00000 -0.46317 0.00000 -3.47921
2 15.000000 15.000000 15.000000 -0.66229 0.00000 0.00000 0.34276 4.10574 0.00000 0.00000 4.79229 0.00000 4.01912 0.00000 0.00000 -3.33162 0.00000 0.00000 0.00000 0.00000 -0.32216 0.00000 0.79137
2 16.430429 15.000000 16.107157 0.33114 -0.16617 0.00000 -0.11629 1.53802 0.00000 1.19765 0.90661 0.00000 1.37138 -4.52137 0.00000 -5.08061 -1.35494 0.00000 -4.83365 0.00000 -0.46317 0.00000 -3.47921
2 13.569571 15.000000 16.107157 0.33114 0.16617 0.00000 -0.11629 1.53802 0.00000 -1.19765 0.90661 0.00000 1.37138 4.52137 0.00000 -5.08061 1.35494 0.00000 4.83365 0.00000 -0.46317 0.00000 -3.47921"""
from dstruct import Cell
class CellTest( unittest.TestCase ):
def setUp(self):
pass
def test_init(self):
c = Cell( my_min = map(float, [0, 0, 0]),
my_max = map(float, [1, 1, 1] ),
my_cutoff = 0.4)
assert len(c) == 3
c = Cell( my_min = map(float, [-10, 0, 0]),
my_max = map(float, [0, 1, 1] ),
my_cutoff = 12)
assert len(c) == 1
c = Cell( my_min = map(float, [-5, 0, 0]),
my_max = map(float, [10, 1, 1] ),
my_cutoff = 4.9)
assert len(c) == 4
def test_add(self):
c = Cell( my_cutoff = 2.9 )
a1 = Atom( element = 'H', x = 3 )
a2 = Atom( element = 'H', x = 3, y = 3 )
a3 = Atom( element = 'H', x = 3, y = 3, z= 3 )
c.add( a1 )
c.add( a2 )
c.add( a3 )
assert a1 in c[1][0][0]
assert a2 in c[1][1][0]
assert a3 in c[1][1][1]
def test_update(self):
c = Cell( my_cutoff = 3 )
a = Atom( z = 5 )
c.add( a )
assert a in c[0][0][1]
a.z = 0
c = c.update()
assert a in c[0][0][0]
def test_get_closest(self):
cell = Cell.from_xyz( FILE_XYZ )
#ensure at1 exists
for at in cell:
at1 = at
x, y, z = cell.get_index( at1 )
ats = 0
tmp = []
for i in range( x-1, x+2 ):
for j in range( y-1, y+2 ):
for k in range( z-1, z+2 ):
try:
for at in cell[i][j][k]:
if at in tmp:
continue
tmp.append(at)
except IndexError:
pass
assert len(tmp) -1 == len(cell.get_closest( at1 ))
def test_from_PointDipoleList(self, ):
_str = POTSTRING
pdl = PointDipoleList.from_string( _str )
cell = dstruct.Cell.from_PointDipoleList( pdl, co = 5 )
assert isinstance( cell, dstruct.Cell )
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
d03254dabaac466edd697de38c3433475828bd4f | Add tests for has_changes | joshfriend/sqlalchemy-utils,JackWink/sqlalchemy-utils,konstantinoskostis/sqlalchemy-utils,joshfriend/sqlalchemy-utils,cheungpat/sqlalchemy-utils,spoqa/sqlalchemy-utils,tonyseek/sqlalchemy-utils,marrybird/sqlalchemy-utils,rmoorman/sqlalchemy-utils,tonyseek/sqlalchemy-utils | tests/functions/test_has_changes.py | tests/functions/test_has_changes.py | import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_utils import has_changes
class TestHasChanges(object):
def setup_method(self, method):
Base = declarative_base()
class Article(Base):
__tablename__ = 'article_translation'
id = sa.Column(sa.Integer, primary_key=True)
title = sa.Column(sa.String(100))
self.Article = Article
def test_without_changed_attr(self):
article = self.Article()
assert not has_changes(article, 'title')
def test_with_changed_attr(self):
article = self.Article(title='Some title')
assert has_changes(article, 'title')
| bsd-3-clause | Python |
|
795cd6e190a1cc4d416c5524399780e586dc6c45 | Add better kitt script | OiNutter/microbit-scripts | kitt/kitt.py | kitt/kitt.py | from microbit import *
display.scroll("I am the Knight Industries 2000")
MAX_ROWS = 4
MAX_BRIGHTNESS = 9
MIN_BRIGHTNESS = 2
def scan(reverse=False):
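    # Sweep a bright pixel across the middle row (y=2) with a fading tail,
    # imitating KITT's scanner light; reverse=True sweeps the other way.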
for i in range(0, 9):
brightness = MAX_BRIGHTNESS
row_range = range(0, i+1) if not reverse else range(i, -1, -1)
counter = 0
for j in row_range:
x = i - j if not reverse else MAX_ROWS - j
light_level = max(MIN_BRIGHTNESS, brightness) if counter >= 2 else MAX_BRIGHTNESS - counter
print (x, light_level)
if x <= MAX_ROWS and x >= 0:
display.set_pixel(x, 2, light_level)
counter += 1
#if i >= 2:
brightness -= 1
print("-")
if i < 8:
sleep(100)
for x in range(0, MAX_ROWS+1):
display.set_pixel(x, 2, MIN_BRIGHTNESS)
while True:
scan()
scan(reverse=True)
| mit | Python |
|
a6ac5055a1867259ab17997a076299731e57c45b | Add Android extractor | qurami/strings2pot | strings2pot/extractors/android.py | strings2pot/extractors/android.py | # -*- coding: utf-8 -*-
import re
import xml.etree.ElementTree as ET
class AndroidExtractor:
def __init__(self, source_file, destination_file, context_id_generator):
self.source_file = source_file
self.destination_file = destination_file
self._create_context_id = context_id_generator
def parse_string(self, string):
s = string.replace("\\'", "'")
s = s.replace("\"", "\\\"")
s = s.replace("\\n", "\n")
s = re.sub(r'%\d\$s', '%s', s)
s = re.sub(r'%\d\$d', '%d', s)
if "\n" in s:
s = s.replace("\n", "\\n\n")
parts = s.split("\n")
new_parts = ["\"\""]
for line in parts:
new_parts.append("\"%s\"" % line)
s = "\n".join(new_parts)
else:
s = "\"%s\"" % s
return s
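    # Illustrative example (not from the original source):
    #   parse_string('Hello %1$s') returns '"Hello %s"'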
def run(self):
with open(self.destination_file, 'a') as pot:
root = ET.parse(self.source_file)
counter = 3
for el in root.findall('./string'):
parsed_string = self.parse_string(el.text)
message_id = parsed_string[1:len(parsed_string)-1]
counter += 1
content = "\n#: %s:%d\nmsgctxt \"%s\"\nmsgid %s\nmsgstr \"\"\n" % (
self.source_file,
counter,
self._create_context_id(message_id), # was el.attrib.get('name')
parsed_string )
pot.write(content) | mit | Python |
|
c711f62ef96d67a6e42e3bbe10c0b3cd64a23444 | add moviepy - text_hineinzoomen | openscreencast/video_snippets,openscreencast/video_snippets | moviepy/text_hineinzoomen.py | moviepy/text_hineinzoomen.py | #!/usr/bin/env python
# Create a video with text and zoom in (the text gets larger)
# Settings
text = 'Text' # text to display
textgroesse = 150 # text size in pixels
textfarbe_r = 0 # text colour R
textfarbe_g = 0 # text colour G
textfarbe_b = 0 # text colour B
schrift = 'FreeSans' # font
winkel = 0 # angle
hgfarbe_r = 1 # background colour R
hgfarbe_g = 1 # background colour G
hgfarbe_b = 1 # background colour B
videobreite = 1280 # video width in pixels
videohoehe = 720 # video height in pixels
videolaenge = 5 # video length in seconds
videodatei = 'text.ogv' # video file
frames = 25 # frames per second
# import the moviepy module
from moviepy.editor import *
# import the gizeh module
import gizeh
# function that creates the frames; t is the time of the current frame
def create_frame(t):
img = gizeh.Surface(videobreite,videohoehe,bg_color=(hgfarbe_r,hgfarbe_g,hgfarbe_b))
text_img = gizeh.text(text, fontfamily=schrift, fontsize=t*(textgroesse/videolaenge),
fill=(textfarbe_r,textfarbe_g,textfarbe_b),
xy=(videobreite/2,videohoehe/2), angle=winkel)
text_img.draw(img)
return img.get_npimage()
# create the video
video = VideoClip(create_frame, duration=videolaenge)
# write the video file
video.write_videofile(videodatei, fps=frames)
# Help for moviepy: https://zulko.github.io/moviepy/index.html
# Help for gizeh: https://github.com/Zulko/gizeh
# text_hineinzoomen.py
# License: http://creativecommons.org/publicdomain/zero/1.0/
# Author: openscreencast.de
| cc0-1.0 | Python |
|
c61b1595709b6acd26cf7c43e7858e3ad5cb588f | Add missing module. | tlevine/csvkit,bradparks/csvkit__query_join_filter_CSV_cli,nriyer/csvkit,arowla/csvkit,metasoarous/csvkit,Jobava/csvkit,snuggles08/csvkit,jpalvarezf/csvkit,Tabea-K/csvkit,gepuro/csvkit,reubano/csvkit,barentsen/csvkit,kyeoh/csvkit,moradology/csvkit,unpingco/csvkit,aequitas/csvkit,wjr1985/csvkit,doganmeh/csvkit,onyxfish/csvkit,bmispelon/csvkit,dannguyen/csvkit,elcritch/csvkit,KarrieK/csvkit,wireservice/csvkit,haginara/csvkit,themiurgo/csvkit,matterker/csvkit,cypreess/csvkit,archaeogeek/csvkit | csvkit/headers.py | csvkit/headers.py | #!/usr/bin/env python
def make_default_headers(n):
"""
Make a set of simple, default headers for files that are missing them.
"""
return [u'column%i' % (i + 1) for i in range(n)]
| mit | Python |
|
1ad62b8fcffd88cc5aecb01418650e09aaa7ffad | Add deck_test.py, it's only a script for test when programming. | lanhin/deckAdvisor | deck_test.py | deck_test.py | import os
import json
from hearthstone.deckstrings import Deck
from hearthstone.enums import FormatType
from hearthstone.cardxml import load
from hearthstone.enums import Locale,Rarity
from collection import Collection
# Create a deck from a deckstring
deck = Deck()
deck.heroes = [7] # Garrosh Hellscream
deck.format = FormatType.FT_WILD
# Nonsense cards, but the deckstring doesn't validate.
deck.cards = [(1, 3), (2, 3), (3, 3), (4, 3)] # id, count pairs
print(deck.as_deckstring) # "AAEBAQcAAAQBAwIDAwMEAw=="
# Import a deck from a deckstring
deck = Deck.from_deckstring("AAEBAf0ECMAB5gT7BPsFigbYE5KsAv2uAgucArsClQONBKsEtAThBJYF7Ae8CImsAgA=")
print (deck.cards)
# load card database from CardDefs.xml and use it to initialize DBF database
db, xml = load(os.path.join("hsdata","CardDefs.xml"), locale="zhCN")
db_dbf={}
for card in db:
#print (card)
db_dbf[db[card].dbf_id] = db[card]
#print (db)
for cardPair in deck.cards:
# print (cardPair[0])
card = db_dbf[cardPair[0]]
print (cardPair[1],"x(", card.cost,")", card.name, card.rarity)
#print (type(deck.cards))
#col = Collection()
#for cardPair in deck.cards:
# col.add(cardPair)
#col.output()
#col.writeToFiles("mycards.csv")
col2 = Collection()
col2.loadFromFile("mycards.csv")
col2.output()
#col2.limitTo(1)
#col2.output()
#col3 = Collection()
#col3.initFromDeckStringFile("initdeck")
#col3.output()
def calculateLacksFromJSONFile(path, collection, db_dbf):
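    """For each deck in a JSON results file, work out which cards the
    collection is missing and how much arcane dust crafting them would cost."""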
newlist = []
with open (path, "rt") as f:
for line in f.readlines():
data = json.loads(line)['result']
deck = Deck.from_deckstring(data['deckstring'])
if len(deck.cards) <= 0:
# If there exists some connection problem,
# we may get an empty deck here.
# If so, just ignore it.
continue
print (data)
print (deck.cards)
newdict = {}
newdict["name"] = data['title']
newdict["date"] = data['date']
newdict["type"] = data['type']
newdict["deck"] = deck
newdict["lacked"], newdict["alreadyHave"] = collection.calculateLacks(deck.cards)
# print (newdict["lacked"])
_, newdict["dust"] = calcArcaneDust(newdict["lacked"], db_dbf)
newdict["power"] = 1
newlist.append(newdict)
return newlist
def calcArcaneDust(cards, db_dbf):
"""Calculate the aracne dust
Return how much dust will be generated from the cards (dustOut)
or how much is needed to prodece the cards (dustIn)
"""
dustOut = 0
dustIn = 0
for cardPair in cards:
card = db_dbf[cardPair[0]]
if card.rarity == Rarity.COMMON:
dustOut += 5
dustIn += 40
elif card.rarity == Rarity.RARE:
dustOut += 20
dustIn += 100
elif card.rarity == Rarity.EPIC:
dustOut += 100
dustIn += 400
elif card.rarity == Rarity.LEGENDARY:
dustOut += 400
dustIn += 1600
return dustOut, dustIn
#print (calculateLacksFromFile("deck1.txt", col2, db_dbf))
with open ('t3.json', 'r') as f:
for line in f.readlines():
data = json.loads(line)['result']
print (data)
print (calculateLacksFromJSONFile('t3.json', col2, db_dbf))
| mit | Python |
|
9b50da16238d2f816199c8fb8a20ec558edf5d46 | Create oie_compress.py | 9nlp/rie_gce,9nlp/rie_gce | oie_compress.py | oie_compress.py | # 1.0 much be paid on insurance claim
# 1.0 much is paid
# 1.0 much is paid on insurance claim
# 1.0 much be paid
# -----------------------------------------------------
# 1.0 channel joining two bodies
# 1.0 channel joining two larger bodies of water
# 1.0 channel joining two larger bodies
# 1.0 channel joining two bodies of water
# 1.0 narrow channel joining two bodies of water
# 1.0 narrow channel joining two larger bodies
# 1.0 narrow channel joining two larger bodies of water
# 1.0 narrow channel joining two bodies
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--oie", help="Input file containing openIE triplets to be compresses.", required=True)
parser.add_argument("--o", help="Output file for compressed openIE triplets.")
args = parser.parse_args()
with open(args.oie) as f:
    triplets = map(lambda line: line.strip().split("\t")[1:], f.readlines())
if len(triplets) < 3:
print "No triplets in file %s" % args.oie
exit()
for c in xrange(3):
[row[c] for row in triplets]
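    # NOTE: the extracted column is currently discarded; the actual merging of
    # overlapping triplets still needs to be implemented here.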
| apache-2.0 | Python |
|
aad116d8bd35eee22d07edaff4cd8ddf80ea80be | Create 2.3Identify_SeasonalValues.py | WamdamProject/WaMDaM_UseCases | UseCases_files/3Figures_Python/2.3Identify_SeasonalValues.py | UseCases_files/3Figures_Python/2.3Identify_SeasonalValues.py | # Use Case 2.3Identify_SeasonalValues
# plot Seasonal data for multiple scenarios
# Adel Abdallah
# October 30, 2017
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
from random import randint
import pandas as pd
## read the input data from GitHub csv file which is a direct query output
# 3.3Identify_SeasonalValues.csv
df = pd.read_csv("https://raw.githubusercontent.com/WamdamProject/WaMDaM_UseCases/master/UseCases_files/2Results_CSV/2.3Identify_SeasonalValues.csv")
#get the many curves by looking under "ScenarioName" column header.
#Then plot Season name vs season value
column_name = "ScenarioName"
subsets = df.groupby(column_name)
data = []
#for each subset (curve), set up its legend and line info manually so they can be edited
subsets_settings = {
'Bear Wet Year Model': {
'dash': 'solid',
'mode':'lines+markers',
'width':'3',
'legend_index': 0,
'legend_name': 'Wet Year Model',
'color':'rgb(41, 10, 216)'
},
    'Bear Normal Year Model': { # this one is the name of the subset as it appears in the csv file
        'dash': 'solid', # this is a property of the line (curve)
'width':'3',
'mode':'lines+markers',
'legend_index': 1, # to order the legend
'legend_name': 'Normal Year Model', # this is the manual curve name
'color':'rgb(38, 77, 255)'
},
'Bear Dry Year Model': {
'dash': 'solid',
'mode':'lines+markers',
'width':'3',
'legend_index': 2,
'legend_name': 'Dry Year Model',
'color':'rgb(63, 160, 255)'
},
}
# This dict is used to map legend_name to original subset name
subsets_names = {y['legend_name']: x for x,y in subsets_settings.iteritems()}
for subset in subsets.groups.keys():
print subset
dt = subsets.get_group(name=subset)
s = go.Scatter(
x=df.SeasonName,
y=dt['SeasonNumericValue'],
name = subsets_settings[subset]['legend_name'],
line = dict(
color =subsets_settings[subset]['color'],
width =subsets_settings[subset]['width'],
dash=subsets_settings[subset]['dash']
),
marker=dict(size=10),
opacity = 0.8
)
data.append(s)
# Legend is ordered based on data, so we are sorting the data based
# on desired legend order indicated by the index value entered above
data.sort(key=lambda x: subsets_settings[subsets_names[x['name']]]['legend_index'])
layout = dict(
#title = "Use Case 3.3",
yaxis = dict(
title = "Cumulative flow <br> (acre-feet/month)",
tickformat= ',',
showline=True,
dtick='5000',
ticks='outside',
ticklen=10
),
xaxis = dict(
#title = "Month",
ticks='inside',
ticklen=25
),
legend=dict(
x=0.6,y=0.5,
bordercolor='#00000',
borderwidth=2
),
width=1100,
height=800,
#paper_bgcolor='rgb(233,233,233)',
#plot_bgcolor='rgb(233,233,233)',
margin=go.Margin(l=210,b=100),
font=dict(size=28)
)
# create a figure object
fig = dict(data=data, layout=layout)
#py.iplot(fig, filename = "2.3Identify_SeasonalValues")
## it can be run from the local machine on Pycharm like this like below
## It would also work here offline but in a seperate window
plotly.offline.plot(fig, filename = "2.3Identify_SeasonalValues")
| bsd-3-clause | Python |
|
302f98844487d894252d3dc3f4d30940fbcbd9e1 | Allow pex to be invoked using runpy (python -m pex). (#637) | jsirois/pex,pantsbuild/pex,pantsbuild/pex,jsirois/pex | pex/__main__.py | pex/__main__.py | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
from pex.bin import pex
__name__ == '__main__' and pex.main()
| apache-2.0 | Python |
|
8f506c20ccad47ee6f2454a419145b1b2b48adba | Create bold-words-in-string.py | kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode | Python/bold-words-in-string.py | Python/bold-words-in-string.py | # Time: O(n * l), n is the length of S, l is the average length of words
# Space: O(t), t is the size of trie
import collections
class Solution(object):
def boldWords(self, words, S):
"""
:type words: List[str]
:type S: str
:rtype: str
"""
_trie = lambda: collections.defaultdict(_trie)
trie = _trie()
for i, word in enumerate(words):
reduce(dict.__getitem__, word, trie)["_end"] = i
lookup = [False] * len(S)
for i in xrange(len(S)):
curr = trie
k = -1
for j in xrange(i, len(S)):
if S[j] not in curr:
break
curr = curr[S[j]]
if "_end" in curr:
k = j
for j in xrange(i, k+1):
lookup[j] = True
result = []
for i in xrange(len(S)):
if lookup[i] and (i == 0 or not lookup[i-1]):
result.append("<b>")
result.append(S[i])
if lookup[i] and (i == len(S)-1 or not lookup[i+1]):
result.append("</b>");
return "".join(result)
| mit | Python |
|
2623e5e18907c1ca13661e1f468368fb17bc50d9 | add preproc tests | timahutchinson/desispec,desihub/desispec,desihub/desispec,timahutchinson/desispec,gdhungana/desispec,gdhungana/desispec | py/desispec/test/test_preproc.py | py/desispec/test/test_preproc.py | import unittest
import numpy as np
from desispec.preproc import preproc, _parse_sec_keyword
class TestPreProc(unittest.TestCase):
def setUp(self):
hdr = dict()
hdr['CAMERA'] = 'b0'
hdr['DATE-OBS'] = '2018-09-23T08:17:03.988'
hdr['CCDSEC'] = '[1:200,1:150]'
hdr['BIASSECA'] = '[1:20,1:80]'
hdr['DATASECA'] = '[21:110,1:80]'
hdr['CCDSECA'] = '[1:90,1:80]'
hdr['BIASSECB'] = '[221:240,1:80]'
hdr['DATASECB'] = '[111:220,1:80]'
hdr['CCDSECB'] = '[91:200,1:80]'
hdr['BIASSECC'] = '[1:20,81:150]'
hdr['DATASECC'] = '[21:110,81:150]'
hdr['CCDSECC'] = '[1:90,81:150]'
hdr['BIASSECD'] = '[221:240,81:150]'
hdr['DATASECD'] = '[111:220,81:150]'
hdr['CCDSECD'] = '[91:200,81:150]'
self.header = hdr
self.ny = 150
self.nx = 200
self.noverscan = 20
self.rawimage = np.zeros((self.ny, self.nx+2*self.noverscan))
self.offset = dict(A=100.0, B=100.5, C=50.3, D=200.4)
self.gain = dict(A=1.0, B=1.5, C=0.8, D=1.2)
self.rdnoise = dict(A=2.0, B=2.2, C=2.4, D=2.6)
self.quad = dict(
A = np.s_[0:80, 0:90], B = np.s_[0:80, 90:200],
C = np.s_[80:150, 0:90], D = np.s_[80:150, 90:200],
)
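# fill each amplifier's overscan (bias) and data regions with its offset plus Gaussian read noise (converted to ADU by dividing by the gain)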
for amp in ('A', 'B', 'C', 'D'):
self.header['GAIN'+amp] = self.gain[amp]
self.header['RDNOISE'+amp] = self.rdnoise[amp]
xy = _parse_sec_keyword(hdr['BIASSEC'+amp])
shape = [xy[0].stop-xy[0].start, xy[1].stop-xy[1].start]
self.rawimage[xy] += self.offset[amp]
self.rawimage[xy] += np.random.normal(scale=self.rdnoise[amp], size=shape)/self.gain[amp]
xy = _parse_sec_keyword(hdr['DATASEC'+amp])
shape = [xy[0].stop-xy[0].start, xy[1].stop-xy[1].start]
self.rawimage[xy] += self.offset[amp]
self.rawimage[xy] += np.random.normal(scale=self.rdnoise[amp], size=shape)/self.gain[amp]
#- Confirm that all regions were correctly offset
assert not np.any(self.rawimage == 0.0)
def test_preproc(self):
image = preproc(self.rawimage, self.header)
self.assertEqual(image.pix.shape, (self.ny, self.nx))
self.assertTrue(np.all(image.ivar <= 1/image.readnoise**2))
for amp in ('A', 'B', 'C', 'D'):
pix = image.pix[self.quad[amp]]
rdnoise = np.median(image.readnoise[self.quad[amp]])
self.assertAlmostEqual(np.median(pix), 0.0, delta=0.2)
self.assertAlmostEqual(np.std(pix), self.rdnoise[amp], delta=0.2)
self.assertAlmostEqual(rdnoise, self.rdnoise[amp], delta=0.2)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | Python |
|
228f4325aa5f1c8b616f45462280b4a7cb0792dd | Add test for empty files to csvjoin | wireservice/csvkit,dannguyen/csvkit,onyxfish/csvkit,doganmeh/csvkit | tests/test_utilities/test_csvjoin.py | tests/test_utilities/test_csvjoin.py | #!/usr/bin/env python
import sys
try:
from mock import patch
except ImportError:
from unittest.mock import patch
from csvkit.utilities.csvjoin import CSVJoin, launch_new_instance
from tests.utils import CSVKitTestCase, EmptyFileTests
class TestCSVJoin(CSVKitTestCase, EmptyFileTests):
Utility = CSVJoin
default_args = ['examples/dummy.csv', '-']
def test_launch_new_instance(self):
with patch.object(sys, 'argv', [self.Utility.__name__.lower(), 'examples/join_a.csv', 'examples/join_b.csv']):
launch_new_instance()
def test_sequential(self):
output = self.get_output_as_io(['examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 4)
def test_inner(self):
output = self.get_output_as_io(['-c', 'a', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 3)
def test_left(self):
output = self.get_output_as_io(['-c', 'a', '--left', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 5)
def test_right(self):
output = self.get_output_as_io(['-c', 'a', '--right', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 4)
def test_outer(self):
output = self.get_output_as_io(['-c', 'a', '--outer', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 6)
def test_left_short_columns(self):
output = self.get_output_as_io(['-c', 'a', 'examples/join_a_short.csv', 'examples/join_b.csv'])
with open('examples/join_short.csv') as f:
self.assertEqual(output.readlines(), f.readlines())
| #!/usr/bin/env python
import sys
try:
from mock import patch
except ImportError:
from unittest.mock import patch
from csvkit.utilities.csvjoin import CSVJoin, launch_new_instance
from tests.utils import CSVKitTestCase
class TestCSVJoin(CSVKitTestCase):
Utility = CSVJoin
def test_launch_new_instance(self):
with patch.object(sys, 'argv', [self.Utility.__name__.lower(), 'examples/join_a.csv', 'examples/join_b.csv']):
launch_new_instance()
def test_sequential(self):
output = self.get_output_as_io(['examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 4)
def test_inner(self):
output = self.get_output_as_io(['-c', 'a', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 3)
def test_left(self):
output = self.get_output_as_io(['-c', 'a', '--left', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 5)
def test_right(self):
output = self.get_output_as_io(['-c', 'a', '--right', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 4)
def test_outer(self):
output = self.get_output_as_io(['-c', 'a', '--outer', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 6)
def test_left_short_columns(self):
output = self.get_output_as_io(['-c', 'a', 'examples/join_a_short.csv', 'examples/join_b.csv'])
with open('examples/join_short.csv') as f:
self.assertEqual(output.readlines(), f.readlines())
| mit | Python |
dbb127a6fbadfa17f5faad45e8d7ebb6b943a77d | add basic test for vamp_spectral_centroid | Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide | tests/test_vamp_spectral_centroid.py | tests/test_vamp_spectral_centroid.py | #! /usr/bin/env python
import unittest
from unit_timeside import TestRunner
from timeside.plugins.decoder.aubio import AubioDecoder as FileDecoder
from timeside.core import get_processor
from timeside.core.tools.test_samples import samples
class TestVampSpectralCentroid(unittest.TestCase):
proc_id = 'vamp_spectral_centroid'
def setUp(self):
self.analyzer = get_processor(self.proc_id)()
def testOnC4Scale(self):
"runs on C4 scale"
self.source = samples["C4_scale.wav"]
def testOnSweep(self):
"runs on sweep"
self.source = samples["sweep.wav"]
def tearDown(self):
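# the actual processing and assertions happen here: each test method only sets self.source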
decoder = FileDecoder(self.source)
(decoder | self.analyzer).run()
results = self.analyzer.results
result = results.get_result_by_id(self.proc_id)
duration = result.audio_metadata.duration
data_duration = result.data_object.time[-1]
self.assertAlmostEqual(duration, data_duration, 1)
if __name__ == '__main__':
unittest.main(testRunner=TestRunner())
| agpl-3.0 | Python |
|
86c67f321ec4ee7c254fde4a7f942a83d5e35016 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/00734af980b920f9f963badf85fbeb12d576fde5. | Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "00734af980b920f9f963badf85fbeb12d576fde5"
TFRT_SHA256 = "0c136cdfb87ae3663c162ad807c57983a8119fa7097fb589c4a7d04b98d09d3d"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "d1caeb8bdba1851194baf06c28ea09b5b67e5623"
TFRT_SHA256 = "e480ad7451b9e3ce45da61d7107953a4d55789bf6087442fd000a1ecb7c6604e"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| apache-2.0 | Python |
8cf5b328d7596a9b74490b7dfd4a1b8aa1577b55 | Merge remote-tracking branch 'origin' into AC-9512 | masschallenge/django-accelerator,masschallenge/django-accelerator | accelerator/migrations/0110_remove_bucket_list_program_role_20220707_1001.py | accelerator/migrations/0110_remove_bucket_list_program_role_20220707_1001.py | from django.db import migrations
def remove_bucket_list_program_roles(apps, schema_editor):
BucketState = apps.get_model('accelerator', 'BucketState')
ProgramRole = apps.get_model('accelerator', 'ProgramRole')
ProgramRoleGrant = apps.get_model('accelerator', 'ProgramRoleGrant')
NodePublishedFor = apps.get_model('accelerator', 'NodePublishedFor')
program_role_ids = BucketState.objects.values_list('program_role_id',
flat=True)
NodePublishedFor.objects.filter(
published_for_id__in=program_role_ids).delete()
ProgramRoleGrant.objects.filter(
program_role_id__in=program_role_ids).delete()
BucketState.objects.all().delete()
ProgramRole.objects.filter(pk__in=program_role_ids).delete()
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0109_remove_interest_fields_20220705_0425'),
]
operations = [
migrations.RunPython(remove_bucket_list_program_roles,
migrations.RunPython.noop)
]
| mit | Python |
|
5945fe5c527b3f5cb2ed104eccdf9266dc702eb1 | add second order constraint | hungpham2511/toppra,hungpham2511/toppra,hungpham2511/toppra | toppra/constraint/can_linear_second_order.py | toppra/constraint/can_linear_second_order.py | from .canonical_linear import CanonicalLinearConstraint
from .constraint import DiscretizationType
import numpy as np
class CanonicalLinearSecondOrderConstraint(CanonicalLinearConstraint):
""" A class to represent Canonical Linear Generalized Second-order constraints.
Parameters
----------
inv_dyn: (array, array, array) -> array
The "inverse dynamics" function that receives joint position, velocity and
acceleration as inputs and outputs the "joint torque". See notes for more
details.
cnst_coeffs: (array) -> array, array
The coefficient functions of the constraints. See notes for more details.
Notes
-----
A constraint of this kind can be represented by the following formula
.. math::
A(q) \ddot q + \dot q^\\top B(q) \dot q + C(q) = w,
where w is a vector that satisfies the polyhedral constraint
.. math::
F(q) w \\leq g(q).
To evaluate the constraint parameters, multiple calls to inv_dyn, cnst_F and cnst_g
are made. Specifically one can write the second-order equation as follows
.. math::
A(q) p'(s) \ddot s + [A(q) p''(s) + p'(s)^\\top B(q) p'(s)] \dot s^2 + C(q) = w,
To evaluate the coefficients a(s), b(s), c(s), inv_dyn is called repeatedly with
appropriate arguments.
"""
def __init__(self, inv_dyn, cnst_F, cnst_g, discretization_scheme=DiscretizationType.Collocation):
super(CanonicalLinearSecondOrderConstraint, self).__init__()
self.discretization_type = discretization_scheme
self.inv_dyn = inv_dyn
self.cnst_F = cnst_F
self.cnst_g = cnst_g
self._format_string = " Generalized Second-order constraint"
self.discretization_type = discretization_scheme
def compute_constraint_params(self, path, gridpoints):
v_zero = np.zeros(path.get_dof())
p = path.eval(gridpoints)
ps = path.evald(gridpoints)
pss = path.evaldd(gridpoints)
F = np.array(map(self.cnst_F, p))
g = np.array(map(self.cnst_g, p))
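# c(s): dynamics evaluated with zero velocity and acceleration; a(s) and b(s) follow by subtracting it out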
c = np.array(
map(lambda p_: self.inv_dyn(p_, v_zero, v_zero), p)
)
a = np.array(
map(lambda p_, ps_: self.inv_dyn(p_, v_zero, ps_), p, ps)
) - c
b = np.array(
map(lambda p_, ps_, pss_: self.inv_dyn(p_, ps_, pss_), p, ps, pss)
) - c
return a, b, c, F, g, None, None
| mit | Python |
|
3020472569a49f01331ebb150f004e2684196b8e | add expression to improve the domain | ovnicraft/openerp-server,xrg/openerp-server,gisce/openobject-server,splbio/openobject-server,MarkusTeufelberger/openobject-server,splbio/openobject-server,MarkusTeufelberger/openobject-server,gisce/openobject-server,vnc-biz/openerp-server,splbio/openobject-server,vnc-biz/openerp-server,ovnicraft/openerp-server,xrg/openerp-server,MarkusTeufelberger/openobject-server,gisce/openobject-server | bin/tools/expression.py | bin/tools/expression.py | #!/usr/bin/env python
def _is_operator( element ):
return isinstance( element, str ) and element in ['&','|']
def _is_leaf( element ):
return isinstance( element, tuple ) and len( element ) == 3 and element[1] in ['=', '<>', '!=', '<=', '<', '>', '>=', 'like', 'not like', 'ilike', 'not ilike']
def _is_expression( element ):
return isinstance( element, tuple ) and len( element ) > 2 and _is_operator( element[0] )
class expression_leaf( object ):
def __init__(self, operator, left, right ):
self.operator = operator
self.left = left
self.right = right
def parse( self ):
return self
def to_sql( self ):
return "%s %s %s" % ( self.left, self.operator, self.right )
class expression( object ):
def __init__( self, exp ):
if isinstance( exp, tuple ):
if not _is_leaf( exp ) and not _is_operator( exp[0] ):
exp = list( exp )
if isinstance( exp, list ):
if len( exp ) == 1 and _is_leaf( exp[0] ):
exp = exp[0]
else:
if not _is_operator( exp[0][0] ):
exp.insert( 0, '&' )
exp = tuple( exp )
else:
exp = exp[0]
self.exp = exp
self.operator = '&'
self.children = []
def parse( self ):
if _is_leaf( self.exp ):
self.children.append( expression_leaf( self.exp[1], self.exp[0], self.exp[2] ).parse() )
elif _is_expression( self.exp ):
self.operator = self.exp[0]
for element in self.exp[1:]:
if not _is_operator( element ) and not _is_leaf(element):
self.children.append( expression(element).parse() )
else:
if _is_leaf(element):
self.children.append( expression_leaf( element[1], element[0], element[2] ).parse() )
return self
def to_sql( self ):
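# child clauses are joined with AND/OR according to this node's prefix operator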
return "( %s )" % ((" %s " % {'&' : 'AND', '|' : 'OR' }[self.operator]).join([child.to_sql() for child in self.children]))
| agpl-3.0 | Python |
|
0226bec54c30a31c0005e7318b69c58a379cfbc9 | refactor output function | wolfg1969/my-stars-pilot,wolfg1969/oh-my-stars,wolfg1969/oh-my-stars | mystarspilot/view.py | mystarspilot/view.py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from colorama import Fore, Back, Style
class SearchResultView(object):
def print_search_result(self, search_result, keywords=None):
if search_result is not None:
for repo in search_result:
self.print_repo_name(repo)
self.print_repo_url(repo)
self.print_repo_language(repo)
self.print_repo_description(repo)
self.print_summary(search_result)
def print_summary(self, search_result):
self._print('', end='\n')
count = len(search_result)
fore_color = Fore.GREEN if count else Fore.YELLOW
text = "({} star{} found)".format(count if count else "No", 's' if count > 1 else '')
self._print(text, fore_color, end='\n')
def print_repo_name(self, repo):
self._print(repo.full_name, Fore.GREEN)
def print_repo_url(self, repo):
self._print("[{}]".format(repo.html_url), Fore.YELLOW)
def print_repo_language(self, repo):
if repo.language:
self._print(repo.language, Fore.BLUE, end='\n')
def print_repo_description(self, repo):
if repo.description:
self._print(repo.description, end='\n')
def _print(self, text='', fore_color=Fore.RESET, end=' '):
print(fore_color + text, end='')
print(Fore.RESET + Back.RESET + Style.RESET_ALL, end=end)
| mit | Python |
|
7e91549abc8d185deb231c937d7740606f9454ec | add pmi element unit test | jasonwbw/NLPbasic | test_pmi_element.py | test_pmi_element.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# The unit test case for pmi.TopkHeap and PMIElement
#
# @author: Jason Wu ([email protected])
from pmi import PMIElement
import sys
import unittest
class PMIElementTestCase(unittest.TestCase):
def setUp(self):
pass
def testGt(self):
f = PMIElement('f', 12)
e = PMIElement('e', 11)
self.assertEqual(True, e < f)
self.assertEqual(True, f > e)
def testEq(self):
f = PMIElement('f', 11)
e = PMIElement('e', 11)
g = PMIElement('e', 11)
self.assertEqual(False, e == f)
self.assertEqual(True, e == g)
def testPrintSomething(self):
pass
if __name__ == "__main__":
unittest.main()
| mit | Python |
|
3c52683e759f146ad247c6e397d5d49dd1cc9966 | Create __init__.py | kushalmitruka/django-mongodb-testing | testing/__init__.py | testing/__init__.py | mit | Python |
||
a770c91ea6761d890387b4b6e130cb495817eea0 | Improve the sc2parse debugging script. | StoicLoofah/sc2reader,GraylinKim/sc2reader,ggtracker/sc2reader,ggtracker/sc2reader,GraylinKim/sc2reader,vlaufer/sc2reader,vlaufer/sc2reader,StoicLoofah/sc2reader | sc2reader/scripts/sc2parse.py | sc2reader/scripts/sc2parse.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import sc2reader
import traceback
def main():
for argument in sys.argv[1:]:
for path in sc2reader.utils.get_files(argument):
try:
replay = sc2reader.load_replay(path, debug=True)
except sc2reader.exceptions.ReadError as e:
print e.replay.filename
print '{build} - {real_type} on {map_name} - Played {start_time}'.format(**e.replay.__dict__)
print '[ERROR]', e.message
for event in e.game_events[-5:]:
print '{0} - {1}'.format(hex(event.type),event.bytes.encode('hex'))
e.buffer.seek(e.location)
print e.buffer.peek(50).encode('hex')
print
except Exception as e:
print path
replay = sc2reader.load_replay(path, debug=True, load_level=1)
print '{build} - {real_type} on {map_name} - Played {start_time}'.format(**replay.__dict__)
print '[ERROR]', e
traceback.print_exc()
print
if __name__ == '__main__':
main() | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import sc2reader
def main():
for replay in sc2reader.load_replays(sys.argv[1:], verbose=True):
pass
if __name__ == '__main__':
main() | mit | Python |
98295608a2ba4519d12212532380253bba4372ed | Add script that recommends scrape task schedule based on recent run timings | openstates/bobsled,openstates/bobsled,openstates/bobsled,openstates/bobsled | scripts/frequency_analysis.py | scripts/frequency_analysis.py | import asyncio
import attr
import pprint
import dateutil.parser
from datetime import timedelta
from bobsled.core import bobsled
from bobsled.base import Status
def recommend_frequency_for_task(runs):
total_duration = timedelta(seconds=0)
longest_duration = timedelta(seconds=0)
for run in runs:
start = dateutil.parser.parse(run.start)
end = dateutil.parser.parse(run.end)
duration = end - start
total_duration += duration
if duration > longest_duration:
longest_duration = duration
average = total_duration / len(runs)
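# choose a cron schedule from the longest recent runtime: <= 10 minutes -> every 2 hours, <= 1 hour -> every 6 hours, otherwise daily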
if longest_duration.seconds <= 60*10:
return '0 */2 * * ?'
elif longest_duration.seconds <= 60*60:
return '0 */6 * * ?'
else:
return 'daily'
async def analyze_frequency():
await bobsled.initialize()
tasks = [attr.asdict(t) for t in await bobsled.storage.get_tasks()]
results = await asyncio.gather(
*[bobsled.run.get_runs(task_name=t["name"], latest=4) for t in tasks]
)
recommendations = []
for task, latest_runs in zip(tasks, results):
# make recommendations for scrape tasks that have runs
if latest_runs and '-scrape' in task['name']:
if all(run.status is Status.Success for run in latest_runs):
recommendation = recommend_frequency_for_task(latest_runs)
else:
# a recent run failed, made a note of that
recommendation = 'n/a - at least one recent task failed'
recommendations.append({
'task': task['name'],
'current_schedule': task['triggers'][0]['cron'],
'recommended': recommendation
})
changed_recommendations = []
for recommendation in recommendations:
if recommendation['recommended'] != 'daily' and 'n/a' not in recommendation['recommended']\
and recommendation['current_schedule'] != recommendation['recommended']:
changed_recommendations.append(recommendation)
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(changed_recommendations)
def main():
# asyncio.run(bobsled.initialize()) # this makes a threading problem if it's here
asyncio.run(analyze_frequency())
if __name__ == "__main__":
main()
| mit | Python |
|
e6642dd9c9cad6aca3cb70e4cca53afe51494d4b | Add a test for checking setup.py | UCBerkeleySETI/blimpy,UCBerkeleySETI/blimpy | tests/test_setup.py | tests/test_setup.py | r""" Testspectra_gen functions"""
def test_setup():
import os
cmd = "python3 setup.py check"
os.system(cmd) | bsd-3-clause | Python |
|
0424eb7dd8e55e2f88f088c3a84c8e962d89f06e | build perf from source | cyliustack/sofa,cyliustack/sofa,cyliustack/sofa,cyliustack/sofa,cyliustack/sofa | tools/perf_build.py | tools/perf_build.py | #!/usr/bin/env python
import platform
import subprocess
if subprocess.call('which sudo', shell=True) == 0:
with_sudo = 'sudo '
else:
with_sudo = ''
major = int(platform.release().split('.')[0])
minor = int(platform.release().split('.')[1])
revision = int(platform.release().split('.')[2].split('-')[0])
url_kernel = 'https://cdn.kernel.org/pub/linux/kernel/v%d.x/linux-%d.%d.tar.gz' % (major, major, minor)
tarfile = 'linux-%d.%d.tar.gz' % (major, minor)
source_dir = 'linux-%d.%d' % (major, minor)
print('URL: ', url_kernel)
print('TarFile: ', tarfile)
subprocess.call('rm -r %s' % (source_dir), shell=True)
subprocess.call('rm %s' % (tarfile), shell=True)
subprocess.call('wget %s' % (url_kernel) , shell=True)
subprocess.call('tar xf %s && make -j -C %s/tools/perf' % (tarfile, source_dir) , shell=True)
subprocess.call(with_sudo + 'cp %s/tools/perf/perf /usr/bin/' % (source_dir) , shell=True)
subprocess.call('rm -r %s' % (source_dir), shell=True)
subprocess.call('rm %s' % (tarfile), shell=True)
subprocess.call('ls -lah /usr/bin/perf', shell=True)
#get kernelversion
#wget http://www.kernel.org/pub/linux/kernel/v2.6/testing/linux-2.6.33-rc3.tar.bz2
| apache-2.0 | Python |
|
ef3e07794d4245b9d4a1d0007a0b9099d5bafaf9 | Add asteval wrapper | Snuggert/moda | project/asteval_wrapper.py | project/asteval_wrapper.py | from asteval import Interpreter
import functools
import re
class Script(object):
def __init__(self):
"""
Sets up an interpreter.
"""
self.interpreter = Interpreter()
self.symtable['re'] = re
@property
def symtable(self):
"""
Expose the internal symbol table.
"""
return self.interpreter.symtable
@symtable.setter
def symtable(self, symtable):
"""
Apply changes to the internal symbol table.
"""
self.interpreter.symtable = symtable
def add_file(self, path):
"""
Adds and loads code from a script file.
"""
with open(path, 'rb') as f:
self.interpreter(f.read())
def invoke(self, name, *args, **kwargs):
"""
Invokes a function in the script with the appropriate arguments.
"""
f = self.interpreter.symtable.get(name, None)
if not callable(f):
return
return f(*args, **kwargs)
def __getattr__(self, name):
"""
Returns the function to invoke a function in the script, if a function
with that name exists within the symbol table. Otherwise, an attribute
error is being raised (default behaviour).
"""
if name in ['symtable', 'interpreter']:
raise AttributeError("{} instance has no attribute '{}'".format(
self.__class__.__name__, name))
if not callable(self.symtable.get(name, None)):
raise AttributeError("{} instance has no attribute '{}'".format(
self.__class__.__name__, name))
return functools.partial(self.invoke, name)
| mit | Python |
|
126863fd6c2a13491b92d546d3e886d0e0da492b | Add experiment for nodejs. | arangodb/velocypack,arangodb/Jason,arangodb/velocypack,arangodb/velocypack,arangodb/velocypack,arangodb/Jason,arangodb/Jason,arangodb/Jason | swig/node/binding.gyp | swig/node/binding.gyp | {
"targets": [
{
"target_name": "velocypack",
"sources": [ "../../src/asm-functions.cpp",
"../../src/AttributeTranslator.cpp",
"../../src/Builder.cpp",
"../../src/Collection.cpp",
"../../src/Dumper.cpp",
"../../src/Exception.cpp",
"../../src/fasthash.cpp",
"../../src/fpconv.cpp",
"../../src/HexDump.cpp",
"../../src/Iterator.cpp",
"../../src/Options.cpp",
"../../src/Parser.cpp",
"../../src/Slice.cpp",
"../../src/ValueType.cpp",
"../../src/velocypack-common.cpp",
"../../src/Version.cpp",
"velocypack_wrap.cxx" ],
"include_dirs": [ "../../include", "../../src", "/usr/local/node-v5.0.0-linux-x64/include/node" ],
"cflags!": [ "-fno-exceptions" ],
"cflags_cc!": [ "-fno-exceptions" ]
}
]
}
| apache-2.0 | Python |
|
ad0a1bf70dc2776c88115389400fd6958e49ecc8 | Add rsync package | tmerrick1/spack,lgarren/spack,LLNL/spack,iulian787/spack,EmreAtes/spack,iulian787/spack,LLNL/spack,EmreAtes/spack,lgarren/spack,LLNL/spack,TheTimmy/spack,tmerrick1/spack,TheTimmy/spack,skosukhin/spack,iulian787/spack,matthiasdiener/spack,mfherbst/spack,iulian787/spack,lgarren/spack,mfherbst/spack,EmreAtes/spack,iulian787/spack,matthiasdiener/spack,mfherbst/spack,LLNL/spack,tmerrick1/spack,tmerrick1/spack,matthiasdiener/spack,lgarren/spack,EmreAtes/spack,krafczyk/spack,krafczyk/spack,matthiasdiener/spack,EmreAtes/spack,skosukhin/spack,krafczyk/spack,krafczyk/spack,krafczyk/spack,matthiasdiener/spack,TheTimmy/spack,TheTimmy/spack,tmerrick1/spack,mfherbst/spack,skosukhin/spack,skosukhin/spack,mfherbst/spack,LLNL/spack,skosukhin/spack,lgarren/spack,TheTimmy/spack | var/spack/packages/rsync/package.py | var/spack/packages/rsync/package.py | from spack import *
class Rsync(Package):
"""rsync is an open source utility that provides fast incremental file transfer."""
homepage = "https://rsync.samba.org"
url = "https://download.samba.org/pub/rsync/rsync-3.1.1.tar.gz"
version('3.1.1', '43bd6676f0b404326eee2d63be3cdcfe')
# depends_on("foo")
def install(self, spec, prefix):
configure('--prefix=%s' % prefix)
make()
make("install")
| lgpl-2.1 | Python |
|
5869091cc63afbe9c8bde2bf6e9f934c46d3c3f5 | Create generate_api_error.py | bigdig/vnpy,vnpy/vnpy,bigdig/vnpy,vnpy/vnpy,bigdig/vnpy,bigdig/vnpy | vnpy/api/tap/generator/generate_api_error.py | vnpy/api/tap/generator/generate_api_error.py | """"""
class DataTypeGenerator:
"""DataType生成器"""
def __init__(self, filename: str, prefix: str, name: str) -> None:
"""Constructor"""
self.filename: str = filename
self.prefix: str = prefix
self.name: str = name
def run(self) -> None:
"""主函数"""
self.f_cpp = open(self.filename, "r", encoding="UTF-8")
self.f_define = open(f"{self.prefix}_{self.name}_error_constant.py", "w", encoding="UTF-8")
for line in self.f_cpp:
self.process_line(line)
self.f_cpp.close()
self.f_define.close()
print(f"{self.name}_DataType生成完毕")
def process_line(self, line: str) -> None:
"""处理每行"""
line = line.replace("\n", "")
line = line.replace(";", "")
# print(line)
# MD
if self.name == "md":
if line.startswith("const int"):
self.process_int(line)
elif self.name == "td":
if line.startswith(" const int"):
self.process_int(line)
def process_int(self, line: str) -> None:
"""处理类型定义"""
sectors = line.split("=")
value = sectors[1].strip()
words = sectors[0].split(" ")
words = [word for word in words if word != ""]
name = words[-1].strip()
new_line = f"{name} = {value}\n"
self.f_define.write(new_line)
# def process_char_td(self, line: str) -> None:
# words = line.split(" ")
# words = [word for word in words if word != ""]
# name = words[-1]
# if "[" in name:
# name = name.split("[")[0]
# new_line = f"{name} = \"string\"\n"
# else:
# new_line = f"{name} = \"char\"\n"
# self.f_typedef.write(new_line)
# def process_const_md(self, line: str) -> None:
# """"""
# sectors = line.split("=")
# value = sectors[1].strip()
# words = sectors[0].split(" ")
# words = [word for word in words if word != ""]
# # name = words[1].strip()
# print(value, words)
# # new_line = f"{name} = {value}\n"
# # self.f_define.write(new_line)
# def process_const_td(self, line: str):
# sectors = line.split("=")
# value = sectors[1].replace("\'", "\"").strip()
# words = sectors[0].split(" ")
# words = [word for word in words if word != ""]
# name = words[-1].strip()
# new_line = f"{name} = {value}\n"
# self.f_define.write(new_line)
if __name__ == "__main__":
# md_generator = DataTypeGenerator("../include/tap/TapAPIError.h", "tap", "md")
# md_generator.run()
td_generator = DataTypeGenerator("../include/tap/iTapAPIError.h", "tap", "td")
td_generator.run()
| mit | Python |
|
d205284e21f5fad8195d796ad356042cb5c47894 | add log test | MrLYC/test_py_logging,MrLYC/test_py_logging | py_logging/test_logging.py | py_logging/test_logging.py | #!/usr/bin/env python
# encoding: utf-8
import logging
import os
import time
from unittest import TestCase
class TestLogging(TestCase):
def setUp(self):
dir_path = os.path.dirname(__file__)
self.logfile = os.path.join(dir_path, "tmp.log")
self.logger = logging.getLogger(
"test_logger_%s" % int(time.time() * 1000))
def tearDown(self):
if os.path.exists(self.logfile):
os.remove(self.logfile)
def log_lines(self):
with open(self.logfile, "rt") as fp:
return [l.strip() for l in fp]
def test_logger(self):
self.assertEqual(self.logger.level, logging.NOTSET)
def test_filehandler(self):
filehdr = logging.FileHandler(self.logfile)
self.logger.addHandler(filehdr)
self.logger.setLevel(logging.INFO)
self.logger.debug("debug")
self.logger.info("info")
self.logger.warning("warning")
self.logger.error("error")
self.logger.critical("critical")
self.assertListEqual(self.log_lines(), [
"info", "warning", "error", "critical"])
def test_format(self):
filehdr = logging.FileHandler(self.logfile)
logfmt = logging.Formatter("test: %(name)s %(levelname)-8s %(message)s")
filehdr.setFormatter(logfmt)
self.logger.addHandler(filehdr)
self.logger.setLevel(logging.INFO)
self.logger.info("info")
self.assertListEqual(self.log_lines(), [
"test: %s INFO info" % (self.logger.name,)])
| mit | Python |
|
9b2e0396f1121f94d6b66daa26c83bb85bc1a79a | format string tests | mitar/pychecker,mitar/pychecker | pychecker2/utest/format.py | pychecker2/utest/format.py | from pychecker2 import TestSupport
from pychecker2 import FormatStringChecks
class FormatTestCase(TestSupport.WarningTester):
def testGoodFormats(self):
self.silent('def f(x):\n'
' return "%s" % x\n')
self.silent('def f(x):\n'
" return ('%s' + '%s') % (x, x)\n")
self.silent("def f(x):\n"
" return (('%s' + '%s') * 8) % ((x,) * 16)\n")
self.silent("def f(x):\n"
" y = 2\n"
" return '%(x)f %(y)s' % locals()\n")
self.silent("y = 1\n"
"def f():\n"
" return '%(y)s' % globals()\n")
self.silent("def f():\n"
" return '%*.s %*.*s %*f' % locals()\n")
self.silent("def f():\n"
" return '%s %%' % ('',)\n")
self.silent("def f(t):\n"
" return '%s %f' % t\n")
self.silent("def f(t):\n"
" return ('%s %f' + t) % (1, 2)\n")
self.silent("def f(t):\n"
" return '%s' % `t`\n")
self.silent("def f(t):\n"
" return '%s' * ((7 - 1) / 2) % (t,t,t)\n")
def testBadFormats(self):
w = FormatStringChecks.FormatStringCheck.badFormat
self.warning("def f():\n"
" return '%' % locals()\n", 2, w, 0, '%')
self.warning("def f():\n"
" return '%z a kookie format, yah' % locals()\n",
2, w, 0, '%z a kooki...')
self.warning("def f():\n"
" return '%(foo)*.*s' % {'foo': 'bar'}\n",
2, w, 0, '%(foo)*.*s')
def testMixed(self):
w = FormatStringChecks.FormatStringCheck.mixedFormat
self.warning("def f():\n"
" return '%(mi)x %up' % locals()\n", 2, w, '(mi)')
self.warning("def f():\n"
" return '%up %(mi)x' % (1, 2)\n", 2, w, '(mi)')
def testFormatCount(self):
w = FormatStringChecks.FormatStringCheck.formatCount
self.warning("def f():\n"
" return '%s %d %f' % ('', 2)\n",
2, w, 2, 3)
def testUselessModifier(self):
w = FormatStringChecks.FormatStringCheck.uselessModifier
self.warning("def f(t):\n"
" return '%s %lf' % (t, t)\n",
2, w, 'l')
def testFormatConstants(self):
w = FormatStringChecks.FormatStringCheck.badConstant
self.warning("def f():\n"
" return ('%s' * 6) % ((1, 2) + 3 * 7)\n",
2, w, 'can only concatenate tuple (not "int") to tuple')
self.warning("def f():\n"
" return ('%s' + 6) % ((1, 2) * 3)\n",
2, w, "cannot concatenate 'str' and 'int' objects")
def testUnknownName(self):
w = FormatStringChecks.FormatStringCheck.unknownFormatName
self.warning("def f():\n"
" return '%(unknown)s' % globals()\n",
2, w, "unknown", "globals")
self.warning("def f():\n"
" return '%(unknown)s' % locals()\n",
2, w, "unknown", "locals")
| bsd-3-clause | Python |
|
2e2bae00f7b098e5fd20f2901b4f70554e250d2d | add program to plot offset distribution | dmargala/blupe,dmargala/blupe,dmargala/blupe | python/plot_offset_dist.py | python/plot_offset_dist.py | #!/usr/bin/env python
import argparse
import numpy as np
import glob
import matplotlib as mpl
mpl.use('Agg')
mpl.rcParams.update({'font.size': 10})
import matplotlib.pyplot as plt
def add_stat_legend(x):
textstr = '$\mathrm{N}=%d$\n$\mathrm{mean}=%.2f$\n$\mathrm{median}=%.2f$\n$\mathrm{std}=%.2f$' % (
len(x), np.nanmean(x), np.nanmedian(x), np.nanstd(x))
props = dict(boxstyle='round', facecolor='white')
plt.text(0.95, 0.95, textstr, transform=plt.gca().transAxes, va='top', ha='right', bbox=props)
def main():
# parse command-line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--verbose", action="store_true",
help="print verbose output")
parser.add_argument("-o", "--output", type=str, default=None,
help="output file base name")
parser.add_argument("-i", "--input", type=str, default=None,
help="required input file")
args = parser.parse_args()
# assumption: the input argument is treated as a glob pattern matching the per-plate data files
filenames = glob.glob(args.input)
nfiles = len(filenames)
# the first is the fiberid and the next two columns are xfocal and yfocal positions of the target
nidtokens = 3
# the rest are the tabulated throughput correction values
npoints = 71
# the throughput correction vectors span the range 3500A to 10500A
xvalues = np.linspace(3500, 10500, npoints, endpoint=True)
offset_dict = {}
for x in xvalues:
offset_dict[x] = []
offsets = []
for i,filename in enumerate(filenames):
plate, mjd = filename.split('.')[0].split('-')[-2:]
data = np.loadtxt(filename, ndmin=2)
nentries, ntokens = data.shape
assert ntokens == 3*npoints + nidtokens
for row in data:
fiberid, xfocal, yfocal = row[0:nidtokens]
offset = row[nidtokens+0::3]
fiber_fraction = row[nidtokens+1::3]
tpcorr = row[nidtokens+2::3]
offsets.append(offset)
offsets_array = np.vstack(offsets)
for i,x in enumerate(xvalues):
offsets_wave_slice = offsets_array[:,i]
fig = plt.figure(figsize=(8,6))
plt.hist(offsets_wave_slice, bins=50, histtype='stepfilled', alpha=0.5)
plt.xlabel('Centroid offset (arcseconds)')
plt.ylabel('Counts')
plt.title(r'$\lambda = %s$' % x)
plt.xlim([0, 2])
add_stat_legend(offsets_wave_slice)
plt.grid(True)
fig.savefig(args.output+'-%s.png'%x, bbox_inches='tight')
if __name__ == '__main__':
main()
| mit | Python |
|
091432b795e3b5571887eb924fb831060d2fd53b | Add logging setup | google/turbinia,google/turbinia,google/turbinia,google/turbinia,google/turbinia | turbinia/config/logger.py | turbinia/config/logger.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sets up logging."""
import logging
from turbinia import config
def setup(root=False):
"""Set up logging parameters."""
config.LoadConfig()
log = logging.getLogger('turbinia')
fh = logging.FileHandler(config.LOG_FILE)
formatter = logging.Formatter(u'%(asctime)s:%(levelname)s:%(message)s')
fh.setFormatter(formatter)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter(u'[%(levelname)s] %(message)s')
ch.setFormatter(formatter)
log.addHandler(fh)
log.addHandler(ch)
# Optionally configure the root logger because other modules like PSQ use
# this, and we want to see log messages from it when executing from CLI.
if root:
root_log = logging.getLogger()
root_log.addHandler(ch)
root_log.setLevel(logging.DEBUG)
| apache-2.0 | Python |
|
f9998701bafa24fce25156751fefdfa97074c801 | Add protocol conformance map | rudkx/swift,atrick/swift,glessard/swift,gregomni/swift,glessard/swift,gregomni/swift,rudkx/swift,gregomni/swift,benlangmuir/swift,rudkx/swift,JGiola/swift,JGiola/swift,glessard/swift,atrick/swift,atrick/swift,JGiola/swift,rudkx/swift,atrick/swift,glessard/swift,gregomni/swift,atrick/swift,roambotics/swift,rudkx/swift,ahoppen/swift,gregomni/swift,roambotics/swift,apple/swift,benlangmuir/swift,JGiola/swift,apple/swift,ahoppen/swift,apple/swift,gregomni/swift,rudkx/swift,JGiola/swift,roambotics/swift,ahoppen/swift,benlangmuir/swift,atrick/swift,roambotics/swift,ahoppen/swift,apple/swift,glessard/swift,JGiola/swift,roambotics/swift,benlangmuir/swift,apple/swift,benlangmuir/swift,ahoppen/swift,benlangmuir/swift,ahoppen/swift,roambotics/swift,apple/swift,glessard/swift | utils/gyb_syntax_support/protocolsMap.py | utils/gyb_syntax_support/protocolsMap.py | SYNTAX_BUILDABLE_EXPRESSIBLE_AS_CONFORMANCES = {
'ExpressibleAsConditionElement': [
'ExpressibleAsConditionElementList'
],
'ExpressibleAsDeclBuildable': [
'ExpressibleAsCodeBlockItem',
'ExpressibleAsMemberDeclListItem',
'ExpressibleAsSyntaxBuildable'
],
'ExpressibleAsStmtBuildable': [
'ExpressibleAsCodeBlockItem',
'ExpressibleAsSyntaxBuildable'
],
'ExpressibleAsExprList': [
'ExpressibleAsConditionElement',
'ExpressibleAsSyntaxBuildable'
]
}
| apache-2.0 | Python |
|
143eb4665e76065ec67b5dd42cfe84e238d50094 | use per post winner count if available to overide settings | mysociety/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextmp-popit,mysociety/yournextmp-popit | candidates/constants.py | candidates/constants.py | ELECTION_ID_REGEX = r'(?P<election>[^/]+)'
POST_ID_REGEX = r'(?P<post_id>[^/]+)'
| agpl-3.0 | Python |
|
6aef9ab419b09822b2255141349144ac8978e862 | Add migration for h5p kind. | mrpau/kolibri,indirectlylit/kolibri,indirectlylit/kolibri,mrpau/kolibri,learningequality/kolibri,indirectlylit/kolibri,learningequality/kolibri,learningequality/kolibri,mrpau/kolibri,mrpau/kolibri,indirectlylit/kolibri,learningequality/kolibri | kolibri/core/content/migrations/0025_add_h5p_kind.py | kolibri/core/content/migrations/0025_add_h5p_kind.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-12-19 02:29
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
("content", "0024_channelmetadata_public"),
]
operations = [
migrations.AlterField(
model_name="contentnode",
name="kind",
field=models.CharField(
blank=True,
choices=[
("topic", "Topic"),
("video", "Video"),
("audio", "Audio"),
("exercise", "Exercise"),
("document", "Document"),
("html5", "HTML5 App"),
("slideshow", "Slideshow"),
("h5p", "H5P"),
],
max_length=200,
),
),
]
| mit | Python |
|
39b00572d7888895bcf552999f80b712c1738142 | Create BillboardIter.py | jameswenzel/billboard-grabber | BillboardIter.py | BillboardIter.py | from datetime import date, timedelta
class BillboardDates():
'''Iterator over valid Billboard Chart weeks, which is
supposed to be a per-class singleton for start quantization'''
def __init__(self, endDate=date.today()):
assert type(endDate) is str or type(endDate) is date
self.endDate = endDate
if type(endDate) is not date:
self.endDate = self.str_to_date(endDate)
self.currentDate = date(1958, 8, 9)
def __iter__(self):
return self
def __next__(self):
if self.compare_dates(self.endDate) >= 0:
raise StopIteration
current = self.currentDate
self.increment()
return current
def str_to_date(self, string):
year, month, day = string.split('-')
return date(int(year), int(month), int(day))
def increment(self, days=7):
'''Serves as an abstraction barrier'''
self.currentDate = self.currentDate + timedelta(days)
def __repr__(self):
return str(self.currentDate)
def compare_dates(self, dateObj):
'''Returns 1 if current date is larger, 0 if equal, -1 if smaller'''
# check year first
if self.currentDate > dateObj:
return 1
elif self.currentDate < dateObj:
return -1
return 0 # if they are equal
class BillboardIter(BillboardDates):
'''Iterator over valid Billboard Chart weeks, which
quantizes the start to the next valid date'''
_BillboardDates = BillboardDates()
def __init__(self, startDate, endDate=date.today()):
assert type(startDate) is str or type(startDate) is date
super().__init__(endDate)
self.initDate = startDate
if type(self.initDate) is not date:
self.initDate = self.str_to_date(self.initDate)
self.currentDate = self.initDate
self.quantizeStart()
def reset(self):
self.currentDate = self.initDate
self.quantizeStart()
def quantizeStart(self):
'''Quantizes starting date to the closest following Billboard chart'''
bbDate = self._BillboardDates.currentDate
while self.compare_dates(bbDate) >= 0: # get BB date up to start
bbDate = next(self._BillboardDates)
while self.compare_dates(bbDate) < 0: # get start up to valid BB date
self.increment(1)
| mit | Python |
|
566850c873f6bdbed6632388330f8e4df6fbe613 | add migration for accordeon block on homepage | liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin | meinberlin/apps/cms/migrations/0021_add_accordeon_block.py | meinberlin/apps/cms/migrations/0021_add_accordeon_block.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-08 12:57
from __future__ import unicode_literals
from django.db import migrations
import meinberlin.apps.cms.blocks
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
dependencies = [
('meinberlin_cms', '0020_add_header_block'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('paragraph', wagtail.wagtailcore.blocks.RichTextBlock(template='meinberlin_cms/blocks/richtext_block.html')), ('call_to_action', wagtail.wagtailcore.blocks.StructBlock((('body', wagtail.wagtailcore.blocks.RichTextBlock()), ('link', wagtail.wagtailcore.blocks.CharBlock()), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=50))))), ('image_call_to_action', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('title', wagtail.wagtailcore.blocks.CharBlock(max_length=80)), ('body', wagtail.wagtailcore.blocks.RichTextBlock()), ('link', wagtail.wagtailcore.blocks.CharBlock()), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=50))))), ('columns_text', wagtail.wagtailcore.blocks.StructBlock((('columns_count', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[(2, 'Two columns'), (3, 'Three columns'), (4, 'Four columns')])), ('columns', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.RichTextBlock(label='Column body')))))), ('projects', wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(max_length=80)), ('projects', wagtail.wagtailcore.blocks.ListBlock(meinberlin.apps.cms.blocks.ProjectSelectionBlock(label='Project')))))), ('activities', wagtail.wagtailcore.blocks.StructBlock((('heading', wagtail.wagtailcore.blocks.CharBlock(label='Heading')), ('count', wagtail.wagtailcore.blocks.IntegerBlock(default=5, label='Count'))))), ('accordion', wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock()), ('body', wagtail.wagtailcore.blocks.RichTextBlock(required=False))))))),
),
]
| agpl-3.0 | Python |
|
ecfadf8478b8775d8579812a7bd835f6ebb1ffd4 | Add file lister for rclone export | jcu-eresearch/Edgar,jcu-eresearch/Edgar,jcu-eresearch/Edgar,jcu-eresearch/Edgar,jcu-eresearch/Edgar,jcu-eresearch/Edgar | util/rclone-list-files.py | util/rclone-list-files.py | #!/usr/bin/env python3
import glob
# For use with --files-from argument for Rclone
# This suits Edgar's structure with is
# SPECIESNAME/{occurrences|projected-distributions}/[2nd-to-latest-file-is-the-latest].zip
for folder in glob.glob('*'):
occurrences = glob.glob(folder + '/occurrences/*')
projected_distributions = glob.glob(folder + '/projected-distributions/*')
if not 'latest' in occurrences[-1] and not 'latest' in projected_distributions[-1]:
print(f'No latest in {folder}!')
exit(1)
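# print the paths to sync: the metadata file plus the newest dated archive in each subfolder (index -2 skips the trailing 'latest' entry)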
print(folder + '/metadata.json')
print(occurrences[-2])
print(projected_distributions[-2])
| bsd-3-clause | Python |
|
629bd006bfd7e6210dcc95198be9b65614e4f051 | Convert optimization_test.py to PyTorch | huggingface/transformers,huggingface/transformers,huggingface/transformers,huggingface/pytorch-transformers,huggingface/transformers | optimization_test_pytorch.py | optimization_test_pytorch.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import optimization_pytorch as optimization
import torch
import unittest
class OptimizationTest(unittest.TestCase):
def assertListAlmostEqual(self, list1, list2, tol):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, delta=tol)
def test_adam(self):
w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
x = torch.tensor([0.4, 0.2, -0.5])
criterion = torch.nn.MSELoss(reduction='elementwise_mean')
optimizer = optimization.BERTAdam(params={w}, lr=0.2, schedule='warmup_linear', warmup=0.1, t_total=100)
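# after 100 optimisation steps w should have moved close to the target x (checked below)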
for _ in range(100):
# TODO: reduction='elementwise_mean' does not seem to be taken into account, so dividing by x.size(0) is necessary
loss = criterion(x, w) / x.size(0)
loss.backward()
optimizer.step()
self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | Python |
|
101bb07375d2a36a65d07d3de32625cdef8f916d | Add parser for Vietnamese edition. | elfxiong/wiktionary-translations-parser | parser/parse_vi.py | parser/parse_vi.py | import re
from bs4 import BeautifulSoup, Tag
import requests
tested_url = [
"https://vi.wiktionary.org/wiki/kh%C3%B4ng#Ti.E1.BA.BFng_Vi.E1.BB.87t",
"https://vi.wiktionary.org/wiki/c%C3%A1m_%C6%A1n#Ti.E1.BA.BFng_Vi.E1.BB.87t",
]
HEADING_TAG = re.compile(r'^h(?P<level>[1-6])$', re.I)
COMMA_OR_SEMICOLON = re.compile('[,;]')
def get_heading_level(tag):
"""If the tag is a heading tag, return its level (1 through 6).
Otherwise, return `None`."""
heading_match = HEADING_TAG.match(tag)
if heading_match:
return int(heading_match.group('level'))
return None
def get_heading_text(tag):
"""
Extract the text of the heading, discarding "[edit]".
May need to be modified to work for more complex headings.
:param tag: a Tag object. It should be one of the <h?> tags.
:return: the actual/clean text in the tag
"""
text = tag.get_text()
text = text.split('[')[0]
return text
def get_html_tree(url):
html = requests.get(url)
# print(html.content)
soup = BeautifulSoup(html.content, 'html.parser')
return soup
def parse_translation_table(table):
"""
Parse the table to get translations and the languages.
Hopefully this function will work for all editions.
:param table: a list like table.
:return: (translation, language_name, language_code)
"""
for li in table.find_all('li'):
if not isinstance(li, Tag):
continue
text = li.get_text().split(':')
# language name is before ":"
lang_name = text[0]
# language code is in super script
lang_code = li.find("sup")
if lang_code:
lang_code = lang_code.text.strip()[1:-1]
else:
lang_code = ""
# each "trans" is: translation <sup>(lang_code)</sup> (transliteration)
# lang_code and transliteration may not exist
trans_list = re.split(COMMA_OR_SEMICOLON, text[1])
for trans in trans_list:
translation = trans.split('(')[0].strip()
yield (translation, lang_name, lang_code)
def generate_translation_tuples(soup):
"""
A generator of translation tuples
:param soup: BeautifulSoup object
:return: tuple of the form (headword, head_lang, translation, trans_lang, trans_lang_code, part_of_speech)
"""
# START non-edition-specific
# this is the table of content which is present in each edition
toc = soup.find('div', id='toc')
# print(toc.get_text())
page_state = {'headword': None,
'headword_lang': None,
'part_of_speech': None}
for element in toc.next_siblings:
if isinstance(element, Tag): # it could be a Tag or a NavigableString
level = get_heading_level(element.name)
# END non-edition-specific
if level == 2: # it is a header tag
page_state['headword_lang'] = get_heading_text(element)
elif level == 3:
page_state['part_of_speech'] = get_heading_text(element)
elif element.name == "p": # is a paragraph tag
bold_word = element.b
if bold_word:
page_state['headword'] = bold_word.get_text()
# print("headword: ", bold_word.get_text().strip())
elif element.name == "h4":
first_headline = element.find(class_="mw-headline")
if first_headline.text.strip() == "Dịch": # this is the translation header
# this is a translation table
table = element.find_next_sibling(class_="columns")
for translation, lang, lang_code in parse_translation_table(table):
yield (page_state['headword'], page_state['headword_lang'], translation, lang, lang_code,
page_state['part_of_speech'])
def main():
for url in tested_url:
soup = get_html_tree(url)
for tup in generate_translation_tuples(soup):
print(",".join(tup))
if __name__ == '__main__':
main()
| mit | Python |
|
7da53597f9cb4117cecbaed1dbb77f4693289815 | add a test for well locations endpoint | bcgov/gwells,bcgov/gwells,bcgov/gwells,bcgov/gwells | app/backend/wells/tests/test_wells.py | app/backend/wells/tests/test_wells.py | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.urls import reverse
from rest_framework.test import APITestCase
from rest_framework import status
class TestWellLocationsSearch(APITestCase):
def test_well_locations(self):
# Basic test to ensure that the well location search returns a non-error response
url = reverse('well-locations')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
| apache-2.0 | Python |
|
32f7fe6562f4d1592dfab5a9b065154dca51f1d3 | Add rsync module | predat/pyIsis | pyIsis/rsync.py | pyIsis/rsync.py | # -*- coding: utf-8 -*-
import os
import subprocess
import logging
#RSYNC_PATH = os.path.join(
# os.path.abspath (os.path.dirname(__file__)), 'bin', 'rsync')
RSYNC_PATH = '/opt/rsync/bin/rsync'
RSYNC_CMD = '{cmd} {options} "{source}" "{destination}"'
rsync_logger = logging.getLogger('avidisis')
class rsync(object):
"""
Run rsync as a subprocess sending output to a logger.
This class wraps subprocess.Popen
"""
def __init__(self, src, dst, *options):
self.src = src
self.dst = dst
self.options = options
rsync_logger.debug('rsync parameters: {} {}'.format(src, dst))
def run(self):
cmd = RSYNC_CMD.format(
cmd=RSYNC_PATH,
options= ' '.join(self.options),
source=self.src,
destination=self.dst)
process = subprocess.Popen(
cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = ''
# Poll process for new output until finished
for line in iter(process.stdout.readline, ""):
rsync_logger.debug('------ {}'.format(line.strip('\n\r')))
#print '------ {}'.format(line.strip('\n\r'))
output += line
process.wait()
exitCode = process.returncode
if (exitCode == 0):
rsync_logger.info('Workspace [{}] backup done.'.format(
os.path.basename(self.src)))
return output
else:
rsync_logger.error('rsync exitCode: {}, ouput {}'.format(
exitCode, output))
raise Exception(cmd, exitCode, output)
if __name__ == "__main__":
r = rsync('/tmp/test/', '/tmp/test2', '-av', '--delete', '--exclude="*.log"')
out = r.run()
print out
| mit | Python |
|
01eaa6ba14c51568ea7aa3cc436d14fbffb78720 | Create downloader-v0.22pa.py | Keiiko/anime-manga-cz-downloader | downloader-v0.22pa.py | downloader-v0.22pa.py | import urllib2
import re
import os
def stahniHtml(url):
f = urllib2.urlopen(url)
obsah = f.read()
f.close()
return obsah
def nahled(url):
global chapter
global currpatch1
odkazy = vyberodkazux(url)
for odkaz in odkazy:
currpatch1 = odkaz.replace("index.html", "")
chapter = re.search(r'.*/(.*?)/index',odkaz).group(1)
print "Kapitola "+chapter
print " Stahovani nahledu kapitoly... ",
nahledhtml = stahniHtml(odkaz)
print "Hotovo."
print " Vyhledavani odkazu stranky... ",
tabulka = re.search(r'<!-- Thumbnail images -->(.*?)class="xsmalltxt"',nahledhtml, re.DOTALL).group(1)
nahledyurl = re.findall(r'<a href="(.*?)"',tabulka)
print "Hotovo."
kapitola(nahledyurl)
print "Vsechna stahovani dokoncena."
finalpatch = os.path.expanduser("~")+"\\Downloads\\anime-manga.cz-downloader\\"+nazevserie+"\\"
print "Ulozeno do: "+finalpatch
os.startfile(finalpatch)
def kapitola(nahledyurl):
for kapitolasmallurl in nahledyurl:
kapitolafullurl = currpatch1 + kapitolasmallurl
getobrazek(kapitolafullurl)
def getobrazek(kapitolafullurl):
global imgname
print " Vyhledavani odkazu obrazku... ",
obrazekshorturl = re.search(r'<img id="slide" src="(.*?)".*?>',stahniHtml(kapitolafullurl)).group(1).replace("../", "")
imgname = obrazekshorturl
print "Hotovo."
obrazekfullurl = currpatch1 + obrazekshorturl
#print obrazekfullurl
ulozitobr(obrazekfullurl)
def ulozitobr(obrazekfullurl):
print " Ukladani obrazku "+obrazekfullurl+"... ",
currentpatch = os.path.expanduser("~")+"\\Downloads\\anime-manga.cz-downloader\\"+nazevserie+"\\"+chapter+"\\"
createDir(currentpatch)
imgData = urllib2.urlopen(obrazekfullurl).read()
output = open(currentpatch+imgname,'wb')
output.write(imgData)
output.close()
print "Hotovo."
def createDir(path):
    if not os.path.exists(path):
os.makedirs(path)
### 18+ extension ###
def vyberodkazux(url):
global nazevserie
print "Stahovani hlavni stranky... ",
stranka = stahniHtml(url)
print "Hotovo."
print "Vyhledavani kapitol... ",
odkazy = odkazya(stranka) + odkazyb(stranka)
nazevserie = re.search(r'<title>(.*?) *\| Anime - Manga.*?</title>',stranka).group(1).replace(" ", "").replace(" ", " ").replace(" ", " ")
print "Hotovo."
print "Manga "+nazevserie
return odkazy
def odkazya(stranka):
odkazy1 = re.findall(r'<a href="(http://anime-manga.cz/manga.*?)"', stranka)
odkazy2 = re.findall(r'<a href="(http://www.anime-manga.cz/manga.*?)"',stranka)
odkazy = odkazy1 + odkazy2
return odkazy
def odkazyb(stranka):
odkazy18 = re.findall(r'<a href="(http://anime-manga.cz/\d[^/]*?)"|<a href="(http://www.anime-manga.cz/\d[^/]*?)"|<a href="(http://anime-manga.cz/[^/]*?\d)"|<a href="(http://www.anime-manga.cz/[^/]*?\d)"', stranka)
odkazy = []
for odkaz18 in odkazy18:
for i in range(4):
            if odkaz18[i] != '':
stranka18 = stahniHtml(odkaz18[i])
odkazy.append(re.search(r'<a href="(.*?anime-manga.cz/manga.*?)"',stranka18).group(1))
return odkazy
### Proxy ###
def inicializaceproxy():
prx = raw_input('Zadej prihlasovani ve tvaru http://username:[email protected]: ')
os.environ['HTTP_PROXY'] = prx
proxy = urllib2.ProxyHandler({'http': prx})
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)
print "Anime-manga.cz Downloader PROXY alfa"
inicializaceproxy()
xurl = raw_input('stahnout mangu s url: http://www.anime-manga.cz/')
nahled("http://www.anime-manga.cz/"+xurl)
| mit | Python |
|
d6b01b968b2ef97042bc7c3dfc1f1752fcbc98a4 | Create nnet3_compute_bnf.py | osadj/kaldi_nnet3,osadj/kaldi_nnet3 | nnet3_compute_bnf.py | nnet3_compute_bnf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 5 16:49:55 2017
@author: Omid Sadjadi <[email protected]>
"""
import numpy as np
from nnet3read import nnet3read  # assumption: nnet3read.py exposes a nnet3read() function, as used in __main__ below
def splice_feats(x, w=9):
""" This routine splices the feature vectors in x by stacking over a window
of length w frames (must be odd)
"""
if w < 3 or ((w & 1) != 1):
raise ValueError('Window length should be an odd integer >= 3')
hlen = int(w / 2.)
ndim, nobs = x.shape
xx = np.c_[np.tile(x[:, 0][:,np.newaxis], hlen), x, np.tile(x[:, -1][:,np.newaxis], hlen)]
y = np.empty((w*ndim, nobs), dtype=x.dtype)
for ix in range(w):
y[ix*ndim:(ix+1)*ndim, :] = xx[:, ix:ix+nobs]
return y
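# Worked example (editorial sketch, not in the original file): for a hypothetical
# 2-dim feature matrix with 4 frames, splicing with w=3 stacks each frame with its
# immediate neighbours, so the output has w*ndim = 6 rows and the same 4 columns;
# the first and last frames are padded by repeating the edge columns.
#   x = np.arange(8, dtype=np.float32).reshape(2, 4)
#   splice_feats(x, w=3).shape  # -> (6, 4)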
def renorm_rms(data, target_rms=1.0, axis=0):
""" This routine scales the data such that the RMS is 1.0
"""
#scale = 1.0 / sqrt(x^t x / (D * target_rms^2)).
D = data.shape[axis]
scale = np.sqrt(np.sum(data * data, axis=axis, keepdims=True)/(D * target_rms * target_rms)) + 0.0
scale[scale==0] = 1.
return data / scale
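# Worked example (editorial sketch, not in the original file): for a single column
# vector [3, 4] with D = 2 and target_rms = 1.0, the scale is
# sqrt((3*3 + 4*4) / 2) = sqrt(12.5) ~= 3.5355, so the output is roughly
# [0.8485, 1.1314], whose root-mean-square is 1.0 as intended.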
def squashit(aff, nonlin, renorm=False):
""" This routine applies Sigmoid and RELU activation functions along with the
RMS renorm
"""
if nonlin=='sigmoid':
aff = sigmoid(aff)
elif nonlin=='relu':
np.maximum(aff, 0, aff)
if renorm:
aff = renorm_rms(aff, axis=0)
return aff
def sigmoid(x):
""" This routine implements Sigmoid nonlinearity
"""
return 1 / (1 + np.exp(-x))
def extract_bn_features(dnn, fea, nonlin='sigmoid', renorm=False):
""" This routine computes the bottleneck features using the DNN parameters (b, W)
and the spliced feature vectors fea. It is assumed that the last layer is
the bottleneck layer. This can be achieved by running the following command:
nnet3-copy --binary=false --nnet-config='echo output-node name=output input=dnn_bn.renorm |' \
--edits='remove-orphans' exp/nnet3/swbd9/final.raw exp/nnet3/swbd/final.txt
"""
b, W = dnn
aff = fea
for bi,wi in zip(b[:-1],W[:-1]):
aff = wi.dot(aff) + bi
aff = squashit(aff, nonlin, renorm)
aff = W[-1].dot(aff) + b[-1]
return aff
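# Shape sketch (editorial note, not in the original file): dnn = (b, W) is assumed
# to hold, per layer i, a bias column vector b[i] of shape [n_out_i x 1] and a
# weight matrix W[i] of shape [n_out_i x n_in_i], with n_in_0 equal to the number
# of rows of the spliced feature matrix fea ([ndim*w x nframes]). Each hidden layer
# computes squashit(W[i].dot(aff) + b[i]); the final affine layer is left linear so
# that its output rows are the bottleneck features, one column per frame.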
if __name__ == '__main__':
# example that shows how to extract bottleneck features from (say) MFCCs
dnn = nnet3read('final.txt', 'DNN_1024.h5', write_to_disk=True)
    # we assume mfc is a numpy array of [ndim x nframes] dimension, e.g., [39 x 537]
# that contains 39-dimensional (say) MFCCs. Features are spliced by stacking over
# a 21-frame context
fea = splice_feats(mfc, w=21)
# now we extract bottleneck features using the DNN parameters and the spliced
    # features. Here we assume that a ReLU activation function is used, followed
# by a renorm nonlinearity to scale the RMS of the vector of activations to 1.0.
# This kind of nonlinearity is implemented in Kaldi nnet3 as 'relu-renorm-layer'.
bnf = extract_bn_features(dnn, fea, nonlin='relu', renorm=True)
| apache-2.0 | Python |