commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---
bc4b25b9c24ef0db58acbd1c8e24b3fee319314b | Solve challenge 13 | HKuz/PythonChallenge | Challenges/chall_13.py | Challenges/chall_13.py | #!/Applications/anaconda/envs/Python3/bin/python
# Python challenge - 13
# http://www.pythonchallenge.com/pc/return/disproportional.html
# http://www.pythonchallenge.com/pc/phonebook.php
import xmlrpc.client
def main():
'''
Hint: phone that evil
<area shape="circle" coords="326,177,45" href="../phonebook.php">
'''
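    # Raw XML-RPC fault the server sends back for a malformed call, kept for reference.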
xml_string = '<methodResponse><fault><value><struct><member><name>faultCode</name><value><int>105</int></value></member><member><name>faultString</name><value><string>XML error: Invalid document end at line 1, column 1</string></value></member></struct></value></fault></methodResponse>'
server_url = 'http://www.pythonchallenge.com/pc/phonebook.php'
with xmlrpc.client.ServerProxy(server_url) as server_proxy:
try:
print(server_proxy.phone('Bert')) # 555-ITALY
except Exception as e:
print('Error', e)
return 0
# Keyword: italy
if __name__ == '__main__':
main()
| mit | Python |
|
742c6d47cd542a2c56b50d48c92b817e977da768 | add pgproxy.py | nakagami/minipg | misc/pgproxy.py | misc/pgproxy.py | #!/usr/bin/env python3
##############################################################################
#The MIT License (MIT)
#
#Copyright (c) 2016 Hajime Nakagami
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
##############################################################################
import sys
import socket
import binascii
def asc_dump(s):
r = ''
for c in s:
r += chr(c) if (c >= 32 and c < 128) else '.'
if r:
print('\t[' + r + ']')
def proxy_wire(server_name, server_port, listen_host, listen_port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((listen_host, listen_port))
sock.listen(1)
client_sock, addr = sock.accept()
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.connect((server_name, server_port))
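    # Relay loop: every PostgreSQL message is a 1-byte type code followed by a
    # 4-byte big-endian length, so each exchange starts with a 5-byte header.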
while True:
client_head = client_sock.recv(5)
client_code = client_head[0]
client_ln = int.from_bytes(client_head[1:], byteorder='big')
client_data = client_sock.recv(client_ln)
client_tail = client_sock.recv(5)
assert client_tail == b'H\x00\x00\x00\x04'
        print('>>', chr(client_code), binascii.b2a_hex(client_data))
        asc_dump(client_data)
server_sock.send(client_head)
server_sock.send(client_data)
server_sock.send(client_tail)
        server_code = 0
        # Relay server messages back until ReadyForQuery ('Z', code 90) arrives.
        while server_code != 90:
            server_head = server_sock.recv(5)
            server_code = server_head[0]
            server_ln = int.from_bytes(server_head[1:], byteorder='big')
            server_data = server_sock.recv(server_ln)
            print('<<', chr(server_code), binascii.b2a_hex(server_data))
            asc_dump(server_data)
client_sock.send(server_head)
client_sock.send(server_data)
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Usage : ' + sys.argv[0] + ' server[:port] [listen_host:]listen_port')
sys.exit()
server = sys.argv[1].split(':')
server_name = server[0]
if len(server) == 1:
server_port = 5432
else:
server_port = int(server[1])
listen = sys.argv[2].split(':')
if len(listen) == 1:
listen_host = 'localhost'
listen_port = int(listen[0])
else:
listen_host = listen[0]
listen_port = int(listen[1])
proxy_wire(server_name, server_port, listen_host, listen_port)
| mit | Python |
|
78ea9019850dbf9b88d3f65a4a61139f01d2c496 | Add scratch genbank-gff-to-nquads.py which just opens a gff file atm | justinccdev/biolta | src/genbank-gff-to-nquads.py | src/genbank-gff-to-nquads.py | #!/usr/bin/env python
import jargparse
parser = jargparse.ArgParser('Convert Genbank GFF into an n-quad file')
parser.add_argument('gffPath', help='path to the GFF')
args = parser.parse_args()
with open(args.gffPath):
pass
| apache-2.0 | Python |
|
48b724d7a2163c50be60d98933132b51347940bd | Create longest-line-of-consecutive-one-in-a-matrix.py | jaredkoontz/leetcode,jaredkoontz/leetcode,kamyu104/LeetCode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode,jaredkoontz/leetcode,yiwen-luo/LeetCode,jaredkoontz/leetcode,kamyu104/LeetCode | Python/longest-line-of-consecutive-one-in-a-matrix.py | Python/longest-line-of-consecutive-one-in-a-matrix.py | # Time: O(m * n)
# Space: O(n)
class Solution(object):
def longestLine(self, M):
"""
:type M: List[List[int]]
:rtype: int
"""
if not M: return 0
result = 0
dp = [[[0] * 4 for _ in xrange(len(M[0]))] for _ in xrange(2)]
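        # dp[row % 2][col] stores run lengths of ones ending at (row, col) for the
        # four directions (horizontal, vertical, diagonal, anti-diagonal); keeping
        # only two rolling rows bounds the extra space at O(n).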
for i in xrange(len(M)):
for j in xrange(len(M[0])):
dp[i % 2][j][:] = [0] * 4
if M[i][j] == 1:
dp[i % 2][j][0] = dp[i % 2][j - 1][0]+1 if j > 0 else 1
dp[i % 2][j][1] = dp[(i-1) % 2][j][1]+1 if i > 0 else 1
dp[i % 2][j][2] = dp[(i-1) % 2][j-1][2]+1 if (i > 0 and j > 0) else 1
dp[i % 2][j][3] = dp[(i-1) % 2][j+1][3]+1 if (i > 0 and j < len(M[0])-1) else 1
result = max(result, max(dp[i % 2][j]))
return result
| mit | Python |
|
23138ab91e5ac0ecf92a0968bf8e4abfa7d0c763 | Remove duplicates in all subdirectories - working raw version. | alprab/utils | removedups.py | removedups.py | import hashlib, csv, os
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
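        # Digest the file in 4 KiB chunks so large files never have to fit in memory.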
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def process_directory_csv(current_dir_fullpath, sub_dir_list, files, csvwriter):
for file in files:
full_name = current_dir_fullpath + '/' + file
# print(" " + full_name)
csvwriter.writerow([md5(full_name), str(os.path.getsize(full_name)), full_name])
def walk_all_subdirectories(path, output_file_name):
# count = 0
with open(output_file_name, "w") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=':', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for current_dir, sub_dirs, files in os.walk(path):
print(current_dir)
process_directory_csv(current_dir, sub_dirs, files, csvwriter)
csvfile.flush()
# DEBUG CODE - process only 5 directories
# count += 1
# if count >= 10:
# csvfile.close()
# break;
csvfile.close()
def sort_file(inname, outname):
input_file = open(inname, "r")
output_file = open(outname, "w", 1)
lines = [] # give lines variable a type of list
for line in input_file:
lines.append(line)
lines.sort()
for line in lines:
output_file.write(line)
input_file.close()
output_file.close()
def generate_delete_commands(sortedfile, outname):
import csv
output_file = open(outname, "w", 1)
previous_checksum = "IMPOSSIBLE_CHECKSUM"
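    # Input rows are sorted by checksum, so any row repeating the previous
    # checksum is a duplicate file and can be removed.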
with open(sortedfile) as f:
reader = csv.reader(f, delimiter=':', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in reader:
# print(row[0], row)
if previous_checksum == row[0]:
output_file.write("rm '" + row[2] + "'\n")
print("removing " + row[2])
os.remove(row[2])
previous_checksum = row[0]
f.close()
output_file.close()
# Main program follows
directory_name = ".."
unsorted_file_name = "filelist.csv"
sorted_file_name = "sortedfilelist.csv"
delete_command_file_name = "deletecommands.sh"
if __name__ == '__main__':
    walk_all_subdirectories(directory_name, unsorted_file_name)
sort_file(unsorted_file_name, sorted_file_name)
generate_delete_commands(sorted_file_name, delete_command_file_name)
| apache-2.0 | Python |
|
59736ee4dd82da7f7945723ec1cc89b19359b5c7 | Create LargestPrimeFactor.py | Laserbear/Python-Scripts | LargestPrimeFactor.py | LargestPrimeFactor.py | #! Christian Ng
base = 0
print("Enter an integer:")
base = int(raw_input())
print "Largest Factor is:"
while (base % 2) == 0:
base = base/2
if base == 1 or base == -1:
print "2"
increment = 3
while base != 1 and base != -1:
while base % increment == 0:
base = base/increment
increment = increment + 2
print increment - 2
| apache-2.0 | Python |
|
99395e345f74bbedd29fd45eebe0738a3b5f4729 | Test api endpoint for package show | ckan/ckanext-archiver,ckan/ckanext-archiver,ckan/ckanext-archiver | ckanext/archiver/tests/test_api.py | ckanext/archiver/tests/test_api.py | import pytest
import tempfile
from ckan import model
from ckan import plugins
from ckan.tests import factories
import ckan.tests.helpers as helpers
from ckanext.archiver import model as archiver_model
from ckanext.archiver.tasks import update_package
@pytest.mark.usefixtures('with_plugins')
@pytest.mark.ckan_config("ckanext-archiver.cache_url_root", "http://localhost:50001/resources/")
@pytest.mark.ckan_config("ckanext-archiver.max_content_length", 1000000)
@pytest.mark.ckan_config("ckan.plugins", "archiver testipipe")
class TestApi(object):
@pytest.fixture(autouse=True)
@pytest.mark.usefixtures(u"clean_db")
def initial_data(cls, clean_db):
archiver_model.init_tables(model.meta.engine)
cls.temp_dir = tempfile.mkdtemp()
def test_package_show(self, client):
url = client + '/?status=200&content=test&content-type=csv'
testipipe = plugins.get_plugin('testipipe')
testipipe.reset()
pkg_dict = {
'name': 'test-package-api',
'resources': [
{
'url': url,
'format': 'TXT',
'description': 'Test'
}
]
}
pkg = factories.Dataset(**pkg_dict)
update_package(pkg['id'])
result = helpers.call_action(
"package_show",
id=pkg["id"]
)
print(result)
assert 'archiver' in result.keys()
| mit | Python |
|
070a5192e4473bbbbf25a881080413f771f05801 | Add mockldap.ldap.functions taken from python-ldap | coreos/mockldap | src/mockldap/ldap/functions.py | src/mockldap/ldap/functions.py | import sys
from ldapobject import LDAPObject
def initialize(uri,trace_level=0,trace_file=sys.stdout,trace_stack_limit=None):
"""
Return LDAPObject instance by opening LDAP connection to
LDAP host specified by LDAP URL
Parameters:
uri
LDAP URL containing at least connection scheme and hostport,
e.g. ldap://localhost:389
trace_level
If non-zero a trace output of LDAP calls is generated.
trace_file
File object where to write the trace output to.
Default is to use stdout.
"""
return LDAPObject(uri,trace_level,trace_file,trace_stack_limit)
| bsd-2-clause | Python |
|
2f93251e77589c0edbb8e560940d29764caac9e0 | Test password update functionality | m-ober/byceps,m-ober/byceps,m-ober/byceps,homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps | tests/blueprints/authentication/test_views_password_update.py | tests/blueprints/authentication/test_views_password_update.py | # -*- coding: utf-8 -*-
"""
:Copyright: 2006-2016 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from byceps.services.authentication.password.models import Credential
from byceps.services.authentication.password import service as password_service
from byceps.services.authentication.session.models import SessionToken
from tests.base import AbstractAppTestCase
from testfixtures.user import create_user
class PasswordUpdateTestCase(AbstractAppTestCase):
def test_when_logged_in_endpoint_is_available(self):
old_password = 'LekkerBratworsten'
new_password = 'EvenMoreSecure!!1'
user = self.create_user(old_password)
credential_before = self.find_credential(user.id)
self.assertIsNotNone(credential_before)
session_token_before = self.find_session_token(user.id)
self.assertIsNotNone(session_token_before)
form_data = {
'old_password': old_password,
'new_password': new_password,
'new_password_confirmation': new_password,
}
response = self.send_request(form_data, user=user)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.headers.get('Location'),
'http://example.com/authentication/login')
credential_after = self.find_credential(user.id)
session_token_after = self.find_session_token(user.id)
self.assertIsNotNone(credential_after)
self.assertNotEqual(credential_before.password_hash,
credential_after.password_hash)
self.assertNotEqual(credential_before.updated_at,
credential_after.updated_at)
self.assertIsNotNone(session_token_after)
self.assertNotEqual(session_token_before.token,
session_token_after.token)
self.assertNotEqual(session_token_before.created_at,
session_token_after.created_at)
def test_when_not_logged_in_endpoint_is_unavailable(self):
form_data = {}
response = self.send_request(form_data)
self.assertEqual(response.status_code, 404)
# helpers
def create_user(self, password):
user = create_user(8516)
self.db.session.add(user)
self.db.session.commit()
password_service.create_password_hash(user.id, password)
return user
def find_credential(self, user_id):
return Credential.query.get(user_id)
def find_session_token(self, user_id):
return SessionToken.query \
.filter_by(user_id=user_id) \
.one()
def send_request(self, form_data, *, user=None):
url = '/authentication/password'
with self.client(user=user) as client:
return client.post(url, data=form_data)
| bsd-3-clause | Python |
|
0c59028a1ef33b3627e65955bafbf9b415c9bc34 | Add 457_Circular_Array_Loop.py (#34) | qiyuangong/leetcode,qiyuangong/leetcode,qiyuangong/leetcode | python/457_Circular_Array_Loop.py | python/457_Circular_Array_Loop.py | class Solution:
def circularArrayLoop(self, nums: List[int]) -> bool:
for i in range(len(nums)):
if nums[i] == 0:
continue
# if slow and fast pointers collide, then there exists a loop
slow = i
fast = self.index(nums, slow)
while nums[slow] * nums[fast] > 0 and nums[slow] * nums[self.index(nums, fast)] > 0:
if slow == fast and fast != self.index(nums, fast):
return True
elif slow == fast and fast == self.index(nums, fast):
break
slow = self.index(nums, slow)
fast = self.index(nums, self.index(nums, fast))
# set path to all 0s since it doesn't work
runner = i
value = nums[runner]
while nums[runner] * value > 0:
temp = self.index(nums, runner)
nums[runner] = 0
runner = temp
return False
def index(self, nums, index):
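        # One hop from this index: step by nums[index] and wrap around the circular array.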
length = len(nums)
return (index + nums[index] + length) % length
| mit | Python |
|
ae484c893c9cbef5a80b908ba254885e1db4d0b3 | Create 0015_auto_20200128_1045.py | onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle | bluebottle/activities/migrations/0015_auto_20200128_1045.py | bluebottle/activities/migrations/0015_auto_20200128_1045.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2020-01-28 09:45
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('activities', '0014_add_permissions'),
]
operations = [
migrations.AddField(
model_name='contribution',
name='contribution_date',
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AlterField(
model_name='activity',
name='transition_date',
field=models.DateTimeField(blank=True, help_text='Date the contribution took place.', null=True, verbose_name='contribution date'),
),
migrations.AlterField(
model_name='contribution',
name='transition_date',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='contribution',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
]
| bsd-3-clause | Python |
|
a59aef7c780c4d940ff56fa34ddf38de46056a6f | add package py-flask-compress (#7713) | iulian787/spack,mfherbst/spack,tmerrick1/spack,iulian787/spack,mfherbst/spack,EmreAtes/spack,mfherbst/spack,mfherbst/spack,krafczyk/spack,iulian787/spack,matthiasdiener/spack,krafczyk/spack,tmerrick1/spack,matthiasdiener/spack,mfherbst/spack,iulian787/spack,LLNL/spack,matthiasdiener/spack,krafczyk/spack,LLNL/spack,LLNL/spack,matthiasdiener/spack,tmerrick1/spack,tmerrick1/spack,matthiasdiener/spack,EmreAtes/spack,krafczyk/spack,EmreAtes/spack,iulian787/spack,krafczyk/spack,EmreAtes/spack,EmreAtes/spack,LLNL/spack,tmerrick1/spack,LLNL/spack | var/spack/repos/builtin/packages/py-flask-compress/package.py | var/spack/repos/builtin/packages/py-flask-compress/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyFlaskCompress(PythonPackage):
"""Flask-Compress allows you to easily compress your Flask application's
responses with gzip."""
homepage = "https://github.com/libwilliam/flask-compress"
url = "https://pypi.io/packages/source/F/Flask-Compress/Flask-Compress-1.4.0.tar.gz"
version('1.4.0', 'd997f73e4ed5793ec526c135aa765e15')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
| lgpl-2.1 | Python |
|
d48b17dc82e359aca962449c6df51aaea88a11d3 | add resource manager tests | hgrecco/pyvisa-py,pyvisa/pyvisa-py | pyvisa_py/testsuite/keysight_assisted_tests/test_resource_manager.py | pyvisa_py/testsuite/keysight_assisted_tests/test_resource_manager.py | # -*- coding: utf-8 -*-
"""Test the Resource manager.
"""
from pyvisa.testsuite.keysight_assisted_tests import require_virtual_instr
from pyvisa.testsuite.keysight_assisted_tests.test_resource_manager import (
TestResourceManager as BaseTestResourceManager,
TestResourceParsing as BaseTestResourceParsing,
)
@require_virtual_instr
class TestPyResourceManager(BaseTestResourceManager):
"""
"""
pass
@require_virtual_instr
class TestPyResourceParsing(BaseTestResourceParsing):
"""
"""
pass
| mit | Python |
|
d623140b606d7ec9b874419b4414833d669f5677 | add a way to set the last sync date | crate-archive/crate-site,crateio/crate.pypi,crate-archive/crate-site | crate_project/apps/crate/management/commands/set_last_sync.py | crate_project/apps/crate/management/commands/set_last_sync.py | import redis
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
r = redis.StrictRedis(**getattr(settings, "PYPI_DATASTORE_CONFIG", {}))
if args:
r.set("crate:pypi:since", args[0])
| bsd-2-clause | Python |
|
ca96e71995c9daa17323a3285bee71c8d334c11e | Add abstract classifier | brainbots/assistant | assisstant/keyboard/classification/abstract_classifier.py | assisstant/keyboard/classification/abstract_classifier.py | from abc import ABC, abstractmethod, abstractproperty
class AbstractClassifier(ABC):
def __init__(self, freqs, duration, data=None):
self.freqs = freqs
self.duration = duration
if data:
self.data = data
self.train(data)
@abstractmethod
def classify(self, sample):
pass
@abstractmethod
def train(self, data):
pass
| apache-2.0 | Python |
|
e089107eb52c320309b3ddd2ea2b6e764f74ff09 | Create LeverBox.py | hectorpefo/hectorpefo.github.io,hectorpefo/hectorpefo.github.io,hectorpefo/hectorpefo.github.io,hectorpefo/hectorpefo.github.io | _includes/LeverBox.py | _includes/LeverBox.py | State_Probs = {(9,1,1,1,1,1,1,1,1,1) : 1}
def Modified_State(State,Indexes):
New_State_List = list(State)
for i in Indexes:
New_State_List[i] = 1
New_State = tuple(New_State_List)
return New_State
def Best_Case_Prob_For(State,Sum):
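    # Try every way to close one, two, or three still-open levers that add up to
    # the dice total, and keep the option with the best win probability.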
Best_Case_Prob = 0
if Sum < 10 and State[Sum] == 0:
P = Prob_For_State(Modified_State(State,(Sum,)))
if P > Best_Case_Prob:
Best_Case_Prob = P
for i in range(1, min(9,Sum)):
j = Sum - i
if j > 9 or i == j:
continue
try:
if State[i] == 0 and State[j] == 0:
P = Prob_For_State(Modified_State(State,(i,j)))
if P > Best_Case_Prob:
Best_Case_Prob = P
except:
print i,j,Sum,State
for i in range(1, min(7,Sum)):
for j in range(1, min(7,Sum)):
k = Sum - i - j
if k < 1 or k > 9 or i == j or j == k or i ==k:
continue
if State[i] == 0 and State[j] == 0 and State[k] == 0:
P = Prob_For_State(Modified_State(State,(i,j,k)))
if P > Best_Case_Prob:
Best_Case_Prob = P
return Best_Case_Prob
def Prob_For_State(State):
global State_Probs
if State in State_Probs:
return State_Probs[State]
# Throw one die
P1 = 0
for i in range(1,7):
P1 += 1.0/6 * Best_Case_Prob_For(State,i)
# Throw two dice
P2 = 0
for i in range(1,7):
for j in range(1,7):
P2 += 1.0/36 * Best_Case_Prob_For(State,i+j)
P = max(P1,P2)
State_Probs[State] = P
return P
print "P(000000000) =", Prob_For_State((9,0,0,0,0,0,0,0,0,0))
| mit | Python |
|
2eb090b406a341c3b225e59779d0046cf76efc6c | Add download_feeds.py script | ucldc/ucldc-merritt | download_feeds.py | download_feeds.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os
import boto3
bucketname = 'static.ucldc.cdlib.org'
prefix = 'merritt/'
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucketname)
for obj in bucket.objects.filter(Prefix=prefix):
if obj.key.endswith('.atom'):
print "downloading {}".format(obj.key)
filename = obj.key.split('/')[1]
filepath = './feeds_current/{}'.format(filename)
print "local filepath: {}".format(filepath)
        bucket.download_file(obj.key, filepath)
| bsd-3-clause | Python |
|
494c0603c4aedb83852a008fad2139c469b537fd | Rename histograms in memory_benchmark_unittest. | jaruba/chromium.src,ChromiumWebApps/chromium,ondra-novak/chromium.src,timopulkkinen/BubbleFish,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,patrickm/chromium.src,ondra-novak/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,markYoungH/chromium.src,bright-sparks/chromium-spacewalk,chuan9/chromium-crosswalk,M4sse/chromium.src,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,Jonekee/chromium.src,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,Just-D/chromium-1,axinging/chromium-crosswalk,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,Chilledheart/chromium,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,ltilve/chromium,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,timopulkkinen/BubbleFish,dednal/chromium.src,anirudhSK/chromium,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,ondra-novak/chromium.src,jaruba/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,bright-sparks/chromium-spacewalk,markYoungH/chromium.src,mogoweb/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,anirudhSK/chromium,krieger-od/nwjs_chromium.src,pozdnyakov/chromium-crosswalk,anirudhSK/chromium,jaruba/chromium.src,littlstar/chromium.src,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,M4sse/chromium.src,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,pozdnyakov/chromium-crosswalk,patrickm/chromium.src,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,dushu1203/chromium.src,anirudhSK/chromium,fujunwei/chromium-crosswalk,anirudhSK/chromium,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,dushu1203/chromium.src,hujiajie/pa-chromium,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,hgl888/chromium-crosswalk,patrickm/chromium.src,bright-sparks/chromium-spacewalk,timopulkkinen/BubbleFish,markYoungH/chromium.src,Fireblend/chromium-crosswalk,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,dednal/chromium.src,Chilledheart/chromium,littlstar/chromium.src,hujiajie/pa-chromium,krieger-od/nwjs_chromium.src,anirudhSK/chromium,Just-D/chromium-1,Fireblend/chromium-crosswalk,mogoweb/chromium-crosswalk,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,dednal/chromium.src,pozdnyakov/chromium-crosswalk,timopulkkinen/BubbleFish,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,M4sse/chromium.src,Chilledheart/chromium,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,hujiajie/pa-chromium,Chilledheart/chromium,hujiajie/pa-chromium,mohamed--abdel-maksoud/chromium.src,mogoweb/chromium-crosswalk,Jonekee/chromium.src,patrickm/chromium.src,M4sse/chromium.src,littlstar/chromium.src,Fireblend/chromium-crosswalk,krieger-od/nwjs_chromium.src,hujiajie/pa-chromium,chuan9/chromium-crosswalk,anirudhSK/chromium,anirudhSK/chromium,dednal/chromium.src,hgl888/chr
omium-crosswalk,bright-sparks/chromium-spacewalk,ltilve/chromium,PeterWangIntel/chromium-crosswalk,markYoungH/chromium.src,ltilve/chromium,hujiajie/pa-chromium,dednal/chromium.src,dushu1203/chromium.src,ltilve/chromium,krieger-od/nwjs_chromium.src,jaruba/chromium.src,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,Just-D/chromium-1,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,dednal/chromium.src,Fireblend/chromium-crosswalk,Jonekee/chromium.src,Just-D/chromium-1,pozdnyakov/chromium-crosswalk,Jonekee/chromium.src,chuan9/chromium-crosswalk,patrickm/chromium.src,axinging/chromium-crosswalk,patrickm/chromium.src,ChromiumWebApps/chromium,hujiajie/pa-chromium,bright-sparks/chromium-spacewalk,pozdnyakov/chromium-crosswalk,hgl888/chromium-crosswalk-efl,anirudhSK/chromium,dushu1203/chromium.src,pozdnyakov/chromium-crosswalk,markYoungH/chromium.src,pozdnyakov/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk,pozdnyakov/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,axinging/chromium-crosswalk,ondra-novak/chromium.src,pozdnyakov/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,mogoweb/chromium-crosswalk,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,timopulkkinen/BubbleFish,M4sse/chromium.src,Jonekee/chromium.src,axinging/chromium-crosswalk,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ltilve/chromium,anirudhSK/chromium,dushu1203/chromium.src,fujunwei/chromium-crosswalk,mogoweb/chromium-crosswalk,dushu1203/chromium.src,pozdnyakov/chromium-crosswalk,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,M4sse/chromium.src,mogoweb/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,timopulkkinen/BubbleFish,Just-D/chromium-1,littlstar/chromium.src,hujiajie/pa-chromium,M4sse/chromium.src,dednal/chromium.src,hujiajie/pa-chromium,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,ltilve/chromium,ltilve/chromium,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,dushu1203/chromium.src,Jonekee/chromium.src,littlstar/chromium.src,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,dednal/chromium.src,dednal/chromium.src,jaruba/chromium.src,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,ltilve/chromium,jaruba/chromium.src,dushu1203/chromium.src,jaruba/chromium.src,ChromiumWebApps/chromium,M4sse/chromium.src,timopulkkinen/BubbleFish,dednal/chromium.src,timopulkkinen/BubbleFish,pozdnyakov/chromium-crosswalk,ondra-novak/chromium.src,timopulkkinen/BubbleFish,dushu1203/chromium.src,Fireblend/chromium-crosswalk,jaruba/chromium.src,patrickm/chromium.src
,hujiajie/pa-chromium,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,mogoweb/chromium-crosswalk,Chilledheart/chromium,bright-sparks/chromium-spacewalk,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,mogoweb/chromium-crosswalk,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,mogoweb/chromium-crosswalk,Jonekee/chromium.src,patrickm/chromium.src,dednal/chromium.src,Chilledheart/chromium,Just-D/chromium-1,timopulkkinen/BubbleFish,Fireblend/chromium-crosswalk,anirudhSK/chromium,hujiajie/pa-chromium,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,jaruba/chromium.src,Fireblend/chromium-crosswalk,timopulkkinen/BubbleFish,ChromiumWebApps/chromium,littlstar/chromium.src,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,markYoungH/chromium.src,ChromiumWebApps/chromium,markYoungH/chromium.src,krieger-od/nwjs_chromium.src,mogoweb/chromium-crosswalk | tools/perf/perf_tools/memory_benchmark_unittest.py | tools/perf/perf_tools/memory_benchmark_unittest.py | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from perf_tools import memory_benchmark
from telemetry.page import page_benchmark_unittest_base
class MemoryBenchmarkUnitTest(
page_benchmark_unittest_base.PageBenchmarkUnitTestBase):
def testMemoryBenchmark(self):
ps = self.CreatePageSetFromFileInUnittestDataDir('page_with_link.html')
ps.pages[0].stress_memory = {'action': 'click_element', 'text': 'Click me'}
benchmark = memory_benchmark.MemoryBenchmark()
all_results = self.RunBenchmark(benchmark, ps)
self.assertEqual(0, len(all_results.page_failures))
self.assertEqual(1, len(all_results.page_results))
results0 = all_results.page_results[0]
expected_measurements = ['V8.MemoryExternalFragmentationTotal',
'V8.MemoryHeapSampleTotalCommitted',
'V8.MemoryHeapSampleTotalUsed']
self.assertTrue(all(
[m in results0.measurement_names for m in expected_measurements]))
| # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from perf_tools import memory_benchmark
from telemetry.page import page_benchmark_unittest_base
class MemoryBenchmarkUnitTest(
page_benchmark_unittest_base.PageBenchmarkUnitTestBase):
def testMemoryBenchmark(self):
ps = self.CreatePageSetFromFileInUnittestDataDir('page_with_link.html')
ps.pages[0].stress_memory = {'action': 'click_element', 'text': 'Click me'}
benchmark = memory_benchmark.MemoryBenchmark()
all_results = self.RunBenchmark(benchmark, ps)
self.assertEqual(0, len(all_results.page_failures))
self.assertEqual(1, len(all_results.page_results))
results0 = all_results.page_results[0]
expected_measurements = ['V8_MemoryExternalFragmentationTotal',
'V8_MemoryHeapSampleTotalCommitted',
'V8_MemoryHeapSampleTotalUsed']
self.assertTrue(all(
[m in results0.measurement_names for m in expected_measurements]))
| bsd-3-clause | Python |
0ea6bab984abee943d93cdfa90273b7a7aabcf8f | add new package : brltty (#15161) | LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack | var/spack/repos/builtin/packages/brltty/package.py | var/spack/repos/builtin/packages/brltty/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Brltty(AutotoolsPackage):
"""BRLTTY is a background process (daemon) providing access to the
Linux/Unix console (when in text mode) for a blind person using
a refreshable braille display."""
homepage = "http://brltty.app/"
url = "https://github.com/brltty/brltty/archive/BRLTTY-6.0.tar.gz"
version('6.0', sha256='acfea5274bdc9230b0ea1a87f8796e241615d4d2c1ba08d87601b9d116c7804c')
version('5.6', sha256='74f35043943525396b340b9f65f0d73c3cc4054a8f63d1c685f27ccf59f46c5d')
version('5.5', sha256='cd80a0d225f13779791dc3a72d7f137c06c48e5f2c9600e80a565d2378422207')
version('5.4', sha256='9ad5a540d29438a755f8b8f1f1534e0eba601c604f3d8223fa00b802959ec636')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('expat')
def autoreconf(self, spec, prefix):
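        # brltty generates its build system with its bundled autogen script.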
bash = which('bash')
bash('autogen')
| lgpl-2.1 | Python |
|
a8c7d9a2ed9462506130157ce5eccad9121013a3 | add new package (#24004) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/r-afex/package.py | var/spack/repos/builtin/packages/r-afex/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAfex(RPackage):
"""Analysis of Factorial Experiments
Convenience functions for analyzing factorial experiments using ANOVA or
mixed models. aov_ez(), aov_car(), and aov_4() allow specification of
between, within (i.e., repeated-measures), or mixed (i.e., split-plot)
ANOVAs for data in long format (i.e., one observation per row),
automatically aggregating multiple observations per individual and cell
of the design. mixed() fits mixed models using lme4::lmer() and computes
p-values for all fixed effects using either Kenward-Roger or Satterthwaite
approximation for degrees of freedom (LMM only), parametric bootstrap
(LMMs and GLMMs), or likelihood ratio tests (LMMs and GLMMs).
afex_plot() provides a high-level interface for interaction or one-way
plots using ggplot2, combining raw data and model estimates. afex uses
type 3 sums of squares as default (imitating commercial statistical
software).
"""
homepage = "https://github.com/singmann/afex"
cran = "afex"
version('0.28-1', sha256='cfb0b79bfa01b590afc3354a5b2ad3640d2f4974b036d6c256fa8e684bc69c2e')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-car', type=('build', 'run'))
depends_on('r-reshape2', type=('build', 'run'))
| lgpl-2.1 | Python |
|
2bc7d87c705b038d699309b25eec7ab3df4e9308 | Add example for training text classifier | explosion/spaCy,aikramer2/spaCy,recognai/spaCy,recognai/spaCy,honnibal/spaCy,recognai/spaCy,aikramer2/spaCy,aikramer2/spaCy,recognai/spaCy,spacy-io/spaCy,honnibal/spaCy,honnibal/spaCy,spacy-io/spaCy,aikramer2/spaCy,spacy-io/spaCy,explosion/spaCy,spacy-io/spaCy,recognai/spaCy,spacy-io/spaCy,recognai/spaCy,aikramer2/spaCy,explosion/spaCy,explosion/spaCy,honnibal/spaCy,explosion/spaCy,aikramer2/spaCy,explosion/spaCy,spacy-io/spaCy | examples/training/train_textcat.py | examples/training/train_textcat.py | from __future__ import unicode_literals
import plac
import random
import tqdm
from thinc.neural.optimizers import Adam
from thinc.neural.ops import NumpyOps
import thinc.extra.datasets
import spacy.lang.en
from spacy.gold import GoldParse, minibatch
from spacy.util import compounding
from spacy.pipeline import TextCategorizer
def train_textcat(tokenizer, textcat,
train_texts, train_cats, dev_texts, dev_cats,
n_iter=20):
'''
Train the TextCategorizer without associated pipeline.
'''
textcat.begin_training()
optimizer = Adam(NumpyOps(), 0.001)
train_docs = [tokenizer(text) for text in train_texts]
train_gold = [GoldParse(doc, cats=cats) for doc, cats in
zip(train_docs, train_cats)]
    train_data = list(zip(train_docs, train_gold))  # materialised so every epoch can re-iterate
batch_sizes = compounding(4., 128., 1.001)
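    # compounding() yields batch sizes that grow from 4 toward 128 by a factor
    # of 1.001 per batch, which helps stabilise the earliest updates.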
for i in range(n_iter):
losses = {}
for batch in minibatch(tqdm.tqdm(train_data, leave=False),
size=batch_sizes):
docs, golds = zip(*batch)
textcat.update((docs, None), golds, sgd=optimizer, drop=0.2,
losses=losses)
with textcat.model.use_params(optimizer.averages):
scores = evaluate(tokenizer, textcat, dev_texts, dev_cats)
yield losses['textcat'], scores
def evaluate(tokenizer, textcat, texts, cats):
docs = (tokenizer(text) for text in texts)
tp = 1e-8 # True positives
fp = 1e-8 # False positives
fn = 1e-8 # False negatives
tn = 1e-8 # True negatives
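    # The tiny epsilons above guard the precision/recall divisions against zero.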
for i, doc in enumerate(textcat.pipe(docs)):
gold = cats[i]
for label, score in doc.cats.items():
if score >= 0.5 and label in gold:
tp += 1.
elif score >= 0.5 and label not in gold:
fp += 1.
elif score < 0.5 and label not in gold:
tn += 1
if score < 0.5 and label in gold:
fn += 1
precis = tp / (tp + fp)
recall = tp / (tp + fn)
fscore = 2 * (precis * recall) / (precis + recall)
return {'textcat_p': precis, 'textcat_r': recall, 'textcat_f': fscore}
def load_data():
# Partition off part of the train data --- avoid running experiments
# against test.
train_data, _ = thinc.extra.datasets.imdb()
random.shuffle(train_data)
texts, labels = zip(*train_data)
cats = [(['POSITIVE'] if y else []) for y in labels]
split = int(len(train_data) * 0.8)
train_texts = texts[:split]
train_cats = cats[:split]
dev_texts = texts[split:]
dev_cats = cats[split:]
return (train_texts, train_cats), (dev_texts, dev_cats)
def main():
nlp = spacy.lang.en.English()
tokenizer = nlp.tokenizer
textcat = TextCategorizer(tokenizer.vocab, labels=['POSITIVE'])
print("Load IMDB data")
(train_texts, train_cats), (dev_texts, dev_cats) = load_data()
print("Itn.\tLoss\tP\tR\tF")
    progress = '{i:d}\t{loss:.3f}\t{textcat_p:.3f}\t{textcat_r:.3f}\t{textcat_f:.3f}'
for i, (loss, scores) in enumerate(train_textcat(tokenizer, textcat,
train_texts, train_cats,
dev_texts, dev_cats, n_iter=20)):
print(progress.format(i=i, loss=loss, **scores))
if __name__ == '__main__':
plac.call(main)
| mit | Python |
|
c8cc7f6fe7c0f59697972602773d67b3fde40360 | Add basic filter classes. | fhirschmann/penchy,fhirschmann/penchy | penchy/filters.py | penchy/filters.py | class Filter(object):
"""
Base class for filters.
Inheriting classes must implement:
    - ``run(inputs)`` to run the filter on inputs, which can be Producer or
      Filter instances; after executing, ``self.out`` has to be set to the
      path of the produced output file
"""
def run(self, inputs):
"""
Run the filter on the inputs.
:param inputs: Producer or Filter classes which output will be processed.
"""
raise NotImplementedError("run must be implemented by filters")
class WallclockDacapo(Filter):
#TODO
pass
class HProf(Filter):
#TODO
pass
| mit | Python |
|
de3650503eaf2073817b4d35c116a2c076382441 | Create range-module.py | kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode | Python/range-module.py | Python/range-module.py | # Time: addRange: O(n)
# removeRange: O(n)
# queryRange: O(logn)
# Space: O(n)
# A Range Module is a module that tracks ranges of numbers.
# Your task is to design and implement the following interfaces in an efficient manner.
# - addRange(int left, int right) Adds the half-open interval [left, right),
# tracking every real number in that interval.
# Adding an interval that partially overlaps with currently tracked numbers should
# add any numbers in the interval [left, right) that are not already tracked.
# - queryRange(int left, int right) Returns true if and only if
# every real number in the interval [left, right) is currently being tracked.
# - removeRange(int left, int right) Stops tracking every real number currently being tracked
# in the interval [left, right).
#
# Example 1:
# addRange(10, 20): null
# removeRange(14, 16): null
# queryRange(10, 14): true (Every number in [10, 14) is being tracked)
# queryRange(13, 15): false (Numbers like 14, 14.03, 14.17 in [13, 15) are not being tracked)
# queryRange(16, 17): true (The number 16 in [16, 17) is still being tracked, despite the remove operation)
#
# Note:
# - A half open interval [left, right) denotes all real numbers left <= x < right.
# - 0 < left < right < 10^9 in all calls to addRange, queryRange, removeRange.
# - The total number of calls to addRange in a single test case is at most 1000.
# - The total number of calls to queryRange in a single test case is at most 5000.
# - The total number of calls to removeRange in a single test case is at most 1000.
class RangeModule(object):
def __init__(self):
self.__intervals = []
def addRange(self, left, right):
"""
:type left: int
:type right: int
:rtype: void
"""
tmp = []
i = 0
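        # Rebuild the list: keep intervals that end before the new range, fold every
        # overlapping interval into [left, right), then append whatever follows.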
for interval in self.__intervals:
if right < interval[0]:
tmp.append((left, right))
break
elif interval[1] < left:
                tmp.append(interval)
else:
left = min(left, interval[0])
right = max(right, interval[1])
i += 1
if i == len(self.__intervals):
tmp.append((left, right))
while i < len(self.__intervals):
tmp.append(self.__intervals[i])
i += 1
self.__intervals = tmp
def queryRange(self, left, right):
"""
:type left: int
:type right: int
:rtype: bool
"""
i = bisect.bisect_left(self.__intervals, (left, float("inf")))
if i: i -= 1
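        # After the bisect, i indexes the interval with the greatest start <= left, if any.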
return bool(self.__intervals) and \
self.__intervals[i][0] <= left and \
right <= self.__intervals[i][1]
def removeRange(self, left, right):
"""
:type left: int
:type right: int
:rtype: void
"""
tmp = []
for interval in self.__intervals:
if interval[1] <= left or interval[0] >= right:
tmp.append(interval)
else:
if interval[0] < left:
tmp.append((interval[0], left))
if right < interval[1]:
tmp.append((right, interval[1]))
self.__intervals = tmp
# Your RangeModule object will be instantiated and called as such:
# obj = RangeModule()
# obj.addRange(left,right)
# param_2 = obj.queryRange(left,right)
# obj.removeRange(left,right)
| mit | Python |
|
8b95f442f3e78a5f3de539075379b88fc940e818 | add custom settings for momza | praekelt/casepro,praekelt/casepro,praekelt/casepro | casepro/settings_production_momza.py | casepro/settings_production_momza.py | from __future__ import unicode_literals
import os
# import our default settings
from settings_production import * # noqa
# Pods
PODS = [{
'label': "family_connect_registration_pod",
'title': "Registration Information",
'url': os.environ.get('REGISTRATION_URL', ''),
'token': os.environ.get('REGISTRATION_AUTH_TOKEN',
'replace-with-auth-token'),
'contact_id_fieldname': os.environ.get('REGISTRATION_CONTACT_ID_FIELDNAME',
'registrant_id'),
'field_mapping': [
{"field": "reg_type", "field_name": "Registration Type"},
{"field": "language", "field_name": "Language Preference"},
{"field": "id_type", "field_name": "ID Type"},
{"field": "sa_id_no", "field_name": "ID Number"},
{"field": "mom_dob", "field_name": "Mother's Date of Birth"},
{"field": "consent", "field_name": "Consent"},
{"field": "operator_id", "field_name": "Operator ID"},
{"field": "registrant_id", "field_name": "Registrant ID"},
{"field": "msisdn_registrant", "field_name": "MSISDN of Registrant"},
{"field": "msisdn_device", "field_name": "MSISDN of Device"},
]
}, {
'label': "family_connect_subscription_pod",
'title': "Subscription Information",
'url': os.environ.get('SUBSCRIPTION_URL', ''),
'token': os.environ.get('SUBSCRIPTION_AUTH_TOKEN',
'replace-with-auth-token'),
}]
| bsd-3-clause | Python |
|
797249c42c8c1c0d6eda05dbf9e9d16d2706b373 | Add LeNet example with custom scoring and train_samples_per_iteration. | mathemage/h2o-3,h2oai/h2o-3,spennihana/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,michalkurka/h2o-3,spennihana/h2o-3,jangorecki/h2o-3,michalkurka/h2o-3,mathemage/h2o-3,h2oai/h2o-dev,mathemage/h2o-3,h2oai/h2o-3,mathemage/h2o-3,h2oai/h2o-dev,h2oai/h2o-dev,michalkurka/h2o-3,jangorecki/h2o-3,michalkurka/h2o-3,spennihana/h2o-3,spennihana/h2o-3,h2oai/h2o-dev,jangorecki/h2o-3,michalkurka/h2o-3,mathemage/h2o-3,spennihana/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,mathemage/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,mathemage/h2o-3,h2oai/h2o-3,h2oai/h2o-dev,jangorecki/h2o-3,spennihana/h2o-3,h2oai/h2o-3,jangorecki/h2o-3,h2oai/h2o-3,h2oai/h2o-3 | h2o-py/tests/testdir_algos/deepwater/pyunit_lenet_deepwater.py | h2o-py/tests/testdir_algos/deepwater/pyunit_lenet_deepwater.py | from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_lenet():
print("Test checks if Deep Water works fine with a multiclass image dataset")
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
model = H2ODeepWaterEstimator(epochs=100, rate=1e-3, network='lenet', score_interval=0, train_samples_per_iteration=1000)
model.train(x=[0],y=1, training_frame=frame)
model.show()
error = model.model_performance(train=True).mean_per_class_error()
assert error < 0.1, "mean classification error is too high : " + str(error)
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_lenet)
else:
deepwater_lenet()
| apache-2.0 | Python |
|
26d095d44a02862a4d567537e824170e75930a9a | add email people script | ThePianoDentist/fantasy-dota-heroes,ThePianoDentist/fantasy-dota-heroes,ThePianoDentist/fantasy-dota-heroes | fantasydota/scripts/email_users.py | fantasydota/scripts/email_users.py | from fantasydota import DBSession
from fantasydota.models import User
from pyramid_mailer import Mailer
from pyramid_mailer.message import Message
def email_users():
session = DBSession()
for user in session.query(User).filter(User.email.isnot("")).all():
if user.email:
email = "testemail"#user.email
mailer = Mailer()
message = Message(subject="Fantasy Hero Dota New System",
sender="Fantasy Dota EU",
recipients=[email],
body="Hi %s.\n\nJust letting you know fantasy leagues now run every week, on all pro circuit matches\n\n"
"You can pick your team for first week starting 1st January now https://www.fantasyesport.eu/dota/team\n\n"
"This is the 'finalised' state of the site for DotA. Therefore I will not email anyone again. Apologies for the spam/promotion. Have a nice Christmas :D" % (
user.username))
mailer.send(message)
return
if __name__ == '__main__':
email_users()
| apache-2.0 | Python |
|
8cdbbbaf33cd09bc742761ce8cd5b79b185710cd | Introduce a timer based update of activities | wodo/WebTool3,wodo/WebTool3,wodo/WebTool3,wodo/WebTool3 | webtool/server/management/commands/timer_update.py | webtool/server/management/commands/timer_update.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import io
import datetime
from django.core.management.base import BaseCommand
from server.models import Instruction, Tour, Talk, Session, Season
from server.views.bulletin import Activities
from server.views.bulletin.translator import Translator
class Command(BaseCommand):
leave_locale_alone = True
help = 'Update activities regarding the current date'
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def handle(self, *args, **options):
season = Season.objects.get(current=True)
canceled = season.state_list.get(name='Ausgefallen')
completed = season.state_list.get(name='Durchgeführt')
not_touch = (canceled.id, completed.id)
today = datetime.date.today()
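        # An activity counts as finished once its last day (end_date when set,
        # otherwise start_date) lies in the past.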
for instruction in Instruction.objects.filter(topic__seasons=season).exclude(state_id__in=not_touch):
event = instruction.instruction
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
instruction.state = completed
instruction.save()
instruction.instruction.save()
for event in instruction.meeting_list.all():
event.save()
for tour in Tour.objects.filter(season=season).exclude(state_id__in=not_touch):
event = tour.tour
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
tour.state = completed
tour.save()
tour.deadline.save()
if tour.preliminary:
tour.preliminary.save()
tour.tour.save()
for talk in Talk.objects.filter(season=season).exclude(state_id__in=not_touch):
event = talk.talk
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
talk.state = completed
talk.save()
talk.talk.save()
for session in Session.objects.filter(collective__seasons=season).exclude(state_id__in=not_touch):
event = session.session
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
session.state = completed
session.save()
session.session.save()
| bsd-2-clause | Python |
|
eceee762dd3773aacceb52119014dad88e363c8d | Create find_subnets_with_broker.py | infobloxopen/netmri-toolkit,infobloxopen/netmri-toolkit,infobloxopen/netmri-toolkit | python/NetMRI_GUI_Python/find_subnets_with_broker.py | python/NetMRI_GUI_Python/find_subnets_with_broker.py | # BEGIN-SCRIPT-BLOCK
#
# Script-Filter:
# true
#
# END-SCRIPT-BLOCK
from infoblox_netmri.easy import NetMRIEasy
import re
# This values will be provided by NetMRI before execution
defaults = {
"api_url": api_url,
"http_username": http_username,
"http_password": http_password,
"job_id": job_id,
"device_id": device_id,
"batch_id": batch_id,
"script_login" : "false"
}
# Create NetMRI context manager. It will close session after execution
with NetMRIEasy(**defaults) as easy:
subnet_broker = easy.client.get_broker('Subnet')
all_subnets = subnet_broker.index
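    # The broker's index call lists all subnets; 'select' trims the response
    # down to just the SubnetCIDR field printed below.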
print(all_subnets)
params = {
'select': 'SubnetCIDR'
}
results = all_subnets(**params)
for entry in results:
print(entry.SubnetCIDR)
| mit | Python |
|
c96b885d4446db96402d9770d71012dbcafcb8cf | install go-vcf-tools by manage.py command | perGENIE/pergenie-web,perGENIE/pergenie,perGENIE/pergenie-web,perGENIE/pergenie-web,perGENIE/pergenie,perGENIE/pergenie,perGENIE/pergenie-web,perGENIE/pergenie,perGENIE/pergenie-web,perGENIE/pergenie-web,perGENIE/pergenie | pergenie/apps/genome/management/commands/setup_go_vcf_tools.py | pergenie/apps/genome/management/commands/setup_go_vcf_tools.py | import os
import glob
import shutil
import tarfile
import platform
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from lib.utils.io import get_url_content
from lib.utils import clogging
log = clogging.getColorLogger(__name__)
class Command(BaseCommand):
help = "Setup go-vcf-tools"
def handle(self, *args, **options):
tmp_dir = os.path.join(settings.BASE_DIR, 'tmp')
bin_dir = os.path.join(settings.BASE_DIR, 'bin')
# TODO: check already exists?
log.info('Fetching go-vcf-tools ...')
url = '{repo}/releases/download/{tag}/{os_platform}.amd64.tar.gz'.format(repo='https://github.com/knmkr/go-vcf-tools',
tag='0.0.1', # TODO: get tag from command argument
os_platform=platform.system().lower())
tar_gz = os.path.join(tmp_dir, 'go-vcf-tools.tar.gz')
get_url_content(url, tar_gz)
with tarfile.open(tar_gz, 'r') as tar:
dst = os.path.join(tmp_dir, 'go-vcf-tools')
tar.extractall(dst)
for tool in glob.glob(os.path.join(tmp_dir, 'go-vcf-tools', '*', 'vcf-*')):
shutil.copy(tool, bin_dir)
os.remove(tar_gz)
shutil.rmtree(dst)
log.info('Done.')
| agpl-3.0 | Python |
|
76fbec63667f3844f2763d72e57e61c07209cdad | Create Meh.py | kallerdaller/Cogs-Yorkfield | Meh/Meh.py | Meh/Meh.py | import discord
from discord.ext import commands
class Mycog:
"""My custom cog that does stuff!"""
def __init__(self, bot):
self.bot = bot
@commands.command()
async def mycom(self):
"""This does stuff!"""
#Your code will go here
await self.bot.say("I can do stuff!")
def setup(bot):
bot.add_cog(Mycog(bot))
| mit | Python |
|
6693172856655329d99f038d54b1d8819fc1a9b6 | Add native code emitters example. | kwagyeman/openmv,openmv/openmv,openmv/openmv,iabdalkader/openmv,openmv/openmv,openmv/openmv,iabdalkader/openmv,iabdalkader/openmv,kwagyeman/openmv,kwagyeman/openmv,iabdalkader/openmv,kwagyeman/openmv | scripts/examples/02-Board-Control/native_emitters.py | scripts/examples/02-Board-Control/native_emitters.py | import time
@micropython.asm_thumb
def asm():
movw(r0, 42)
@micropython.viper
def viper(a, b):
return a + b
@micropython.native
def native(a, b):
return a + b
print(asm())
print(viper(1, 2))
print(native(1, 2))
| mit | Python |
|
5cb43fbf0efadff7af68836243eb7e1711e7df1c | Add test_object script | pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc | jsk_2015_05_baxter_apc/euslisp/test_object_recognition.py | jsk_2015_05_baxter_apc/euslisp/test_object_recognition.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import rospy
from jsk_2014_picking_challenge.msg import ObjectRecognition
left_result = None
right_result = None
def _cb_left(msg):
global left_result
left_result = dict(zip(msg.candidates, msg.probabilities))
def _cb_right(msg):
global right_result
right_result = dict(zip(msg.candidates, msg.probabilities))
rospy.init_node('test_object_recognition')
rospy.Subscriber('/left_process/bof_object_matcher/output', ObjectRecognition, _cb_left)
rospy.Subscriber('/right_process/bof_object_matcher/output', ObjectRecognition, _cb_right)
rospy.spin()  # keep the node alive so the subscriber callbacks can run
| bsd-3-clause | Python |
|
f56e86ff774a55e7882957a8928bdca98ce4c3e8 | Add missing migration | stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten | features/groups/migrations/0017_auto_20171127_1447.py | features/groups/migrations/0017_auto_20171127_1447.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-27 13:47
from __future__ import unicode_literals
from django.db import migrations, models
import features.stadt.forms
class Migration(migrations.Migration):
dependencies = [
('groups', '0016_auto_20171120_1311'),
]
operations = [
migrations.AlterField(
model_name='group',
name='slug',
field=models.SlugField(blank=True, help_text='Wird auch als Kurzname verwendet', null=True, unique=True, validators=[features.stadt.forms.validate_entity_slug], verbose_name='Adresse der Gruppenseite'),
),
]
| agpl-3.0 | Python |
|
afedd723ad85b99c2ebd08246b5ed13b37cd62e9 | make psnr.py | piraaa/VideoDigitalWatermarking | src/psnr.py | src/psnr.py | #
# psnr.py
# Created by pira on 2017/08/05.
#
#coding: utf-8
u"""For PSNR(Peak Signal to Noise Ratio).""" | mit | Python |
|
6472768e2373b7972db5c3033063fd0f0adb2059 | add example of load with input_type='apache-arrow' | hhatto/poyonga | examples/groonga_microblog_tutorial/2_load_with_pyarrow.py | examples/groonga_microblog_tutorial/2_load_with_pyarrow.py | from poyonga.client import Groonga
import pyarrow as pa
def _call(g, cmd, **kwargs):
ret = g.call(cmd, **kwargs)
print(ret.status)
print(ret.body)
if cmd == "select":
for item in ret.items:
print(item)
print("=*=" * 30)
def load_and_select(table, data, batch):
# use Apache Arrow IPC Streaming Format
sink = pa.BufferOutputStream()
writer = pa.ipc.new_stream(sink, batch.schema)
writer.write_batch(batch)
writer.close()
buf = sink.getvalue()
values = buf.to_pybytes()
_call(g, "load", table=table, values=values, input_type="apache-arrow")
_call(g, "select", table=table)
g = Groonga()
users = [
pa.array(["daijiro", "tasukuchan", "OffGao"]),
pa.array(["hsiomaneki", "グニャラくん", "OffGao"]),
pa.array([["tasukuchan"], ["daijiro", "OffGao"], ["tasukuchan", "daijiro"]]),
pa.array([[], ["daijiro:1", "OffGao:1"], ["tasukuchan:1", "daijiro:1"]]),
pa.array(["127678039x502643091", "128423343x502929252", "128544408x502801502"]),
pa.array(["神奈川県", "東京都渋谷区", "東京都中野区"]),
pa.array(["groonga developer", "エロいおっさん", "がおがお"]),
]
users_batch = pa.record_batch(users, names=["_key", "name", "follower", "favorites", "location", "location_str", "description"])
load_and_select("Users", users, users_batch)
comments = [
pa.array(["daijiro:1", "tasukuchan:1", "daijiro:2", "tasukuchan:2", "tasukuchan:3", "tasukuchan:4", "OffGao:1", "OffGao:2"]),
pa.array([
"マイクロブログ作ってみました(甘栗むいちゃいました的な感じで)。",
"初の書き込み。テストテスト。",
"@tasukuchan ようこそ!!!",
"@daijiro ありがとう!",
"groongaなう #groonga",
"groonga開発合宿のため羽田空港に来ました! #groonga #travel",
"@daijiro @tasukuchan 登録してみましたよー!",
"中野ブロードウェイなうなう",
]),
pa.array([
"2010/03/17 12:05:00", "2010/03/17 12:00:00", "2010/03/17 12:05:00", "2010/03/17 13:00:00",
"2010/03/17 14:00:00", "2010/03/17 14:05:00", "2010/03/17 15:00:00", "2010/03/17 15:05:00",
]),
pa.array([None, None, "tasukuchan:1", "daijiro:2", None, None, None, None]),
pa.array([None, None, ["tasukuchan"], ["daijiro"], None, None, ["daijiro", "tasukuchan"], None]),
pa.array([None, None, None, None, ["groonga"], ["groonga", "travel"], None, None]),
pa.array([None, None, None, None, "127972422x503117107", "127975798x502919856", "128551935x502796433", "128551935x502796434"]),
pa.array(["daijiro", "tasukuchan", "daijiro", "tasukuchan", "tasukuchan", "tasukuchan", "OffGao", "OffGao"]),
]
comments_batch = pa.record_batch(comments, names=[
"_key", "comment", "last_modified", "replied_to", "replied_users", "hash_tags", "location", "posted_by"
])
load_and_select("Comments", comments, comments_batch)
| mit | Python |
|
5bb154f41f25d8c9bbd9067b29a03a5fc2dff371 | Add functional tests for floating IP. | mtougeron/python-openstacksdk,stackforge/python-openstacksdk,dtroyer/python-openstacksdk,openstack/python-openstacksdk,briancurtin/python-openstacksdk,mtougeron/python-openstacksdk,openstack/python-openstacksdk,briancurtin/python-openstacksdk,dudymas/python-openstacksdk,stackforge/python-openstacksdk,dudymas/python-openstacksdk,dtroyer/python-openstacksdk | openstack/tests/functional/network/v2/test_floating_ip.py | openstack/tests/functional/network/v2/test_floating_ip.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.network.v2 import floating_ip
from openstack.network.v2 import network
from openstack.network.v2 import subnet
from openstack.tests.functional import base
class TestFloatingIP(base.BaseFunctionalTest):
NET_NAME = uuid.uuid4().hex
SUB_NAME = uuid.uuid4().hex
IPV4 = 4
CIDR = "10.100.0.0/24"
NET_ID = None
SUB_ID = None
FIP_ID = None
@classmethod
def setUpClass(cls):
super(TestFloatingIP, cls).setUpClass()
args = {'router:external': True}
net = cls.conn.network.create_network(name=cls.NET_NAME, **args)
assert isinstance(net, network.Network)
assert cls.NET_NAME == net.name  # assert* helpers need an instance; use plain assert in a classmethod
cls.NET_ID = net.id
sub = cls.conn.network.create_subnet(name=cls.SUB_NAME,
ip_version=cls.IPV4,
network_id=cls.NET_ID,
cidr=cls.CIDR)
assert isinstance(sub, subnet.Subnet)
assert cls.SUB_NAME == sub.name
cls.SUB_ID = sub.id
fip = cls.conn.network.create_ip(floating_network_id=cls.NET_ID)
assert isinstance(fip, floating_ip.FloatingIP)
cls.FIP_ID = fip.id
@classmethod
def tearDownClass(cls):
sot = cls.conn.network.delete_ip(cls.FIP_ID, ignore_missing=False)
assert sot is None
sot = cls.conn.network.delete_subnet(cls.SUB_ID, ignore_missing=False)
assert sot is None
sot = cls.conn.network.delete_network(cls.NET_ID, ignore_missing=False)
assert sot is None
def test_find(self):
sot = self.conn.network.find_ip(self.FIP_ID)
self.assertEqual(self.FIP_ID, sot.id)
def test_get(self):
sot = self.conn.network.get_ip(self.FIP_ID)
self.assertEqual(self.NET_ID, sot.floating_network_id)
self.assertEqual('10.100.0.2', sot.floating_ip_address)
self.assertIn('floating_ip_address', sot)
self.assertIn('fixed_ip_address', sot)
self.assertIn('port_id', sot)
self.assertIn('router_id', sot)
def test_list(self):
ids = [o.id for o in self.conn.network.ips()]
self.assertIn(self.FIP_ID, ids)
| apache-2.0 | Python |
|
a5720071a950185e5afb1992dd4b66b47aefc242 | Bump version 0.2.5 | kenjhim/django-accounting,kenjhim/django-accounting,kenjhim/django-accounting,dulaccc/django-accounting,dulaccc/django-accounting,kenjhim/django-accounting,dulaccc/django-accounting,dulaccc/django-accounting | accounting/__init__.py | accounting/__init__.py | import os
# Use 'final' as the 4th element to indicate
# a full release
VERSION = (0, 2, 5)
def get_short_version():
return '%s.%s' % (VERSION[0], VERSION[1])
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
# Append 3rd digit if > 0
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
return version
# Cheeky setting that allows each template to be accessible by two paths.
# Eg: the template 'accounting/templates/accounting/base.html' can be accessed
# via both 'base.html' and 'accounting/base.html'. This allows Accounting's
# templates to be extended by templates with the same filename
ACCOUNTING_MAIN_TEMPLATE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'templates/accounting')
ACCOUNTING_APPS = (
'accounting',
'accounting.libs',
'accounting.apps.connect',
'accounting.apps.people',
'accounting.apps.books',
'accounting.apps.reports',
# Third party apps that accounting depends on
'bootstrap3',
'django_select2',
'datetimewidget',
)
ACCOUNTING_TEMPLATE_CONTEXT_PROCESSORS = (
'accounting.apps.context_processors.metadata',
'accounting.apps.books.context_processors.organizations',
)
ACCOUNTING_MIDDLEWARE_CLASSES = (
'accounting.apps.books.middlewares.AutoSelectOrganizationMiddleware',
)
def get_apps():
return ACCOUNTING_APPS
| import os
# Use 'final' as the 4th element to indicate
# a full release
VERSION = (0, 2, 4)
def get_short_version():
return '%s.%s' % (VERSION[0], VERSION[1])
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
# Append 3rd digit if > 0
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
return version
# Cheeky setting that allows each template to be accessible by two paths.
# Eg: the template 'accounting/templates/accounting/base.html' can be accessed
# via both 'base.html' and 'accounting/base.html'. This allows Accounting's
# templates to be extended by templates with the same filename
ACCOUNTING_MAIN_TEMPLATE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'templates/accounting')
ACCOUNTING_APPS = (
'accounting',
'accounting.libs',
'accounting.apps.connect',
'accounting.apps.people',
'accounting.apps.books',
'accounting.apps.reports',
# Third party apps that accounting depends on
'bootstrap3',
'django_select2',
'datetimewidget',
)
ACCOUNTING_TEMPLATE_CONTEXT_PROCESSORS = (
'accounting.apps.context_processors.metadata',
'accounting.apps.books.context_processors.organizations',
)
ACCOUNTING_MIDDLEWARE_CLASSES = (
'accounting.apps.books.middlewares.AutoSelectOrganizationMiddleware',
)
def get_apps():
return ACCOUNTING_APPS
| mit | Python |
fa6060a21767a0b5b2b3a10e4301e0c1a30134cb | Test the lit0,cmp before bra eliminator | gbenson/i8c | i8c/tests/test_opt_lit0_cmp_before_bra.py | i8c/tests/test_opt_lit0_cmp_before_bra.py | from i8c.tests import TestCase
SOURCE1 = """\
define test::optimize_cmp_bra_const_const returns ptr
argument ptr x
dup
load NULL
beq return_the_null
deref ptr
return
return_the_null:
"""
SOURCE2 = """\
define test::optimize_cmp_bra_const_const returns ptr
argument ptr x
dup
load NULL
bne dereference
return
dereference:
deref ptr
"""
class TestOptimizeLit0CmpBeforeBra(TestCase):
def test_optimize_lit0_cmp_before_bra(self):
"""Check that lit0,cmp before bra is eliminated."""
for source in SOURCE1, SOURCE2:
tree, output = self.compile(source)
self.assertEqual(["dup", "bra", "skip", "deref"], output.opnames)
| lgpl-2.1 | Python |
|
7be2721bfcbf3376ddce4d58f2cfe9680803f9bb | Create center_dmenu script. | RyanMcG/center_dmenu | center_dmenu.py | center_dmenu.py | #!/usr/bin/env python2
from Xlib import display
import sys
from os import system
def get_dimensions():
current_display = display.Display()
current_screen = current_display.screen()
return (current_screen['width_in_pixels'],
current_screen['height_in_pixels'])
def parse_dmenu_args(args):
x_width, x_height = get_dimensions()
num_args = len(args)
# Set some default values for dmenu args
dmenu_run_args = {
'x': 200,
'height': 50,
'extra_args': "-fn 'Inconsolata:size=10'"
}
# Get arguments from the command line.
if num_args > 1:
dmenu_run_args['x'] = int(args[1])
if num_args > 2:
dmenu_run_args['height'] = int(args[2])
if num_args > 3:
dmenu_run_args['extra_args'] = args[3]
# Determine propper height and width for input into dmenu
dmenu_run_args['width'] = x_width - (2 * dmenu_run_args['x'])
dmenu_run_args['y'] = (x_height - dmenu_run_args['height']) / 2
return dmenu_run_args
def main(args):
dmenu_run_args = parse_dmenu_args(args)
return system(("dmenu_run {extra_args} -w {width} -x {x} -y {y}"
" -h {height}").format(**dmenu_run_args))
if __name__ == '__main__':
sys.exit(main(sys.argv))
| apache-2.0 | Python |
|
a737c7cd26450ac5dfdab23aea6902f53976c538 | fix version in xformhistory for multiple xml versions | awemulya/fieldsight-kobocat,awemulya/fieldsight-kobocat,awemulya/fieldsight-kobocat,awemulya/fieldsight-kobocat | onadata/apps/fsforms/management/commands/fix_xml_version_in_xformhistory.py | onadata/apps/fsforms/management/commands/fix_xml_version_in_xformhistory.py | import os
from onadata.settings.local_settings import XML_VERSION_MAX_ITER
from onadata.apps.fsforms.models import XformHistory
from django.core.management.base import BaseCommand
import re
import datetime
def check_version(instance, n):
for i in range(n, 0, -1):
p = re.compile("""<bind calculate="\'(.*)\'" nodeset="/(.*)/_version__00{0}" """.format(i))
m = p.search(instance)
if m:
return m.group(1)
class Command(BaseCommand):
help = 'Fix FInstance version for multiple versions in xml'
# def add_arguments(self, parser):
# parser.add_argument('--file', type=str)
def handle(self, *args, **options):
batchsize = options.get("batchsize", 100)
stop = False
offset = 0
while stop is not True:
limit = offset + batchsize
xformhists = XformHistory.objects.all()[offset:limit]
inst = list(xformhists)
if xformhists:
self.stdout.write("Updating instances from #{} to #{}\n".format(
inst[0].id,
inst[-1].id))
for xformhist in xformhists:
version = ''
n = XML_VERSION_MAX_ITER
xml = xformhist.xml
version = check_version(xml, n)
if version:
xformhist.version = version
if not version:
p = re.compile("""<bind calculate="\'(.*)\'" nodeset="/(.*)/_version_" """)
m = p.search(xml)
if m:
xformhist.version = m.group(1)
else:
p1 = re.compile("""<bind calculate="\'(.*)\'" nodeset="/(.*)/__version__" """)
m1 = p1.search(xml)
if m1:
xformhist.version = m1.group(1)
xformhist.save()  # persist each corrected record; without save() the fix is lost
else:
stop = True
offset += batchsize | bsd-2-clause | Python |
|
3837329e0d49796cfe9eabd2aeb026c206c5c4d8 | add admin ui for user upload record | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/user_importer/admin.py | corehq/apps/user_importer/admin.py | import zipfile
from io import BytesIO
from django.contrib import admin
from django.http.response import HttpResponse
from .models import UserUploadRecord
class UserUploadAdmin(admin.ModelAdmin):
list_display = ('domain', 'date_created')
list_filter = ('domain',)
ordering = ('-date_created',)
search_fields = ('domain',)
actions = ['download_file']
def download_file(self, request, queryset):
export_file = BytesIO()
with zipfile.ZipFile(export_file, 'w') as zip_file:
for upload_record in queryset:
upload_file = upload_record.get_file()
zip_file.writestr(f'{upload_record.task_id}.csv', upload_file.read())
export_file.seek(0)
return HttpResponse(export_file, content_type='application/zip')
admin.site.register(UserUploadRecord, UserUploadAdmin)
| bsd-3-clause | Python |
|
ba5a358ffefb5646a3911fafe2c394c9c52905f7 | add import script for Horsham | DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_horsham.py | polling_stations/apps/data_collection/management/commands/import_horsham.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E07000227'
addresses_name = 'Democracy_Club__04May2017.tsv'
stations_name = 'Democracy_Club__04May2017.tsv'
elections = ['local.west-sussex.2017-05-04']
csv_delimiter = '\t'
| bsd-3-clause | Python |
|
3d4e3be7624f099f9b15c24a9161f474a733ebff | add a script for manual fixing of user profiles. Now I will fix the bug in the code… | 1flow/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow | scripts/20130625_fix_userprofile_data_being_unicode_insteadof_dict.py | scripts/20130625_fix_userprofile_data_being_unicode_insteadof_dict.py |
from oneflow.profiles.models import UserProfile
import ast
for p in UserProfile.objects.all():
if type(p.data) == type(u''):
p.data = {}
if type(p.register_request_data) == type(u''):
p.register_request_data = ast.literal_eval(p.register_request_data)
p.save()
| agpl-3.0 | Python |
|
921b0adf8b93ccad54eb0a82e42ff4b742e176db | Add label_wav_dir.py (#14847) | AnishShah/tensorflow,annarev/tensorflow,karllessard/tensorflow,kevin-coder/tensorflow-fork,asimshankar/tensorflow,dancingdan/tensorflow,yanchen036/tensorflow,xodus7/tensorflow,ppwwyyxx/tensorflow,snnn/tensorflow,arborh/tensorflow,kobejean/tensorflow,asimshankar/tensorflow,meteorcloudy/tensorflow,gojira/tensorflow,jendap/tensorflow,jbedorf/tensorflow,eaplatanios/tensorflow,sarvex/tensorflow,annarev/tensorflow,adit-chandra/tensorflow,arborh/tensorflow,AnishShah/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,jalexvig/tensorflow,girving/tensorflow,jbedorf/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,seanli9jan/tensorflow,Bismarrck/tensorflow,theflofly/tensorflow,girving/tensorflow,DavidNorman/tensorflow,drpngx/tensorflow,freedomtan/tensorflow,ppwwyyxx/tensorflow,snnn/tensorflow,eaplatanios/tensorflow,gunan/tensorflow,ZhangXinNan/tensorflow,sarvex/tensorflow,dongjoon-hyun/tensorflow,chemelnucfin/tensorflow,zasdfgbnm/tensorflow,jart/tensorflow,allenlavoie/tensorflow,paolodedios/tensorflow,hfp/tensorflow-xsmm,lukeiwanski/tensorflow,Intel-Corporation/tensorflow,meteorcloudy/tensorflow,jendap/tensorflow,DavidNorman/tensorflow,jendap/tensorflow,snnn/tensorflow,hehongliang/tensorflow,jbedorf/tensorflow,manipopopo/tensorflow,benoitsteiner/tensorflow-xsmm,aam-at/tensorflow,aselle/tensorflow,Xeralux/tensorflow,ghchinoy/tensorflow,sarvex/tensorflow,dendisuhubdy/tensorflow,ppwwyyxx/tensorflow,caisq/tensorflow,Intel-tensorflow/tensorflow,jhseu/tensorflow,Xeralux/tensorflow,brchiu/tensorflow,yongtang/tensorflow,arborh/tensorflow,kevin-coder/tensorflow-fork,ageron/tensorflow,sarvex/tensorflow,manipopopo/tensorflow,allenlavoie/tensorflow,cxxgtxy/tensorflow,Bismarrck/tensorflow,allenlavoie/tensorflow,ageron/tensorflow,jhseu/tensorflow,brchiu/tensorflow,girving/tensorflow,aldian/tensorflow,davidzchen/tensorflow,nburn42/tensorflow,dongjoon-hyun/tensorflow,ghchinoy/tensorflow,Xeralux/tensorflow,kevin-coder/tensorflow-fork,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,zasdfgbnm/tensorflow,cxxgtxy/tensorflow,hehongliang/tensorflow,petewarden/tensorflow,xzturn/tensorflow,jart/tensorflow,gunan/tensorflow,jbedorf/tensorflow,xodus7/tensorflow,apark263/tensorflow,alshedivat/tensorflow,Intel-tensorflow/tensorflow,benoitsteiner/tensorflow-xsmm,karllessard/tensorflow,DavidNorman/tensorflow,sarvex/tensorflow,jalexvig/tensorflow,aam-at/tensorflow,freedomtan/tensorflow,AnishShah/tensorflow,yongtang/tensorflow,jart/tensorflow,AnishShah/tensorflow,drpngx/tensorflow,tensorflow/tensorflow,DavidNorman/tensorflow,drpngx/tensorflow,brchiu/tensorflow,apark263/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,nburn42/tensorflow,jalexvig/tensorflow,chemelnucfin/tensorflow,Xeralux/tensorflow,alshedivat/tensorflow,Intel-tensorflow/tensorflow,annarev/tensorflow,benoitsteiner/tensorflow-xsmm,ppwwyyxx/tensorflow,ghchinoy/tensorflow,gunan/tensorflow,jendap/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,DavidNorman/tensorflow,jart/tensorflow,Xeralux/tensorflow,Intel-tensorflow/tensorflow,davidzchen/tensorflow,kobejean/tensorflow,snnn/tensorflow,DavidNorman/tensorflow,meteorcloudy/tensorflow,tensorflow/tensorflow,alsrgv/tensorflow,Intel-Corporation/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,caisq/tensorflow,dancingdan/tensorflow,adit-chandra/tensorflow,hfp/tensorflow-xsmm,tensorflow/tensorflow,jbedorf/tensorflow
,gunan/tensorflow,yanchen036/tensorflow,drpngx/tensorflow,yanchen036/tensorflow,ageron/tensorflow,apark263/tensorflow,benoitsteiner/tensorflow-xsmm,jbedorf/tensorflow,jalexvig/tensorflow,gunan/tensorflow,chemelnucfin/tensorflow,dongjoon-hyun/tensorflow,jalexvig/tensorflow,ppwwyyxx/tensorflow,DavidNorman/tensorflow,xodus7/tensorflow,ZhangXinNan/tensorflow,nburn42/tensorflow,jhseu/tensorflow,girving/tensorflow,davidzchen/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,theflofly/tensorflow,xodus7/tensorflow,frreiss/tensorflow-fred,ghchinoy/tensorflow,dendisuhubdy/tensorflow,jhseu/tensorflow,petewarden/tensorflow,manipopopo/tensorflow,Intel-tensorflow/tensorflow,Bismarrck/tensorflow,Intel-tensorflow/tensorflow,ageron/tensorflow,arborh/tensorflow,jendap/tensorflow,AnishShah/tensorflow,xodus7/tensorflow,annarev/tensorflow,hfp/tensorflow-xsmm,frreiss/tensorflow-fred,jbedorf/tensorflow,ghchinoy/tensorflow,asimshankar/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,annarev/tensorflow,eaplatanios/tensorflow,yongtang/tensorflow,jart/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,eaplatanios/tensorflow,dongjoon-hyun/tensorflow,renyi533/tensorflow,seanli9jan/tensorflow,xzturn/tensorflow,davidzchen/tensorflow,karllessard/tensorflow,girving/tensorflow,aselle/tensorflow,frreiss/tensorflow-fred,AnishShah/tensorflow,paolodedios/tensorflow,petewarden/tensorflow,apark263/tensorflow,kevin-coder/tensorflow-fork,alsrgv/tensorflow,jendap/tensorflow,xzturn/tensorflow,dancingdan/tensorflow,chemelnucfin/tensorflow,alsrgv/tensorflow,drpngx/tensorflow,hehongliang/tensorflow,aam-at/tensorflow,jhseu/tensorflow,xzturn/tensorflow,zasdfgbnm/tensorflow,adit-chandra/tensorflow,Intel-tensorflow/tensorflow,meteorcloudy/tensorflow,Bismarrck/tensorflow,kobejean/tensorflow,cxxgtxy/tensorflow,Bismarrck/tensorflow,apark263/tensorflow,renyi533/tensorflow,dancingdan/tensorflow,jart/tensorflow,gunan/tensorflow,alshedivat/tensorflow,aldian/tensorflow,zasdfgbnm/tensorflow,arborh/tensorflow,arborh/tensorflow,Bismarrck/tensorflow,alshedivat/tensorflow,dendisuhubdy/tensorflow,brchiu/tensorflow,jbedorf/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,benoitsteiner/tensorflow-xsmm,ppwwyyxx/tensorflow,theflofly/tensorflow,yanchen036/tensorflow,girving/tensorflow,davidzchen/tensorflow,ZhangXinNan/tensorflow,alsrgv/tensorflow,yanchen036/tensorflow,xodus7/tensorflow,petewarden/tensorflow,chemelnucfin/tensorflow,brchiu/tensorflow,arborh/tensorflow,xzturn/tensorflow,apark263/tensorflow,ageron/tensorflow,kevin-coder/tensorflow-fork,alsrgv/tensorflow,asimshankar/tensorflow,kevin-coder/tensorflow-fork,ZhangXinNan/tensorflow,renyi533/tensorflow,zasdfgbnm/tensorflow,petewarden/tensorflow,ageron/tensorflow,seanli9jan/tensorflow,frreiss/tensorflow-fred,freedomtan/tensorflow,benoitsteiner/tensorflow-xsmm,Bismarrck/tensorflow,gautam1858/tensorflow,manipopopo/tensorflow,aam-at/tensorflow,petewarden/tensorflow,dendisuhubdy/tensorflow,theflofly/tensorflow,benoitsteiner/tensorflow-xsmm,dancingdan/tensorflow,seanli9jan/tensorflow,alshedivat/tensorflow,yanchen036/tensorflow,xzturn/tensorflow,Xeralux/tensorflow,kobejean/tensorflow,kobejean/tensorflow,gautam1858/tensorflow,seanli9jan/tensorflow,Bismarrck/tensorflow,eaplatanios/tensorflow,arborh/tensorflow,aselle/tensorflow,caisq/tensorflow,ageron/tensorflow,jalexvig/tensorflow,karllessard/tensorflow,caisq/tensorflow,zasdfgbnm/tensorflow,freedomtan/tensorflow,asimshankar/tensorflow,drpngx/tensorflow,aam-a
t/tensorflow,Intel-Corporation/tensorflow,jhseu/tensorflow,jendap/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,jendap/tensorflow,dancingdan/tensorflow,davidzchen/tensorflow,karllessard/tensorflow,gojira/tensorflow,brchiu/tensorflow,allenlavoie/tensorflow,Xeralux/tensorflow,tensorflow/tensorflow-pywrap_saved_model,DavidNorman/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-pywrap_saved_model,allenlavoie/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,cxxgtxy/tensorflow,apark263/tensorflow,aselle/tensorflow,renyi533/tensorflow,allenlavoie/tensorflow,Intel-Corporation/tensorflow,jbedorf/tensorflow,allenlavoie/tensorflow,sarvex/tensorflow,aselle/tensorflow,Xeralux/tensorflow,asimshankar/tensorflow,apark263/tensorflow,dendisuhubdy/tensorflow,annarev/tensorflow,paolodedios/tensorflow,Xeralux/tensorflow,dancingdan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,snnn/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,jendap/tensorflow,gunan/tensorflow,aselle/tensorflow,ZhangXinNan/tensorflow,aldian/tensorflow,renyi533/tensorflow,frreiss/tensorflow-fred,frreiss/tensorflow-fred,jbedorf/tensorflow,xodus7/tensorflow,gautam1858/tensorflow,adit-chandra/tensorflow,theflofly/tensorflow,lukeiwanski/tensorflow,snnn/tensorflow,adit-chandra/tensorflow,alshedivat/tensorflow,Bismarrck/tensorflow,benoitsteiner/tensorflow-xsmm,caisq/tensorflow,jalexvig/tensorflow,benoitsteiner/tensorflow-xsmm,hehongliang/tensorflow,dongjoon-hyun/tensorflow,allenlavoie/tensorflow,drpngx/tensorflow,jendap/tensorflow,asimshankar/tensorflow,allenlavoie/tensorflow,snnn/tensorflow,nburn42/tensorflow,freedomtan/tensorflow,aldian/tensorflow,ghchinoy/tensorflow,kevin-coder/tensorflow-fork,petewarden/tensorflow,kevin-coder/tensorflow-fork,tensorflow/tensorflow-pywrap_saved_model,chemelnucfin/tensorflow,ghchinoy/tensorflow,zasdfgbnm/tensorflow,lukeiwanski/tensorflow,ppwwyyxx/tensorflow,aldian/tensorflow,asimshankar/tensorflow,sarvex/tensorflow,snnn/tensorflow,girving/tensorflow,xzturn/tensorflow,alsrgv/tensorflow,adit-chandra/tensorflow,apark263/tensorflow,yongtang/tensorflow,jbedorf/tensorflow,eaplatanios/tensorflow,manipopopo/tensorflow,gautam1858/tensorflow,DavidNorman/tensorflow,manipopopo/tensorflow,dongjoon-hyun/tensorflow,karllessard/tensorflow,DavidNorman/tensorflow,paolodedios/tensorflow,renyi533/tensorflow,tensorflow/tensorflow,ghchinoy/tensorflow,manipopopo/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,asimshankar/tensorflow,davidzchen/tensorflow,ageron/tensorflow,gautam1858/tensorflow,Intel-Corporation/tensorflow,manipopopo/tensorflow,drpngx/tensorflow,asimshankar/tensorflow,dancingdan/tensorflow,dongjoon-hyun/tensorflow,tensorflow/tensorflow-pywrap_saved_model,lukeiwanski/tensorflow,adit-chandra/tensorflow,arborh/tensorflow,meteorcloudy/tensorflow,eaplatanios/tensorflow,aselle/tensorflow,petewarden/tensorflow,Intel-Corporation/tensorflow,kobejean/tensorflow,gojira/tensorflow,jhseu/tensorflow,dongjoon-hyun/tensorflow,theflofly/tensorflow,paolodedios/tensorflow,AnishShah/tensorflow,kevin-coder/tensorflow-fork,manipopopo/tensorflow,zasdfgbnm/tensorflow,ZhangXinNan/tensorflow,hfp/tensorflow-xsmm,nburn42/tensorflow,meteorcloudy/tensorflow,yanchen036/tensorflow,renyi533/tensorflow,paolodedios/tensorflow,DavidNorman/tensorflow,freedomtan/tensorflow,Xeralux/tensorflow,theflofly/tensorflow,gojira/tensorflow,brchiu/tensorflow,meteorcloudy/tensorflow,ppwwyyxx/tensorflow,girving/tensorflow,xzturn/tensorflow,asimshankar/tenso
rflow,tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,ZhangXinNan/tensorflow,chemelnucfin/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,chemelnucfin/tensorflow,ageron/tensorflow,alsrgv/tensorflow,eaplatanios/tensorflow,arborh/tensorflow,jart/tensorflow,alsrgv/tensorflow,jhseu/tensorflow,hfp/tensorflow-xsmm,karllessard/tensorflow,cxxgtxy/tensorflow,hfp/tensorflow-xsmm,annarev/tensorflow,hfp/tensorflow-xsmm,dendisuhubdy/tensorflow,alshedivat/tensorflow,girving/tensorflow,ppwwyyxx/tensorflow,frreiss/tensorflow-fred,eaplatanios/tensorflow,alshedivat/tensorflow,kevin-coder/tensorflow-fork,chemelnucfin/tensorflow,gunan/tensorflow,xodus7/tensorflow,gautam1858/tensorflow,karllessard/tensorflow,alsrgv/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,freedomtan/tensorflow,dancingdan/tensorflow,drpngx/tensorflow,aam-at/tensorflow,dendisuhubdy/tensorflow,snnn/tensorflow,renyi533/tensorflow,adit-chandra/tensorflow,zasdfgbnm/tensorflow,aldian/tensorflow,aam-at/tensorflow,theflofly/tensorflow,benoitsteiner/tensorflow-xsmm,xzturn/tensorflow,petewarden/tensorflow,petewarden/tensorflow,gunan/tensorflow,apark263/tensorflow,yongtang/tensorflow,renyi533/tensorflow,theflofly/tensorflow,annarev/tensorflow,alshedivat/tensorflow,kobejean/tensorflow,aam-at/tensorflow,xodus7/tensorflow,gunan/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,ageron/tensorflow,frreiss/tensorflow-fred,theflofly/tensorflow,gautam1858/tensorflow,gojira/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,benoitsteiner/tensorflow-xsmm,ZhangXinNan/tensorflow,lukeiwanski/tensorflow,arborh/tensorflow,chemelnucfin/tensorflow,hfp/tensorflow-xsmm,meteorcloudy/tensorflow,dongjoon-hyun/tensorflow,jhseu/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,adit-chandra/tensorflow,annarev/tensorflow,tensorflow/tensorflow-pywrap_saved_model,cxxgtxy/tensorflow,theflofly/tensorflow,davidzchen/tensorflow,aselle/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,jhseu/tensorflow,yongtang/tensorflow,freedomtan/tensorflow,brchiu/tensorflow,nburn42/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,adit-chandra/tensorflow,lukeiwanski/tensorflow,xzturn/tensorflow,ghchinoy/tensorflow,yongtang/tensorflow,jart/tensorflow,AnishShah/tensorflow,paolodedios/tensorflow,hehongliang/tensorflow,kobejean/tensorflow,hfp/tensorflow-xsmm,hehongliang/tensorflow,petewarden/tensorflow,AnishShah/tensorflow,ageron/tensorflow,caisq/tensorflow,eaplatanios/tensorflow,yongtang/tensorflow,kobejean/tensorflow,snnn/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,davidzchen/tensorflow,aam-at/tensorflow,ghchinoy/tensorflow,seanli9jan/tensorflow,dancingdan/tensorflow,gojira/tensorflow,Bismarrck/tensorflow,tensorflow/tensorflow,caisq/tensorflow,jhseu/tensorflow,manipopopo/tensorflow,jalexvig/tensorflow,zasdfgbnm/tensorflow,nburn42/tensorflow,aam-at/tensorflow,aam-at/tensorflow,girving/tensorflow,kobejean/tensorflow,ghchinoy/tensorflow,freedomtan/tensorflow,dendisuhubdy/tensorflow,kevin-coder/tensorflow-fork,Xeralux/tensorflow,paolodedios/tensorflow,Intel-Corporation/tensorflow,frreiss/tensorflow-fred,xodus7/tensorflow,chemelnucfin/tensorflow,tensorflow/tensorflow-pywrap_saved_model,ppwwyyxx/tensorflow,seanli9jan/tensorflow,manipopopo/tensor
flow,seanli9jan/tensorflow,renyi533/tensorflow,zasdfgbnm/tensorflow,dendisuhubdy/tensorflow,lukeiwanski/tensorflow,hfp/tensorflow-xsmm,jalexvig/tensorflow,DavidNorman/tensorflow,tensorflow/tensorflow,dongjoon-hyun/tensorflow,gunan/tensorflow,jart/tensorflow,davidzchen/tensorflow,freedomtan/tensorflow,adit-chandra/tensorflow,allenlavoie/tensorflow,nburn42/tensorflow,caisq/tensorflow,aselle/tensorflow,ppwwyyxx/tensorflow,gautam1858/tensorflow,renyi533/tensorflow,AnishShah/tensorflow,theflofly/tensorflow,girving/tensorflow,renyi533/tensorflow,lukeiwanski/tensorflow,jendap/tensorflow,yanchen036/tensorflow,caisq/tensorflow,tensorflow/tensorflow-pywrap_saved_model,jalexvig/tensorflow,paolodedios/tensorflow,meteorcloudy/tensorflow,drpngx/tensorflow,chemelnucfin/tensorflow,dancingdan/tensorflow,aselle/tensorflow,aldian/tensorflow,gojira/tensorflow,aam-at/tensorflow,dongjoon-hyun/tensorflow,karllessard/tensorflow,cxxgtxy/tensorflow,frreiss/tensorflow-fred,jbedorf/tensorflow,hfp/tensorflow-xsmm,ageron/tensorflow,annarev/tensorflow,ghchinoy/tensorflow,nburn42/tensorflow,seanli9jan/tensorflow,jalexvig/tensorflow,tensorflow/tensorflow,AnishShah/tensorflow,gojira/tensorflow,allenlavoie/tensorflow,freedomtan/tensorflow,aldian/tensorflow,ZhangXinNan/tensorflow,davidzchen/tensorflow,ppwwyyxx/tensorflow,gunan/tensorflow,hehongliang/tensorflow,lukeiwanski/tensorflow,jart/tensorflow,nburn42/tensorflow,snnn/tensorflow,petewarden/tensorflow,meteorcloudy/tensorflow,kobejean/tensorflow,yongtang/tensorflow,alshedivat/tensorflow,aselle/tensorflow,adit-chandra/tensorflow,nburn42/tensorflow,davidzchen/tensorflow,brchiu/tensorflow,brchiu/tensorflow,yongtang/tensorflow,xzturn/tensorflow,alshedivat/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gojira/tensorflow,gojira/tensorflow,gojira/tensorflow,lukeiwanski/tensorflow,cxxgtxy/tensorflow,alsrgv/tensorflow,ZhangXinNan/tensorflow,dendisuhubdy/tensorflow,annarev/tensorflow,xodus7/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,jhseu/tensorflow,ZhangXinNan/tensorflow,seanli9jan/tensorflow,Bismarrck/tensorflow,alsrgv/tensorflow,caisq/tensorflow,arborh/tensorflow,apark263/tensorflow,eaplatanios/tensorflow,xzturn/tensorflow,brchiu/tensorflow,alsrgv/tensorflow,gautam1858/tensorflow,seanli9jan/tensorflow | tensorflow/examples/speech_commands/label_wav_dir.py | tensorflow/examples/speech_commands/label_wav_dir.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Runs a trained audio graph against WAVE files and reports the results.
The model, labels and .wav files specified in the arguments will be loaded, and
then the predictions from running the model against the audio data will be
printed to the console. This is a useful script for sanity checking trained
models, and as an example of how to use an audio model from Python.
Here's an example of running it:
python tensorflow/examples/speech_commands/label_wav_dir.py \
--graph=/tmp/my_frozen_graph.pb \
--labels=/tmp/speech_commands_train/conv_labels.txt \
--wav_dir=/tmp/speech_dataset/left
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import glob
import tensorflow as tf
# pylint: disable=unused-import
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
# pylint: enable=unused-import
FLAGS = None
def load_graph(filename):
"""Unpersists graph from file as default graph."""
with tf.gfile.FastGFile(filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
def load_labels(filename):
"""Read in labels, one label per line."""
return [line.rstrip() for line in tf.gfile.GFile(filename)]
def run_graph(wav_dir, labels, input_layer_name, output_layer_name,
num_top_predictions):
"""Runs the audio data through the graph and prints predictions."""
with tf.Session() as sess:
# Feed the audio data as input to the graph.
# predictions will contain a two-dimensional array, where one
# dimension represents the input image count, and the other has
# predictions per class
for wav_path in glob.glob(wav_dir + "/*.wav"):
if not wav_path or not tf.gfile.Exists(wav_path):
tf.logging.fatal('Audio file does not exist %s', wav_path)
with open(wav_path, 'rb') as wav_file:
wav_data = wav_file.read()
softmax_tensor = sess.graph.get_tensor_by_name(output_layer_name)
predictions, = sess.run(softmax_tensor, {input_layer_name: wav_data})
# Sort to show labels in order of confidence
print('\n%s' % (wav_path.split('/')[-1]))
top_k = predictions.argsort()[-num_top_predictions:][::-1]
for node_id in top_k:
human_string = labels[node_id]
score = predictions[node_id]
print('%s (score = %.5f)' % (human_string, score))
return 0
def label_wav(wav_dir, labels, graph, input_name, output_name, how_many_labels):
"""Loads the model and labels, and runs the inference to print predictions."""
if not labels or not tf.gfile.Exists(labels):
tf.logging.fatal('Labels file does not exist %s', labels)
if not graph or not tf.gfile.Exists(graph):
tf.logging.fatal('Graph file does not exist %s', graph)
labels_list = load_labels(labels)
# load graph, which is stored in the default session
load_graph(graph)
run_graph(wav_dir, labels_list, input_name, output_name, how_many_labels)
def main(_):
"""Entry point for script, converts flags to arguments."""
label_wav(FLAGS.wav_dir, FLAGS.labels, FLAGS.graph, FLAGS.input_name,
FLAGS.output_name, FLAGS.how_many_labels)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--wav_dir', type=str, default='', help='Audio file to be identified.')
parser.add_argument(
'--graph', type=str, default='', help='Model to use for identification.')
parser.add_argument(
'--labels', type=str, default='', help='Path to file containing labels.')
parser.add_argument(
'--input_name',
type=str,
default='wav_data:0',
help='Name of WAVE data input node in model.')
parser.add_argument(
'--output_name',
type=str,
default='labels_softmax:0',
help='Name of node outputting a prediction in the model.')
parser.add_argument(
'--how_many_labels',
type=int,
default=3,
help='Number of results to show.')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 | Python |
|
a3a48824b36ef62edaf128379f1baec5482166e7 | Save error_message for resources (SAAS-982) | opennode/nodeconductor-saltstack | src/nodeconductor_saltstack/migrations/0005_resource_error_message.py | src/nodeconductor_saltstack/migrations/0005_resource_error_message.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('nodeconductor_saltstack', '0004_remove_useless_spl_fields'),
]
operations = [
migrations.AddField(
model_name='domain',
name='error_message',
field=models.TextField(blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='site',
name='error_message',
field=models.TextField(blank=True),
preserve_default=True,
),
]
| mit | Python |
|
bfb51cadc66f34a67686bef3b15e9197c9d0617b | Create ping_help.py | Subh94/Traffic_Analyzer- | ping_help.py | ping_help.py | import time
import subprocess
import os
hostname = raw_input('Host to ping: ')
#while 1:
os.system("ping -c 10 -i 5 " + hostname + " >1.txt")
os.system("awk -F'[= ]' '{print $6,$10}' < 1.txt >final.txt")
os.system("grep [0-9] final.txt >final1.txt")
| apache-2.0 | Python |
|
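The awk/grep chain above shells out three times and leaves temp files behind just to pull the `icmp_seq` and `time` fields. A pure-Python sketch of the same extraction — assuming GNU ping's `icmp_seq=N ... time=T ms` output, the same format the awk field numbers rely on — would be:

```python
import re
import subprocess

def ping_times(host, count=10, interval=5):
    """Return (icmp_seq, time_ms) string pairs parsed from ping output."""
    out = subprocess.check_output(
        ['ping', '-c', str(count), '-i', str(interval), host])
    return re.findall(r'icmp_seq=(\d+).*?time=([\d.]+)', out.decode())
```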
dacffcb3e79877e1ea5e71d1a2e67bd4edd865bf | Add SettingOverrideModel that exposes a SettingOverrideDecorator to QML | onitake/Uranium,onitake/Uranium | plugins/Tools/PerObjectSettingsTool/SettingOverrideModel.py | plugins/Tools/PerObjectSettingsTool/SettingOverrideModel.py | # Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from PyQt5.QtCore import Qt, pyqtSlot, QUrl
from UM.Application import Application
from UM.Qt.ListModel import ListModel
class SettingOverrideModel(ListModel):
KeyRole = Qt.UserRole + 1
LabelRole = Qt.UserRole + 2
DescriptionRole = Qt.UserRole + 3
ValueRole = Qt.UserRole + 4
TypeRole = Qt.UserRole + 5
UnitRole = Qt.UserRole + 6
ValidRole = Qt.UserRole + 7
def __init__(self, decorator, parent = None):
super().__init__(parent)
self._decorator = decorator
self._decorator.settingAdded.connect(self._onSettingsChanged)
self._decorator.settingRemoved.connect(self._onSettingsChanged)
self._decorator.settingValueChanged.connect(self._onSettingValueChanged)
self._onSettingsChanged()
self.addRoleName(self.KeyRole, "key")
self.addRoleName(self.LabelRole, "label")
self.addRoleName(self.DescriptionRole, "description")
self.addRoleName(self.ValueRole,"value")
self.addRoleName(self.TypeRole, "type")
self.addRoleName(self.UnitRole, "unit")
self.addRoleName(self.ValidRole, "valid")
def _onSettingsChanged(self):
self.clear()
active_instance = Application.getInstance().getMachineManager().getActiveMachineInstance()
for key, value in self._decorator.getAllSettings().items():
setting = active_instance.getSettingByKey(key)
if not setting:
continue
self.appendItem({
"key": key,
"label": setting.getLabel(),
"description": setting.getDescription(),
"value": value,
"type": setting.getType(),
"unit": setting.getUnit(),
"valid": setting.validate()
})
def _onSettingValueChanged(self, key, value):
index = self.find("key", key)
if index != -1:
self.setProperty(index, "value", value)
| agpl-3.0 | Python |
|
3652f1c666f3bf482862727838f0b4bbc9fea5e9 | fix bug 1076270 - add support for Windows 10 | rhelmer/socorro,linearregression/socorro,Tchanders/socorro,twobraids/socorro,linearregression/socorro,mozilla/socorro,Tayamarn/socorro,luser/socorro,m8ttyB/socorro,yglazko/socorro,yglazko/socorro,pcabido/socorro,Serg09/socorro,pcabido/socorro,KaiRo-at/socorro,Tayamarn/socorro,rhelmer/socorro,twobraids/socorro,rhelmer/socorro,AdrianGaudebert/socorro,rhelmer/socorro,rhelmer/socorro,m8ttyB/socorro,spthaolt/socorro,AdrianGaudebert/socorro,adngdb/socorro,linearregression/socorro,KaiRo-at/socorro,Serg09/socorro,m8ttyB/socorro,pcabido/socorro,Serg09/socorro,spthaolt/socorro,cliqz/socorro,Tchanders/socorro,yglazko/socorro,lonnen/socorro,adngdb/socorro,AdrianGaudebert/socorro,Tchanders/socorro,m8ttyB/socorro,KaiRo-at/socorro,luser/socorro,cliqz/socorro,cliqz/socorro,mozilla/socorro,Tchanders/socorro,lonnen/socorro,luser/socorro,cliqz/socorro,Tchanders/socorro,spthaolt/socorro,adngdb/socorro,lonnen/socorro,pcabido/socorro,Serg09/socorro,linearregression/socorro,KaiRo-at/socorro,Tchanders/socorro,mozilla/socorro,twobraids/socorro,m8ttyB/socorro,KaiRo-at/socorro,Tayamarn/socorro,Serg09/socorro,linearregression/socorro,rhelmer/socorro,mozilla/socorro,linearregression/socorro,twobraids/socorro,AdrianGaudebert/socorro,Tayamarn/socorro,lonnen/socorro,m8ttyB/socorro,Tayamarn/socorro,cliqz/socorro,mozilla/socorro,pcabido/socorro,spthaolt/socorro,AdrianGaudebert/socorro,luser/socorro,KaiRo-at/socorro,twobraids/socorro,Serg09/socorro,pcabido/socorro,adngdb/socorro,mozilla/socorro,cliqz/socorro,adngdb/socorro,Tayamarn/socorro,twobraids/socorro,adngdb/socorro,AdrianGaudebert/socorro,luser/socorro,luser/socorro,spthaolt/socorro,spthaolt/socorro,yglazko/socorro,yglazko/socorro,yglazko/socorro | alembic/versions/17e83fdeb135_bug_1076270_support_windows_10.py | alembic/versions/17e83fdeb135_bug_1076270_support_windows_10.py | """bug 1076270 - support windows 10
Revision ID: 17e83fdeb135
Revises: 52dbc7357409
Create Date: 2014-10-03 14:03:29.837940
"""
# revision identifiers, used by Alembic.
revision = '17e83fdeb135'
down_revision = '52dbc7357409'
from alembic import op
from socorro.lib import citexttype, jsontype, buildtype
from socorro.lib.migrations import fix_permissions, load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
op.execute("""
INSERT INTO os_versions
(major_version, minor_version, os_name, os_version_string)
VALUES (6, 4, 'Windows', 'Windows 10')
""")
def downgrade():
op.execute("""
DELETE FROM os_versions
WHERE major_version = 6
AND minor_version = 4
AND os_name = 'Windows'
AND os_version_string = 'Windows 10'
""")
| mpl-2.0 | Python |
|
f5ada694fae30f15498c775e8c4aa14a08459251 | Add slogan plugin | Muzer/smartbot,thomasleese/smartbot-old,tomleese/smartbot,Cyanogenoid/smartbot | plugins/slogan.py | plugins/slogan.py | import re
import requests
import urllib.parse
class Plugin:
def __call__(self, bot):
bot.on_respond(r"slogan(?:ise)? (.*)$", self.on_respond)
bot.on_help("slogan", self.on_help)
def on_respond(self, bot, msg, reply):
url = "http://www.sloganizer.net/en/outbound.php?slogan={0}".format(urllib.parse.quote(msg["match"][0]))
headers = { "User-Agent": "SmartBot" }
page = requests.get(url, headers=headers)
reply(re.sub("<.*?>", "", page.text))
def on_help(self, bot, msg, reply):
reply("Syntax: slogan[ise] <thing>")
| mit | Python |
|
779fb015913a17fcb8fb290515845e6b47c3ae50 | Create the converter (with span-conversion functionality) | jladan/latex2markdown | latex2markdown.py | latex2markdown.py | """
A Very simple tool to convert latex documents to markdown documents
"""
import re
span_substitutions = [
# non-greedy (.+?) so two spans on one line are not merged into a single match
(r'\\emph\{(.+?)\}', r'*\1*'),
(r'\\textbf\{(.+?)\}', r'**\1**'),
(r'\\verb;(.+?);', r'`\1`'),
(r'\\includegraphics\{(.+?)\}', r''),
]
def convert_span_elements(line):
""" Converts all recognizable span elements into markdown
"""
for (f, r) in span_substitutions:
p = re.compile(f)
line = p.sub(r, line)
return line
# This next bit is to test the conversion as it builds
from sys import stdin
if __name__=="__main__":
for line in stdin:
print(convert_span_elements(line),end='')
| mit | Python |
|
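A quick sanity check of the span converter (the input line is invented; the output follows from the substitution table above — the non-greedy patterns keep the two `\emph` spans from merging into one match):

```python
line = r"Both \emph{first} and \emph{second} spans, plus \verb;inline;."
print(convert_span_elements(line))
# -> Both *first* and *second* spans, plus `inline`.
```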
2fdabf544c75096efafe2d14988efa28619643ab | add scheme | johntut/MongoDisco,10genNYUITP/MongoDisco,sajal/MongoDisco,mongodb/mongo-disco,dcrosta/mongo-disco | app/scheme_mongodb.py | app/scheme_mongodb.py | import pymongo
import bson
from bson import json_util
import warnings
from cStringIO import StringIO
from pymongo import Connection, uri_parser
import bson.son as son
import json
import logging
def open(url=None, task=None):
#parses a mongodb uri and returns the database
#"mongodb://localhost/test.in?query='{"key": value}'"
uri = url if url else "mongodb://localhost/test.in"
#print 'uri: ' + uri
params = uri.split('?', 1)
uri = params[0]
uri_info = uri_parser.parse_uri(uri)
query = None
#TODO test flow from a query
#parse json to a dict = q_d
# ^^ this is where we use json_util.object_hook
#SON()['query'] = q_d['query']
#for k,v in q_d.iteritems:
# if k not "query":
# SON[k] = v
options = {}
if len(params) > 1:
params = params[1]
list_of_params = params.split('&')
for p in list_of_params:
name, json_obj = p.split('=', 1)
if name == 'query':
query = son.SON(json.loads(json_obj, object_hook=json_util.object_hook))
else:
options[name] = json_obj
'''
query = son.SON()
li_q = json.loads(json_query)
for tupl in li_q:
if tupl[0] == "$max" or tupl[0] == "$min":
obj_id = bson.objectid.ObjectId(tupl[1])
query[tupl[0]] = {u'_id' : obj_id}
else:
query[tupl[0]] = tupl[1]
'''
if not query:
query = {}
#go around: connect to the sonnection then choose db by ['dbname']
with warnings.catch_warnings():
warnings.simplefilter("ignore")
connection = Connection(uri)
database_name = uri_info['database']
collection_name = uri_info['collection']
db = connection[database_name]
collection = db[collection_name]
cursor = collection.find(query, None)
wrapper = MongoWrapper(cursor)
return wrapper
#WRAPPED!
class MongoWrapper(object):
"""Want to wrap the cursor in an object that
supports the following operations: """
def __init__(self, cursor):
self.cursor = cursor
self.offset = 0
def __iter__(self):
#most important method
for rec in self.cursor:
yield rec
def __len__(self):
#may need to do this more dynamically (see lib/disco/comm.py ln 163)
return self.cursor.count()
def close(self):
self.cursor.close()
def read(self, size=-1):
# the @property decorator was dropped: a property cannot accept a size argument
list_of_records = []
if size > 0:
for i in range(size):
list_of_records.append(next(self.cursor))
return list_of_records
def input_stream(stream, size, url, params):
mon = open(url)
return mon
| apache-2.0 | Python |
|
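How the scheme might be exercised once wired into Disco — the URL shape (database.collection plus an optional JSON `query` parameter) is inferred from the parsing code above, and the host and collection names are placeholders:

```python
import scheme_mongodb

# Assumes a local mongod with documents in the test.in collection.
wrapper = scheme_mongodb.open('mongodb://localhost/test.in?query={"key": "value"}')
print(len(wrapper))       # cursor count
for record in wrapper:    # matching documents
    print(record)
wrapper.close()
```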
03ad7302f75ea5de0870c798ec70f1a1912288ca | Add main.py file Description for 'hello! hosvik' | Hosvik/Hosvik | src/main.py | src/main.py | import sys
print(sys.platform);
print('Hello hosvik!')
| apache-2.0 | Python |
|
054c75ce1a63732be7a58ec1150e9f8aaff2aedb | Create test.py | WebShark025/TheZigZagProject,WebShark025/TheZigZagProject | plugins/test.py | plugins/test.py | @bot.message_handler(commands=['test', 'toast'])
def send_test(message):
bot.send_message(message.chat.id, TEST_MSG.encode("utf-8"))
| mit | Python |
|
5c9ffaaa8e244bb9db627a0408258750cc0e81d6 | Create ping.py | johnbrannstrom/zipato-extension,johnbrannstrom/zipato-extension,johnbrannstrom/zipato-extension | src/ping.py | src/ping.py | nisse
| mit | Python |
|
1473e0f4f1949349ef7212e0755fa8ffa6401cbe | Create process_htk_mlf_zh.py | troylee/chinesetextnorm | process_htk_mlf_zh.py | process_htk_mlf_zh.py | #!/usr/bin/env python
#
# This script reads in a HTK MLF format label file and converts the
# encoded contents to GBK encoding.
#
import string, codecs
fin=open('vom_utt_wlab.mlf')
fout=codecs.open('vom_utt_wlab.gbk.mlf', encoding='gbk', mode='w')
while True:
sr=fin.readline()
if sr=='':break
sr=sr.strip()
if sr.endswith('.lab"'):
print >>fout, sr
while True:
sr=(fin.readline()).strip()
if sr=='.':break
if sr.startswith('\\'):
lst=(sr.strip('\\')).split('\\') # get the list of octal representation of each byte
bins=bytearray()
for itm in lst:
val=0
for ii in range(3): # each octal number will have exactly 3 numbers, i.e. of the form \nnn
val=val*8
val=val+int(itm[ii])
bins.append(val)
print >>fout, bins.decode('gbk')
else:
print >>fout, sr
print >>fout, '.'
else:
print >>fout, sr
fin.close()
fout.close()
| apache-2.0 | Python |
|
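The inner loop hand-rolls the base-8 arithmetic for each `\nnn` escape. Python's `int(..., 8)` does the same conversion directly; a compact equivalent of the decoding step, under the script's own assumption that every escape is exactly three octal digits:

```python
def decode_octal_escapes(line, encoding='gbk'):
    """Turn an HTK label like '\\xxx\\yyy' into decoded text."""
    parts = line.strip('\\').split('\\')
    return bytearray(int(part, 8) for part in parts).decode(encoding)
```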
c8c807cfcb4422edc0e2dbe3a4673a62fa37cbfa | Add extra migration triggered by updated django / parler (#501) | nephila/djangocms-blog,nephila/djangocms-blog,nephila/djangocms-blog | djangocms_blog/migrations/0037_auto_20190806_0743.py | djangocms_blog/migrations/0037_auto_20190806_0743.py | # Generated by Django 2.1.11 on 2019-08-06 05:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import parler.fields
import taggit_autosuggest.managers
class Migration(migrations.Migration):
dependencies = [
('djangocms_blog', '0036_auto_20180913_1809'),
]
operations = [
migrations.AlterField(
model_name='authorentriesplugin',
name='authors',
field=models.ManyToManyField(limit_choices_to={'djangocms_blog_post_author__publish': True}, to=settings.AUTH_USER_MODEL, verbose_name='authors'),
),
migrations.AlterField(
model_name='blogcategorytranslation',
name='master',
field=parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='djangocms_blog.BlogCategory'),
),
migrations.AlterField(
model_name='blogconfigtranslation',
name='master',
field=parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='djangocms_blog.BlogConfig'),
),
migrations.AlterField(
model_name='latestpostsplugin',
name='tags',
field=taggit_autosuggest.managers.TaggableManager(blank=True, help_text='Show only the blog articles tagged with chosen tags.', related_name='djangocms_blog_latest_post', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='filter by tag'),
),
migrations.AlterField(
model_name='post',
name='tags',
field=taggit_autosuggest.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', related_name='djangocms_blog_tags', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AlterField(
model_name='posttranslation',
name='master',
field=parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='djangocms_blog.Post'),
),
]
| bsd-3-clause | Python |
|
e082c803bf5ce31c4948d0d512e9ec0366cf0adc | Create politeusersbot.py | kooldawgstar/PoliteUsersBot | politeusersbot.py | politeusersbot.py | #Polite Users Bot created by Kooldawgstar
import praw
from time import sleep
import random
USERNAME = "USERNAME"
PASSWORD = "PASSWORD"
LIMIT = 100
RESPONSES = ["Thanks for being a nice user and thanking people for help!",
"Thank you for being a nice user and thanking people for help!",
]
responded = set()
r = praw.Reddit(user_agent="Enter in Useragent here")
r.login(USERNAME, PASSWORD)
subreddit = r.get_subreddit("Polite_Users_Bot")
def meets_criteria(responded, comment):
#add whatever criteria/logic you want here
return (str(comment.author) != USERNAME) and (comment.id not in responded) and any(phrase in comment.body.lower() for phrase in ("thanks", "thx", "thank you", "thank u"))
def generate_response(comment):
#generate whatever response you want, you can make it specific to a comment by checking for various conditions
return random.choice(RESPONSES)
while True:
for comment in subreddit.get_comments(limit=LIMIT):
if meets_criteria(responded, comment):
print (comment.body)
print (comment.id)
print (str(comment.author))
while True: #continue to try responding to the comment until it works, unless something unknown occurs
try:
comment.reply(generate_response(comment))
print ("Breaking out after responding, and adding to the list")
responded.add(comment.id)
break
except praw.errors.RateLimitExceeded:
print ("Sleeping, rate limit :(")
sleep(10*60) #sleep for 10 minutes, that's the timer limit
except:
print ("Some unknown error has occurred, bail out...")
break
print ("---------------------------------------\n\n")
print ("sleeping")
sleep(60) #sleep for a minute for new comments to show up
| mit | Python |
|
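The gratitude check inside `meets_criteria` is the piece most worth unit-testing; pulled out as a pure function (the names here are illustrative, not from the commit):

```python
THANK_PHRASES = ("thanks", "thx", "thank you", "thank u")

def is_thankful(text):
    """True if any thank-you phrase appears in the comment body."""
    text = text.lower()
    return any(phrase in text for phrase in THANK_PHRASES)

assert is_thankful("Thx for the quick reply!")
assert not is_thankful("still waiting on an answer")
```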
94481f656690956b2a4eb5a1227948d24ba4cc05 | Add actual command line python function (#7) | msimet/Stile,msimet/Stile | bin/CCDSingleEpochStile.py | bin/CCDSingleEpochStile.py | #!/usr/bin/env python
from stile.lsst.base_tasks import CCDSingleEpochStileTask
CCDSingleEpochStileTask.parseAndRun()
| bsd-3-clause | Python |
|
225d5232cca6bb42e39959b2330758225a748477 | add little script to retrieve URLs to PS1-DR1 images | legacysurvey/legacypipe,legacysurvey/legacypipe | py/legacyanalysis/get-ps1-skycells.py | py/legacyanalysis/get-ps1-skycells.py | import requests
from astrometry.util.fits import *
from astrometry.util.multiproc import *
def get_cell((skycell, subcell)):
url = 'http://ps1images.stsci.edu/cgi-bin/ps1filenames.py?skycell=%i.%03i' % (skycell, subcell)
print('Getting', url)
r = requests.get(url)
lines = r.text.split('\n')
#assert(len(lines) == 6)
cols = 'projcell subcell ra dec filter mjd type filename shortname'
assert(lines[0] == cols)
lines = lines[1:]
lines = [l.split() for l in lines]
T = fits_table()
types = dict(projcell=np.int16, subcell=np.int16, ra=np.float64, dec=np.float64, mjd=None)
types['type'] = None
for i,col in enumerate(cols.split()):
tt = types.get(col, str)
if tt is None:
continue
vals = [words[i] for words in lines]
#print('Values for', col, ':', vals)
# HACK -- base-10 parsing for integer subcell
if col == 'subcell':
vals = [int(v, 10) for v in vals]
T.set(col, np.array([tt(v) for v in vals], dtype=tt))
return T
mp = multiproc(8)
TT = []
for skycell in range(635, 2643+1):
args = []
for subcell in range(100):
args.append((skycell, subcell))
TTi = mp.map(get_cell, args)
Ti = merge_tables(TTi)
Ti.writeto('ps1skycells-%i.fits' % skycell)
TT.extend(TTi)
T = merge_tables(TT)
T.writeto('ps1skycells.fits')
| bsd-3-clause | Python |
|
0f5ecc42485d4f0e89fbe202b57a2e7735ea69cc | Create product_images.py | lukebranch/product_images | product_images.py | product_images.py | from openerp.osv import osv, fields
class product_template(osv.Model):
_inherit = 'product.template'
_columns = {
'x_secondpicture': fields.binary("Second Image",
help="This field holds the second image used as image for the product, limited to 1024x1024px."),
}
product_template()
| mit | Python |
|
e81f6e01ac55723e015c4d7d9d8f61467378325a | Add autoincrement to ZUPC.id | openmaraude/APITaxi,openmaraude/APITaxi | migrations/versions/e187aca7c77a_zupc_id_autoincrement.py | migrations/versions/e187aca7c77a_zupc_id_autoincrement.py | """ZUPC.id autoincrement
Revision ID: e187aca7c77a
Revises: ccd5b0142a76
Create Date: 2019-10-21 14:01:10.406983
"""
# revision identifiers, used by Alembic.
revision = 'e187aca7c77a'
down_revision = '86b41c3dbd00'
from alembic import op
from sqlalchemy.dialects import postgresql
from sqlalchemy.schema import Sequence, CreateSequence, DropSequence
import sqlalchemy as sa
def upgrade():
op.execute('''
CREATE SEQUENCE ZUPC_id_seq;
ALTER TABLE "ZUPC" ALTER COLUMN id SET DEFAULT nextval('ZUPC_id_seq');
''')
def downgrade():
op.execute('''
ALTER TABLE "ZUPC" ALTER COLUMN id DROP DEFAULT;
DROP SEQUENCE ZUPC_id_seq
''')
| agpl-3.0 | Python |
|
cc8b3f8a7fb6af29f16d47e4e4caf56f17605325 | Add command handler. | CheeseLord/warts,CheeseLord/warts | src/server/commandHandler.py | src/server/commandHandler.py | from src.shared.encode import decodePosition
class CommandHandler(object):
def __init__(self, gameState, connectionManager):
self.gameState = gameState
self.connectionManager = connectionManager
def broadcastMessage(self, *args, **kwargs):
self.connectionManager.broadcastMessage(*args, **kwargs)
def sendMessage(self, *args, **kwargs):
self.connectionManager.sendMessage(*args, **kwargs)
def createConnection(self, playerId):
playerX, playerY = self.gameState.getPos(playerId)
self.sendMessage(playerId, "your_id_is", [playerId])
self.broadcastMessage("new_obelisk", [playerX, playerY])
for otherId in self.gameState.positions:
# We already broadcast this one to everyone, including ourself.
if otherId == playerId:
continue
otherX, otherY = self.gameState.getPos(otherId)
self.sendMessage("new_obelisk", [otherId, otherX, otherY])
def removeConnection(self, playerId):
self.broadcastMessage("delete_obelisk", [playerId])
self.gameState.removePlayer(playerId)
def stringReceived(self, playerId, data):
command = data.strip().lower()
STEP_SIZE = 1.0
RELATIVE_MOVES = {
'n': [ 0.0, STEP_SIZE],
's': [ 0.0, -STEP_SIZE],
'e': [ STEP_SIZE, 0.0],
'w': [-STEP_SIZE, 0.0],
}
if command in RELATIVE_MOVES:
self.gameState.movePlayerBy(playerId,
RELATIVE_MOVES[command])
else:
newPos = decodePosition(command)
if newPos is not None:
self.gameState.movePlayerTo(playerId, newPos)
# TODO: Maybe only broadcast the new position if we handled a valid
# command? Else the position isn't changed....
playerX, playerY = self.gameState.getPos(playerId)
self.broadcastMessage("set_pos", [playerId, playerX, myY])
| mit | Python |
|
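`decodePosition` is imported from `src.shared.encode` but not shown in this commit. A plausible stand-in — purely an assumption about its contract, given that `stringReceived` expects an (x, y) pair or `None` — might parse comma-separated coordinates:

```python
def decode_position_sketch(command):
    """Hypothetical parser for 'x,y' move commands; None on malformed input."""
    try:
        x_str, y_str = command.split(',')
        return float(x_str), float(y_str)
    except ValueError:
        return None
```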
be67baac2314408b295bddba3e5e4b2ca9bfd262 | Add ffs.exceptions | davidmiller/ffs,davidmiller/ffs | ffs/exceptions.py | ffs/exceptions.py | """
ffs.exceptions
Base and definitions for all exceptions raised by FFS
"""
class Error(Exception):
"Base Error class for FFS"
class DoesNotExistError(Error):
"Something should have been here"
| apache-2.0 | Python |
|
2737e1d46263eff554219a5fa5bad060b8f219d3 | Add CLI script for scoring huk-a-buk. | dhermes/huk-a-buk,dhermes/huk-a-buk,dhermes/huk-a-buk,dhermes/huk-a-buk | score_hukabuk.py | score_hukabuk.py | import json
import os
import time
DATA = {'turns': {}}
class Settings(object):
FILENAME = None
CURRENT_TURN = 0
NAME_LIST = None
NAME_CHOICES = None
def set_filename():
filename = raw_input('Set the filename? ').strip()
if not filename:
filename = str(int(time.time()))
Settings.FILENAME = filename + '.json'
def save_game():
with open(Settings.FILENAME, 'w') as fh:
json.dump(DATA, fh)
def enter_names():
names = {}
while True:
name = raw_input('Enter name: ')
if name.strip() == '':
break
names[name] = -5
DATA['names'] = names
    Settings.NAME_CHOICES = list(names.keys())
    Settings.NAME_MENU = '\n'.join([
        '%d: %s' % (i, name)
        for i, name in enumerate(Settings.NAME_CHOICES)
    ])
save_game()
def game_over():
game_over = raw_input('Is the game over? [y/n] ')
return game_over.lower().strip() == 'y'
def get_bidder():
actual_bidder = None
while actual_bidder is None:
        print(Settings.NAME_MENU)
bidder = raw_input('Who won the bid? ')
try:
bidder = int(bidder)
actual_bidder = Settings.NAME_CHOICES[bidder]
        except (ValueError, IndexError):
if bidder in Settings.NAME_CHOICES:
actual_bidder = bidder
return actual_bidder
def get_bid():
actual_bid = None
while actual_bid is None:
bid = raw_input('Bid amount? ')
try:
bid = int(bid)
if bid in (2, 3, 4, 5):
actual_bid = bid
        except ValueError:
pass
return actual_bid
def get_points():
result = {}
print '=' * 60
print 'Scores for turn %d:' % (Settings.CURRENT_TURN,)
for name in DATA['names'].keys():
msg = 'Score for %r: ' % (name,)
actual_score = None
while actual_score is None:
score = raw_input(msg)
try:
score = int(score)
if score in (-5, 0, 1, 2, 3, 4, 5):
actual_score = score
except:
pass
result[name] = actual_score
DATA['names'][name] += actual_score
return result
def play_turn():
turn = DATA['turns'].setdefault(Settings.CURRENT_TURN, {})
turn['bidder'] = get_bidder()
turn['bid'] = get_bid()
turn['points'] = get_points()
Settings.CURRENT_TURN += 1
save_game()
def print_scores():
print '=' * 60
print 'Current scores:'
print '-' * 60
for name, score in DATA['names'].items():
print '%r -> %d' % (name, score)
print '=' * 60
def play_game():
while not game_over():
print_scores()
play_turn()
def main():
set_filename()
enter_names()
play_game()
if __name__ == '__main__':
main()
| apache-2.0 | Python |
|
7f64a56a17fc6d73da4ac2987d42931885925db0 | Create server.py | amoffat/Chaos,eamanu/Chaos,chaosbot/Chaos,Chaosthebot/Chaos,amoffat/Chaos,mark-i-m/Chaos,phil-r/chaos,botchaos/Chaos,mpnordland/chaos,mark-i-m/Chaos,mpnordland/chaos,amoffat/Chaos,amoffat/Chaos,hongaar/chaos,Chaosthebot/Chaos,phil-r/chaos,rudehn/chaos,hongaar/chaos,eamanu/Chaos,eukaryote31/chaos,hongaar/chaos,mpnordland/chaos,mark-i-m/Chaos,phil-r/chaos,chaosbot/Chaos,eukaryote31/chaos,g19fanatic/chaos,eamanu/Chaos,chaosbot/Chaos,rudehn/chaos,g19fanatic/chaos,chaosbot/Chaos,mpnordland/chaos,mark-i-m/Chaos,rudehn/chaos,phil-r/chaos,rudehn/chaos,mpnordland/chaos,g19fanatic/chaos,chaosbot/Chaos,Chaosthebot/Chaos,g19fanatic/chaos,botchaos/Chaos,botchaos/Chaos,eukaryote31/chaos,mark-i-m/Chaos,hongaar/chaos,phil-r/chaos,g19fanatic/chaos,eamanu/Chaos,eamanu/Chaos,rudehn/chaos,hongaar/chaos,eukaryote31/chaos,Chaosthebot/Chaos,eukaryote31/chaos,botchaos/Chaos,amoffat/Chaos,botchaos/Chaos,Chaosthebot/Chaos | server/server.py | server/server.py | import http.server
import socketserver
PORT = 80
Handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", PORT), Handler)
httpd.serve_forever()
| mit | Python |
|
d599e5a35d4ac056dbefa8ec8af6c8be242c12f1 | Add test case for input pipeline. | google/flax,google/flax | linen_examples/wmt/input_pipeline_test.py | linen_examples/wmt/input_pipeline_test.py | # Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import tempfile
from absl.testing import absltest
import input_pipeline
from configs import default
import tensorflow_datasets as tfds
# We just use different values here to verify that the input pipeline uses
# the correct value for the 3 different datasets.
_TARGET_LENGTH = 32
_EVAL_TARGET_LENGTH = 48
_PREDICT_TARGET_LENGTH = 64
class InputPipelineTest(absltest.TestCase):
def _get_datasets(self):
config = default.get_config()
config.per_device_batch_size = 1
config.vocab_size = 32
config.max_corpus_chars = 1000
config.max_target_length = _TARGET_LENGTH
config.max_eval_target_length = _EVAL_TARGET_LENGTH
config.max_predict_length = _PREDICT_TARGET_LENGTH
vocab_path = os.path.join(tempfile.mkdtemp(), 'sentencepiece_model')
# Go two directories up to the root of the flax directory.
flax_root_dir = pathlib.Path(__file__).parents[2]
data_dir = str(flax_root_dir) + '/.tfds/metadata' # pylint: disable=unused-variable
with tfds.testing.mock_data(num_examples=128, data_dir=data_dir):
train_ds, eval_ds, predict_ds, _ = input_pipeline.get_wmt_datasets(
n_devices=2,
config=config,
shard_idx=0,
shard_count=1,
vocab_path=vocab_path)
return train_ds, eval_ds, predict_ds
def test_train_ds(self):
train_ds = self._get_datasets()[0]
expected_shape = [2, _TARGET_LENGTH] # 2 devices.
# For training we pack multiple short examples in one example.
# *_position and *_segmentation indicate the boundaries.
for batch in train_ds.take(3):
self.assertEqual({k: v.shape.as_list() for k, v in batch.items()}, {
'inputs': expected_shape,
'inputs_position': expected_shape,
'inputs_segmentation': expected_shape,
'targets': expected_shape,
'targets_position': expected_shape,
'targets_segmentation': expected_shape,
})
def test_eval_ds(self):
eval_ds = self._get_datasets()[1]
expected_shape = [2, _EVAL_TARGET_LENGTH] # 2 devices.
for batch in eval_ds.take(3):
self.assertEqual({k: v.shape.as_list() for k, v in batch.items()}, {
'inputs': expected_shape,
'targets': expected_shape,
})
def test_predict_ds(self):
predict_ds = self._get_datasets()[2]
expected_shape = [2, _PREDICT_TARGET_LENGTH] # 2 devices.
for batch in predict_ds.take(3):
self.assertEqual({k: v.shape.as_list() for k, v in batch.items()}, {
'inputs': expected_shape,
'targets': expected_shape,
})
if __name__ == '__main__':
absltest.main()
| apache-2.0 | Python |
|
be6997772bd7e39dd1f68d96b3d52a82372ad216 | update migartions | rapidpro/tracpro-old,rapidpro/tracpro-old | tracpro/supervisors/migrations/0002_auto_20141102_2231.py | tracpro/supervisors/migrations/0002_auto_20141102_2231.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('supervisors', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='supervisor',
options={'verbose_name': 'Supervisor', 'verbose_name_plural': 'Supervisors'},
),
migrations.AlterField(
model_name='supervisor',
name='region',
field=models.CharField(help_text='The name of the Region or State this supervisor belongs to, this should map to the Contact group on RapidPro', max_length=64, verbose_name='Region'),
preserve_default=True,
),
]
| agpl-3.0 | Python |
|
0ccca70cf289fb219768d1a124cacf11396a0ecc | Add files via upload | SeanBeseler/data-structures | src/pque.py | src/pque.py | class Pque(object):
"""make as priority queue priority scale is 0 through -99
0 has greatest priority with ties being first come first pop"""
def __init__(self):
self.next_node = None
self.priority = 0
self.value = None
self.tail = None
self.head = None
self.size = 0
    def insert(self, value, priority=-99):
        """Insert a value into the queue; the default priority is -99."""
        new_pque = Pque()
        new_pque.priority = priority
        if self.size == 0:
self.head = new_pque
self.tail = new_pque
else:
current_node = self.head
pre_node = None
            for x in range(self.size):
if new_pque.priority > current_node.priority:
if current_node is self.head:
new_pque.next_node = self.head
self.head = new_pque
break
else:
pre_node.next_node = new_pque
                        new_pque.next_node = current_node
break
if current_node is self.tail:
self.tail.next_node = new_pque
self.tail = new_pque
break
else:
pre_node = current_node
current_node = current_node.next_node
self.size += 1
new_pque.value = value
def peek(self):
"""returns the data in the head of the pque with out removing it"""
if self.head is None:
raise IndexError ('que is empty')
return slef.head.value
def pop(self):
"""returns the data in the head of pque and removes it """
if self.head is None:
raise IndexError ('que is empty')
temp_val = self.head.value
self.head = self.head.next_node
self.size -= 1
return temp_val
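# Quick sketch (priorities closer to 0 pop first):
#   q = Pque()
#   q.insert('later')         # default priority -99
#   q.insert('urgent', 0)
#   q.pop()                   # -> 'urgent'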
| mit | Python |
|
06235d5913cd5eb54d3767f6a7cf60acb1966b39 | Create prettyrpc.py | yan123/BitBox,yan123/QABox,yan123/QABox | prettyrpc.py | prettyrpc.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from xmlrpclib import ServerProxy
class PrettyProxy(object):
def __init__(self, *args, **kwargs):
self._real_proxy = ServerProxy(*args, **kwargs)
def __getattr__(self, name):
return lambda *args, **kwargs: getattr(self._real_proxy, name)(args, kwargs)
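    # Note: arguments are forwarded as a single (args, kwargs) pair, so the
    # server side must expect that convention -- plain XML-RPC has no keyword
    # arguments.  Hypothetical usage (the URL is an assumption):
    #   proxy = PrettyProxy('http://localhost:8000/')
    #   proxy.echo(1, 2, key='value')  # sent as echo((1, 2), {'key': 'value'})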
| bsd-2-clause | Python |
|
38faa038cbc7b8cedbb2dc13c2760f2a270a5f1a | Create problem-5.py | vnbrs/project-euler | problem-5.py | problem-5.py | n = 0
while True:
n += 1
divisible_list = []
for i in range(1,21):
is_divisible = (n % i == 0)
if is_divisible:
divisible_list.append(is_divisible)
else:
break
if len(divisible_list) == 20:
break
print(n)
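# Equivalent closed form -- the answer is lcm(1, ..., 20); e.g. in Python 3:
#   from functools import reduce
#   from math import gcd
#   print(reduce(lambda a, b: a * b // gcd(a, b), range(1, 21)))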
| mit | Python |
|
d326391f6412afb54ee05a02b3b11e075f703765 | fix value < 0 or higher than max. closes #941 | jegger/kivy,yoelk/kivy,cbenhagen/kivy,cbenhagen/kivy,bliz937/kivy,rnixx/kivy,ehealthafrica-ci/kivy,youprofit/kivy,el-ethan/kivy,viralpandey/kivy,rafalo1333/kivy,tony/kivy,niavlys/kivy,adamkh/kivy,aron-bordin/kivy,thezawad/kivy,inclement/kivy,yoelk/kivy,wangjun/kivy,manthansharma/kivy,matham/kivy,inclement/kivy,manthansharma/kivy,vitorio/kivy,CuriousLearner/kivy,LogicalDash/kivy,matham/kivy,vipulroxx/kivy,ernstp/kivy,arcticshores/kivy,manashmndl/kivy,wangjun/kivy,el-ethan/kivy,ehealthafrica-ci/kivy,Farkal/kivy,iamutkarshtiwari/kivy,wangjun/kivy,ernstp/kivy,denys-duchier/kivy,gonzafirewall/kivy,rafalo1333/kivy,darkopevec/kivy,adamkh/kivy,manashmndl/kivy,Shyam10/kivy,xiaoyanit/kivy,JohnHowland/kivy,eHealthAfrica/kivy,bob-the-hamster/kivy,gonzafirewall/kivy,andnovar/kivy,arcticshores/kivy,MiyamotoAkira/kivy,janssen/kivy,kivatu/kivy-bak,Davideddu/kivy-forkedtouch,vipulroxx/kivy,manthansharma/kivy,hansent/kivy,eHealthAfrica/kivy,dirkjot/kivy,arlowhite/kivy,MiyamotoAkira/kivy,VinGarcia/kivy,thezawad/kivy,dirkjot/kivy,kived/kivy,jffernandez/kivy,xiaoyanit/kivy,jkankiewicz/kivy,matham/kivy,aron-bordin/kivy,kivy/kivy,el-ethan/kivy,aron-bordin/kivy,janssen/kivy,xpndlabs/kivy,mSenyor/kivy,tony/kivy,vitorio/kivy,edubrunaldi/kivy,jffernandez/kivy,kived/kivy,akshayaurora/kivy,xpndlabs/kivy,manthansharma/kivy,gonzafirewall/kivy,mSenyor/kivy,jehutting/kivy,jegger/kivy,autosportlabs/kivy,habibmasuro/kivy,LogicalDash/kivy,arcticshores/kivy,KeyWeeUsr/kivy,adamkh/kivy,edubrunaldi/kivy,jkankiewicz/kivy,Cheaterman/kivy,bionoid/kivy,arlowhite/kivy,jffernandez/kivy,Davideddu/kivy-forkedtouch,xpndlabs/kivy,rnixx/kivy,angryrancor/kivy,akshayaurora/kivy,akshayaurora/kivy,Ramalus/kivy,rafalo1333/kivy,bionoid/kivy,JohnHowland/kivy,vitorio/kivy,dirkjot/kivy,Shyam10/kivy,Cheaterman/kivy,jkankiewicz/kivy,angryrancor/kivy,Cheaterman/kivy,Farkal/kivy,KeyWeeUsr/kivy,jehutting/kivy,zennobjects/kivy,zennobjects/kivy,KeyWeeUsr/kivy,habibmasuro/kivy,aron-bordin/kivy,darkopevec/kivy,mSenyor/kivy,viralpandey/kivy,yoelk/kivy,youprofit/kivy,tony/kivy,hansent/kivy,kivatu/kivy-bak,xiaoyanit/kivy,hansent/kivy,gonzafirewall/kivy,CuriousLearner/kivy,janssen/kivy,MiyamotoAkira/kivy,ehealthafrica-ci/kivy,niavlys/kivy,Ramalus/kivy,hansent/kivy,Ramalus/kivy,inclement/kivy,rnixx/kivy,arcticshores/kivy,eHealthAfrica/kivy,CuriousLearner/kivy,bionoid/kivy,janssen/kivy,bhargav2408/kivy,Farkal/kivy,kived/kivy,LogicalDash/kivy,edubrunaldi/kivy,denys-duchier/kivy,niavlys/kivy,bionoid/kivy,angryrancor/kivy,habibmasuro/kivy,youprofit/kivy,autosportlabs/kivy,jegger/kivy,kivatu/kivy-bak,darkopevec/kivy,VinGarcia/kivy,MiyamotoAkira/kivy,ernstp/kivy,Shyam10/kivy,Davideddu/kivy-forkedtouch,KeyWeeUsr/kivy,eHealthAfrica/kivy,vipulroxx/kivy,cbenhagen/kivy,wangjun/kivy,VinGarcia/kivy,angryrancor/kivy,arlowhite/kivy,iamutkarshtiwari/kivy,bhargav2408/kivy,denys-duchier/kivy,adamkh/kivy,ernstp/kivy,bob-the-hamster/kivy,bob-the-hamster/kivy,matham/kivy,bob-the-hamster/kivy,jegger/kivy,bhargav2408/kivy,viralpandey/kivy,dirkjot/kivy,LogicalDash/kivy,andnovar/kivy,thezawad/kivy,zennobjects/kivy,jehutting/kivy,Farkal/kivy,autosportlabs/kivy,kivy/kivy,bliz937/kivy,Cheaterman/kivy,niavlys/kivy,bliz937/kivy,jkankiewicz/kivy,iamutkarshtiwari/kivy,zennobjects/kivy,Shyam10/kivy,kivy/kivy,jffernandez/kivy,JohnHowland/kivy,andnovar/kivy,denys-duchier/kivy,ehealthafrica-ci/kivy,kivatu/kivy-bak,JohnHowland/kivy,darkopevec/kivy,manashmndl/kivy,yoelk/kivy,vipulroxx/kivy,Davideddu/kivy-forked
touch | kivy/uix/progressbar.py | kivy/uix/progressbar.py | '''
Progress Bar
============
.. versionadded:: 1.0.8
.. image:: images/progressbar.jpg
:align: right
The :class:`ProgressBar` widget is used to visualize progress of some task.
Only horizontal mode is supported, vertical mode is not available yet.
The progress bar has no interactive elements, It is a display-only widget.
To use it, simply assign a value to indicate the current progress::
from kivy.uix.progressbar import ProgressBar
pb = ProgressBar(max=1000)
# this will update the graphics automatically (75% done):
pb.value = 750
'''
__all__ = ('ProgressBar', )
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, AliasProperty
class ProgressBar(Widget):
'''Class for creating a Progress bar widget.
See module documentation for more details.
'''
def __init__(self, **kwargs):
self._value = 0.
super(ProgressBar, self).__init__(**kwargs)
def _get_value(self):
return self._value
def _set_value(self, value):
value = max(0, min(self.max, value))
if value != self._value:
self._value = value
return True
value = AliasProperty(_get_value, _set_value)
    '''Current value of the progressbar.
    :data:`value` is a :class:`~kivy.properties.AliasProperty` that returns the
    value of the progressbar. If the value is < 0 or > :data:`max`, it will be
    normalized to those boundaries.
.. versionchanged:: 1.5.2
        The value is now limited to the range 0 to :data:`max`
'''
def get_norm_value(self):
d = self.max
if d == 0:
return 0
return self.value / float(d)
def set_norm_value(self, value):
self.value = value * self.max
value_normalized = AliasProperty(get_norm_value, set_norm_value,
bind=('value', 'max'))
'''Normalized value inside the 0-max to 0-1 range::
>>> pb = ProgressBar(value=50, max=100)
>>> pb.value
50
>>> slider.value_normalized
0.5
:data:`value_normalized` is an :class:`~kivy.properties.AliasProperty`.
'''
max = NumericProperty(100.)
'''Maximum value allowed for :data:`value`.
:data:`max` is a :class:`~kivy.properties.NumericProperty`, default to 100.
'''
if __name__ == '__main__':
from kivy.base import runTouchApp
runTouchApp(ProgressBar(value=50))
| '''
Progress Bar
============
.. versionadded:: 1.0.8
.. image:: images/progressbar.jpg
:align: right
The :class:`ProgressBar` widget is used to visualize progress of some task.
Only horizontal mode is supported, vertical mode is not available yet.
The progress bar has no interactive elements, It is a display-only widget.
To use it, simply assign a value to indicate the current progress::
from kivy.uix.progressbar import ProgressBar
pb = ProgressBar(max=1000)
# this will update the graphics automatically (75% done):
pb.value = 750
'''
__all__ = ('ProgressBar', )
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, AliasProperty
class ProgressBar(Widget):
'''Class for creating a Progress bar widget.
See module documentation for more details.
'''
value = NumericProperty(0.)
'''Current value used for the slider.
:data:`value` is a :class:`~kivy.properties.NumericProperty`, default to 0.
'''
def get_norm_value(self):
d = self.max
if d == 0:
return 0
return self.value / float(d)
def set_norm_value(self, value):
self.value = value * self.max
value_normalized = AliasProperty(get_norm_value, set_norm_value,
bind=('value', 'max'))
'''Normalized value inside the 0-max to 0-1 range::
>>> pb = ProgressBar(value=50, max=100)
>>> pb.value
50
>>> slider.value_normalized
0.5
:data:`value_normalized` is an :class:`~kivy.properties.AliasProperty`.
'''
max = NumericProperty(100.)
'''Maximum value allowed for :data:`value`.
:data:`max` is a :class:`~kivy.properties.NumericProperty`, default to 100.
'''
if __name__ == '__main__':
from kivy.base import runTouchApp
runTouchApp(ProgressBar(value=50))
| mit | Python |
5e2e5eed760fdc40d474e511662cf7c22b1ea29b | add usbwatch.py | esjeon/graveyard,esjeon/graveyard,esjeon/graveyard,esjeon/graveyard,esjeon/graveyard,esjeon/graveyard,esjeon/graveyard,esjeon/graveyard,esjeon/graveyard,esjeon/graveyard | usbwatch.py | usbwatch.py | #!/usr/bin/env python3
# usbwatch.py - monitor addition/removal of USB devices
#
#
import pyudev
class UsbDevice:
@staticmethod
def fromUdevDevice(udev):
attr = lambda name: udev.attributes.asstring(name)
try:
try:
manufacturer = attr('manufacturer')
except KeyError:
manufacturer = None
return UsbDevice( udev.device_path,
( attr('busnum') + '-' + attr('devpath'),
attr('idVendor') + ":" + attr('idProduct'),
manufacturer,
attr('product')
)
)
except KeyError:
return None
def __init__(self, devicePath, info):
self.path = devicePath
self.bus, self.id, self.manufacturer, self.product = info
def __repr__(self):
return "UsbDevice(%s, %s)" % (self.path, (self.bus, self.id, self.manufacturer, self.product))
def __str__(self):
return "%s (%s): %s, %s" % (self.id, self.bus, self.manufacturer, self.product)
# UsbWatcher monitors the connection status of USB devices.
# It remembers the devices which are "known" to be connected to the system.
class UsbWatcher:
def __init__(self):
self.ctx = pyudev.Context()
self.mon = pyudev.Monitor.from_netlink(self.ctx)
self.mon.filter_by('usb')
self.knowns = dict()
# Query the currently connected USB devices
# Forcefully updates the list of "known" devices
def poll(self):
old_knowns = self.knowns
self.knowns = dict()
for udev in self.ctx.list_devices(subsystem="usb"):
dev = UsbDevice.fromUdevDevice(udev)
if dev is not None:
self.knowns[udev.device_path] = dev
if udev.device_path in old_knowns:
old_knowns.pop(udev.device_path)
else:
self.onAdd(dev)
for path, dev in old_knowns.items():
self.onRemove(dev)
# Monitor newly added devices. Any devices connected beforehand are ignored
def watch(self):
for action, udev in iter(self.mon):
if action == 'add':
dev = UsbDevice.fromUdevDevice(udev)
if dev is not None:
self.knowns[udev.device_path] = dev
self.onAdd(dev)
elif action == 'remove':
if udev.device_path in self.knowns:
dev = self.knowns.pop(udev.device_path)
self.onRemove(dev)
    # Called when a device is added to the system
    # Override this
    def onAdd(self, dev):
print("add %s " % str(dev))
    # Called when a device is removed from the system
    # Override this
    def onRemove(self, dev):
print("remove %s " % str(dev))
if __name__ == "__main__":
try:
wat = UsbWatcher()
wat.poll()
wat.watch()
except KeyboardInterrupt:
pass
| mit | Python |
|
ac7c3ccfdbd02eed6b2b7070160ef08f725c3578 | test migration | ACLeiChen/personalBlog,ACLeiChen/personalBlog,ACLeiChen/personalBlog,ACLeiChen/personalBlog | migrations/versions/5dc51870eece_initial_migration.py | migrations/versions/5dc51870eece_initial_migration.py | """initial migration
Revision ID: 5dc51870eece
Revises: None
Create Date: 2016-08-19 03:16:41.577553
"""
# revision identifiers, used by Alembic.
revision = '5dc51870eece'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('categorys',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('tag', sa.String(length=64), nullable=True),
sa.Column('count', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('default', sa.Boolean(), nullable=True),
sa.Column('permissions', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_index(op.f('ix_roles_default'), 'roles', ['default'], unique=False)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=64), nullable=True),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.Column('confirmed', sa.Boolean(), nullable=True),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('location', sa.String(length=64), nullable=True),
sa.Column('about_me', sa.Text(), nullable=True),
sa.Column('member_since', sa.DateTime(), nullable=True),
sa.Column('last_seen', sa.DateTime(), nullable=True),
sa.Column('avatar_hash', sa.String(length=32), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
op.create_table('follows',
sa.Column('follower_id', sa.Integer(), nullable=False),
sa.Column('followed_id', sa.Integer(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['followed_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['follower_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('follower_id', 'followed_id')
)
op.create_table('posts',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.Text(), nullable=True),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('body_html', sa.Text(), nullable=True),
sa.Column('summary', sa.Text(), nullable=True),
sa.Column('summary_html', sa.Text(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.Column('category_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['category_id'], ['categorys.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_posts_timestamp'), 'posts', ['timestamp'], unique=False)
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('body_html', sa.Text(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('disabled', sa.Boolean(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.Column('post_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['post_id'], ['posts.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_comments_timestamp'), 'comments', ['timestamp'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_comments_timestamp'), table_name='comments')
op.drop_table('comments')
op.drop_index(op.f('ix_posts_timestamp'), table_name='posts')
op.drop_table('posts')
op.drop_table('follows')
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
op.drop_index(op.f('ix_roles_default'), table_name='roles')
op.drop_table('roles')
op.drop_table('categorys')
### end Alembic commands ###
| mit | Python |
|
61a4d783f2c16a7b8f4fbf5a79f9588c58c8f618 | Add a serial port emulation module for debug/development use | Robotma-com/candy-iot-service,Robotma-com/candy-iot-service | systemd/emulator_serialport.py | systemd/emulator_serialport.py |
class SerialPortEmurator:
def __init__(self):
        self.cmd = None
        self.line = -1  # no pending response until write() is called
        self.res = {
'AT+CGDCONT?': [
"(ECHO_BACK)",
"",
"",
"+CGDCONT: 1,\"IPV4V6\",\"access_point_name\",\"0.0.0.0\",0,0",
"",
"OK",
""
],
'AT$QCPDPP?': [
"(ECHO_BACK)",
"",
"",
"$QCPDPP: 1,3,\"user_id\"",
"$QCPDPP: 2,0",
"$QCPDPP: 3,0",
"$QCPDPP: 4,0",
"$QCPDPP: 5,0",
"$QCPDPP: 6,0",
"$QCPDPP: 7,0",
"$QCPDPP: 8,0",
"$QCPDPP: 9,0",
"$QCPDPP: 10,0",
"$QCPDPP: 11,0",
"$QCPDPP: 12,0",
"$QCPDPP: 13,0",
"$QCPDPP: 14,0",
"$QCPDPP: 15,0",
"$QCPDPP: 16,0",
"",
"OK",
""
],
'AT+CGDCONT=': [
"(ECHO_BACK)",
"",
"",
"OK",
""
],
'AT$QCPDPP=': [
"(ECHO_BACK)",
"",
"",
"OK",
""
],
'AT+CSQ': [
"(ECHO_BACK)",
"",
"",
"+CSQ: 4,99", # "+CSQ: 99,99"
"",
"OK",
""
],
'AT+CNUM': [
"(ECHO_BACK)",
"",
"",
"+CNUM: ,\"09099999999\",129", # "+CNUM: ,\"\",129"
"",
"OK",
""
],
'AT+CIMI': [
"(ECHO_BACK)",
"",
"",
"440111111111111", # "+CME ERROR: operation not allowed"
"",
"OK",
""
],
'AT+CPAS': [
"(ECHO_BACK)",
"",
"",
"+CPAS: 4", # "+CPAS: 0"
"",
"OK",
""
],
'ATI': [
"(ECHO_BACK)",
"",
"",
"Manufacturer: MAN",
"Model: MOD",
"Revision: REV",
"IMEI: 999999999999999",
"+GCAP: +CGSM",
"",
"OK",
""
]
}
def read_line(self):
if self.line < 0:
return None
try:
text = self.res[self.cmd][self.line]
self.line += 1
return text
except:
self.line = -1
return None
def write(self, str):
print("W:[%s]" % str)
self.cmd = str.strip()
if self.cmd.find('=') >= 0:
self.cmd = self.cmd[:self.cmd.find('=') + 1]
self.line = 0
self.res[self.cmd][0] = str.strip()
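    # Rough interactive sketch (responses are canned; '(ECHO_BACK)' is
    # replaced by the command that was written):
    #   port = SerialPortEmurator()
    #   port.write('AT+CSQ\r')
    #   line = port.read_line()
    #   while line is not None:
    #       print(line)
    #       line = port.read_line()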
| bsd-3-clause | Python |
|
144541a563a4f05a762aea82f39bdd63f33c19d5 | Add tests for new membrane | waltermoreira/tartpy | tartpy/tests/test_membrane2.py | tartpy/tests/test_membrane2.py | import pytest
from tartpy.runtime import SimpleRuntime, behavior
from tartpy.eventloop import EventLoop
from tartpy.membrane2 import Membrane
def test_membrane_protocol():
runtime = SimpleRuntime()
evloop = EventLoop()
m1 = Membrane({'protocol': 'membrane'}, runtime)
m2 = Membrane({'protocol': 'membrane'}, runtime)
result1 = None
@behavior
def actor1_beh(self, msg):
nonlocal result1
result1 = msg
result2 = None
@behavior
def actor2_beh(self, msg):
nonlocal result2
result2 = msg
actor1 = runtime.create(actor1_beh)
actor2 = runtime.create(actor2_beh)
uid_for_2_at_mb2 = m2.get_uid(actor2)
proxy_for_2_at_mb1 = m1.create_proxy(uid_for_2_at_mb2,
m2.config)
proxy_for_2_at_mb1 << {'foo': 5,
'reply_to': actor1}
evloop.run()
# test message from m1 to m2
assert result2['foo'] == 5
# test that 'reply_to' is a proxy at m2
proxy_for_1_at_mb2 = result2['reply_to']
assert proxy_for_1_at_mb2 is not actor1
proxy_for_1_at_mb2 << {'bar': 3,
'reply_to': actor2}
evloop.run()
# test message back from m2 to m1
assert result1['bar'] == 3
# test that proxy at m1 is reused
assert result1['reply_to'] is proxy_for_2_at_mb1
# test a string message across Membranes
proxy_for_2_at_mb1 << 'a string message'
evloop.run()
assert result2 == 'a string message'
def test_dos():
runtime = SimpleRuntime()
m = Membrane({'protocol': 'membrane'}, runtime)
with pytest.raises(KeyError):
m.local_delivery(0, {})
def test_marshall_unmarshall():
runtime = SimpleRuntime()
m = Membrane({'protocol': 'membrane'}, runtime)
assert m.marshall_message(5) == 5
assert m.marshall_message('foo') == 'foo'
assert m.marshall_message([1, 2, 'bar']) == [1, 2, 'bar']
assert m.marshall_message({'foo': 5, 'bar': 'baz'}) == {'foo': 5, 'bar': 'baz'}
assert m.unmarshall_message(5) == 5
assert m.unmarshall_message('foo') == 'foo'
assert m.unmarshall_message([1, 2, 'bar']) == [1, 2, 'bar']
assert m.unmarshall_message({'foo': 5, 'bar': 'baz'}) == {'foo': 5, 'bar': 'baz'}
@behavior
def sink_beh(self, msg):
pass
sink = runtime.create(sink_beh)
s = m.marshall_message(sink)
assert m.is_marshalled_actor(s)
assert m.unmarshall_message(s) is sink
s = m.marshall_message({'foo': sink})
assert m.is_marshalled_actor(s['foo'])
assert m.unmarshall_message(s)['foo'] is sink
s = m.marshall_message([sink])
assert m.is_marshalled_actor(s[0])
assert m.unmarshall_message(s)[0] is sink
| mit | Python |
|
9c2be5533dc14443a67ed22c34e2f059992e43cb | Create camera.py | RoboticaBrasil/-ComputerVision | Camera/camera.py | Camera/camera.py | from SimpleCV import Camera
# Initialize the camera
cam = Camera()
# Loop to continuously get images
while True:
# Get Image from camera
img = cam.getImage()
# Make image black and white
img = img.binarize()
# Draw the text "Hello World" on image
img.drawText("Hello World!")
# Show the image
img.show()
| apache-2.0 | Python |
|
6014dab06ed2275c5703ab9f9e63272656733c69 | Add retrieve_all_pages util method from mtp-cashbook | ministryofjustice/django-utils,ministryofjustice/django-utils | moj_utils/rest.py | moj_utils/rest.py | from django.conf import settings
def retrieve_all_pages(api_endpoint, **kwargs):
"""
Some MTP apis are paginated, this method loads all pages into a single results list
:param api_endpoint: slumber callable, e.g. `[api_client].cashbook.transactions.locked.get`
:param kwargs: additional arguments to pass into api callable
"""
loaded_results = []
offset = 0
while True:
response = api_endpoint(limit=settings.REQUEST_PAGE_SIZE, offset=offset,
**kwargs)
count = response.get('count', 0)
loaded_results += response.get('results', [])
if len(loaded_results) >= count:
break
offset += settings.REQUEST_PAGE_SIZE
return loaded_results
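# Hypothetical usage with a slumber-style API client (the endpoint mirrors the
# docstring example; the filter kwarg is an assumption):
#   transactions = retrieve_all_pages(
#       api_client.cashbook.transactions.locked.get, user=1)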
| mit | Python |
|
8d10e0e2db81023cb435b047f5c1da793e4b992e | Add python/matplotlib_.py | rstebbing/common,rstebbing/common | python/matplotlib_.py | python/matplotlib_.py | # matplotlib_.py
# Imports
from matplotlib import ticker
# label_axis
def label_axis(ax, x_or_y, axis_labels, flip, **props):
axis_ticks = range(0, len(axis_labels))
axis = getattr(ax, '%saxis' % x_or_y)
axis.set_major_locator(ticker.FixedLocator(axis_ticks))
axis.set_minor_locator(ticker.NullLocator())
axis.set_major_formatter(ticker.FixedFormatter(axis_labels))
axis.set_minor_formatter(ticker.NullFormatter())
lim = (-0.5, len(axis_labels) - 0.5)
if flip:
lim = lim[::-1]
set_lim = getattr(ax, 'set_%slim' % x_or_y)
set_lim(*lim)
if props:
plt.setp(axis.get_majorticklabels(), **props)
# label_xaxis
def label_xaxis(ax, xaxis_labels, flip=False, **props):
label_axis(ax, 'x', xaxis_labels, flip, **props)
# label_yaxis
def label_yaxis(ax, yaxis_labels, flip=False, **props):
label_axis(ax, 'y', yaxis_labels, flip, **props)
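# Sketch of intended usage (labels are illustrative):
#   fig, ax = plt.subplots()
#   label_xaxis(ax, ['a', 'b', 'c'])
#   label_yaxis(ax, ['x', 'y'], flip=True, rotation=45)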
| mit | Python |
|
de6d7c2531f59d407864c737468ae50de38ba9ac | Add some spanish bad words | wiki-ai/revscoring,he7d3r/revscoring,aetilley/revscoring,ToAruShiroiNeko/revscoring,eranroz/revscoring | revscoring/languages/spanish.regex.py | revscoring/languages/spanish.regex.py | # import re
# import warnings
# import enchant
# from nltk.corpus import stopwords
# from nltk.stem.snowball import SnowballStemmer
# from .language import Language, LanguageUtility
# STEMMER = SnowballStemmer("english")
# STOPWORDS = set(stopwords.words('english'))
BAD_REGEXES = set([
'ano',
'bastardo', 'bollo', 'boludo', 'bugarr[óo]n',
'ca(gar(ro)?|ca)', 'cabr[óo]n', 'cacas', 'capullo', 'carajo',
'chingar', 'chino', 'choch[oa]', 'cholo', 'chucha', 'chupar',
'chupapollas', 'chupamedias', 'cipote', 'clamidia', 'coger',
'cojones', 'concha', 'conejo', 'consolador', 'coño', 'cuca',
'culear', 'culo', 'cundango',
'drogata',
'facha', 'follar', 'fornicar', 'fulana', 'furcia',
'gabacho', 'gay', 'gilipollas', 'gitano', 'gonorrea', 'gordo',
'gringo', 'guiri',
'herpes', 'homosexual', 'huevos', '(huev|we)[óo]n',
'imb[ée]cil',
'japo', 'joder', 'joto', 'jud[íi]o',
'lesbiana',
'mach(orra|etorra)', 'maldito', 'mamada', 'manola',
'maric(a|[óo]n)', 'marimach[ao]', 'maripos[óo]n',
'mea(r|da)', 'mam[óo]n', 'mierda', 'minga', 'moro',
'nazi', 'negrata',
'ojete',
'paja', 'paki', 'pedo', 'pelao', 'pelotas', 'pendejo', 'pene', 'picha',
'pinche', 'pito', 'polla', 'polvo', 'poto', 'prostituta', 'put[ao]',
'puñal',
'rabo', 'ramera',
'sida', 'skin(head)?', 'subnormal', 'sudaca', 's[íi]filis',
'tonto', 'torta', 'tortillera', 'tranca', 'tranny',
'travesti', 'travolo', 'trolo',
'verga', 'vibrador', 'vulva',
'zapatona', 'zorra'
])
# BAD_REGEX = re.compile("|".join(BAD_REGEXES))
# DICTIONARY = enchant.Dict("en")
# def stem_word_process():
# def stem_word(word):
# return STEMMER.stem(word).lower()
# return stem_word
# stem_word = LanguageUtility("stem_word", stem_word_process)
# def is_badword_process():
# def is_badword(word):
# return bool(BAD_REGEX.match(word.lower()))
# return is_badword
# is_badword = LanguageUtility("is_badword", is_badword_process)
# def is_misspelled_process():
# def is_misspelled(word):
# return not DICTIONARY.check(word)
# return is_misspelled
# is_misspelled = LanguageUtility("is_misspelled", is_misspelled_process)
# def is_stopword_process():
# def is_stopword(word):
# return word.lower() in STOPWORDS
# return is_stopword
# is_stopword = LanguageUtility("is_stopword", is_stopword_process)
# english = Language("revscoring.languages.english",
# [stem_word, is_badword, is_misspelled, is_stopword])
| mit | Python |
|
326ef75042fc1d3eeeb6834fd5ff80a2bd1a2be1 | Add incoreect_regex.py solution | byung-u/ProjectEuler | HackerRank/PYTHON/Errors_and_Exceptions/incoreect_regex.py | HackerRank/PYTHON/Errors_and_Exceptions/incoreect_regex.py | #!/usr/bin/env python3
import re
if __name__ == '__main__':
for _ in range(int(input())):
try:
re.compile(input())
print('True')
        except re.error:
print('False')
| mit | Python |
|
c675fe2a82733ef210bf287df277f8ae956a4295 | Add beginning of main script | jadams/rarbg-get | rarbg-get.py | rarbg-get.py | #!env /usr/bin/python3
import sys
import urllib.parse
import urllib.request
def main():
search = sys.argv[1]
url = 'http://rarbg.to/torrents.php?order=seeders&by=DESC&search='
url = url + search
print(url)
req = urllib.request.Request(url, headers={'User-Agent' : "Magic Browser"})
resp = urllib.request.urlopen(req)
respData = resp.read()
if __name__ == '__main__':
main()
| mit | Python |
|
480ae590ea1116fdbb5c6601d7466408f274c433 | Implement for GNOME activateAutoLoginCommand | srguiwiz/nrvr-commander | src/nrvr/el/gnome.py | src/nrvr/el/gnome.py | #!/usr/bin/python
"""nrvr.el.gnome - Manipulate Enterprise Linux GNOME
Classes provided by this module include
* Gnome
To be improved as needed.
Idea and first implementation - Leo Baschy <srguiwiz12 AT nrvr DOT com>
Public repository - https://github.com/srguiwiz/nrvr-commander
Copyright (c) Nirvana Research 2006-2013.
Modified BSD License"""
import re
class Gnome():
"""Utilities for manipulating a Gnome installation."""
@classmethod
def activateAutoLoginCommand(cls, username=None):
"""Build command to activate auto-login into GNOME.
username
defaults to None, which effects deactivateAutoLoginCommand.
Return command to activate auto-login into GNOME."""
command = cls.deactivateAutoLoginCommand()
if username:
username = re.escape(username) # precaution
command += r" ; sed -i -e '/^\[daemon\]/ a \AutomaticLoginEnable=true\nAutomaticLogin=" + username + r"' /etc/gdm/custom.conf"
return command
@classmethod
def deactivateAutoLoginCommand(cls):
"""Build command to deactivate auto-login into GNOME.
Return command to deactivate auto-login into GNOME."""
return r"sed -i -e '/^\s*AutomaticLoginEnable\s*=/ d' -e '/^\s*AutomaticLogin\s*=/ d' /etc/gdm/custom.conf"
if __name__ == "__main__":
print Gnome.activateAutoLoginCommand("joe")
print Gnome.deactivateAutoLoginCommand()
print Gnome.activateAutoLoginCommand()
| bsd-2-clause | Python |
|
6d12624e094ec58118d39c4340438c4a814d404f | add wildcard, this is just a string contain problem | luozhaoyu/leetcode,luozhaoyu/leetcode | wildcard.py | wildcard.py | class Solution:
# @param s, an input string
# @param p, a pattern string
# @return a boolean
def shrink(self, pattern):
shrinked = []
i = 0
while i < len(pattern):
stars = 0
questions = 0
while i < len(pattern) and pattern[i] in ['*', '?']:
if pattern[i] == '*':
stars += 1
else:
questions += 1
i += 1
if stars == 0:
if questions > 0:
shrinked.extend(['?'] * questions)
else:
shrinked.append(('*', questions))
if i < len(pattern):
shrinked.append(pattern[i])
i += 1
return shrinked
def compress_string_score(self, string_score, pattern_list):
compressed = []
i = 0
while i < len(string_score):
p = pattern_list[string_score[i]]
compressed.append(p)
repeat = 0
while p != '?' and i < len(string_score)-1 and\
string_score[i + 1] == string_score[i]:
repeat += 1
i += 1
if repeat:
compressed.append(('*', repeat))
i += 1
return compressed
def isMatch(self, s, p):
pl = self.shrink(p)
string_score = []
cursor = 0
for c in s:
try:
while cursor < len(pl) and isinstance(pl[cursor], tuple):
cursor += 1
if cursor >= len(pl):
# pattern exhausted, while string exists
break
# cursor is not a star
if c == pl[cursor] or pl[cursor] == '?':
string_score.append(cursor)
# move on until meets with an alphabetic
cursor += 1
else:
if string_score:
string_score.append(string_score[-1])
else:
return False
except:
print "%s: %s vs %s: %s" % (s, c, pl, cursor)
print string_score
raise
compressed = self.compress_string_score(string_score, pl)
print "%s %s vs %s" % (string_score, compressed, pl)
for c_single, p_single in zip(compressed, pl):
if c_single != p_single:
if isinstance(c_single, tuple) and isinstance(p_single, tuple)\
and c_single[1] > p_single[1]:
continue
else:
return False
return True
so = Solution()
ls = ["aa", "aa", "aaa", "aa", "aa", "ab", "aab", "axxbxxycxxde"]
lp = ["a", "aa", "aa", "*", "a*", "?*", "c*a*b", "a**?*??b???c?d*?*e"]
for s,p in zip(ls, lp):
line = "%s, %s -> %s" % (s, p, so.isMatch(s, p))
print line
| mit | Python |
|
00c86aff808ecc5b6f015da5977265cfa76826bb | add fixtures that start related worker for tests | moccu/django-livewatch | livewatch/tests/conftest.py | livewatch/tests/conftest.py | import pytest
import time
import django_rq
from celery.signals import worker_ready
from .celery import celery
WORKER_READY = list()
@worker_ready.connect
def on_worker_ready(**kwargs):
"""Called when the Celery worker thread is ready to do work.
This is to avoid race conditions since everything is in one python process.
"""
WORKER_READY.append(True)
@pytest.yield_fixture
def celery_worker(request):
"""Fixture starting a celery worker in background"""
from multiprocessing import Process
celery_args = ['-C', '-q', '-c', '1', '-P', 'solo', '--without-gossip']
proc = Process(target=lambda: celery.worker_main(celery_args))
def cleanup():
proc.terminate()
request.addfinalizer(cleanup)
proc.start()
# Wait for worker to finish initializing to avoid a race condition I've been experiencing.
for i in range(5):
if WORKER_READY:
break
time.sleep(1)
yield proc
proc.terminate()
time.sleep(1)
@pytest.yield_fixture
def rq_worker(request):
"""Fixture starting a rq worker in background"""
from multiprocessing import Process
def _proc_target(env):
import os
os.environ.update(env)
worker = django_rq.get_worker()
worker.work()
proc = Process(target=_proc_target, kwargs={
'env': {'DJANGO_SETTINGS_MODULE': 'livewatch.tests.settings'}
})
def cleanup():
proc.terminate()
request.addfinalizer(cleanup)
proc.start()
time.sleep(1)
yield proc
proc.terminate()
time.sleep(1)
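# The fixtures are consumed by naming them as test arguments, e.g.:
#   def test_task_runs(celery_worker):
#       ...  # enqueue work and assert on its side effects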
| bsd-3-clause | Python |
|
275cddfa56501868787abeef10fc515102ffd11d | make setup.py find all packages, now in src | Bengt/AL-FanControl,Bengt/AL-FanControl,Bengt/AL-FanControl | python/setup.py | python/setup.py | from setuptools import setup, find_packages
setup(name='fancontrol',
      version='0.1.0',
packages=find_packages(where="src"),
package_dir={"": "src"},
)
| mit | Python |
|
a14ac8fb2f10124a4978db19049bdf932e91c49d | Add avahi based beacon for zeroconf announcement | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/beacons/avahi_announce.py | salt/beacons/avahi_announce.py | # -*- coding: utf-8 -*-
'''
Beacon to announce via avahi (zeroconf)
'''
# Import Python libs
from __future__ import absolute_import
import logging
# Import 3rd Party libs
try:
import avahi
HAS_PYAVAHI = True
except ImportError:
HAS_PYAVAHI = False
import dbus
log = logging.getLogger(__name__)
__virtualname__ = 'avahi_announce'
LAST_GRAINS = {}
BUS = dbus.SystemBus()
SERVER = dbus.Interface(BUS.get_object(avahi.DBUS_NAME, avahi.DBUS_PATH_SERVER),
avahi.DBUS_INTERFACE_SERVER)
GROUP = dbus.Interface(BUS.get_object(avahi.DBUS_NAME, SERVER.EntryGroupNew()),
avahi.DBUS_INTERFACE_ENTRY_GROUP)
def __virtual__():
if HAS_PYAVAHI:
return __virtualname__
return False
def validate(config):
'''
Validate the beacon configuration
'''
if not isinstance(config, dict):
        return False, ('Configuration for avahi_announce '
'beacon must be a dictionary')
elif not all(x in list(config.keys()) for x in ('servicetype', 'port', 'txt')):
return False, ('Configuration for avahi_announce beacon '
'must contain servicetype, port and txt items')
return True, 'Valid beacon configuration'
def beacon(config):
'''
Broadcast values via zeroconf
    If the announced values are static, it is advised to set run_once: True
(do not poll) on the beacon configuration. Grains can be used to define
txt values using the syntax: grains.<grain_name>
    The default servicename is the hostname grain value.
Example Config
.. code-block:: yaml
beacons:
avahi_announce:
run_once: True
servicetype: _demo._tcp
txt:
ProdName: grains.productname
SerialNo: grains.serialnumber
Comments: 'this is a test'
'''
ret = []
changes = {}
txt = {}
global LAST_GRAINS
_validate = validate(config)
if not _validate[0]:
log.warning('Beacon {0} configuration invalid, '
'not adding. {1}'.format(__virtualname__, _validate[1]))
return ret
if 'servicename' in config:
servicename = config['servicename']
else:
servicename = __grains__['host']
for item in config['txt']:
if config['txt'][item].startswith('grains.'):
grain = config['txt'][item][7:]
txt[item] = __grains__[grain]
if LAST_GRAINS and (LAST_GRAINS[grain] != __grains__[grain]):
changes[str('txt.' + item)] = txt[item]
else:
txt[item] = config['txt'][item]
if not LAST_GRAINS:
changes[str('txt.' + item)] = txt[item]
if changes:
if not LAST_GRAINS:
changes['servicename'] = servicename
changes['servicetype'] = config['servicetype']
changes['port'] = config['port']
else:
GROUP.Reset()
GROUP.AddService(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC, dbus.UInt32(0),
servicename, config['servicetype'], '', '',
dbus.UInt16(config['port']), avahi.dict_to_txt_array(txt))
GROUP.Commit()
ret.append({'tag': 'result', 'changes': changes})
LAST_GRAINS = __grains__
return ret
| apache-2.0 | Python |
|
9c5de3b667a8e98b0304fb64e30113f551b33404 | Create getTwitterData.py | Sapphirine/StockMarketAssistantApp | getTwitterData.py | getTwitterData.py | from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import time
ckey = 'dNATh8K9vGwlOSR2phVzaB9fh'
csecret = 'LmBKfyfoZmK1uIu577yFR9jYkVDRC95CXcKZQBZ8jWx9qdS4Vt'
atoken = '2165798475-nuQBGrTDeCgXTOneasqSFZLd3SppqAJDmXNq09V'
asecret = 'FOVzgXM0NJO2lHFydFCiOXCZdkhHlYBkmPNsWbRhLk8xd'
class Listener(StreamListener):
def on_data(self, data):
try:
#print data
tweet = data.split(',"text":"')[1].split('","source')[0]
#print tweet
#saveThis = str(time.time())+'::'+tweet
saveFile = open('twitterData.txt','a')
saveFile.write(tweet)
saveFile.write('\n')
saveFile.close()
except BaseException as e:
            print 'failed on_data:', str(e)
time.sleep(5)
def on_error(self, status):
print (status)
#to authorize
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
twitterStream= Stream(auth, Listener())
twitterStream.filter(track=['Apple'])
start_time = time.clock()
while True:
if time.clock() - start_time > 5:
break
twitterStream.disconnect()
| mit | Python |
|
e84640c5c67759be3de1a934d974c250d7b73a0c | Split kernels into their own name space | matthew-brett/draft-statsmodels,matthew-brett/draft-statsmodels | scikits/statsmodels/sandbox/kernel.py | scikits/statsmodels/sandbox/kernel.py | # -*- coding: utf-8 -*-
"""
This module contains the kernels for kernel smoothing.
Hopefully in the future they may be reused/extended for other kernel-based methods.
"""
class Kernel(object):
"""
Generic 1D Kernel object.
Can be constructed by selecting a standard named Kernel,
or providing a lambda expression and domain.
The domain allows some algorithms to run faster for finite domain kernels.
"""
# MC: Not sure how this will look in the end - or even still exist.
# Main purpose of this is to allow custom kernels and to allow speed up
# from finite support.
def __init__(self, shape, h = 1.0, domain = None):
"""
shape should be a lambda taking and returning numeric type.
For sanity it should always return positive or zero.
"""
self.domain = domain
# TODO: Add checking code that shape is valid
self._shape = shape
self.h = h
def evaluate(self, xs, ys, x):
        # TODO: make filtering more efficient
        if self.domain is None:
            filtered = list(zip(xs, ys))
        else:
            filtered = [(xx, yy) for xx, yy in zip(xs, ys)
                        if self.domain[0] <= (xx - x) / self.h <= self.domain[1]]
if len(filtered) > 0:
xs,ys = zip(*filtered)
w = np.sum([self((xx-x)/self.h) for xx in xs])
v = np.sum([yy*self((xx-x)/self.h) for xx, yy in zip(xs,ys)])
return v/w
else:
return 0
def __call__(self, x):
return self._shape(x)
class Gaussian(Kernel):
    def __init__(self, h=1.0):
        self.h = h
        self.domain = None  # infinite support
        self._shape = lambda x: np.exp(-x**2/2.0)
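# Minimal smoothing sketch (toy data; Kernel.evaluate computes a
# Nadaraya-Watson-style weighted average):
#   k = Gaussian(h=0.5)
#   xs = np.linspace(0, 1, 20)
#   ys = np.sin(xs)
#   k.evaluate(xs, ys, 0.5)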
| bsd-3-clause | Python |
|
632056eef0666808d16740f434a305d0c8995132 | Create magooshScraper.py | preeteshjain/vidpy | magooshScraper.py | magooshScraper.py | import scrapy
from bs4 import BeautifulSoup
class magooshSpider(scrapy.Spider):
name = 'magoosh'
start_urls = ['http://gre.magoosh.com/login']
def parse(self, response):
        # Replace the fake credentials below with your own registered
        # email and password on http://gre.magoosh.com:
        return scrapy.FormRequest.from_response(
            response,
            formdata={'session[login]': '[email protected]', 'session[password]': 'somepassword'},
            callback=self.after_login
        )
def after_login(self, response):
if 'Dashboard' in response.body:
self.logger.info('Logged in successfully!')
return scrapy.Request('http://gre.magoosh.com/lessons',
callback=self.lessonsPage_loaded)
def lessonsPage_loaded(self, response):
self.logger.info('Lessons page opened.')
soup = BeautifulSoup(response.body)
for categ in soup.find_all('h2'):
# Set the Subject name to crawl
# In this example, Maths section is scraped.
if 'Math' in categ:
self.logger.info('Math section found.')
cgparent = categ.parent.parent
for vu in cgparent.find_all('a'):
link = str(vu.get('href'))
if '/lessons/' in link:
s = 'http://gre.magoosh.com' + str(link) + "\n"
req = scrapy.Request(s, callback=self.videoPage_loaded)
yield req
return
def videoPage_loaded(self, response):
self.logger.info('Fetching video...')
soup = BeautifulSoup(response.body)
for div in soup.find_all('div'):
if div.get('data-file'):
vl = div.get('data-file')
f = open('scrapedVideoLinks.txt', 'a')
f.write(str(vl) + '\n')
f.close()
| mit | Python |
|
dbfc033fdfaad5820765a41766a5342831f3c4f9 | add util script to dump twitter oauth tokens | akrherz/iembot,akrherz/iembot | scripts/remove_twuser_oauth.py | scripts/remove_twuser_oauth.py | """Remove a twitter user's oauth tokens and reload iembot"""
from __future__ import print_function
import json
import sys
import psycopg2
import requests
def main(argv):
"""Run for a given username"""
screen_name = argv[1]
settings = json.load(open("../settings.json"))
pgconn = psycopg2.connect(database=settings['databaserw']['openfire'],
user=settings['databaserw']['user'],
host=settings['databaserw']['host'])
cursor = pgconn.cursor()
cursor.execute("""
DELETE from iembot_twitter_oauth where screen_name = %s
""", (screen_name, ))
print(("Removed %s entries from the database for screen name '%s'"
) % (cursor.rowcount, screen_name))
cursor.close()
pgconn.commit()
uri = "http://iembot:9003/reload"
req = requests.get(uri, timeout=30)
print("reloading iembot %s" % (repr(req.content), ))
if __name__ == '__main__':
main(sys.argv)
| mit | Python |
|
77aa24bbea447d8684614f0d089320d134412710 | Test ini-configured app. | DasAllFolks/CrowdCure | test_app.py | test_app.py | from flask import Flask
from flask.ext.iniconfig import INIConfig
app = Flask(__name__)
INIConfig(app)
with app.app_context():
app.config.from_inifile('settings.ini')
| mit | Python |
|
d1df2b573c515d3ea18ce46ccc58c8bc9e788915 | Clean commit | 3juholee/materialproject_ml,bismayan/MaterialsMachineLearning | src/pymatgen_pars.py | src/pymatgen_pars.py | import pymatgen as mg
from pymatgen.matproj.rest import MPRester
import pandas as pd
from pymatgen import Element,Composition
import multiprocessing as mp
import pickle
import json
from monty.json import MontyEncoder,MontyDecoder
import numpy as np
def ret_struct_obj(i):
return mg.Structure.from_str(i,fmt="cif")
def return_struct_list():
with open("ternaries_from_mg.pickle",'r') as f:
temp_list=pickle.load(f)
p=mp.Pool(4)
struct_lis=p.map(ret_struct_obj,temp_list)
return struct_lis
def read_ternaries():
with MPRester() as m:
ternaries1 = m.query(criteria={"nelements": 3}, properties=['icsd_ids', 'pretty_formula', 'cif'])
list_cif = [i['cif'] for i in ternaries1]
outfile=open("ternaries_from_mg.pickle",'w')
pickle.dump(list_cif,outfile)
del(list_cif)
outfile.close()
def read_unique_data(filename):
with open(filename,'r') as f:
structs=json.load(f,cls=MontyDecoder)
return structs
def get_space_groups(strts):
sgroups=np.array([a.get_spacegroup_info()[0] for a in strts])
return sgroups
def read_data(filename):
"""
:argument
    filename - The filename of the JSON file to read from
:returns
DataFrame - Pandas Dataframe containing the formatted parsed data
"""
uniq_data=read_unique_data(filename)
space_groups=get_space_groups(uniq_data)
(comps,stoich_coeffs,at_nos,eneg)=get_comp_data(uniq_data)
DataFrame = pd.DataFrame({"Z1": at_nos[:, 0]})
DataFrame["Z2"] = at_nos[:, 1]
DataFrame["Z3"] = at_nos[:, 2]
DataFrame["St_coeff1"] = stoich_coeffs[:, 0]
DataFrame["St_coeff2"] = stoich_coeffs[:, 1]
DataFrame["St_coeff3"] = stoich_coeffs[:, 2]
DataFrame["Eneg1"] = eneg[:, 0]
DataFrame["Eneg2"] = eneg[:, 1]
DataFrame["Eneg3"] = eneg[:, 2]
DataFrame["Space Group"] = space_groups
DataFrame["Composition"] = comps
return DataFrame
def get_comp_data(un_data):
element_universe = [str(e) for e in Element]
dict_element = {}
for i, j in enumerate(element_universe):
dict_element[str(j)] = i
stoich_array = np.zeros((len(un_data), 3), dtype=float)
at_num_array = np.zeros((len(un_data), 3), dtype=int)
electroneg_array = np.zeros((len(un_data), 3), dtype=float)
comp_array=[a.composition for a in un_data]
temp_dict_list = [dict(comp.get_el_amt_dict()) for comp in comp_array]
for index,temp_dict in enumerate(temp_dict_list):
for count, key in enumerate(temp_dict.keys()):
stoich_array[index][count] = temp_dict[key]
if key not in ['D', 'T']:
at_num_array[index][count] = Element(key).Z
electroneg_array[index][count] = Element(key).X
else:
at_num_array[index][count] = Element('H').Z
electroneg_array[index][count] = Element('H').X
del(dict_element)
del(temp_dict_list)
return (comp_array,stoich_array,at_num_array,electroneg_array)
#if __name__=="__main__":
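# Typical flow (the filename is an assumption):
#   df = read_data('unique_ternaries.json')
#   print(df.head())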
| mit | Python |
|
74bde8878aa9b336046374ce75fc4c7bc63eaba7 | add test for VampSimpleHost | Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide | tests/test_vamp_simple_host.py | tests/test_vamp_simple_host.py | #! /usr/bin/env python
from unit_timeside import unittest, TestRunner
from timeside.decoder.file import FileDecoder
from timeside.core import get_processor
from timeside import _WITH_VAMP
from timeside.tools.test_samples import samples
@unittest.skipIf(not _WITH_VAMP, 'vamp-simple-host library is not available')
class TestVampsimpleHost(unittest.TestCase):
def setUp(self):
self.analyzer = get_processor('vamp_simple_host')()
def testOnC4_scale(self):
"runs on C4_scale"
self.source = samples["C4_scale.wav"]
def tearDown(self):
decoder = FileDecoder(self.source)
(decoder | self.analyzer).run()
results = self.analyzer.results
print results.keys()
#print results
#print results.to_yaml()
#print results.to_json()
#print results.to_xml()
if __name__ == '__main__':
unittest.main(testRunner=TestRunner())
| agpl-3.0 | Python |
|
0d596f8c7148c2ac13c2b64be09ca1e20719cdb9 | add dumper of flowpaths to shapefile | akrherz/dep,akrherz/idep,akrherz/dep,akrherz/dep,akrherz/idep,akrherz/dep,akrherz/idep,akrherz/idep,akrherz/idep,akrherz/dep,akrherz/idep | scripts/util/dump_flowpaths.py | scripts/util/dump_flowpaths.py | """Dump flowpaths to a shapefile."""
from geopandas import read_postgis
from pyiem.util import get_dbconn
def main():
"""Go Main Go."""
pgconn = get_dbconn('idep')
df = read_postgis("""
SELECT f.fpath, f.huc_12, ST_Transform(f.geom, 4326) as geo from
flowpaths f, huc12 h WHERE h.scenario = 0 and f.scenario = 0
and h.huc_12 = f.huc_12 and h.states ~* 'IA'
""", pgconn, index_col=None, geom_col='geo')
df.to_file("ia_flowpaths.shp")
if __name__ == '__main__':
main()
| mit | Python |
|
345af55938baef0da1f0793d8a109fcee63692dd | Add files via upload | miradel51/preprocess | tokenize.py | tokenize.py | #!/usr/bin/python
#-*-coding:utf-8 -*-
# author: mld
# email: [email protected]
# date : 2017/9/28
import sys
import string
import re
def tokenizestr(original_str):
after_tok = ""
    # because of the encoding type, I only replace some special tokens here instead of using re.sub
#sym = "[$%#@~&*;].,!^(){><\?}-:=-+"
#original_str = re.sub(sym," "+sym,original_str)
original_str = original_str.replace("[", " [")
original_str = original_str.replace('!', " !")
original_str = original_str.replace("%", " %")
original_str = original_str.replace("#", " #")
original_str = original_str.replace("@", " @")
original_str = original_str.replace("~", "~ ")
original_str = original_str.replace("&", " &")
original_str = original_str.replace("*", " *")
original_str = original_str.replace(".", " .")
original_str = original_str.replace(";", " ;")
original_str = original_str.replace(",", " ,")
original_str = original_str.replace("^", " ^")
original_str = original_str.replace("(", " (")
original_str = original_str.replace(")", " )")
original_str = original_str.replace("{", " {")
original_str = original_str.replace(">", " >")
original_str = original_str.replace("?", " ?")
original_str = original_str.replace("}", " }")
original_str = original_str.replace("-", " -")
original_str = original_str.replace(":", " :")
original_str = original_str.replace("=", " =")
original_str = original_str.replace("+", " +")
after_tok = original_str
return after_tok
if __name__ == '__main__':
ori_ = sys.argv[1]
tok_ = sys.argv[2]
ori_file = open(ori_,"r")
tok_file = open(tok_,"w")
context = ""
for eachline in ori_file:
context = eachline.strip()
        # tokenize (just separate symbols from words in the current line)
context = tokenizestr(context)
tok_file.write(context)
tok_file.write("\n")
ori_file.close()
tok_file.close() | mit | Python |
|
f7768b10df84a4b3bb784ee1d449e380b93d88bb | add a simple scan example | BladeSun/NliWithKnowledge,yang1fan2/nematus,nyu-dl/dl4mt-tutorial,yang1fan2/nematus,EdinburghNLP/nematus,BladeSun/NliWithKnowledge,cshanbo/nematus,yang1fan2/nematus,Proyag/nematus,rsennrich/nematus,vineetm/dl4mt-material,Proyag/nematus,BladeSun/NliWithKnowledge,cshanbo/nematus,cshanbo/nematus,BladeSun/NliWithKnowledge,cshanbo/nematus,jtoyama4/dl4mt,yang1fan2/nematus,shuoyangd/nematus,yang1fan2/nematus,Proyag/nematus,nyu-dl/dl4mt-tutorial,nyu-dl/dl4mt-tutorial,rsennrich/nematus,yang1fan2/nematus,vineetm/dl4mt-material,EdinburghNLP/nematus,nyu-dl/dl4mt-tutorial,rsennrich/nematus,EdinburghNLP/nematus,jtoyama4/dl4mt,shuoyangd/nematus,shuoyangd/nematus,EdinburghNLP/nematus,jtoyama4/dl4mt,shuoyangd/nematus,vineetm/dl4mt-material,rsennrich/nematus,shuoyangd/nematus,jtoyama4/dl4mt,kyunghyuncho/dl4mt-material,kyunghyuncho/dl4mt-material,vineetm/dl4mt-material,jtoyama4/dl4mt,EdinburghNLP/nematus,rsennrich/nematus,kyunghyuncho/dl4mt-material,rsennrich/nematus,yang1fan2/nematus,kyunghyuncho/dl4mt-material,Proyag/nematus,yang1fan2/nematus,shuoyangd/nematus,yang1fan2/nematus,EdinburghNLP/nematus,cshanbo/nematus,BladeSun/NliWithKnowledge,vineetm/dl4mt-material,yang1fan2/nematus,nyu-dl/dl4mt-tutorial,Proyag/nematus,cshanbo/nematus,Proyag/nematus | data/scan_example.py | data/scan_example.py | import numpy
import theano
from theano import tensor
# some numbers
n_steps = 10
n_samples = 5
dim = 10
input_dim = 20
output_dim = 2
# one step function that will be used by scan
def oneStep(x_t, h_tm1, W_x, W_h, W_o):
h_t = tensor.tanh(tensor.dot(x_t, W_x) +
tensor.dot(h_tm1, W_h))
o_t = tensor.dot(h_t, W_o)
return h_t, o_t
# spawn theano tensor variable, our symbolic input
# a 3D tensor (n_steps, n_samples, dim)
x = tensor.tensor3(dtype='float32')
# initial state of our rnn
init_state = tensor.alloc(0., n_samples, dim)
# create parameters that we will use,
# note that, parameters are theano shared variables
# parameters for input to hidden states
W_x_ = numpy.random.randn(input_dim, dim).astype('float32')
W_x = theano.shared(W_x_)
# parameters for hidden state transition
W_h_ = numpy.random.randn(dim, dim).astype('float32')
W_h = theano.shared(W_h_)
# parameters from hidden state to output
W_o_ = numpy.random.randn(dim, output_dim).astype('float32')
W_o = theano.shared(W_o_)
# scan function
([h_vals, o_vals], updates) = theano.scan(
fn=oneStep,
sequences=[x],
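    # init_state seeds the recurrent hidden state; None means o_t is
    # collected as an output but never fed back into the next step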
outputs_info=[init_state, None],
non_sequences=[W_x, W_h, W_o],
n_steps=n_steps,
strict=True)
# let us now compile a function to get the output
f = theano.function([x], [h_vals, o_vals])
# now we will call the compiled function with actual input
actual_input = numpy.random.randn(
n_steps, n_samples, input_dim).astype('float32')
h_vals_, o_vals_ = f(actual_input)
# print the shapes
print 'shape of input :', actual_input.shape
print 'shape of h_vals:', h_vals_.shape
print 'shape of o_vals:', o_vals_.shape
| bsd-3-clause | Python |
|
9737f8b1551adb5d3be62b1922de27d867ac2b24 | Add forwarding script for build-bisect.py. | timopulkkinen/BubbleFish,timopulkkinen/BubbleFish,markYoungH/chromium.src,dushu1203/chromium.src,pozdnyakov/chromium-crosswalk,hujiajie/pa-chromium,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,littlstar/chromium.src,patrickm/chromium.src,littlstar/chromium.src,keishi/chromium,PeterWangIntel/chromium-crosswalk,hujiajie/pa-chromium,zcbenz/cefode-chromium,pozdnyakov/chromium-crosswalk,junmin-zhu/chromium-rivertrail,nacl-webkit/chrome_deps,mogoweb/chromium-crosswalk,keishi/chromium,nacl-webkit/chrome_deps,timopulkkinen/BubbleFish,keishi/chromium,markYoungH/chromium.src,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,zcbenz/cefode-chromium,nacl-webkit/chrome_deps,crosswalk-project/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,ChromiumWebApps/chromium,rogerwang/chromium,keishi/chromium,hgl888/chromium-crosswalk-efl,dednal/chromium.src,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,bright-sparks/chromium-spacewalk,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,M4sse/chromium.src,Chilledheart/chromium,zcbenz/cefode-chromium,robclark/chromium,crosswalk-project/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,ondra-novak/chromium.src,dushu1203/chromium.src,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,fujunwei/chromium-crosswalk,timopulkkinen/BubbleFish,Chilledheart/chromium,pozdnyakov/chromium-crosswalk,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,anirudhSK/chromium,ChromiumWebApps/chromium,bright-sparks/chromium-spacewalk,Chilledheart/chromium,littlstar/chromium.src,axinging/chromium-crosswalk,zcbenz/cefode-chromium,zcbenz/cefode-chromium,mogoweb/chromium-crosswalk,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,hujiajie/pa-chromium,timopulkkinen/BubbleFish,anirudhSK/chromium,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,fujunwei/chromium-crosswalk,rogerwang/chromium,Just-D/chromium-1,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,hujiajie/pa-chromium,junmin-zhu/chromium-rivertrail,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,dednal/chromium.src,keishi/chromium,zcbenz/cefode-chromium,patrickm/chromium.src,mogoweb/chromium-crosswalk,axinging/chromium-crosswalk,timopulkkinen/BubbleFish,dednal/chromium.src,rogerwang/chromium,mogoweb/chromium-crosswalk,littlstar/chromium.src,rogerwang/chromium,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,ondra-novak/chromium.src,keishi/chromium,hujiajie/pa-chromium,fujunwei/chromium-crosswalk,Chilledheart/chromium,hujiajie/pa-chromium,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,Chilledheart/chromium,krieger-od/nwjs_chromium.src,M4sse/chromium.src,robclark/chromium,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,patrickm/chromium.src,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,anirudhSK/chromium,ltilve/chromium,Jonekee/chromium.src,ChromiumWebApps/chromium,axinging/chromium-crosswalk,dushu1203/chromium.src,jaruba/chromium.src,ltilve/chromium,Jonekee/chromium.src,ChromiumWebApps/chromium,TheTypoMaster/chromium-crosswalk,patrickm/chromium.src,jaruba/chromium.src,TheTypoMas
ter/chromium-crosswalk,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,Jonekee/chromium.src,anirudhSK/chromium,zcbenz/cefode-chromium,timopulkkinen/BubbleFish,axinging/chromium-crosswalk,hujiajie/pa-chromium,dushu1203/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk,ondra-novak/chromium.src,dushu1203/chromium.src,PeterWangIntel/chromium-crosswalk,hujiajie/pa-chromium,hgl888/chromium-crosswalk-efl,junmin-zhu/chromium-rivertrail,jaruba/chromium.src,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,robclark/chromium,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,ltilve/chromium,M4sse/chromium.src,Fireblend/chromium-crosswalk,M4sse/chromium.src,robclark/chromium,Chilledheart/chromium,fujunwei/chromium-crosswalk,anirudhSK/chromium,Just-D/chromium-1,ltilve/chromium,Fireblend/chromium-crosswalk,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,Jonekee/chromium.src,timopulkkinen/BubbleFish,ondra-novak/chromium.src,timopulkkinen/BubbleFish,zcbenz/cefode-chromium,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,jaruba/chromium.src,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,krieger-od/nwjs_chromium.src,pozdnyakov/chromium-crosswalk,zcbenz/cefode-chromium,ltilve/chromium,pozdnyakov/chromium-crosswalk,Chilledheart/chromium,hujiajie/pa-chromium,mogoweb/chromium-crosswalk,ChromiumWebApps/chromium,Jonekee/chromium.src,jaruba/chromium.src,chuan9/chromium-crosswalk,robclark/chromium,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,Just-D/chromium-1,chuan9/chromium-crosswalk,dushu1203/chromium.src,markYoungH/chromium.src,Just-D/chromium-1,pozdnyakov/chromium-crosswalk,dednal/chromium.src,mogoweb/chromium-crosswalk,dushu1203/chromium.src,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,krieger-od/nwjs_chromium.src,mogoweb/chromium-crosswalk,pozdnyakov/chromium-crosswalk,littlstar/chromium.src,bright-sparks/chromium-spacewalk,jaruba/chromium.src,bright-sparks/chromium-spacewalk,nacl-webkit/chrome_deps,anirudhSK/chromium,hujiajie/pa-chromium,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,hujiajie/pa-chromium,nacl-webkit/chrome_deps,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,dednal/chromium.src,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,nacl-webkit/chrome_deps,patrickm/chromium.src,junmin-zhu/chromium-rivertrail,M4sse/chromium.src,hgl888/chromium-crosswalk,robclark/chromium,pozdnyakov/chromium-crosswalk,dednal/chromium.src,rogerwang/chromium,Fireblend/chromium-crosswalk,Jonekee/chromium.src,rogerwang/chromium,markYoungH/chromium.src,robclark/chromium,pozdnyakov/chromium-crosswalk,ltilve/chromium,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,junmin-zhu/chromium-rivertrail,Fireblend/chromium-crosswalk,M4sse/chromium.src,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,markYoungH/chromium.src,rogerwang/chromium,Fireblend/chromium-crosswalk,dushu1203/chromium.src,jaruba/chromium.src,junmin-zhu/chromium-rivertrail,jaruba/chromium.src,markYoungH/chromium.src,patrickm/chromium.src,keishi/chromium,pozdnyakov/chromium-crosswalk,hgl888/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,rogerwang/chromium,junmin-zhu/chromium-rivertrail,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,on
dra-novak/chromium.src,nacl-webkit/chrome_deps,dednal/chromium.src,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,anirudhSK/chromium,chuan9/chromium-crosswalk,anirudhSK/chromium,dushu1203/chromium.src,nacl-webkit/chrome_deps,TheTypoMaster/chromium-crosswalk,junmin-zhu/chromium-rivertrail,keishi/chromium,M4sse/chromium.src,ondra-novak/chromium.src,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,robclark/chromium,hgl888/chromium-crosswalk,zcbenz/cefode-chromium,anirudhSK/chromium,chuan9/chromium-crosswalk,markYoungH/chromium.src,ChromiumWebApps/chromium,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,TheTypoMaster/chromium-crosswalk,M4sse/chromium.src,hgl888/chromium-crosswalk,dednal/chromium.src,crosswalk-project/chromium-crosswalk-efl,keishi/chromium,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ChromiumWebApps/chromium,junmin-zhu/chromium-rivertrail,keishi/chromium,Fireblend/chromium-crosswalk,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,ltilve/chromium,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,patrickm/chromium.src,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,ltilve/chromium,jaruba/chromium.src,mogoweb/chromium-crosswalk,timopulkkinen/BubbleFish,keishi/chromium,patrickm/chromium.src,zcbenz/cefode-chromium,dednal/chromium.src,rogerwang/chromium,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,markYoungH/chromium.src,pozdnyakov/chromium-crosswalk,ChromiumWebApps/chromium,anirudhSK/chromium,patrickm/chromium.src,bright-sparks/chromium-spacewalk,robclark/chromium,anirudhSK/chromium,timopulkkinen/BubbleFish,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,littlstar/chromium.src,nacl-webkit/chrome_deps,junmin-zhu/chromium-rivertrail,nacl-webkit/chrome_deps,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,rogerwang/chromium,hgl888/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,junmin-zhu/chromium-rivertrail,markYoungH/chromium.src,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,Jonekee/chromium.src,littlstar/chromium.src,robclark/chromium,nacl-webkit/chrome_deps,chuan9/chromium-crosswalk | build/build-bisect.py | build/build-bisect.py | #!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
print "This script has been moved to tools/bisect-builds.py."
print "Please update any docs you're working from!"
sys.exit(1)
| bsd-3-clause | Python |
|
4c2663939008285c395ee5959c38fab280f43e58 | Create 03.PracticeCharsAndStrings.py | stoyanov7/SoftwareUniversity,stoyanov7/SoftwareUniversity,stoyanov7/SoftwareUniversity,stoyanov7/SoftwareUniversity | TechnologiesFundamentals/ProgrammingFundamentals/DataTypesAndVariables-Exercises/03.PracticeCharsAndStrings.py | TechnologiesFundamentals/ProgrammingFundamentals/DataTypesAndVariables-Exercises/03.PracticeCharsAndStrings.py | print(input())
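# Read a line from the console and echo it straight back, five times in all.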
print(input())
print(input())
print(input())
print(input())
| mit | Python |
|
782e4da9d04c656b3e5290269a4f06328ee5d508 | add file | tkoyama010/Fibonacci | main.py | main.py | import numpy as np
@np.vectorize
def F(n):
return 1./np.sqrt(5.)*(((1.+np.sqrt(5))/2.)**n-((1.-np.sqrt(5))/2.)**n)
n = np.arange(10)
fib = F(n)
np.savetxt("F.txt", fib)
| mit | Python |
|
ea0f02d6be95d0d8ef3081d49743702605467b51 | Add toy box | ReginaExMachina/royaltea-word-app,ReginaExMachina/royaltea-word-app | royalword.py | royalword.py | # Public: Toybox version of W3 program.
#
# w3_dict - dictionary of interesting words.
# w3_defs - dictionary of definitions.
#
# Keys for both dicts are matching integers for word/definition.
#
# word_display() - Displays a randomly generated word and its definition.
# learning_listed() - Removes the displayed word from the main list.
# Returns random sets of a word from the list and its definition.
##########################################################################
# DICTIONARIES
w3_dict = {
1: 'amorous',
2: 'amorphous',
3: 'antithesis',
4: 'apostate',
5: 'apotheosis',
6: 'belligerent',
7: 'beneficent',
8: 'bromide',
9: 'callipygian',
10: 'censorious',
11: 'cistern',
12: 'codicil',
13: 'cognizant',
14: 'cognomen',
15: 'concise',
16: 'corollary',
17: 'debonair'
}
w3_defs = {
1: '1: inclined toward or displaying love 2: expressive of or exciting sexual love or romance',
2: 'formless, shapeless, having no definite form or distinct shape',
3: '1: exact opposite 2: the juxtaposition of contrasting words or ideas to give a feeling of balance',
4: '1: a disloyal person who betrays or deserts his cause or religion or political party or friend etc. 2: not faithful to religion or party or cause',
5: '1: model of excellence or perfection of a kind; one having no equal 2: the elevation of a person ie. as to the status of a god',
6: 'characteristic of an enemy or one eager to fight',
7: 'doing or producing good',
8: '1: any of the salts of hydrobromic acid; formerly used as a sedative but now generally replaced by safer drugs 2: a trite or obvious remark',
9: 'pertaining to or having finely developed buttocks',
10: 'harshly critical or expressing censure',
11: 'an artificial reservoir for storing liquids; especially an underground tank for storing rainwater',
12: 'a supplement to a will; a testamentary instrument intended to alter an already executed will',
13: 'having or showing knowledge or understanding or realization or perception',
14: 'a familiar name for a person',
15: 'expressing much in few words',
16: '1: a practical consequence that follows naturally 2: an inference that follows directly from the proof of another proposition',
17: '1: having a sophisticated charm 2: having a cheerful, lively, and self-confident air'
}
##########################################################################
from random import randint
##########################################################################
# INITIALIZING VARIABLES
n = len(w3_dict)
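# NOTE: n is fixed at start-up, so randint(1, n) in word_display() can hit a
# key that was already popped; the main loop catches that KeyError and retries.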
choice = ''
# Words to be re-displayed at algorithmic intervals until learnt
# * currently useless haha *
learning_list = {}
learning_list_defs = {}
# Words no longer needing to be displayed
# * currently even more useless ;_; *
learnt_list = {}
learnt_list_defs = {}
##########################################################################
# MAKING METHODS
def word_display():
word = randint(1,n)
    print(w3_dict[word])
    print(w3_defs[word])
learning_listed(word)
def learning_listed(word):
new_word = w3_dict.pop(word)
new_def = w3_defs.pop(word)
learning_list.update({word: new_word})
learning_list_defs.update({word: new_def})
def learning_time():
"""Outputs words from learning_list or returns false"""
##########################################################################
# MAIN PROGRAM
print("W3")
print('Press E to Exit or any key to reload.\n')
while choice != 'E' and w3_dict:
    try:
        word_display()
        choice = raw_input('\n ')
        print(' ')
    except KeyError:
        # Random integer hit an already-removed key; fetch again
        print('Fetching...\n')
if not w3_dict:
    # Main dictionary is emptied!
    print('All done!')
| mit | Python |