Dataset schema (column : type : observed lengths/values)

commit         stringlengths   40 – 40
subject        stringlengths   4 – 1.73k
repos          stringlengths   5 – 127k
old_file       stringlengths   2 – 751
new_file       stringlengths   2 – 751
new_contents   stringlengths   1 – 8.98k
old_contents   stringlengths   0 – 6.59k
license        stringclasses   13 values
lang           stringclasses   23 values
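For readers who want to work with rows shaped like the ones below, here is a minimal sketch using the Hugging Face datasets library. The hub path is hypothetical, since this dump does not name the dataset; everything else follows directly from the schema above.

    from datasets import load_dataset

    # Hypothetical hub path -- this dump does not name the actual dataset.
    ds = load_dataset("user/commits-dataset", split="train")

    for row in ds.select(range(3)):
        # One row per commit-and-file: hash, subject, repo list, paths, contents.
        print(row["commit"][:8], row["lang"], row["license"], "-", row["subject"])
        print(row["new_file"])
        print(row["new_contents"][:200])  # file contents after the commit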
841fb156fff3d257d39afdc9d3d4e587427fe2cf
Add new file missed in earlier commit place holder for projects that do not load for some reason
barry-scott/git-workbench,barry-scott/scm-workbench,barry-scott/scm-workbench,barry-scott/git-workbench,barry-scott/scm-workbench
Source/Scm/wb_scm_project_place_holder.py
Source/Scm/wb_scm_project_place_holder.py
'''
 ====================================================================
 Copyright (c) 2016 Barry A Scott.  All rights reserved.

 This software is licensed as described in the file LICENSE.txt,
 which you should have received as part of this distribution.

 ====================================================================

    wb_scm_project_place_holder.py

'''
import pathlib

#
#   ScmProjectPlaceholder is used when the project cannot be loaded
#
class ScmProjectPlaceholder:
    def __init__( self, app, prefs_project ):
        self.app = app
        self.prefs_project = prefs_project

        self.tree = ScmProjectPlaceholderTreeNode( self, prefs_project.name, pathlib.Path( '.' ) )

    def scmType( self ):
        return self.prefs_project.scm_type

    def isNotEqual( self, other ):
        return self.projectName() != other.projectName()

    def getBranchName( self ):
        return ''

    def projectName( self ):
        return self.prefs_project.name

    def projectPath( self ):
        return pathlib.Path( self.prefs_project.path )

    def updateState( self ):
        pass

class ScmProjectPlaceholderTreeNode:
    def __init__( self, project, name, path ):
        self.project = project
        self.name = name
        self.__path = path

    def __repr__( self ):
        return '<ScmProjectPlaceholderTreeNode: project %r, path %s>' % (self.project, self.__path)

    def isNotEqual( self, other ):
        return (self.relativePath() != other.relativePath()
            or self.project.isNotEqual( other.project ))

    def __lt__( self, other ):
        return self.name < other.name

    def relativePath( self ):
        return self.__path

    def absolutePath( self ):
        return self.project.projectPath() / self.__path

    def getAllFolderNodes( self ):
        return []

    def getAllFolderNames( self ):
        return []

    def getAllFileNames( self ):
        return []

    def isByPath( self ):
        return False
apache-2.0
Python
f1ba45809e6682235c07ab89e4bc32e56b2fa84f
Create i_love_lance_janice.py
hirenvasani/foobar
i_love_lance_janice.py
i_love_lance_janice.py
""" I Love Lance & Janice ===================== You've caught two of your fellow minions passing coded notes back and forth - while they're on duty, no less! Worse, you're pretty sure it's not job-related - they're both huge fans of the space soap opera "Lance & Janice". You know how much Commander Lambda hates waste, so if you can prove that these minions are wasting her time passing non-job-related notes, it'll put you that much closer to a promotion. Fortunately for you, the minions aren't exactly advanced cryptographers. In their code, every lowercase letter [a..z] is replaced with the corresponding one in [z..a], while every other character (including uppercase letters and punctuation) is left untouched. That is, 'a' becomes 'z', 'b' becomes 'y', 'c' becomes 'x', etc. For instance, the word "vmxibkgrlm", when decoded, would become "encryption". Write a function called answer(s) which takes in a string and returns the deciphered string so you can show the commander proof that these minions are talking about "Lance & Janice" instead of doing their jobs. Languages ========= To provide a Python solution, edit solution.py To provide a Java solution, edit solution.java Test cases ========== Inputs: (string) s = "wrw blf hvv ozhg mrtsg'h vkrhlwv?" Output: (string) "did you see last night's episode?" Inputs: (string) s = "Yvzs! I xzm'g yvorvev Lzmxv olhg srh qly zg gsv xlolmb!!" Output: (string) "Yeah! I can't believe Lance lost his job at the colony!!" """ def strSlice(s): str_lst = [] for i in range(len(s)): sliced_str = s[0:i+1] str_lst.append(sliced_str) return str_lst def answer(s): str_lst = strSlice(s) str_len_lst = [] for elmt in str_lst: cnt_elmt = s.count(elmt) quotient = len(s)/len(elmt) if (elmt * quotient) == s: str_len_lst.append(cnt_elmt) return max(str_len_lst) # s = "abccbaabccba" # 2 s = "abcabcabcabc" # 4 print answer(s)
mit
Python
a08a7da41300721e07c1bff8e36e3c3d69af06fb
Add py-asdf package (#12817)
iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack
var/spack/repos/builtin/packages/py-asdf/package.py
var/spack/repos/builtin/packages/py-asdf/package.py
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class PyAsdf(PythonPackage):
    """The Advanced Scientific Data Format (ASDF) is a next-generation
    interchange format for scientific data. This package contains the Python
    implementation of the ASDF Standard."""

    homepage = "https://github.com/spacetelescope/asdf"
    url = "https://pypi.io/packages/source/a/asdf/asdf-2.4.2.tar.gz"

    version('2.4.2', sha256='6ff3557190c6a33781dae3fd635a8edf0fa0c24c6aca27d8679af36408ea8ff2')

    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-setuptools-scm', type='build')
    depends_on('[email protected]:2.6.0', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:3.999', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
lgpl-2.1
Python
7f4642fc2e0edba668482f2ebbb64ab8870e709a
Initialize P01_basics
JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials
books/AutomateTheBoringStuffWithPython/Chapter01/P01_basics.py
books/AutomateTheBoringStuffWithPython/Chapter01/P01_basics.py
# This program performs basic Python instructions

# Expressions
print(2 + 2)
print(2 + 3 * 6)
print((2 + 3) * 6)
print(48565878 * 578453)
print(2 ** 8)
print(23 / 7)
print(23 // 7)
print(23 % 7)
print(2 + 2)
print((5 - 1) * ((7 + 1) / (3 - 1)))

# Uncomment to see what happens
#print(5 + )
#print(42 + 5 + * 2)

# The Integer, Floating-Point, and String Data Types
#print("Hello world!)  # Uncomment to see what happens
print("Alice" + "Bob")
#print("Alice" + 42)  # Uncomment to see what happens
print("Alice" * 5)
# Uncomment to see what happens
#print("Alice" * "Bob")
#print("Alice" * 5.0)

# Storing Values in Variables
spam = 40
print(spam)
eggs = 2
print(spam + eggs)
print(spam + eggs + spam)
spam = spam + 2
print(spam)
spam = "Hello"
print(spam)
spam = "Goodbye"
print(spam)

# The len() Function
print(len("hello"))
print(len("My very energetic monster just scarfed nachos."))
print(len(''))
#print("I am" + 29 + " years old.")  # Uncomment to see what happens

# The str(), int(), and float() Functions
print(str(29))
print("I am " + str(29) + " years old.")
print(str(0))
print(str(-3.14))
print(int("42"))
print(int("-99"))
print(int(1.25))
print(int(1.99))
print(float("3.14"))
print(float(10))

spam = input("Type 101 here: ")  # Type 101 when prompted
print(spam)
spam = int(spam)
print(spam)
print(spam * 10 / 5)

# Uncomment to see what happens
#print(int("99.99"))
#print(int("twelve"))

print(int(7.7))
print(int(7.7) + 1)
mit
Python
ea6d73ac2b9274eae0a866acd1e729854c59fb17
Add update.py to drive the update loop.
kargakis/test-infra,rmmh/kubernetes-test-infra,ixdy/kubernetes-test-infra,cjwagner/test-infra,spxtr/test-infra,kargakis/test-infra,pwittrock/test-infra,pwittrock/test-infra,jessfraz/test-infra,krzyzacy/test-infra,krousey/test-infra,mindprince/test-infra,kewu1992/test-infra,jlowdermilk/test-infra,cblecker/test-infra,cblecker/test-infra,monopole/test-infra,lavalamp/test-infra,BenTheElder/test-infra,BenTheElder/test-infra,jessfraz/test-infra,dims/test-infra,dims/test-infra,krousey/test-infra,pwittrock/test-infra,krzyzacy/test-infra,dims/test-infra,ixdy/kubernetes-test-infra,monopole/test-infra,kargakis/test-infra,foxish/test-infra,grodrigues3/test-infra,mikedanese/test-infra,spxtr/test-infra,cjwagner/test-infra,shyamjvs/test-infra,cjwagner/test-infra,lavalamp/test-infra,mikedanese/test-infra,cblecker/test-infra,nlandolfi/test-infra-1,cjwagner/test-infra,kargakis/test-infra,michelle192837/test-infra,krzyzacy/test-infra,kewu1992/test-infra,kubernetes/test-infra,dims/test-infra,grodrigues3/test-infra,spxtr/test-infra,jlowdermilk/test-infra,jlowdermilk/test-infra,krousey/test-infra,abgworrall/test-infra,jlowdermilk/test-infra,kargakis/test-infra,mindprince/test-infra,kubernetes/test-infra,kargakis/test-infra,monopole/test-infra,cblecker/test-infra,fejta/test-infra,grodrigues3/test-infra,shyamjvs/test-infra,lavalamp/test-infra,brahmaroutu/test-infra,krousey/test-infra,abgworrall/test-infra,dims/test-infra,foxish/test-infra,shyamjvs/test-infra,kewu1992/test-infra,piosz/test-infra,michelle192837/test-infra,fejta/test-infra,foxish/test-infra,piosz/test-infra,shashidharatd/test-infra,foxish/test-infra,michelle192837/test-infra,nlandolfi/test-infra-1,rmmh/kubernetes-test-infra,shyamjvs/test-infra,BenTheElder/test-infra,cblecker/test-infra,mindprince/test-infra,pwittrock/test-infra,jessfraz/test-infra,abgworrall/test-infra,mindprince/test-infra,pwittrock/test-infra,BenTheElder/test-infra,monopole/test-infra,shashidharatd/test-infra,ixdy/kubernetes-test-infra,cjwagner/test-infra,nlandolfi/test-infra-1,shashidharatd/test-infra,jessfraz/test-infra,fejta/test-infra,kewu1992/test-infra,kubernetes/test-infra,brahmaroutu/test-infra,krousey/test-infra,kewu1992/test-infra,shyamjvs/test-infra,cjwagner/test-infra,abgworrall/test-infra,shashidharatd/test-infra,grodrigues3/test-infra,fejta/test-infra,krzyzacy/test-infra,foxish/test-infra,brahmaroutu/test-infra,rmmh/kubernetes-test-infra,BenTheElder/test-infra,lavalamp/test-infra,monopole/test-infra,piosz/test-infra,ixdy/kubernetes-test-infra,spxtr/test-infra,abgworrall/test-infra,jessfraz/test-infra,spxtr/test-infra,krzyzacy/test-infra,kubernetes/test-infra,monopole/test-infra,lavalamp/test-infra,lavalamp/test-infra,jlowdermilk/test-infra,krzyzacy/test-infra,mindprince/test-infra,rmmh/kubernetes-test-infra,nlandolfi/test-infra-1,michelle192837/test-infra,kubernetes/test-infra,dims/test-infra,brahmaroutu/test-infra,rmmh/kubernetes-test-infra,shyamjvs/test-infra,mikedanese/test-infra,mikedanese/test-infra,ixdy/kubernetes-test-infra,fejta/test-infra,piosz/test-infra,BenTheElder/test-infra,jlowdermilk/test-infra,mikedanese/test-infra,brahmaroutu/test-infra,michelle192837/test-infra,michelle192837/test-infra,nlandolfi/test-infra-1,grodrigues3/test-infra,brahmaroutu/test-infra,cblecker/test-infra,shashidharatd/test-infra,fejta/test-infra,kubernetes/test-infra,piosz/test-infra,jessfraz/test-infra
kettle/update.py
kettle/update.py
#!/usr/bin/env python

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time


def modified_today(fname):
    now = time.time()
    try:
        return os.stat(fname).st_mtime > (now - now % (24 * 60 * 60))
    except OSError:
        return False


def call(cmd):
    print '+', cmd
    status = os.system(cmd)
    if status:
        raise Exception('invocation failed')


def main():
    call('time python make_db.py --buckets ../buckets.yaml --junit --threads 128')

    bq_cmd = 'bq load --source_format=NEWLINE_DELIMITED_JSON --max_bad_records=1000'
    mj_cmd = 'pypy make_json.py'

    mj_ext = ''
    bq_ext = ''
    if not modified_today('build_day.json.gz'):
        # cycle daily/weekly tables
        bq_ext = ' --replace'
        mj_ext = ' --reset-emitted'

    call(mj_cmd + mj_ext + ' --days 1 | pv | gzip > build_day.json.gz')
    call(bq_cmd + bq_ext + ' k8s-gubernator:build.day build_day.json.gz schema.json')

    call(mj_cmd + mj_ext + ' --days 7 | pv | gzip > build_week.json.gz')
    call(bq_cmd + bq_ext + ' k8s-gubernator:build.week build_week.json.gz schema.json')

    call(mj_cmd + ' | pv | gzip > build_all.json.gz')
    call(bq_cmd + ' k8s-gubernator:build.all build_all.json.gz schema.json')

    call('python stream.py --poll kubernetes-jenkins/gcs-changes/kettle '
         ' --dataset k8s-gubernator:build --tables all:0 day:1 week:7 --stop_at=1')


if __name__ == '__main__':
    os.chdir(os.path.dirname(__file__))
    os.environ['TZ'] = 'America/Los_Angeles'
    main()
apache-2.0
Python
5114f177741b105f33819b98415702e53b52eb01
Add script to update site setup which is used at places like password reset email [skip ci]
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
corehq/apps/hqadmin/management/commands/update_site_setup.py
corehq/apps/hqadmin/management/commands/update_site_setup.py
from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
from django.conf import settings


class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument(
            'site_address',
            help="the new site address that should be used. This would get set in the site objects name "
                 "and domain."
        )
        parser.add_argument(
            '--skip-checks',
            action='store_true',
            default=False,
            help="If you are sure of what you are doing and want to skip checks to ensure safe update."
        )

    def handle(self, site_address, *args, **options):
        if not options['skip_checks']:
            if settings.SITE_ID != 1:
                raise CommandError("SITE ID under settings expected to have value 1 since only one object is expected")
            sites_count = Site.objects.count()
            if sites_count != 1:
                raise CommandError("Expected to have only one object added by Site during setup but currently its %s "
                                   % Site.objects.count())
            site_object = Site.objects.first()
            if site_object.name != "example.com" and site_object.domain != "example.com":
                raise CommandError(
                    """
                    Expected the present site object to have dummy example values.
                    They were probably modified and needs to be rechecked.
                    Current Values, name -> {name}, domain -> {domain}
                    """.format(name=site_object.name, domain=site_object.domain)
                )

        site_object = Site.objects.first()
        site_object.name = site_address
        site_object.domain = site_address
        site_object.save()

        Site.objects.clear_cache()

        site_object = Site.objects.first()
        print('Updated!')
        print('Site object now is name -> {name}, domain -> {domain}'.format(
            name=site_object.name,
            domain=site_object.domain
        ))
bsd-3-clause
Python
6fd0cee9bca0449aa6aab6a62e470ba8ff909cbb
print all caesar rotations for some string
edunham/toys,edunham/toys,edunham/toys,edunham/toys,edunham/toys
language/rotN.py
language/rotN.py
#! /usr/bin/env python

import string

ciphered = "LVFU XAN YIJ UVXRB RKOYOFB"

def make_rot_n(n):
    # http://stackoverflow.com/questions/3269686/short-rot13-function
    lc = string.ascii_lowercase
    uc = string.ascii_uppercase
    trans = string.maketrans(lc + uc,
                             lc[n:] + lc[:n] + uc[n:] + uc[:n])
    return lambda s: string.translate(s, trans)

for i in range(26):
    rotator = make_rot_n(i)
    deciphered = rotator(ciphered)
    print str(i) + ' ' + deciphered
mit
Python
d3dbb797575221d574fdda9c3d087d8696f6091a
Add netstring lib
bjencks/captiveportal,bjencks/captiveportal
lib/netstring.py
lib/netstring.py
def encode_netstring(s):
    return str(len(s)).encode('ascii') + b':' + s + b','

def consume_netstring(s):
    """If s is a bytestring beginning with a netstring, returns (value, rest)
    where value is the contents of the netstring, and rest is the part of s
    after the netstring.

    Raises ValueError if s does not begin with a netstring.
    """
    (length, sep, rest) = s.partition(b':')
    if sep != b':':
        raise ValueError("No colon found in s")
    if not length.isdigit():
        raise ValueError("Length is not numeric")
    length = int(length)
    if len(rest) <= length:
        raise ValueError("String not long enough")
    if rest[length] != 0x2c:
        raise ValueError("String not terminated with comma")
    return (rest[:length], rest[length+1:])

def is_netstring(s):
    try:
        (val, rest) = consume_netstring(s)
        return len(rest) == 0
    except ValueError:
        return False
apache-2.0
Python
9f5c3715f4b3cd5bf451bdc504cded6459e8ee79
add one test file and add content to it
WheatonCS/Lexos,WheatonCS/Lexos,WheatonCS/Lexos
test/unit_test/test_similarity2.py
test/unit_test/test_similarity2.py
from lexos.helpers.error_messages import MATRIX_DIMENSION_UNEQUAL_MESSAGE

count_matrix = [['', 'The', 'all', 'bobcat', 'cat', 'caterpillar', 'day.', 'slept'],
                ['catBobcat', 9.0, 9.0, 5.0, 4.0, 0.0, 9.0, 9.0],
                ['catCaterpillar', 9.0, 9.0, 0.0, 4.0, 5.0, 9.0, 9.0],
                ['test', 9.0, 9.0, 5.0, 4.0, 0.0, 9.0, 9.0]]

assert all(len(line) == len(count_matrix[1])
           for line in count_matrix[1:]), MATRIX_DIMENSION_UNEQUAL_MESSAGE

print("pass")
mit
Python
44863ff1f7064f1d9a9bb897822834eb6755ed59
Add SMTP auth server
Debetux/sr.ht,Ninja3047/sr.ht,Ninja3047/sr.ht,Debetux/sr.ht,Debetux/sr.ht,Ninja3047/sr.ht,Ninja3047/sr.ht,Debetux/sr.ht
authserver.py
authserver.py
import bcrypt
import asyncore
from secure_smtpd import SMTPServer, FakeCredentialValidator

from srht.objects import User

class UserValidator(object):
    def validate(self, username, password):
        user = User.query.filter(User.username == username).first()
        if not user:
            return False
        return bcrypt.checkpw(password, user.password)

SMTPServer(
    ('0.0.0.0', 4650),
    None,
    require_authentication=True,
    ssl=False,
    credential_validator=FakeCredentialValidator(),
)

asyncore.loop()
mit
Python
f56e390be0e2cea8e08080029aad756a6ab3c91f
Add lc0253_meeting_rooms_ii.py from Copenhagen :)
bowen0701/algorithms_data_structures
lc0253_meeting_rooms_ii.py
lc0253_meeting_rooms_ii.py
"""Leetcode 253. Meeting Rooms II (Premium) Medium URL: https://leetcode.com/problems/meeting-rooms-ii Given an array of meeting time intervals consisting of start and end times [[s1,e1],[s2,e2],...] (si < ei), find the minimum number of conference rooms required. """ class Solution2(object): # @param {Interval[]} intervals # @return {integer} def minMeetingRooms(self, intervals): pass def main(): pass if __name__ == '__main__': main()
bsd-2-clause
Python
8b4c34e84d306b5f9021de47bc3ae9050e2fc2b3
Fix loading of ply files exported by meshlab
drewm1980/multi_view_stereo_benchmark,drewm1980/multi_view_stereo_benchmark,drewm1980/multi_view_stereo_benchmark
compare_clouds.py
compare_clouds.py
#!/usr/bin/env python3

from pathlib import Path

"""Code for comparing point clouds"""

cloud1Path = Path("./data/reconstructions/2016_10_24__17_43_17/reference.ply")
cloud2Path = Path("./data/reconstructions/2016_10_24__17_43_17/high_quality.ply")

from load_ply import load_ply

cloud1PointData = load_ply(cloud1Path)[0][:,:3].copy()
cloud2PointData = load_ply(cloud2Path)[0][:,:3].copy()

#if __name__=='__main__':
    #pass
mit
Python
4f70773bb9041c44b0f83ef61a46d5fa974b366e
Create conwaytesting.py
peterbynum/Conway-Life,peterbynum/Conway-Life
conwaytesting.py
conwaytesting.py
mit
Python
73d753e315c7feb18af39360faf4d6fc6d10cedf
test to demonstrate bug 538
cledio66/pyglet,arifgursel/pyglet,gdkar/pyglet,Austin503/pyglet,arifgursel/pyglet,mpasternak/pyglet-fix-issue-552,Austin503/pyglet,shaileshgoogler/pyglet,mpasternak/pyglet-fix-issue-518-522,kmonsoor/pyglet,mpasternak/michaldtz-fix-552,odyaka341/pyglet,shaileshgoogler/pyglet,mpasternak/michaldtz-fixes-518-522,google-code-export/pyglet,odyaka341/pyglet,mpasternak/pyglet-fix-issue-552,qbektrix/pyglet,xshotD/pyglet,mpasternak/pyglet-fix-issue-552,google-code-export/pyglet,cledio66/pyglet,google-code-export/pyglet,mpasternak/michaldtz-fix-552,mpasternak/pyglet-fix-issue-552,odyaka341/pyglet,mpasternak/michaldtz-fix-552,Alwnikrotikz/pyglet,kmonsoor/pyglet,cledio66/pyglet,qbektrix/pyglet,Alwnikrotikz/pyglet,gdkar/pyglet,mpasternak/pyglet-fix-issue-518-522,xshotD/pyglet,xshotD/pyglet,gdkar/pyglet,mpasternak/michaldtz-fixes-518-522,odyaka341/pyglet,Alwnikrotikz/pyglet,qbektrix/pyglet,google-code-export/pyglet,shaileshgoogler/pyglet,kmonsoor/pyglet,qbektrix/pyglet,Austin503/pyglet,mpasternak/michaldtz-fix-552,mpasternak/michaldtz-fixes-518-522,Alwnikrotikz/pyglet,kmonsoor/pyglet,gdkar/pyglet,kmonsoor/pyglet,gdkar/pyglet,odyaka341/pyglet,xshotD/pyglet,arifgursel/pyglet,mpasternak/pyglet-fix-issue-518-522,arifgursel/pyglet,shaileshgoogler/pyglet,Alwnikrotikz/pyglet,mpasternak/michaldtz-fixes-518-522,Austin503/pyglet,google-code-export/pyglet,cledio66/pyglet,mpasternak/pyglet-fix-issue-518-522,Austin503/pyglet,qbektrix/pyglet,xshotD/pyglet,shaileshgoogler/pyglet,arifgursel/pyglet,cledio66/pyglet
tests/text/ELEMENT_CHANGE_STYLE.py
tests/text/ELEMENT_CHANGE_STYLE.py
#!/usr/bin/env python

'''Test that inline elements can have their style changed, even after
text has been deleted before them. [This triggers bug 538 if it has not
yet been fixed.]

To run the test, delete the first line, one character at a time, verifying
that the element remains visible and no tracebacks are printed to the
console.

Press ESC to end the test.
'''

__docformat__ = 'restructuredtext'
__version__ = '$Id$'

import unittest

import pyglet
from pyglet.text import caret, document, layout

doctext = '''ELEMENT.py test document.

PLACE CURSOR AT THE END OF THE ABOVE LINE, AND DELETE ALL ITS TEXT, BY
PRESSING THE DELETE KEY REPEATEDLY. IF THIS WORKS OK, AND THE ELEMENT (GRAY
RECTANGLE) WITHIN THIS LINE [element here] REMAINS VISIBLE BETWEEN THE SAME
CHARACTERS, WITH NO ASSERTIONS PRINTED TO THE CONSOLE, THE TEST PASSES.

(In code with bug 538, the element sometimes moves within the text, and
eventually there is an assertion failure. Note that there is another bug,
unrelated to this one, which sometimes causes the first press of the delete
key to be ignored.)

Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Fusce venenatis
pharetra libero. Phasellus lacinia nisi feugiat felis. Sed id magna in nisl
cursus consectetuer. Aliquam aliquam lectus eu magna. Praesent sit amet ipsum
vitae nisl mattis commodo. Aenean pulvinar facilisis lectus. Phasellus
sodales risus sit amet lectus. Suspendisse in turpis. Vestibulum ac mi
accumsan eros commodo tincidunt. Nullam velit. In pulvinar, dui sit amet
ullamcorper dictum, dui risus ultricies nisl, a dignissim sapien enim sit
amet tortor. Pellentesque fringilla, massa sit amet bibendum blandit, pede
leo commodo mi, eleifend feugiat neque tortor dapibus mauris. Morbi nunc
arcu, tincidunt vel, blandit non, iaculis vel, libero. Vestibulum sed metus
vel velit scelerisque varius. Vivamus a tellus. Proin nec orci vel elit
molestie venenatis. Aenean fringilla, lorem vel fringilla bibendum, nibh mi
varius mi, eget semper ipsum ligula ut urna. Nullam tempor convallis augue.
Sed at dui.
'''

element_index = doctext.index('[element here]')
doctext = doctext.replace('[element here]', '')

class TestElement(document.InlineElement):
    vertex_list = None

    def place(self, layout, x, y):
##        assert layout.document.text[self._position] == '\x00'
        ### in bug 538, this fails after two characters are deleted.

        self.vertex_list = layout.batch.add(4, pyglet.gl.GL_QUADS,
            layout.top_group,
            'v2i', ('c4B', [200, 200, 200, 255] * 4))
        y += self.descent
        w = self.advance
        h = self.ascent - self.descent
        self.vertex_list.vertices[:] = (x, y,
                                        x + w, y,
                                        x + w, y + h,
                                        x, y + h)

    def remove(self, layout):
        self.vertex_list.delete()
        del self.vertex_list

class TestWindow(pyglet.window.Window):
    def __init__(self, *args, **kwargs):
        super(TestWindow, self).__init__(*args, **kwargs)

        self.batch = pyglet.graphics.Batch()
        self.document = pyglet.text.decode_attributed(doctext)
        for i in [element_index]:
            self.document.insert_element(i, TestElement(60, -10, 70))
        self.margin = 2
        self.layout = layout.IncrementalTextLayout(self.document,
            self.width - self.margin * 2, self.height - self.margin * 2,
            multiline=True,
            batch=self.batch)
        self.caret = caret.Caret(self.layout)
        self.push_handlers(self.caret)

        self.set_mouse_cursor(self.get_system_mouse_cursor('text'))

    def on_draw(self):
        pyglet.gl.glClearColor(1, 1, 1, 1)
        self.clear()
        self.batch.draw()

    def on_key_press(self, symbol, modifiers):
        super(TestWindow, self).on_key_press(symbol, modifiers)
        if symbol == pyglet.window.key.TAB:
            self.caret.on_text('\t')
        self.document.set_style(0, len(self.document.text),
                                dict(bold = None))  ### trigger bug 538

class TestCase(unittest.TestCase):
    def test(self):
        self.window = TestWindow(
            ##resizable=True,
            visible=False)
        self.window.set_visible()
        pyglet.app.run()

if __name__ == '__main__':
    unittest.main()
bsd-3-clause
Python
2e796f38dbdc1044c13a768c76fa733ad07b9829
Add astro/21cm/extract_slice.py
liweitianux/atoolbox,liweitianux/atoolbox,liweitianux/atoolbox,liweitianux/atoolbox,liweitianux/atoolbox,liweitianux/atoolbox
astro/21cm/extract_slice.py
astro/21cm/extract_slice.py
#!/usr/bin/env python3
#
# Copyright (c) 2017 Weitian LI <[email protected]>
# MIT License
#

"""
Extract a slice from the 21cm cube and save as FITS image.
"""

import os
import sys
import argparse

import numpy as np
import astropy.io.fits as fits


def main():
    outfile_default = "{prefix}_z{z:05.2f}_N{Nside}_L{Lside}_s{sidx}.fits"

    parser = argparse.ArgumentParser(
        description="Extract a slice from cube and save as FITS image")
    parser.add_argument("-C", "--clobber", dest="clobber",
                        action="store_true",
                        help="overwrite existing files")
    parser.add_argument("-d", "--dtype", dest="dtype", default="float32",
                        help="NumPy dtype of data cubes (default: float32)")
    parser.add_argument("-z", "--redshift", dest="redshift",
                        type=float, required=True,
                        help="redshift of the input data cube")
    parser.add_argument("-L", "--len-side", dest="Lside",
                        type=float, required=True,
                        help="Side length of the cube [comoving Mpc]")
    parser.add_argument("-s", "--slice-idx", dest="sidx",
                        type=int, default=None,
                        help="slice index to be extracted (default: "
                        "the central slice)")
    parser.add_argument("-u", "--unit", dest="unit",
                        help="data unit (e.g., K, mK)")
    parser.add_argument("-i", "--infile", dest="infile", required=True,
                        help="input data cube")
    parser.add_argument("-o", "--outfile", dest="outfile",
                        default=outfile_default,
                        help="output FITS image slice (default: %s)" %
                        outfile_default)
    parser.add_argument("-p", "--prefix", dest="prefix", required=True,
                        help="prefix for the output FITS image")
    args = parser.parse_args()

    cube = np.fromfile(open(args.infile, "rb"), dtype=args.dtype)
    Nside = round(cube.shape[0] ** (1.0/3))
    print("Read cube: %s (Nside=%d)" % (args.infile, Nside))

    if args.sidx is None:
        sidx = int(Nside / 2.0)
    elif args.sidx >= 0 and args.sidx < Nside:
        sidx = args.sidx
    else:
        raise ValueError("invalid slice index: %s" % args.sidx)

    outfile = args.outfile.format(prefix=args.prefix, z=args.redshift,
                                  Nside=Nside, Lside=args.Lside, sidx=sidx)
    if os.path.exists(outfile) and not args.clobber:
        raise OSError("output file already exists: %s" % outfile)

    cube = cube.reshape((Nside, Nside, Nside))
    simg = cube[:, :, sidx]

    header = fits.Header()
    header["REDSHIFT"] = args.redshift
    header["Lside"] = (args.Lside, "Cube side length [comoving Mpc]")
    header["Nside"] = (Nside, "Number of pixels on each cube side")
    header["SliceIdx"] = (sidx, "Index of this extracted slice")
    if args.unit:
        header["BUNIT"] = (args.unit, "Data unit")
    header.add_history(" ".join(sys.argv))
    hdu = fits.PrimaryHDU(data=simg, header=header)
    try:
        hdu.writeto(outfile, overwrite=args.clobber)
    except TypeError:
        hdu.writeto(outfile, clobber=args.clobber)
    print("Extracted #%d slice: %s" % (sidx, outfile))


if __name__ == "__main__":
    main()
mit
Python
0bf7d9fb20a3d2588ffc0e8341ec2af3df5fe300
Add test for depot index page
verleihtool/verleihtool,verleihtool/verleihtool,verleihtool/verleihtool,verleihtool/verleihtool
depot/tests/test_depot_index.py
depot/tests/test_depot_index.py
from django.test import TestCase, Client

from depot.models import Depot


def create_depot(name, state):
    return Depot.objects.create(name=name, active=state)


class DepotIndexTestCase(TestCase):

    def test_depot_index_template(self):
        response = self.client.get('/depots/')
        self.assertTemplateUsed(
            response,
            template_name='depot/index.html'
        )

    def test_depot_index_with_no_depots(self):
        response = self.client.get('/depots/')
        self.assertEqual(response.status_code, 200)
        self.assertQuerysetEqual(response.context['depot_list'], [])
        self.assertContains(response, 'No depots available :(')

    def test_depot_index_with_active_depot(self):
        depot = create_depot('active depot', True)
        response = self.client.get('/depots/')
        self.assertEqual(response.status_code, 200)
        self.assertQuerysetEqual(
            response.context['depot_list'],
            ['<Depot: Depot active depot>']
        )
        self.assertContains(response, depot.name)

    def test_depot_index_with_archived_depot(self):
        depot = create_depot('archived depot', False)
        response = self.client.get('/depots/')
        self.assertEqual(response.status_code, 200)
        self.assertQuerysetEqual(response.context['depot_list'], [])
        self.assertContains(response, 'No depots available')
        self.assertNotContains(response, depot.name)

    def test_depot_index_with_active_and_archived_depot(self):
        active_depot = create_depot('active depot', True)
        archived_depot = create_depot('archived depot', False)
        response = self.client.get('/depots/')
        self.assertEqual(response.status_code, 200)
        self.assertQuerysetEqual(
            response.context['depot_list'],
            ['<Depot: Depot active depot>']
        )
        self.assertContains(response, active_depot.name)
        self.assertNotContains(response, archived_depot.name)
agpl-3.0
Python
2a903d721c44f9c6b53c8516b28b9dd6c1faa5e0
Create crawler_utils.py
nikohernandiz/TVLineFinder
crawler_utils.py
crawler_utils.py
import json
import os.path


def comments_to_json(comments):
    result = []
    for comment in comments:
        result.append({"score": comment.score,
                       "url": comment.permalink,
                       "body": comment.body,
                       "id": comment.id,
                       "replies": comments_to_json(comment.replies)})
    return result


def save_submission(submission, storage_dir):
    with open(os.path.join(storage_dir, submission.id), "w") as f:
        f.write(json.dumps({"url": submission.permalink,
                            "text": submission.selftext,
                            "title": submission.title,
                            "score": submission.score,
                            "comments": comments_to_json(submission.comments)}))
        f.close()
mit
Python
d81a1f3ef63aef7f003a018f26ea636cf47cfc5d
Add init file for installation
coded-by-hand/mass,coded-by-hand/mass
jswatchr/__init__.py
jswatchr/__init__.py
from jswatchr import *
bsd-2-clause
Python
7850371982cc50dc2a5a59c7b01d5a1bec80cf3f
Add FairFuzz tool spec
dbeyer/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,dbeyer/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,sosy-lab/benchexec
benchexec/tools/fairfuzz.py
benchexec/tools/fairfuzz.py
""" BenchExec is a framework for reliable benchmarking. This file is part of BenchExec. Copyright (C) 2007-2015 Dirk Beyer All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import benchexec.result as result import benchexec.util as util import benchexec.tools.template import benchexec.model class Tool(benchexec.tools.template.BaseTool): """ Tool info for FairFuzz (https://https://github.com/carolemieux/afl-rb/tree/testcomp). """ REQUIRED_PATHS = [ "bin" ] def executable(self): return util.find_executable('fairfuzz-svtestcomp') def version(self, executable): return "FairFuzz, built on AFL 2.52b" def name(self): return 'FairFuzz' def determine_result(self, returncode, returnsignal, output, isTimeout): """ Parse the output of the tool and extract the verification result. This method always needs to be overridden. If the tool gave a result, this method needs to return one of the benchexec.result.RESULT_* strings. Otherwise an arbitrary string can be returned that will be shown to the user and should give some indication of the failure reason (e.g., "CRASH", "OUT_OF_MEMORY", etc.). """ for line in output: if "ERROR: couldn't run FairFuzz" in line: return "Couldn't run FairFuzz" if "CRASHES FOUND" in line: return result.RESULT_FALSE_REACH if "DONE RUNNING" in line: return result.RESULT_DONE return result.RESULT_UNKNOWN
apache-2.0
Python
a39fbd74eaf84c757f15d14bfc728bd1d0f63bd4
Create blockchain.py
openblockchains/awesome-blockchains,openblockchains/awesome-blockchains,openblockchains/awesome-blockchains,openblockchains/awesome-blockchains,openblockchains/awesome-blockchains,openblockchains/awesome-blockchains,openblockchains/awesome-blockchains
blockchain.py/blockchain.py
blockchain.py/blockchain.py
###########################
# build your own blockchain from scratch in python3!
#
# inspired by
#   Let's Build the Tiniest Blockchain In Less Than 50 Lines of Python by Gerald Nash
#   see https://medium.com/crypto-currently/lets-build-the-tiniest-blockchain-e70965a248b
#
#
# to run use:
#   $ python ./blockchain.py

import hashlib as hasher
import datetime as date
import pprint

class Block:
    def __init__(self, index, data, previous_hash):
        self.index = index
        self.timestamp = date.datetime.now()
        self.data = data
        self.previous_hash = previous_hash
        self.hash = self.calc_hash()

    def calc_hash(self):
        sha = hasher.sha256()
        sha.update(str(self.index).encode("utf-8") +
                   str(self.timestamp).encode("utf-8") +
                   str(self.data).encode("utf-8") +
                   str(self.previous_hash).encode("utf-8"))
        return sha.hexdigest()

    def __repr__(self):
        return "Block<\n index: {},\n timestamp: {},\n data: {},\n previous_hash: {},\n hash: {}>".format(
            self.index, self.timestamp, self.data, self.previous_hash, self.hash)

    @staticmethod
    def first( data="Genesis" ):
        return Block(0, data, "0")

    @staticmethod
    def next( previous, data="Transaction Data..." ):
        return Block(previous.index + 1, data, previous.hash)


#####
## let's get started
##   build a blockchain a block at a time

b0 = Block.first( "Genesis" )
b1 = Block.next( b0, "Transaction Data..." )
b2 = Block.next( b1, "Transaction Data......" )
b3 = Block.next( b2, "More Transaction Data..." )

blockchain = [b0, b1, b2, b3]

pprint.pprint( blockchain )

######
# will pretty print something like:
#
# [Block<
#    index: 0,
#    timestamp: 2017-09-19 19:21:04.015584,
#    data: Genesis,
#    previous_hash: 0,
#    hash: b0cb7953bfad60415ea3b5d3b8015ee22c89d43351ea8f53e5367ee06193b1d3>,
#  Block<
#    index: 1,
#    timestamp: 2017-09-19 19:21:04.015584,
#    data: Transaction Data...,
#    previous_hash: b0cb7953bfad60415ea3b5d3b8015ee22c89d43351ea8f53e5367ee06193b1d3,
#    hash: a87707b2867d28e7367c74e4a2800ec112ea2a8b1517a332ad0b4c49c3b3d60b>,
#  Block<
#    index: 2,
#    timestamp: 2017-09-19 19:21:04.015584,
#    data: Transaction Data......,
#    previous_hash: a87707b2867d28e7367c74e4a2800ec112ea2a8b1517a332ad0b4c49c3b3d60b,
#    hash: 9a8aecdd62da47301502f0079aa1bf24dcf39ad392c723baef6b9bfbc927cf4e>,
#  Block<
#    index: 3,
#    timestamp: 2017-09-19 19:21:04.015584,
#    data: More Transaction Data...,
#    previous_hash: 9a8aecdd62da47301502f0079aa1bf24dcf39ad392c723baef6b9bfbc927cf4e,
#    hash: 5ef442875fb8c3e18d08531f3eba26ea75b608604fa0cc75715d76e15edbb5ea>]
cc0-1.0
Python
37db687b4167aee0e88036c5d85995de891453ed
Create cbalusek_01.py
GT-IDEaS/SkillsWorkshop2017,GT-IDEaS/SkillsWorkshop2017,GT-IDEaS/SkillsWorkshop2017
Week01/Problem01/cbalusek_01.py
Week01/Problem01/cbalusek_01.py
#This project defines a function that takes any two numbers and sums their
#multiples to some cutoff value

def sum(val1, val2, test):
    i = 1
    j = 1
    cum = 0
    while i*val1 < test:
        cum += i*val1
        i += 1
    while j*val2 < test:
        if j*val2%val1 != 0:
            cum += j*val2
            j += 1
        else:
            j += 1
    return cum

print(sum(3,5,1000))
bsd-3-clause
Python
089f93bcf7157ee3eaf83964294c5c2df19683f0
Create retweet.py
kingoak/firstapp
settings-search_query-yourHashtag-Leave-empty-for-all-languages-tweet_language-Create-your-app-on-http/retweet.py
settings-search_query-yourHashtag-Leave-empty-for-all-languages-tweet_language-Create-your-app-on-http/retweet.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os, ConfigParser, tweepy, inspect, hashlib

path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))

# read config
config = ConfigParser.SafeConfigParser()
config.read(os.path.join(path, "config"))

# your hashtag or search query and tweet language (empty = all languages)
hashtag = config.get("settings","search_query")
tweetLanguage = config.get("settings","tweet_language")

# blacklisted users and words
userBlacklist = []
wordBlacklist = ["RT", u"♺"]

# build savepoint path + file
hashedHashtag = hashlib.md5(hashtag).hexdigest()
last_id_filename = "last_id_hashtag_%s" % hashedHashtag
rt_bot_path = os.path.dirname(os.path.abspath(__file__))
last_id_file = os.path.join(rt_bot_path, last_id_filename)

# create bot
auth = tweepy.OAuthHandler(config.get("twitter","consumer_key"), config.get("twitter","consumer_secret"))
auth.set_access_token(config.get("twitter","access_token"), config.get("twitter","access_token_secret"))
api = tweepy.API(auth)

# retrieve last savepoint if available
try:
    with open(last_id_file, "r") as file:
        savepoint = file.read()
except IOError:
    savepoint = ""
    print "No savepoint found. Trying to get as many results as possible."

# search query
timelineIterator = tweepy.Cursor(api.search, q=hashtag, since_id=savepoint, lang=tweetLanguage).items()

# put everything into a list to be able to sort/filter
timeline = []
for status in timelineIterator:
    timeline.append(status)

try:
    last_tweet_id = timeline[0].id
except IndexError:
    last_tweet_id = savepoint

# filter @replies/blacklisted words & users out and reverse timeline
timeline = filter(lambda status: status.text[0] != "@", timeline)
timeline = filter(lambda status: not any(word in status.text.split() for word in wordBlacklist), timeline)
timeline = filter(lambda status: status.author.screen_name not in userBlacklist, timeline)
timeline.reverse()

tw_counter = 0
err_counter = 0

# iterate the timeline and retweet
for status in timeline:
    try:
        print "(%(date)s) %(name)s: %(message)s\n" % \
            { "date" : status.created_at,
              "name" : status.author.screen_name.encode('utf-8'),
              "message" : status.text.encode('utf-8') }

        api.retweet(status.id)
        tw_counter += 1
    except tweepy.error.TweepError as e:
        # just in case tweet got deleted in the meantime or already retweeted
        err_counter += 1
        #print e
        continue

print "Finished. %d Tweets retweeted, %d errors occured." % (tw_counter, err_counter)

# write last retweeted tweet id to file
with open(last_id_file, "w") as file:
    file.write(str(last_tweet_id))
mpl-2.0
Python
cc79ee252e09ade17961d03265c61a87e270bd88
Make color emoji use character sequences instead of PUA.
davelab6/nototools,anthrotype/nototools,pathumego/nototools,googlei18n/nototools,davelab6/nototools,pahans/nototools,moyogo/nototools,googlefonts/nototools,namemealrady/nototools,googlei18n/nototools,moyogo/nototools,dougfelt/nototools,googlefonts/nototools,pahans/nototools,googlei18n/nototools,googlefonts/nototools,davelab6/nototools,namemealrady/nototools,dougfelt/nototools,pathumego/nototools,anthrotype/nototools,dougfelt/nototools,googlefonts/nototools,anthrotype/nototools,pahans/nototools,namemealrady/nototools,pathumego/nototools,googlefonts/nototools,moyogo/nototools
nototools/map_pua_emoji.py
nototools/map_pua_emoji.py
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Modify an emoji font to map legacy PUA characters to standard ligatures."""

__author__ = '[email protected] (Roozbeh Pournader)'

import sys

from fontTools import ttLib

from nototools import add_emoji_gsub
from nototools import font_data


def get_glyph_name_from_gsub(char_seq, font):
    """Find the glyph name for ligature of a given character sequence from GSUB.
    """
    cmap = font_data.get_cmap(font)
    # FIXME: So many assumptions are made here.
    try:
        first_glyph = cmap[char_seq[0]]
        rest_of_glyphs = [cmap[ch] for ch in char_seq[1:]]
    except KeyError:
        return None

    for lookup in font['GSUB'].table.LookupList.Lookup:
        ligatures = lookup.SubTable[0].ligatures
        try:
            for ligature in ligatures[first_glyph]:
                if ligature.Component == rest_of_glyphs:
                    return ligature.LigGlyph
        except KeyError:
            continue
    return None


def add_pua_cmap(source_file, target_file):
    """Add PUA characters to the cmap of the first font and save as second."""
    font = ttLib.TTFont(source_file)
    cmap = font_data.get_cmap(font)
    for pua, (ch1, ch2) in (add_emoji_gsub.EMOJI_KEYCAPS.items()
                            + add_emoji_gsub.EMOJI_FLAGS.items()):
        if pua not in cmap:
            glyph_name = get_glyph_name_from_gsub([ch1, ch2], font)
            if glyph_name is not None:
                cmap[pua] = glyph_name
    font.save(target_file)


def main(argv):
    """Save the first font given to the second font."""
    add_pua_cmap(argv[1], argv[2])


if __name__ == '__main__':
    main(sys.argv)
apache-2.0
Python
2a5b7773af3e9516d8a4a3df25c0b829598ebb1c
Remove redundant str typecasting
klmitch/nova,jianghuaw/nova,mahak/nova,rajalokan/nova,jianghuaw/nova,openstack/nova,mahak/nova,hanlind/nova,hanlind/nova,Juniper/nova,gooddata/openstack-nova,rahulunair/nova,alaski/nova,gooddata/openstack-nova,mikalstill/nova,sebrandon1/nova,jianghuaw/nova,sebrandon1/nova,mahak/nova,Juniper/nova,rajalokan/nova,jianghuaw/nova,cloudbase/nova,rahulunair/nova,phenoxim/nova,cloudbase/nova,openstack/nova,vmturbo/nova,mikalstill/nova,sebrandon1/nova,klmitch/nova,Juniper/nova,hanlind/nova,vmturbo/nova,Juniper/nova,cloudbase/nova,vmturbo/nova,openstack/nova,vmturbo/nova,gooddata/openstack-nova,gooddata/openstack-nova,klmitch/nova,mikalstill/nova,rajalokan/nova,klmitch/nova,rajalokan/nova,alaski/nova,phenoxim/nova,rahulunair/nova
nova/tests/uuidsentinel.py
nova/tests/uuidsentinel.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys


class UUIDSentinels(object):
    def __init__(self):
        from oslo_utils import uuidutils
        self._uuid_module = uuidutils
        self._sentinels = {}

    def __getattr__(self, name):
        if name.startswith('_'):
            raise ValueError('Sentinels must not start with _')
        if name not in self._sentinels:
            self._sentinels[name] = self._uuid_module.generate_uuid()
        return self._sentinels[name]


sys.modules[__name__] = UUIDSentinels()
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys


class UUIDSentinels(object):
    def __init__(self):
        from oslo_utils import uuidutils
        self._uuid_module = uuidutils
        self._sentinels = {}

    def __getattr__(self, name):
        if name.startswith('_'):
            raise ValueError('Sentinels must not start with _')
        if name not in self._sentinels:
            self._sentinels[name] = str(self._uuid_module.generate_uuid())
        return self._sentinels[name]


sys.modules[__name__] = UUIDSentinels()
apache-2.0
Python
ac673650673f7d6b9785d577499037bf9db4435a
refactor prompt abstraction
mattvonrocketstein/smash,mattvonrocketstein/smash,mattvonrocketstein/smash,mattvonrocketstein/smash
lib/smashlib/prompt.py
lib/smashlib/prompt.py
""" smash.prompt """ from smashlib.data import PROMPT_DEFAULT as DEFAULT class Prompt(dict): def __setitem__(self, k, v, update=True): if k in self: raise Exception,'prompt component is already present: ' + str(k) super(Prompt, self).__setitem__(k, v) if update: self.update_prompt() def update_prompt(self): parts = self.values() parts.sort() parts = [part[1] for part in parts] self.template = ' '.join(parts) def _get_template(self): """ get the current prompt template """ opc = getattr(__IPYTHON__.shell, 'outputcache', None) if opc: return opc.prompt1.p_template else: return 'error-getting-output-prompt' def _set_template(self, t): """ set the current prompt template """ opc = getattr(__IPYTHON__.shell, 'outputcache', None) if opc: opc.prompt1.p_template = t template = property(_get_template, _set_template) prompt = Prompt() prompt.__setitem__('working_dir', [100, DEFAULT], update=False) prompt.template = DEFAULT
mit
Python
2c178c5ea05d2454ef6896aaf9c58b6536f5a15f
Create bubblesort.py
GabrielGhe/MyAlgorithms,GabrielGhe/MyAlgorithms
bubblesort.py
bubblesort.py
def bubblesort(lst):
    #from last index to second
    for passes in range(len(lst) - 1, 0, -1):
        #from [0,passes[ keep swapping to put the largest
        #number at index passes
        for i in range(passes):
            if lst[i] > lst[i+1]:
                swap(lst, i, i+1)
    return lst

def swap(lst, i, j):
    temp = lst[i]
    lst[i] = lst[j]
    lst[j] = temp

print "{0}".format(bubblesort([23,57,75,33,6,8,56]))
mit
Python
9e954d5181d36762a8c34e69516c7f5510bae5a7
add exception class to use for mtconvert errors
ryanraaum/oldowan.mtconvert
oldowan/mtconvert/error.py
oldowan/mtconvert/error.py
class MtconvertError(Exception):
    """Exception raised for errors in the mtconvert module.

    Attributes:
        expression -- input expression in which the error occurred
        message    -- explanation of the error
    """

    def __init__(self, expression, message):
        self.expression = expression
        self.message = message
mit
Python
3adcefcad4fc3ecb85aa4a22e8b3c4bf5ca4e6f5
Add tests for revision updates via import
edofic/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core
test/integration/ggrc/converters/test_import_update.py
test/integration/ggrc/converters/test_import_update.py
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>

"""Tests for bulk updates with CSV import."""

from integration.ggrc.converters import TestCase

from ggrc import models


class TestImportUpdates(TestCase):
    """ Test importing of already existing objects """

    def setUp(self):
        TestCase.setUp(self)
        self.client.get("/login")

    def test_policy_basic_update(self):
        """ Test simple policy title update """
        filename = "policy_basic_import.csv"
        response = self.import_file(filename)
        self._check_response(response, {})

        policy = models.Policy.query.filter_by(slug="p1").first()
        self.assertEqual(policy.title, "some weird policy")

        revision_count = models.Revision.query.filter(
            models.Revision.resource_type == "Policy",
            models.Revision.resource_id == policy.id
        ).count()
        self.assertEqual(revision_count, 1)

        filename = "policy_basic_import_update.csv"
        response = self.import_file(filename)
        self._check_response(response, {})

        policy = models.Policy.query.filter_by(slug="p1").first()
        self.assertEqual(policy.title, "Edited policy")

        revision_count = models.Revision.query.filter(
            models.Revision.resource_type == "Policy",
            models.Revision.resource_id == policy.id
        ).count()
        self.assertEqual(revision_count, 2)
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>

from integration.ggrc.converters import TestCase

from ggrc import models


class TestImportUpdates(TestCase):
    """ Test importing of already existing objects """

    def setUp(self):
        TestCase.setUp(self)
        self.client.get("/login")

    def test_policy_basic_update(self):
        """ Test simple policy title update """
        filename = "policy_basic_import.csv"
        response = self.import_file(filename)
        self._check_response(response, {})

        policy = models.Policy.query.filter_by(slug="p1").first()
        self.assertEqual(policy.title, "some weird policy")

        filename = "policy_basic_import_update.csv"
        response = self.import_file(filename)
        self._check_response(response, {})

        policy = models.Policy.query.filter_by(slug="p1").first()
        self.assertEqual(policy.title, "Edited policy")
apache-2.0
Python
f79d10de2adb99e3a3d07caa2a00359208186c15
Add twext.python.datetime tests
trevor/calendarserver,trevor/calendarserver,trevor/calendarserver
twext/python/test/test_datetime.py
twext/python/test/test_datetime.py
##
# Copyright (c) 2006-2010 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##

from datetime import date, datetime, timedelta

from twext.python.datetime import dateordatetime, timerange, utc

from twistedcaldav.test.util import TestCase, testUnimplemented


class DateTimeTests(TestCase):
    def test_date_date(self):
        d = date.today()
        dodt = dateordatetime(d)
        self.assertEquals(dodt.date(), d)

    def test_date_date_tz(self):
        d = date.today()
        dodt = dateordatetime(d, defaultTZ=utc)
        self.assertEquals(dodt.date(), d)

    def test_date_datetime(self):
        d = date.today()
        dodt = dateordatetime(d)
        self.assertEquals(dodt.datetime(), datetime(d.year, d.month, d.day))

    def test_date_datetime_tz(self):
        d = date.today()
        dodt = dateordatetime(d, defaultTZ=utc)
        self.assertEquals(dodt.datetime(), datetime(d.year, d.month, d.day, tzinfo=utc))

    def test_datetime_date(self):
        dt = datetime.now()
        dodt = dateordatetime(dt)
        self.assertEquals(dodt.date(), dt.date())

    def test_datetime_datetime(self):
        dt = datetime.now()
        dodt = dateordatetime(dt)
        self.assertEquals(dodt.datetime(), dt)

    def test_datetime_datetime_tz(self):
        dt = datetime.now()
        dodt = dateordatetime(dt, defaultTZ=utc)
        self.assertEquals(dodt.datetime(), dt)

    def test_date_iCalendarString(self):
        d = date(2010, 2, 22)
        dodt = dateordatetime(d)
        self.assertEquals(dodt.iCalendarString(), "20100222")

    def test_datetime_iCalendarString(self):
        dt = datetime(2010, 2, 22, 17, 44, 42, 98303)
        dodt = dateordatetime(dt)
        self.assertEquals(dodt.iCalendarString(), "20100222T174442")

    def test_datetime_iCalendarString_utc(self):
        dt = datetime(2010, 2, 22, 17, 44, 42, 98303, tzinfo=utc)
        dodt = dateordatetime(dt)
        self.assertEquals(dodt.iCalendarString(), "20100222T174442Z")

    @testUnimplemented
    def test_datetime_iCalendarString_tz(self):
        # Need to test a non-UTC timezone also
        raise NotImplementedError()

    @testUnimplemented
    def test_asTimeZone(self):
        raise NotImplementedError()

    @testUnimplemented
    def test_asUTC(self):
        raise NotImplementedError()


class TimeRangeTests(TestCase):
    def test_start(self):
        start = datetime.now()
        tr = timerange(start=start)
        self.assertEquals(tr.start(), start)

    def test_start_none(self):
        tr = timerange()
        self.assertEquals(tr.start(), None)

    def test_end(self):
        end = datetime.now()
        tr = timerange(end=end)
        self.assertEquals(tr.end(), end)

    def test_end_none(self):
        tr = timerange()
        self.assertEquals(tr.end(), None)

    def test_end_none_duration(self):
        duration = timedelta(seconds=8)
        tr = timerange(duration=duration)
        self.assertEquals(tr.end(), None)

    def test_end_none_duration_start(self):
        start = datetime.now()
        duration = timedelta(seconds=8)
        tr = timerange(start=start, duration=duration)
        self.assertEquals(tr.end(), start + duration)

    def test_duration(self):
        duration = timedelta(seconds=8)
        tr = timerange(duration=duration)
        self.assertEquals(tr.duration(), duration)

    def test_duration_none(self):
        tr = timerange()
        self.assertEquals(tr.duration(), None)

    def test_duration_none_end(self):
        end = datetime.now()
        tr = timerange(end=end)
        self.assertEquals(tr.duration(), None)

    def test_duration_none_start_end(self):
        start = datetime.now()
        duration = timedelta(seconds=8)
        end = start + duration
        tr = timerange(start=start, end=end)
        self.assertEquals(tr.duration(), duration)

    @testUnimplemented
    def test_overlapsWith(self):
        # Need a few tests; combinations of:
        #  - start/end are None
        #  - overlapping and not
        #  - dates and datetimes
        #  - timezones
        raise NotImplementedError()
apache-2.0
Python
157cb4518412a6e6de9c3d0d64c68ac0af276c6a
Access checking unit tests added for FormPage view.
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
tests/app/soc/modules/gsoc/views/test_student_forms.py
tests/app/soc/modules/gsoc/views/test_student_forms.py
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for student forms view."""

from tests import profile_utils
from tests import test_utils


class FormPageTest(test_utils.GSoCDjangoTestCase):
  """Test student form page."""

  def setUp(self):
    self.init()

  def testLoneUserAccessForbidden(self):
    self._assertAccessForbiddenForUrl(self._getEnrollmentFormUrl())
    self._assertAccessForbiddenForUrl(self._getTaxFormUrl())

  def testMentorAccessForbidden(self):
    self.data.createMentor(self.org)
    self._assertAccessForbiddenForUrl(self._getEnrollmentFormUrl())
    self._assertAccessForbiddenForUrl(self._getTaxFormUrl())

  def testOrgAdminAccessForbidden(self):
    self.data.createOrgAdmin(self.org)
    self._assertAccessForbiddenForUrl(self._getEnrollmentFormUrl())
    self._assertAccessForbiddenForUrl(self._getTaxFormUrl())

  def testHostAccessForbidden(self):
    self.data.createHost()
    self._assertAccessForbiddenForUrl(self._getEnrollmentFormUrl())
    self._assertAccessForbiddenForUrl(self._getTaxFormUrl())

  def testStudentAccessForbidden(self):
    # access should be forbidden because at this point students are not
    # permitted to upload their forms
    self.timeline.studentsAnnounced()

    mentor = self._createNewMentor()
    self.data.createStudentWithProject(self.org, mentor)

    self._assertAccessForbiddenForUrl(self._getEnrollmentFormUrl())
    self._assertAccessForbiddenForUrl(self._getTaxFormUrl())

  def testStudentAccessGranted(self):
    self.timeline.formSubmission()

    mentor = self._createNewMentor()
    self.data.createStudentWithProject(self.org, mentor)

    # check for enrollment form
    url = self._getEnrollmentFormUrl()
    response = self.get(url)
    self._assertStudentFormsTemplatesUsed(response)

    # check for tax form
    url = self._getTaxFormUrl()
    response = self.get(url)
    self._assertStudentFormsTemplatesUsed(response)

  def _getEnrollmentFormUrl(self):
    """Returns URL for the student enrollment form upload."""
    return '/gsoc/student_forms/enrollment/' + self.gsoc.key().name()

  def _getTaxFormUrl(self):
    """Returns URL for the student tax form upload."""
    return '/gsoc/student_forms/tax/' + self.gsoc.key().name()

  def _assertAccessForbiddenForUrl(self, url):
    """Asserts that GET request will return forbidden response for the
    specified URL."""
    response = self.get(url)
    self.assertResponseForbidden(response)
    self.assertErrorTemplatesUsed(response)

  def _assertStudentFormsTemplatesUsed(self, response):
    """Asserts that all the templates from the student forms were used.
    """
    self.assertGSoCTemplatesUsed(response)
    self.assertTemplateUsed(response, 'v2/modules/gsoc/student_forms/base.html')
    self.assertTemplateUsed(response, 'v2/modules/gsoc/_form.html')

  def _createNewMentor(self):
    """Returns a newly created mentor."""
    profile_helper = profile_utils.GSoCProfileHelper(self.gsoc, self.dev_test)
    profile_helper.createOtherUser('[email protected]')
    return profile_helper.createMentor(self.org)
apache-2.0
Python
e48caa4bb61cce466ad5eb9bffbfba8e33312474
Add Python EC2 TerminateInstances example
awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples
python/example_code/ec2/terminate_instances.py
python/example_code/ec2/terminate_instances.py
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[terminate_instances.py demonstrates how to terminate an Amazon EC2 instance.]
# snippet-service:[ec2]
# snippet-keyword:[Amazon EC2]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-2-11]
# snippet-sourceauthor:[AWS]

# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

import logging
import boto3
from botocore.exceptions import ClientError


def terminate_instances(instance_ids):
    """Terminate one or more Amazon EC2 instances

    :param instance_ids: List of string IDs of EC2 instances to terminate
    :return: List of state information for each instance specified in
        instance_ids. If error, return None.
    """

    # Terminate each instance in the argument list
    ec2 = boto3.client('ec2')
    try:
        states = ec2.terminate_instances(InstanceIds=instance_ids)
    except ClientError as e:
        logging.error(e)
        return None
    return states['TerminatingInstances']


def main():
    """Exercise terminate_instances()"""

    # Assign these values before running the program
    ec2_instance_ids = ['EC2_INSTANCE_ID']

    # Set up logging
    logging.basicConfig(level=logging.DEBUG,
                        format='%(levelname)s: %(asctime)s: %(message)s')

    # Terminate the EC2 instance(s)
    states = terminate_instances(ec2_instance_ids)
    if states is not None:
        logging.debug('Terminating the following EC2 instances')
        for state in states:
            logging.debug(f'ID: {state["InstanceId"]}')
            logging.debug(f'  Current state: Code {state["CurrentState"]["Code"]}, '
                          f'{state["CurrentState"]["Name"]}')
            logging.debug(f'  Previous state: Code {state["PreviousState"]["Code"]}, '
                          f'{state["PreviousState"]["Name"]}')


if __name__ == '__main__':
    main()
apache-2.0
Python
34c0ca7ba0f8d2ac51583dfab4ea2f4cee7a62d5
add script to read csv files to list
qingkaikong/useful_script,qingkaikong/useful_script,qingkaikong/useful_script,qingkaikong/useful_script,qingkaikong/useful_script
python/read_formated_txt_file/read_csv2list.py
python/read_formated_txt_file/read_csv2list.py
import csv


def csv_to_list(csv_file, delimiter=','):
    """ Reads in a CSV file and returns the contents as list,
    where every row is stored as a sublist, and each element
    in the sublist represents 1 cell in the table.
    """
    with open(csv_file, 'r') as csv_con:
        reader = csv.reader(csv_con, delimiter=delimiter)
        return list(reader)

data = csv_to_list('./astro.csv')
print data
bsd-3-clause
Python
d402c25a6b257778e08e6db2890ae575432daed0
Add new linkedlist file for intersection
keon/algorithms,amaozhao/algorithms
linkedlist/intersection.py
linkedlist/intersection.py
def intersection(h1, h2):
    """
    This function takes two lists and returns the node they have in common, if any.
    In this example:

    1 -> 3 -> 5
               \
                7 -> 9 -> 11
               /
    2 -> 4 -> 6

    ...we would return 7.
    Note that the node itself is the unique identifier, not the value of the node.
    """
    count = 0
    flag = None
    h1_orig = h1
    h2_orig = h2

    while h1 or h2:
        count += 1

        if not flag and (h1.next is None or h2.next is None):
            # We hit the end of one of the lists, set a flag for this
            flag = (count, h1.next, h2.next)

        if h1:
            h1 = h1.next
        if h2:
            h2 = h2.next

    long_len = count  # Mark the length of the longer of the two lists
    short_len = flag[0]

    if flag[1] is None:
        shorter = h1_orig
        longer = h2_orig
    elif flag[2] is None:
        shorter = h2_orig
        longer = h1_orig

    while longer and shorter:
        while long_len > short_len:
            # force the longer of the two lists to "catch up"
            longer = longer.next
            long_len -= 1

        if longer == shorter:
            # The nodes match, return the node
            return longer
        else:
            longer = longer.next
            shorter = shorter.next

    return None


class Node(object):
    def __init__(self, val=None):
        self.val = val
        self.next = None


def test():
    def printLinkedList(head):
        string = ""
        while head.next:
            string += head.val + " -> "
            head = head.next
        string += head.val
        print(string)

    # 1 -> 3 -> 5
    #            \
    #             7 -> 9 -> 11
    #            /
    # 2 -> 4 -> 6

    a1 = Node("1")
    b1 = Node("3")
    c1 = Node("5")
    d = Node("7")
    a2 = Node("2")
    b2 = Node("4")
    c2 = Node("6")
    e = Node("9")
    f = Node("11")

    a1.next = b1
    b1.next = c1
    c1.next = d
    a2.next = b2
    b2.next = c2
    c2.next = d
    d.next = e
    e.next = f

    printLinkedList(a1)
    printLinkedList(a2)

    print(intersection(a1, a2))
    assert intersection(a1, a2).val == d.val

test()
mit
Python
8f1cf446a0b602e6e64ccebaa794e7ec6a2f840d
add support routines for oversampling
harpolea/pyro2,zingale/pyro2,zingale/pyro2,harpolea/pyro2
compressible_fv4/initialization_support.py
compressible_fv4/initialization_support.py
"""Routines to help initialize cell-average values by oversampling the initial conditions on a finer mesh and averaging down to the requested mesh""" import mesh.fv as fv def get_finer(myd): mgf = myd.grid.fine_like(4) fd = fv.FV2d(mgf) for v in myd.names: fd.register_var(v, myd.BCs[v]) fd.create() return fd def average_down(myd, fd): """average the fine data from fd into the coarser object, myd""" for v in myd.names: var = myd.get_var(v) var[:,:] = fd.restrict(v, N=4)
bsd-3-clause
Python
4227cef6567023717c8d66f99ce776d9d8aa0929
Add OS::Contrail::PhysicalRouter
safchain/contrail-heat
contrail_heat/resources/physical_router.py
contrail_heat/resources/physical_router.py
from heat.engine import properties
from vnc_api import vnc_api
from contrail_heat.resources import contrail
import uuid


class HeatPhysicalRouter(contrail.ContrailResource):
    PROPERTIES = (
        NAME,
    ) = (
        'name',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Physical Router name'),
            update_allowed=True,
        ),
    }

    attributes_schema = {
        "name": _("The name of the Physical Router."),
        "fq_name": _("The FQ name of the Physical Router."),
        "physical_interfaces": _("List of Physical Interfaces attached."),
        "show": _("All attributes."),
    }

    def handle_create(self):
        config_obj = self.vnc_lib().global_system_config_read(
            fq_name=["default-global-system-config"])
        pr_obj = vnc_api.PhysicalRouter(name=self.properties[self.NAME],
                                        parent_obj=config_obj)
        pr_uuid = self.vnc_lib().physical_router_create(pr_obj)
        self.resource_id_set(pr_uuid)

    def _show_resource(self):
        pr_obj = self.vnc_lib().physical_router_read(id=self.resource_id)
        dic = {}
        dic['name'] = pr_obj.get_display_name()
        dic['fq_name'] = pr_obj.get_fq_name_str()
        dic['physical_interfaces'] = (
            [pi['to'] for pi in pr_obj.get_physical_interfaces() or []])
        return dic

    def handle_delete(self):
        try:
            self.vnc_lib().physical_router_delete(id=self.resource_id)
        except Exception:
            pass

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        # TODO
        pass


def resource_mapping():
    return {
        'OS::Contrail::PhysicalRouter': HeatPhysicalRouter,
    }
apache-2.0
Python
5cebd0b56f81dfc02feb5511dade82ebf6db99ff
add presence.py
nullpixel/litecord,nullpixel/litecord
litecord/presence.py
litecord/presence.py
'''
presence.py - presence management

Sends PRESENCE_UPDATE to clients when needed
'''

class PresenceManager:
    def __init__(self, server):
        self.server = server

    async def update_presence(self, user_id, status):
        '''
        PresenceManager.update_presence(user_id, status)

        Updates the presence of a user.
        Sends a PRESENCE_UPDATE event to relevant clients.
        '''

        '''
        ????dummy code????

        current_presence = self.presences.get(user_id)
        new_presence = self.make_presence(status) # something like this lol

        user = await self.user.get_user(user_id)
        for guild_id in user.guilds:
            guild = await self.guilds.get_guild(guild_id)
            for member in guild:
                member = await self.guilds.get_member(guild_id, member_id)
                c = await self.server.get_connection(member_id)
                if c is not None:
                    await c.dispatch('PRESENCE_UPDATE',
                        self.diff(current_presence, new_presence))
        '''
        pass
mit
Python
02c59aa1d2eec43442f4bcf1d6662535e094bffd
add move pics by modified date
mcxiaoke/python-labs,mcxiaoke/python-labs,mcxiaoke/python-labs,mcxiaoke/python-labs,mcxiaoke/python-labs
media/pic_date_move.py
media/pic_date_move.py
'''
File: pic_date_move.py
Created: 2021-04-01 10:46:38
Modified: 2021-04-01 10:46:43
Author: mcxiaoke ([email protected])
License: Apache License 2.0
'''

import os
import sys
import shutil
from datetime import date, datetime
import pathlib


def move_one_file(src_file):
    old_file = pathlib.Path(src_file)
    old_dir = pathlib.Path(old_file).parent
    name = old_file.name
    # old_file = pathlib.Path(old_dir, name)
    fd = datetime.fromtimestamp(old_file.stat().st_mtime)
    new_dir = pathlib.Path(old_dir.parent, fd.strftime('%Y%m%d'))
    new_file = pathlib.Path(new_dir, name)
    if not (new_dir.exists() and new_dir.samefile(old_dir)):
        if not new_dir.exists():
            new_dir.mkdir(parents=True, exist_ok=True)
        print('Move to', new_file)
        # old_file.rename(new_file)


def move_by_date(src_dir):
    ''' move image files by file modified date '''
    for root, _, files in os.walk(src_dir):
        print(root)
        for name in files:
            move_one_file(pathlib.Path(root, name))


move_by_date(sys.argv[1])
apache-2.0
Python
3ae5dc9a4325251033d3db9cae0d80eb4812815d
Add lazy iterator
krishnasrinivas/minio-py,minio/minio-py,NitishT/minio-py,harshavardhana/minio-py,NitishT/minio-py,minio/minio-py,donatello/minio-py
minio/lazy_iterator.py
minio/lazy_iterator.py
# Minimal Object Storage Library, (C) 2015 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__author__ = 'minio'


class LazyIterator(object):
    def __init__(self, populator):
        self.populator = populator
        self.values = []

    def __iter__(self):
        return self

    def next(self):
        if self.populator is None:
            # should never see this, but we'll be defensive
            raise StopIteration()
        if len(self.values) == 0:
            self.values, self.populator = self.populator()
        if len(self.values) > 0:
            return self.values.pop(0)
        raise StopIteration()
apache-2.0
Python
860f93d2bb4c08b63c64fe9e5b7b620b824d8490
test ++/--/+
smspillaz/pychecker,smspillaz/pychecker,smspillaz/pychecker
pychecker/pychecker2/utest/ops.py
pychecker/pychecker2/utest/ops.py
from pychecker2 import TestSupport
from pychecker2 import OpChecks


class OpTests(TestSupport.WarningTester):
    def testOperator(self):
        for op in ['--', '++']:
            self.warning('def f(x):\n'
                         ' return %sx' % op,
                         2, OpChecks.OpCheck.operator, op)

    def testOperatorPlus(self):
        self.warning('def f(x):\n'
                     ' return +x',
                     2, OpChecks.OpCheck.operatorPlus)
bsd-3-clause
Python
58fee826ab5298f7de036bf320bbc109b853eec8
Add null check for sds sync thread which can be optional
Tendrl/commons,rishubhjain/commons,r0h4n/commons
tendrl/commons/manager/__init__.py
tendrl/commons/manager/__init__.py
import abc
import logging

import six

from tendrl.commons import jobs

LOG = logging.getLogger(__name__)


@six.add_metaclass(abc.ABCMeta)
class Manager(object):
    def __init__(
        self,
        sds_sync_thread,
        central_store_thread,
    ):
        self._central_store_thread = central_store_thread
        self._sds_sync_thread = sds_sync_thread
        self._job_consumer_thread = jobs.JobConsumerThread()

    def stop(self):
        LOG.info("%s stopping" % self.__class__.__name__)
        self._job_consumer_thread.stop()
        if self._sds_sync_thread:
            self._sds_sync_thread.stop()
        self._central_store_thread.stop()

    def start(self):
        LOG.info("%s starting" % self.__class__.__name__)
        self._central_store_thread.start()
        if self._sds_sync_thread:
            self._sds_sync_thread.start()
        self._job_consumer_thread.start()

    def join(self):
        LOG.info("%s joining" % self.__class__.__name__)
        self._job_consumer_thread.join()
        if self._sds_sync_thread:
            self._sds_sync_thread.join()
        self._central_store_thread.join()
import abc
import logging

import six

from tendrl.commons import jobs

LOG = logging.getLogger(__name__)


@six.add_metaclass(abc.ABCMeta)
class Manager(object):
    def __init__(
        self,
        sds_sync_thread,
        central_store_thread,
    ):
        self._central_store_thread = central_store_thread
        self._sds_sync_thread = sds_sync_thread
        self._job_consumer_thread = jobs.JobConsumerThread()

    def stop(self):
        LOG.info("%s stopping" % self.__class__.__name__)
        self._job_consumer_thread.stop()
        self._sds_sync_thread.stop()
        self._central_store_thread.stop()

    def start(self):
        LOG.info("%s starting" % self.__class__.__name__)
        self._central_store_thread.start()
        self._sds_sync_thread.start()
        self._job_consumer_thread.start()

    def join(self):
        LOG.info("%s joining" % self.__class__.__name__)
        self._job_consumer_thread.join()
        self._sds_sync_thread.join()
        self._central_store_thread.join()
lgpl-2.1
Python
27cce3b6708a17f813f0a82871c988fec3a36517
Add quart to contrib (#300)
rollbar/pyrollbar
rollbar/contrib/quart/__init__.py
rollbar/contrib/quart/__init__.py
""" Integration with Quart """ from quart import request import rollbar def report_exception(app, exception): rollbar.report_exc_info(request=request) def _hook(request, data): data['framework'] = 'quart' if request: data['context'] = str(request.url_rule) rollbar.BASE_DATA_HOOK = _hook
mit
Python
e5a39d4e17a0555cb242731b34f0ee480367b4fe
Add task that sends out notifications
stefanw/foireminder,stefanw/foireminder
foireminder/foireminder/reminders/tasks.py
foireminder/foireminder/reminders/tasks.py
from django.utils import timezone

from .models import ReminderRequest, EmailReminder


def send_todays_notifications(self):
    today = timezone.now()
    reminders = ReminderRequest.objects.filter(
        start__year=today.year,
        start__month=today.month,
        start__day=today.day
    )
    for reminder in reminders:
        for subscriber in EmailReminder.objects.filter(rule=reminder.rule):
            subscriber.send_notification()
mit
Python
9efda5a5a2b7aa16423e68fb10e1a0cb94c1f33e
Create rectangles_into_squares.py
Kunalpod/codewars,Kunalpod/codewars
rectangles_into_squares.py
rectangles_into_squares.py
#Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Rectangles into Squares
#Problem level: 6 kyu

def sqInRect(lng, wdth):
    if lng==wdth:
        return None
    li=[]
    while lng and wdth:
        if lng>wdth:
            lng-=wdth
            li.append(wdth)
        else:
            wdth-=lng
            li.append(lng)
    return li
mit
Python
c5fc38749dcf966787f6c6a201e23c310a22358c
Add script to update UniProt protein names
clulab/bioresources
src/main/resources/org/clulab/reach/update_uniprot.py
src/main/resources/org/clulab/reach/update_uniprot.py
import os
import re
import csv
import requests
import itertools
from gilda.generate_terms import parse_uniprot_synonyms

# Base URL for UniProt
uniprot_url = 'http://www.uniprot.org/uniprot'
# Get protein names, gene names and the organism
columns = ['id', 'protein%20names', 'genes', 'organism']
# Only get reviewed entries and use TSV format
params = {
    'sort': 'id',
    'desc': 'no',
    'compress': 'no',
    'query': 'reviewed:yes',
    'format': 'tab',
    'columns': ','.join(columns)
}


def process_row(row):
    entry, protein_names, genes, organisms = row
    # Gene names are space separated
    gene_synonyms = genes.split(' ') if genes else []
    # We use a more complex function to parse protein synonyms which appear
    # as "first synonym (second synonym) (third synonym) ...".
    protein_synonyms = parse_uniprot_synonyms(protein_names) \
        if protein_names else []
    # We remove EC codes as synonyms because they always refer to higher-level
    # enzyme categories shared across species
    protein_synonyms = [p for p in protein_synonyms
                        if not p.startswith('EC ')]
    # Organisms and their synonyms also appear in the format that protein
    # synonyms do
    organism_synonyms = parse_uniprot_synonyms(organisms)
    # ... except we need to deal with a special case in which the first
    # organism name has a strain name in parantheses after it, and make sure
    # that the strain name becomes part of the first synonym.
    if len(organism_synonyms) >= 2 and \
            organism_synonyms[1].startswith('strain'):
        organism_synonyms[0] = '%s (%s)' % (organism_synonyms[0],
                                            organism_synonyms[1])
        organism_synonyms = [organism_synonyms[0]] + organism_synonyms[2:]
    # We now take each gene synonym and each organism synonym and create all
    # combinations of these as entries.
    entries = []
    for gene, organism in itertools.product(gene_synonyms + protein_synonyms,
                                            organism_synonyms):
        # We skip synonyms that are more than 5 words in length (consistent
        # with original KB construction).
        if len(gene.split(' ')) > 5:
            continue
        entries.append((gene, entry, organism))
    return entries


if __name__ == '__main__':
    if not os.path.exists('uniprot_entries.tsv'):
        res = requests.get(uniprot_url, params=params)
        res.raise_for_status()
        with open('uniprot_entries.tsv', 'w') as fh:
            fh.write(res.text)

    processed_entries = []
    with open('uniprot_entries.tsv', 'r') as fh:
        reader = csv.reader(fh, delimiter='\t')
        next(reader)
        for row in reader:
            processed_entries += process_row(row)
    # We sort the entries first by the synonym but in a way that special
    # characters and capitalization is ignored, then sort by ID and then
    # by organism.
    processed_entries = sorted(processed_entries,
                               key=lambda x: (re.sub('[^A-Za-z0-9]', '',
                                                     x[0]).lower(),
                                              x[1], x[2]))
    with open('kb/uniprot-proteins.tsv.update', 'w') as fh:
        writer = csv.writer(fh, delimiter='\t')
        for entry in processed_entries:
            writer.writerow(entry)
apache-2.0
Python
aeadfbd4ae1f915291328f040cda54f309743024
Add main application code
Elemental-IRCd/oline-gangnam-style,Elemental-IRCd/oline-gangnam-style
oline-gangnam-style.py
oline-gangnam-style.py
from jinja2 import Environment, FileSystemLoader
import json
import os
import sys

env = Environment(loader=FileSystemLoader("."))
template = env.get_template('ircd.conf.jinja')

config = {}

with open(sys.argv[1] if len(sys.argv) > 1 else "config.json", "r") as fin:
    config = json.loads(fin.read())

network = config["network"]

for server in config["servers"]:
    with open("confs/" + server["name"]+".conf", "w") as fout:
        fout.write(template.render(**locals()))
bsd-2-clause
Python
7a6b5396ce760eaa206bfb9b556a374c9c17f397
Add DecisionTree estimator.
LeartS/kaggle-competitions
bike-sharing/2-decision-tree.py
bike-sharing/2-decision-tree.py
import math
import argparse
from datetime import datetime

import numpy as np
from sklearn import cross_validation
from sklearn import tree
from sklearn import metrics


def load_data(path, **kwargs):
    return np.loadtxt(path, **kwargs)


def save_data(path, data, **kwargs):
    np.savetxt(path, data, **kwargs)


def hour_from_dt_string(dt_string):
    return datetime.strptime(dt_string, '%Y-%m-%d %H:%M:%S').hour


def preprocessing(X, y):
    is_seasons = np.empty((X.shape[0], 4))
    return X, y


def cv(estimator, X, y):
    k_fold = cross_validation.KFold(n=len(train_dataset), n_folds=10,
                                    indices=True)
    a = 0.0
    for train_idx, test_idx in k_fold:
        r = estimator.fit(X[train_idx], y[train_idx]).predict(X[test_idx])
        r = np.where(r > 0, r, 0).astype(np.int)
        s = math.sqrt(metrics.mean_squared_error(np.log(y[test_idx] + 1),
                                                 np.log(r + 1.0)))
        a += s
        print 'Score: {:.4f}'.format(s)
    print 'Average score: {:.4f}'.format(a/len(k_fold))


def loss_func(y_real, y_predicted):
    return math.sqrt(metrics.mean_squared_error(np.log(y_real + 1),
                                                np.log(y_predicted + 1)))


if __name__ == '__main__':
    # Command arguments
    parser = argparse.ArgumentParser(description='bike-sharing estimator')
    parser.add_argument('--cv', dest='cv', action='store_const', const=True,
                        default=False, help='Do cross validation')
    parser.add_argument('--no-test', dest='out', action='store_const',
                        const=False, default=True, help='No test dataset')
    args = parser.parse_args()

    # Input
    common_input_options = {'delimiter': ',',
                            'skiprows': 1,
                            'converters': {0: hour_from_dt_string}}
    train_dataset = load_data('data/train.csv',
                              usecols=(0,1,2,3,4,5,6,7,8,11),
                              **common_input_options)
    test_dataset = load_data('data/test.csv',
                             usecols=(0,1,2,3,4,5,6,7,8),
                             **common_input_options)
    common_input_options['converters'] = {}
    out_column = load_data('data/test.csv', usecols=(0,), dtype=str,
                           **common_input_options)

    # Data preprocessing
    X_train, y_train = preprocessing(train_dataset[:,:-1], train_dataset[:,-1])
    X_test, y_test = preprocessing(test_dataset, None)

    # The interesting part
    estimator = tree.DecisionTreeRegressor(max_depth=12)
    if args.cv:
        cv(estimator, X_train, y_train)
    if args.out:
        results = estimator.fit(X_train, y_train).predict(X_test)
        results = np.where(results > 0, results, 0.01).astype(np.int)

        # Output
        save_data('data/out.csv',
                  np.column_stack((out_column.T, results.T)),
                  delimiter=',', header='datetime,count',
                  fmt=('%s', '%s'), comments='')
mit
Python
c58c58d5bf1394e04e30f5eeb298818558be027f
Add directory for tests of rules removin
PatrikValkovic/grammpy
tests/rules_tests/clearAfterNonTermRemove/__init__.py
tests/rules_tests/clearAfterNonTermRemove/__init__.py
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 22:06
:Licence GNUv3
Part of grammpy

"""
mit
Python
9a7091c1502b9758c1492a1c99ace7d4ad74026c
move integer_divions to syntax
ratnania/pyccel,ratnania/pyccel
tests/pyccel/parser/scripts/syntax/integer_division.py
tests/pyccel/parser/scripts/syntax/integer_division.py
5 // 3
a // 3
5 // b
a // b

5.// 3.
a // 3.
5.// b
a // b
mit
Python
17e0b81463e3c4c9b62f95f40912b270652a8e63
Create new package (#6376)
iulian787/spack,mfherbst/spack,EmreAtes/spack,matthiasdiener/spack,LLNL/spack,LLNL/spack,LLNL/spack,krafczyk/spack,matthiasdiener/spack,mfherbst/spack,tmerrick1/spack,EmreAtes/spack,krafczyk/spack,krafczyk/spack,iulian787/spack,iulian787/spack,EmreAtes/spack,tmerrick1/spack,LLNL/spack,krafczyk/spack,mfherbst/spack,tmerrick1/spack,mfherbst/spack,iulian787/spack,matthiasdiener/spack,LLNL/spack,tmerrick1/spack,matthiasdiener/spack,tmerrick1/spack,matthiasdiener/spack,EmreAtes/spack,EmreAtes/spack,mfherbst/spack,krafczyk/spack,iulian787/spack
var/spack/repos/builtin/packages/r-ggridges/package.py
var/spack/repos/builtin/packages/r-ggridges/package.py
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class RGgridges(RPackage):
    """Ridgeline plots provide a convenient way of visualizing changes in
    distributions over time or space."""

    homepage = "https://cran.r-project.org/web/packages/ggridges/index.html"
    url      = "https://cran.r-project.org/src/contrib/ggridges_0.4.1.tar.gz"
    list_url = "https://cran.rstudio.com/src/contrib/Archive/ggridges"

    version('0.4.1', '21d53b3f7263beb17f629f0ebfb7b67a')
    version('0.4.0', 'da94ed1ee856a7fa5fb87712c84ec4c9')

    depends_on('[email protected]:3.4.9')
    depends_on('r-ggplot2', type=('build', 'run'))
lgpl-2.1
Python
1c2292dcd47865a3dbd3f7b9adf53433f6f34770
Create new package. (#6215)
LLNL/spack,skosukhin/spack,tmerrick1/spack,krafczyk/spack,mfherbst/spack,matthiasdiener/spack,iulian787/spack,LLNL/spack,skosukhin/spack,matthiasdiener/spack,matthiasdiener/spack,krafczyk/spack,iulian787/spack,skosukhin/spack,iulian787/spack,krafczyk/spack,krafczyk/spack,EmreAtes/spack,mfherbst/spack,mfherbst/spack,iulian787/spack,EmreAtes/spack,EmreAtes/spack,EmreAtes/spack,tmerrick1/spack,skosukhin/spack,iulian787/spack,LLNL/spack,mfherbst/spack,tmerrick1/spack,tmerrick1/spack,LLNL/spack,tmerrick1/spack,skosukhin/spack,mfherbst/spack,LLNL/spack,EmreAtes/spack,matthiasdiener/spack,matthiasdiener/spack,krafczyk/spack
var/spack/repos/builtin/packages/r-timedate/package.py
var/spack/repos/builtin/packages/r-timedate/package.py
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class RTimedate(RPackage):
    """Environment for teaching "Financial Engineering and Computational
    Finance". Managing chronological and calendar objects."""

    homepage = "https://cran.r-project.org/package=timeDate"
    url      = "https://cran.r-project.org/src/contrib/timeDate_3012.100.tar.gz"
    list_url = "https://cran.r-project.org/src/contrib/Archive/timeDate"

    version('3012.100', '9f69d3724efbf0e125e6b8e6d3475fe4')
lgpl-2.1
Python
5ba4fce42892634213bede09759bbca1cd56e346
add package py-brian2 (#3617)
iulian787/spack,iulian787/spack,matthiasdiener/spack,EmreAtes/spack,lgarren/spack,mfherbst/spack,matthiasdiener/spack,iulian787/spack,matthiasdiener/spack,skosukhin/spack,LLNL/spack,skosukhin/spack,mfherbst/spack,krafczyk/spack,iulian787/spack,krafczyk/spack,skosukhin/spack,mfherbst/spack,lgarren/spack,mfherbst/spack,skosukhin/spack,matthiasdiener/spack,mfherbst/spack,TheTimmy/spack,tmerrick1/spack,TheTimmy/spack,lgarren/spack,EmreAtes/spack,lgarren/spack,TheTimmy/spack,LLNL/spack,TheTimmy/spack,EmreAtes/spack,LLNL/spack,skosukhin/spack,krafczyk/spack,matthiasdiener/spack,tmerrick1/spack,krafczyk/spack,tmerrick1/spack,lgarren/spack,iulian787/spack,EmreAtes/spack,tmerrick1/spack,tmerrick1/spack,krafczyk/spack,LLNL/spack,TheTimmy/spack,EmreAtes/spack,LLNL/spack
var/spack/repos/builtin/packages/py-brian2/package.py
var/spack/repos/builtin/packages/py-brian2/package.py
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class PyBrian2(PythonPackage):
    """A clock-driven simulator for spiking neural networks"""

    homepage = "http://www.briansimulator.org"
    url      = "https://pypi.io/packages/source/B/Brian2/Brian2-2.0.1.tar.gz"

    version('2.0.1', 'df5990e9a71f7344887bc02f54dfd0f0')
    version('2.0rc3', '3100c5e4eb9eb83a06ff0413a7d43152')

    variant('docs', default=False)

    # depends on py-setuptools@6: for windows, if spack targets windows,
    # this will need to be added here
    depends_on('py-setuptools', type='build')
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('py-pyparsing', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    # depends_on('[email protected]:', type=('build', 'run'))  # extra test
    depends_on('[email protected]:', type=('build', 'run'), when='+docs')
    depends_on('[email protected]:', type=('build', 'run'), when='+docs')
lgpl-2.1
Python
26525354e7bf2465a561a5172a0d9fef4205e77d
move column defs to singleton object
rlowrance/re-avm
chart06columns.py
chart06columns.py
'''define columns in all reports produced by chart06'''

import numpy as np


# all possible column definition
_defs = {
    'median_absolute_error': [6, '%6d', (' ', 'MAE'), 'median absolute error'],
    'model': [5, '%5s', (' ', 'model'),
              'model name (en = elastic net, gd = gradient boosting, rf = random forests)'],
    'n_months_back': [2, '%2d', (' ', 'bk'), 'number of mnths back for training'],
    'max_depth': [4, '%4d', (' ', 'mxd'), 'max depth of any individual decision tree'],
    'n_estimators': [4, '%4d', (' ', 'next'), 'number of estimators (= number of trees)'],
    'max_features': [4, '%4s', (' ', 'mxft'), 'maximum number of features examined to split a node'],
    'learning_rate': [4, '%4.1f', (' ', 'lr'), 'learning rate for gradient boosting'],
    'alpha': [5, '%5.2f', (' ', 'alpha'), 'constant multiplying penalty term for elastic net'],
    'l1_ratio': [4, '%4.2f', (' ', 'l1'), 'l1_ratio mixing L1 and L2 penalties for elastic net'],
    'units_X': [6, '%6s', (' ', 'unitsX'), 'units for the x value; either natural (nat) or log'],
    'units_y': [6, '%6s', (' ', 'unitsY'), 'units for the y value; either natural (nat) or log'],
    'validation_month': [6, '%6d', ('vald', 'month'), 'month used for validation'],
    'rank': [4, '%4d', (' ', 'rank'), 'rank within validation month; 1 == lowest MAE'],
    'median_price': [6, '%6d', ('median', 'price'), 'median price in the validation month'],
    'mae_validation': [6, '%6d', ('vald ', 'MAE'), 'median absolute error in validation month'],
    'mae_next': [6, '%6d', ('next ', 'MAE'),
                 'median absolute error in test month (which follows the validation month)'],
    'note': [15, '%15s', (' ', 'note'), 'when provided, the next MAE column contains the specified value'],
    'rank_index': [5, '%5d', ('rank', 'index'),
                   'ranking of model performance in the validation month; 0 == best'],
    'weight': [6, '%6.4f', (' ', 'weight'), 'weight of the model in the ensemble method'],
}


def defs_for_columns(*key_list):
    return [[key] + _defs[key]
            for key in key_list
            ]


def replace_by_spaces(k, v):
    'define values that are replaced by spaces'
    if isinstance(v, float) and np.isnan(v):
        return True
    return False
bsd-3-clause
Python
32740172d4258a95145a5bb68be315fe1640db23
Add alpha version of bootstraps script
cpvargas/stacklib
bootstraps.py
bootstraps.py
'''
Does N times random stacks of X maps of large L in pixels.
At each stacks it gets the central temperature, makes a histogram for
all stacks, then fits a normal distribution for the histogram.
'''

N = 100000
X = 10
L = 16

import stacklib as sl
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import os

path = os.environ["HOME"] + '/FILES/'

m = path + 'ACT_148_equ_season_3_1way_v3_src_free.fits'
w = path + 'ACT_148_equ_season_3_1way_calgc_strictcuts2_weights.fits'
b = path + 'profile_AR1_2009_pixwin_130224.txt'
s = path + 'Equa_mask_15mJy.fits'

RA0 = 55.
RA1 = 324.
DEC0 = -1.5
DEC1 = 1.5

M = sl.StackMap(m,w,b,s,RA0,RA1,DEC0,DEC1)
M.squeezefullmap()
M.filterfullmap()
M.unsqueezefullmap()

DeltaTs = []

def onestack(X,L):
    cat = sl.fakecatalog(X)
    M.setsubmapL(L)
    M.setstackmap()
    for item in cat:
        M.setsubmap(item[0],item[1])
        M.stacksubmap()
    M.finishstack()
    return DeltaTs.append(M.stackmap[L/2,L/2])

for i in range(N):
    onestack(X,L)

# histogram
n, bins, patches = plt.hist(DeltaTs,bins=50,normed = 1, facecolor = 'blue')

# best fit of data
(mu, sigma) = norm.fit(DeltaTs)

# add a 'best fit' line
y = mlab.normpdf( bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=2)

plt.xlabel('Temperature (microKelvin)')
plt.ylabel('Probability Density')
plt.show()
mit
Python
a3bfe6b91cbd87cb4292e92d7bf1ac4a44afd462
Add struct.py mirroring C struct declarations.
kaushik94/tardis,Tobychev/tardis,kaushik94/tardis,Tobychev/tardis,orbitfold/tardis,orbitfold/tardis,Tobychev/tardis,kaushik94/tardis,kaushik94/tardis,orbitfold/tardis,orbitfold/tardis
tardis/montecarlo/struct.py
tardis/montecarlo/struct.py
from ctypes import Structure, POINTER, c_int, c_int64, c_double, c_ulong


class RPacket(Structure):
    _fields_ = [
        ('nu', c_double),
        ('mu', c_double),
        ('energy', c_double),
        ('r', c_double),
        ('tau_event', c_double),
        ('nu_line', c_double),
        ('current_shell_id', c_int64),
        ('next_line_id', c_int64),
        ('last_line', c_int64),
        ('close_line', c_int64),
        ('current_continuum_id', c_int64),
        ('virtual_packet_flag', c_int64),
        ('virtual_packet', c_int64),
        ('d_line', c_double),
        ('d_electron', c_double),
        ('d_boundary', c_double),
        ('d_cont', c_double),
        ('next_shell_id', c_int64),
        ('status', c_int),
        ('id', c_int64),
        ('chi_th', c_double),
        ('chi_cont', c_double),
        ('chi_ff', c_double),
        ('chi_bf', c_double)
    ]


class StorageModel(Structure):
    _fields_ = [
        ('packet_nus', POINTER(c_double)),
        ('packet_mus', POINTER(c_double)),
        ('packet_energies', POINTER(c_double)),
        ('output_nus', POINTER(c_double)),
        ('output_energies', POINTER(c_double)),
        ('last_interaction_in_nu', POINTER(c_double)),
        ('last_line_interaction_in_id', POINTER(c_int64)),
        ('last_line_interaction_out_id', POINTER(c_int64)),
        ('last_line_interaction_shell_id', POINTER(c_int64)),
        ('last_line_interaction_type', POINTER(c_int64)),
        ('no_of_packets', c_int64),
        ('no_of_shells', c_int64),
        ('r_inner', POINTER(c_double)),
        ('r_outer', POINTER(c_double)),
        ('v_inner', POINTER(c_double)),
        ('time_explosion', c_double),
        ('inverse_time_explosion', c_double),
        ('electron_densities', POINTER(c_double)),
        ('inverse_electron_densities', POINTER(c_double)),
        ('line_list_nu', POINTER(c_double)),
        ('continuum_list_nu', POINTER(c_double)),
        ('line_lists_tau_sobolevs', POINTER(c_double)),
        ('line_lists_tau_sobolevs_nd', c_int64),
        ('line_lists_j_blues', POINTER(c_double)),
        ('line_lists_j_blues_nd', c_int64),
        ('no_of_lines', c_int64),
        ('no_of_edges', c_int64),
        ('line_interaction_id', c_int64),
        ('transition_probabilities', POINTER(c_double)),
        ('transition_probabilities_nd', c_int64),
        ('line2macro_level_upper', POINTER(c_int64)),
        ('macro_block_references', POINTER(c_int64)),
        ('transition_type', POINTER(c_int64)),
        ('destination_level_id', POINTER(c_int64)),
        ('transition_line_id', POINTER(c_int64)),
        ('js', POINTER(c_double)),
        ('nubars', POINTER(c_double)),
        ('spectrum_start_nu', c_double),
        ('spectrum_delta_nu', c_double),
        ('spectrum_end_nu', c_double),
        ('spectrum_virt_start_nu', c_double),
        ('spectrum_virt_end_nu', c_double),
        ('spectrum_virt_nu', POINTER(c_double)),
        ('sigma_thomson', c_double),
        ('inverse_sigma_thomson', c_double),
        ('inner_boundary_albedo', c_double),
        ('reflective_inner_boundary', c_int64),
        ('current_packet_id', c_int64),
        ('chi_bf_tmp_partial', POINTER(c_double)),
        ('t_electrons', POINTER(c_double)),
        ('l_pop', POINTER(c_double)),
        ('l_pop_r', POINTER(c_double)),
        ('cont_status', c_int),
        ('virt_packet_nus', POINTER(c_double)),
        ('virt_packet_energies', POINTER(c_double)),
        ('virt_packet_last_interaction_in_nu', POINTER(c_double)),
        ('virt_packet_last_interaction_type', POINTER(c_int64)),
        ('virt_packet_last_line_interaction_in_id', POINTER(c_int64)),
        ('virt_packet_last_line_interaction_out_id', POINTER(c_int64)),
        ('virt_packet_count', c_int64),
        ('virt_array_size', c_int64)
    ]


class RKState(Structure):
    _fields_ = [
        ('key', POINTER(c_ulong)),
        ('pos', c_int),
        ('has_gauss', c_int),
        ('gauss', c_double)
    ]
bsd-3-clause
Python
89b10996cfe6e60870e55b7c759aa73448bfa4d8
remove off curve pen
typemytype/RoboFontExamples
pens/removeOffcurvesPen.py
pens/removeOffcurvesPen.py
## use BasePen as base class
from fontTools.pens.basePen import BasePen


class RemoveOffcurvesPen(BasePen):
    """ A simple pen drawing a contour without any offcurves. """

    def __init__(self, glyphSet):
        BasePen.__init__(self, glyphSet)
        self._contours = []
        self._components = []

    def _moveTo(self, pt):
        self._contours.append([])
        self._contours[-1].append(("moveTo", pt))

    def _lineTo(self, pt):
        self._contours[-1].append(("lineTo", pt))

    def _curveToOne(self, pt1, pt2, pt3):
        self._contours[-1].append(("lineTo", pt3))

    def qCurveTo(self, *points):
        pt = points[-1]
        self._contours[-1].append(("lineTo", pt))

    def _closePath(self):
        self._contours[-1].append(("closePath", None))

    def _endpath(self):
        self._contours[-1].append(("endPath", None))

    def addComponent(self, baseName, transformation):
        self._components.append((baseName, transformation))

    def draw(self, outPen):
        """ Draw the stored instructions in an other pen. """
        for contour in self._contours:
            for penAttr, pt in contour:
                func = getattr(outPen, penAttr)
                if pt is None:
                    func()
                else:
                    func(pt)
        for baseGlyph, transformation in self._components:
            outPen.addComponent(baseGlyph, transformation)


## get the current glyph
g = CurrentGlyph()
## prepare the glyph for undo
g.prepareUndo("Remove All Offcurves")

## create a pen
pen = RemoveOffcurvesPen(g.getParent())
## draw the glyph in the pen
g.draw(pen)
## clear the glyph
g.clear()
## draw the stored contour from the pen into the emtpy glyph
pen.draw(g.getPen())
## tell the glyph undo watching is over
g.performUndo()
mit
Python
10952213496e8a1cbf80ba1eee7a0e968bdea14a
add missing test
puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq
corehq/ex-submodules/couchforms/tests/test_errors.py
corehq/ex-submodules/couchforms/tests/test_errors.py
from django.test import TestCase
from casexml.apps.case.exceptions import IllegalCaseId
from corehq.apps.receiverwrapper import submit_form_locally
from couchforms.models import XFormError


class CaseProcessingErrorsTest(TestCase):
    def test_no_case_id(self):
        """
        submit form with a case block that has no case_id

        check that
        - it errors
        - the form is not saved under its original id
        - an XFormError is saved with the original id as orig_id
        - the error was logged (<-- is this hard to test?)

        <data xmlns="example.com/foo">
            <case case_id="">
                <update><foo>bar</foo></update>
            </case>
        </data>
        """
        with self.assertRaises(IllegalCaseId):
            submit_form_locally(
                """<data xmlns="example.com/foo">
                    <meta>
                        <instanceID>abc-easy-as-123</instanceID>
                    </meta>
                    <case case_id="" xmlns="http://commcarehq.org/case/transaction/v2">
                        <update><foo>bar</foo></update>
                    </case>
                </data>""",
                'my_very_special_domain',
            )
        xform_errors = XFormError.view(
            'domain/docs',
            startkey=['my_very_special_domain', 'XFormError'],
            endkey=['my_very_special_domain', 'XFormError', {}],
        )
        related_errors = [xform_error for xform_error in xform_errors
                          if xform_error.orig_id == 'abc-easy-as-123']
        self.assertEqual(len(related_errors), 1)
        related_error = related_errors[0]
        self.assertEqual(related_error.problem,
                         'IllegalCaseId: case_id must not be empty')
bsd-3-clause
Python
f120e2524f09ed462bca52dbc83863ba74291dd5
Fix backend import.
jodal/mopidy-gmusic,jeh/mopidy-gmusic,hechtus/mopidy-gmusic,elrosti/mopidy-gmusic,mopidy/mopidy-gmusic,jaibot/mopidy-gmusic,jaapz/mopidy-gmusic,Tilley/mopidy-gmusic
tests/test_extension.py
tests/test_extension.py
import unittest

from mopidy_gmusic import GMusicExtension, actor as backend_lib


class ExtensionTest(unittest.TestCase):

    def test_get_default_config(self):
        ext = GMusicExtension()
        config = ext.get_default_config()
        self.assertIn('[gmusic]', config)
        self.assertIn('enabled = true', config)

    def test_get_config_schema(self):
        ext = GMusicExtension()
        schema = ext.get_config_schema()
        self.assertIn('username', schema)
        self.assertIn('password', schema)
        self.assertIn('deviceid', schema)

    def test_get_backend_classes(self):
        ext = GMusicExtension()
        backends = ext.get_backend_classes()
        self.assertIn(backend_lib.GMusicBackend, backends)
import unittest

from mopidy_gmusic import GMusicExtension, backend as backend_lib


class ExtensionTest(unittest.TestCase):

    def test_get_default_config(self):
        ext = GMusicExtension()
        config = ext.get_default_config()
        self.assertIn('[gmusic]', config)
        self.assertIn('enabled = true', config)

    def test_get_config_schema(self):
        ext = GMusicExtension()
        schema = ext.get_config_schema()
        self.assertIn('username', schema)
        self.assertIn('password', schema)
        self.assertIn('deviceid', schema)

    def test_get_backend_classes(self):
        ext = GMusicExtension()
        backends = ext.get_backend_classes()
        self.assertIn(backend_lib.GMusicBackend, backends)
apache-2.0
Python
b705c79b911ae201e9a79786e61ec36bd4a9be0f
add tests for revisions api
Cal-CS-61A-Staff/ok,Cal-CS-61A-Staff/ok,Cal-CS-61A-Staff/ok,Cal-CS-61A-Staff/ok,Cal-CS-61A-Staff/ok
tests/test_revisions.py
tests/test_revisions.py
import datetime
import json

from werkzeug.exceptions import BadRequest

from server.models import db, Backup, Group, Message, Version, Score

from tests import OkTestCase


class TestRevision(OkTestCase):
    """Tests revision API submission and scoring."""

    def setUp(self):
        """ Add submissions for 3 users. """
        super(TestRevision, self).setUp()
        self.setup_course()

        message_dict = {'file_contents': {'backup.py': '1'}, 'analytics': {}}

        self.active_user_ids = [self.user1.id, self.user2.id, self.user3.id]
        self.assignment.revisions_allowed = True

        time = self.assignment.due_date  # Set to dt.now(), so future subms are late
        for user_id in self.active_user_ids:
            time -= datetime.timedelta(minutes=15)
            backup = Backup(submitter_id=user_id,
                            assignment=self.assignment, submit=True)
            # Revisions are submitted on time.
            backup.created = time
            messages = [Message(kind=k, backup=backup, contents=m)
                        for k, m in message_dict.items()]
            db.session.add_all(messages)
            db.session.add(backup)

        # Put user 3 in a group with user 4
        Group.invite(self.user3, self.user4, self.assignment)
        group = Group.lookup(self.user3, self.assignment)
        group.accept(self.user4)

        okversion = Version(name="ok-client", current_version="v1.5.0",
                            download_link="http://localhost/ok")
        db.session.add(okversion)
        db.session.commit()

    def _submit_revision(self):
        data = {
            'assignment': self.assignment.name,
            'messages': {
                'file_contents': {
                    'hog.py': 'print("Hello world!")'
                }
            },
            'submit': False,
            'revision': True,
        }
        response = self.client.post('/api/v3/revision/?client_version=v1.5.0',
                                    data=json.dumps(data),
                                    headers=[('Content-Type', 'application/json')])
        return response

    def test_no_revisions(self):
        """ Ensure no user has revisions before submitting. """
        for user in self.active_user_ids:
            revision = self.assignment.revision({self.user1.id})
            self.assertIs(revision, None)

    def test_revison_anon(self):
        response = self._submit_revision()
        self.assert401(response)

    def test_revison_submit(self):
        self.login(self.user1.email)
        response = self._submit_revision()
        self.assert200(response)
        revision = self.assignment.revision({self.user1.id})
        self.assertTrue(revision.is_revision)

    def test_revison_disabled(self):
        # Disable revisions
        self.assignment.revisions_allowed = False
        db.session.commit()

        self.login(self.user1.email)
        response = self._submit_revision()
        self.assert403(response)

        # Ensure that the backup is still accepted
        backups = Backup.query.filter_by(submitter=self.user1).count()
        self.assertEquals(backups, 2)

    def test_revison_no_submission(self):
        """ Revisions are not accepted if there is no final submission. """
        self.login(self.user5.email)
        response = self._submit_revision()
        self.assert403(response)

        # Ensure that the backup is still accepted
        backups = Backup.query.filter_by(submitter=self.user5).count()
        self.assertEquals(backups, 1)

    def test_revison_test_group_member(self):
        self.login(self.user4.email)
        response = self._submit_revision()
        self.assert200(response)
        group = self.assignment.active_user_ids(self.user4.id)
        revision = self.assignment.revision(group)
        self.assertEquals(len(revision.owners()), 2)

    def test_revison_multiple_submit(self):
        group = self.assignment.active_user_ids(self.user3.id)

        self.login(self.user3.email)
        response = self._submit_revision()
        self.assert200(response)
        first_revision = self.assignment.revision(group)
        self.assertTrue(first_revision.is_revision)

        self.login(self.user4.email)
        response = self._submit_revision()
        self.assert200(response)
        second_revision = self.assignment.revision(group)
        self.assertTrue(second_revision.is_revision)
        self.assertNotEquals(first_revision.id, second_revision.id)

        # Check the number of revisions scores is 1
        scores = Score.query.filter_by(kind="revision", archived=False).count()
        self.assertEquals(scores, 1)
apache-2.0
Python
7e15f973f0ee898a0c06e50151ada675be46263d
add basic data, query method and method scaffolds
mrtazz/notifo.py
notifo/notifo.py
notifo/notifo.py
# encoding: utf-8

""" notifo.py - python wrapper for notifo.com """

import json
import urllib
import urllib2


class Notifo:
    """ Class for wrapping notifo.com """
    def __init__(self, user, api_secret):
        self.user = user
        self.api_secret = api_secret
        self.root_url = "https://api.notifo.com/v1/"
        # status codes (Request successful)
        self.status_codes = {
            2201 : "OK.",
            2202 : "User is already subscribed."
        }
        # error codes (Something went wrong)
        self.error_codes = {
            1100 : "An error occurred.",
            1101 : "Invalid credentials.",
            1102 : "Not allowed to sent to user.",
            1105 : "No such user.",
            1106 : "Not allowed to subscribe user.",
            1107 : "Missing required parameters.",
        }

    def subsribe_user(self, user):
        """ method to subscribe a user to a service """
        pass

    def send_notification(self):
        """ method to send a message to a user """
        pass

    def _query(self, url, data = None):
        """ query method to do HTTP POST/GET

        Parameters:
            url -> the url to POST/GET
            data -> header_data as a dict (only for POST)

        Returns:
            Parsed JSON data as dict or
            None on error
        """
        if data is not None:  # we have POST data if there is data
            values = urllib.urlencode(data)
            request = urllib2.Request(url, values)
        else:  # do a GET otherwise
            request = urllib2.Request(url)
        try:
            response = urllib2.urlopen(request)
        except IOError:  # no connection
            return None
        json_data = response.read()
        data = json.loads(json_data)
        return data
mit
Python
d40fb122d7083b9735728df15120ed682431be79
Create script for generating analysis seeds.
woodmd/haloanalysis,woodmd/haloanalysis
scripts/make_fhes_seeds.py
scripts/make_fhes_seeds.py
import yaml
import sys
import numpy as np

from astropy.table import Table
from astropy.coordinates import SkyCoord
from fermipy.catalog import *
from fermipy.utils import *


def get_coord(name,tab):
    row = tab[tab['Source_Name'] == name]
    return SkyCoord(float(row['RAJ2000']), float(row['DEJ2000']),unit='deg')


def avg_coords(coords):
    xyz = np.zeros(3)
    for t in coords:
        xyz += t.cartesian.xyz
    xyz /= np.sum(xyz**2)**0.5
    c = SkyCoord(xyz[0], xyz[1], xyz[2],representation='cartesian')
    c.representation='spherical'
    return c


tab = Table.read(sys.argv[1])

src_names = []

m = np.abs(tab['glat']) < 0.
#m |= (tab['fit_ext_gauss_ts_ext'] > 9.0)
#m |= (tab['fit_ext_disk_ts_ext'] > 9.0)
m |= (tab['fit_halo_ts'] > 16.0)
#m |= (tab['ts'] > 20000.0)

for row in tab[m]:
    src_names += [row['codename']]

src_names = sorted(list(set(src_names)))

o = {}

for name in src_names:
    #coords = [get_coord(t,cat.table) for t in names]
    #c0 = avg_coords(coords)
    print(name)
    #print(create_source_name(c0))
    names = [name]
    row = tab[tab['codename'] == names[0].lower().replace(' ','_')]
    c0 = SkyCoord(row['ra'],row['dec'],unit='deg')
    name = create_source_name(c0).replace('PS','FHES') + 'e'
    #print(c0.ra.deg,c0.dec.deg)
    #print(names[0])
    #print(row['codename'])

    src = {'name' : name,
           'ra' : float(c0.ra.deg),
           'dec' : float(c0.dec.deg),
           'SpectrumType' : 'PowerLaw',
           'SpatialModel' : 'RadialGaussian',
           'SpatialWidth' : float(row['fit_halo_r68']),
           'Index' : float(row['fit_halo_index'])}

    o[name.lower().replace(' ','_')] = {'selection' : {'target' : name},
                                        'model' : {'sources' : [src]}
                                        }

yaml.dump(o,open('out.yaml','w'))
bsd-3-clause
Python
9696acf13a6b25b1935b7fcaae5763db8e16e83a
Create MyoRemote.py
MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab
home/Alessandruino/MyoRemote.py
home/Alessandruino/MyoRemote.py
from com.thalmic.myo.enums import PoseType

remote = Runtime.start("remote","RemoteAdapter")
remote.setDefaultPrefix("raspi")
remote.connect("tcp://192.168.0.5:6767")

roll = 0.0

sleep(2)

python.send("raspiarduino", "connect","/dev/ttyUSB0")

sleep(1)

python.send("raspiarduino", "digitalWrite",2,1)
python.send("raspiarduino", "digitalWrite",3,1)

python.send("raspiarduino", "servoAttach","raspiservo",6)

python.send("raspiservo", "map",5.0,12.0,50.0,110.0)

myo = Runtime.start("myo","MyoThalmic")
myo.connect()

myo.addMyoDataListener(python)

def onMyoData(data):
    if (data.getPose() == PoseType.FIST):
        global roll
        roll = data.getRoll()
        python.send("raspiarduino", "analogWrite",5,50)
        python.send("raspiservo", "moveTo",roll)
    elif (data.getPose() == PoseType.WAVE_OUT):
        python.send("raspiarduino", "analogWrite",11,50)
    elif (data.getPose() == PoseType.REST):
        python.send("raspiarduino", "analogWrite",5,0)
        python.send("raspiarduino", "analogWrite",11,0)
apache-2.0
Python
6bd3869a2c2a6041e47da01ddaaa15b309bf90d7
Add example checkerscript
fausecteam/ctf-gameserver,fausecteam/ctf-gameserver,fausecteam/ctf-gameserver,fausecteam/ctf-gameserver,fausecteam/ctf-gameserver
checker/examples/dummyrunner.py
checker/examples/dummyrunner.py
#!/usr/bin/python3

import sys
import time
import os
import codecs


def generate_flag(tick, payload=None):
    if payload is None:
        sys.stdout.write("FLAG %d\n" % (tick,))
    else:
        sys.stdout.write("FLAG %d %s\n" % (tick, codecs.encode(os.urandom(8), 'hex').decode('latin-1')))
    sys.stdout.flush()
    return sys.stdin.readline().strip()


def place_flag(flag, ip):
    return 0


def check_for_flag(flag, ip):
    return 0


def main(tick, ip):
    result = place_flag(generate_flag(tick), ip)
    if 0 != result:
        sys.exit(result)

    oldesttick = max(tick-7, -1)
    for ctick in range(tick-1, oldesttick, -1):
        result = check_for_flag(generate_flag(ctick), ip)
        if 0 != result:
            sys.exit(result)

    sys.exit(0)


if __name__ == '__main__':
    _, tick, ip = sys.argv
    main(tick=int(tick), ip=ip)
isc
Python
4a782b2930210053bcc2fc705d55e56af5900771
Create dispatch_curve_cooling_plant.py
architecture-building-systems/CEAforArcGIS,architecture-building-systems/CEAforArcGIS
cea/plots/supply_system/dispatch_curve_cooling_plant.py
cea/plots/supply_system/dispatch_curve_cooling_plant.py
""" Show a Pareto curve plot for individuals in a given generation. """ from __future__ import division from __future__ import print_function import plotly.graph_objs as go import cea.plots.supply_system from cea.plots.variable_naming import NAMING, COLOR __author__ = "Jimeno Fonseca" __copyright__ = "Copyright 2019, Architecture and Building Systems - ETH Zurich" __credits__ = ["Jimeno Fonseca"] __license__ = "MIT" __version__ = "0.1" __maintainer__ = "Daren Thomas" __email__ = "[email protected]" __status__ = "Production" class DispatchCurveDistrictCoolingPlot(cea.plots.supply_system.SupplySystemPlotBase): """Show a pareto curve for a single generation""" name = "Dispatch curve cooling plant" expected_parameters = { 'generation': 'plots-supply-system:generation', 'individual': 'plots-supply-system:individual', 'timeframe': 'plots-supply-system:timeframe', 'scenario-name': 'general:scenario-name', } def __init__(self, project, parameters, cache): super(DispatchCurveDistrictCoolingPlot, self).__init__(project, parameters, cache) self.analysis_fields = [ "Q_DailyStorage_gen_directload_W", "Q_Trigen_NG_gen_directload_W", "Q_BaseVCC_WS_gen_directload_W", "Q_PeakVCC_WS_gen_directload_W", "Q_BaseVCC_AS_gen_directload_W", "Q_PeakVCC_AS_gen_directload_W", "Q_BackupVCC_AS_directload_W", ] self.analysis_field_demand = ['Q_districtcooling_sys_req_W'] self.timeframe = self.parameters['timeframe'] self.input_files = [(self.locator.get_optimization_slave_cooling_activation_pattern, [self.individual, self.generation])] @property def title(self): return "Dispatch curve for cooling plant in system #%s (%s)" % (self.individual, self.timeframe) @property def output_path(self): return self.locator.get_timeseries_plots_file( 'gen{generation}_ind{individual}dispatch_curve_cooling_plant'.format(individual=self.individual, generation=self.generation), self.category_name) @property def layout(self): return dict(barmode='relative', yaxis=dict(title='Energy Generation [MWh]')) def calc_graph(self): # main data about technologies data = self.process_individual_dispatch_curve_cooling() graph = [] analysis_fields = self.remove_unused_fields(data, self.analysis_fields) for field in analysis_fields: y = (data[field].values) / 1E6 # into MW trace = go.Bar(x=data.index, y=y, name=NAMING[field], marker=dict(color=COLOR[field])) graph.append(trace) # data about demand for field in self.analysis_field_demand: y = (data[field].values) / 1E6 # into MW trace = go.Scattergl(x=data.index, y=y, name=NAMING[field], line=dict(width=1, color=COLOR[field])) graph.append(trace) return graph def main(): """Test this plot""" import cea.config import cea.plots.cache config = cea.config.Configuration() cache = cea.plots.cache.NullPlotCache() DispatchCurveDistrictCoolingPlot(config.project, {'scenario-name': config.scenario_name, 'generation': config.plots_supply_system.generation, 'individual': config.plots_supply_system.individual, 'timeframe': config.plots_supply_system.timeframe}, cache).plot(auto_open=True) if __name__ == '__main__': main()
mit
Python
3026c007a4f9cbb6befa1599c8a8390a96d8396b
test import checks
thomasvs/pychecker,akaihola/PyChecker,akaihola/PyChecker,thomasvs/pychecker
pychecker2/utest/import.py
pychecker2/utest/import.py
from pychecker2.TestSupport import WarningTester
from pychecker2 import ImportChecks


class ImportTestCase(WarningTester):
    def testImportChecks(self):
        self.silent('import sys; print sys.argv')
        self.silent('import pychecker2; print pychecker2')
        self.silent('import pychecker2.utest; print pychecker2.utest')

    def testImportChecks(self):
        self.warning('import sys\n'
                     'print sys.argv\n'
                     'import sys\n',
                     3, ImportChecks.ImportCheck.duplicateImport,
                     'sys', ' in current scope')
        self.warning('from sys import *\n'
                     'def f():\n'
                     ' def g():\n'
                     ' from sys import argv\n'
                     ' return argv\n'
                     ' return g() + g()\n'
                     'print argv\n',
                     4, ImportChecks.ImportCheck.duplicateImport,
                     'argv', ' of import in parent scope <ModuleScope: global>')
        self.warning('import no_such_module\n',
                     1, ImportChecks.ImportCheck.importError,
                     'no_such_module', 'No module named no_such_module')
        self.warning('from pychecker2.utest.data import *\n'
                     'import exceptions\n'
                     'print exceptions\n',
                     2, ImportChecks.ImportCheck.shadowImport,
                     'exceptions', 'pychecker2.utest.data', 1)
bsd-3-clause
Python
5219e970f1b09d8f2d41bf61a3b9f9803a8aed1d
Add database.py with working db find function
parisandmilo/Ko-lect-FoC2015,saulsmcouk/LostMyNameYRS2015,saulsmcouk/LostMyNameYRS2015,parisandmilo/Ko-lect-FoC2015,saulsmcouk/LostMyNameYRS2015,parisandmilo/Ko-lect-FoC2015
python-backend/database.py
python-backend/database.py
from pymongo import MongoClient

# Connect to the local MongoDB server
client = MongoClient('localhost', 27017)

# `community` database
db = client.community

# Database find wrapper
def db_find(db_collection, db_query, find_one=False):
    # Get collection
    collection = db[db_collection]
    if find_one:
        result = collection.find_one(db_query)
    else:
        result = collection.find(db_query)
    return result

# Database insert wrapper

# Database update wrapper

# Database remove wrapper

print db_find('test', {'name': 'test'})
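# A possible shape for the insert wrapper stubbed above -- a sketch, not part
# of the original commit, assuming pymongo >= 3 where insert_one is available:
def db_insert(db_collection, db_document):
    collection = db[db_collection]
    # insert_one returns an InsertOneResult carrying the new document's _id
    return collection.insert_one(db_document).inserted_id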
mit
Python
271dca123ff9bb3004cbd2cfa366f606dd250f94
Add test for configmap
fiaas/k8s
tests/k8s/test_configmap.py
tests/k8s/test_configmap.py
#!/usr/bin/env python
# -*- coding: utf-8

import mock
import pytest

from k8s.client import NotFound
from k8s.models.common import ObjectMeta
from k8s.models.configmap import ConfigMap

NAME = "my-name"
NAMESPACE = "my-namespace"


@pytest.mark.usefixtures("k8s_config")
class TestConfigMap(object):
    def test_created_if_not_exists(self, post, api_get):
        api_get.side_effect = NotFound()
        configmap = _create_default_configmap()
        call_params = configmap.as_dict()

        assert configmap._new
        configmap.save()
        assert not configmap._new

        pytest.helpers.assert_any_call(post, _uri(NAMESPACE), call_params)

    def test_updated_if_exists(self, get, put):
        mock_response = _create_mock_response()
        get.return_value = mock_response
        configmap = _create_default_configmap()

        from_api = ConfigMap.get_or_create(metadata=configmap.metadata, data=configmap.data)
        assert not from_api._new
        assert from_api.data == {"foo": "bar"}

        from_api.data = {"baz": "quux"}
        call_params = from_api.as_dict()

        from_api.save()
        pytest.helpers.assert_any_call(put, _uri(NAMESPACE, NAME), call_params)

    def test_deleted(self, delete):
        ConfigMap.delete(NAME, namespace=NAMESPACE)
        pytest.helpers.assert_any_call(delete, _uri(NAMESPACE, NAME))


def _create_mock_response():
    mock_response = mock.Mock()
    mock_response.json.return_value = {
        "apiVersion": "v1",
        "kind": "ConfigMap",
        "metadata": {
            "creationTimestamp": "2017-09-08T13:37:00Z",
            "generation": 1,
            "labels": {
                "test": "true"
            },
            "name": NAME,
            "namespace": NAMESPACE,
            "resourceVersion": "42",
            "selfLink": _uri(NAMESPACE, NAME),
            "uid": "d8f1ba26-b182-11e6-a364-fa163ea2a9c4"
        },
        "data": {
            "foo": "bar",
        },
    }
    return mock_response


def _create_default_configmap():
    object_meta = ObjectMeta(name=NAME, namespace=NAMESPACE, labels={"test": "true"})
    data = {"foo": "bar"}
    configmap = ConfigMap(metadata=object_meta, data=data)
    return configmap


def _uri(namespace, name=""):
    return "/api/v1/namespaces/{namespace}/configmaps/{name}".format(name=name, namespace=namespace)
apache-2.0
Python
db912d4097da45c2b14cce4f8f852cbc1e720750
add test framework
ahwagner/g2p-federated
tests/test_clientManager.py
tests/test_clientManager.py
from unittest import TestCase


class TestClientManager(TestCase):
    def test_add_http_client(self):
        self.fail()

    def test_add_local_client(self):
        self.fail()

    def test_restrictClient(self):
        self.fail()

    def test_load_clients_from_config(self):
        self.fail()

    def test_federated_featurephenotypeassociation_query(self):
        self.fail()
apache-2.0
Python
401a1aabde600336bd129cce8fb3884ed8945272
Create HCS_interpreter.py
helton-hcs/hcs_programming_language
HCS_interpreter.py
HCS_interpreter.py
#!/usr/bin/env python3 from HCS import HCS def interpret_loop(): hcs = HCS() while True: print(">> ", end="") try: command = input() except EOFError as e: print() return if command in ['quit', 'exit']: return try: print(hcs.eval(command)) except Exception as e: print("Error: \n" + repr(e)) if __name__ == '__main__': interpret_loop()
apache-2.0
Python
d58d2c6bb2805c8ebc95fe3445dc973560de9c79
Create generate.py
pranshumalviya2/playwith_rnn
Names/generate.py
Names/generate.py
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
from random import randint

inp = 'name.txt'
with open(inp) as f:
    content = f.readlines()
content = [x.lower() for x in content]

vocab = ''
for i in content:
    vocab += str(i).lower()
vocab = sorted(list(set(vocab)))
vocab = vocab[vocab.index('a'):]
vocab.append('\n')

vocab_indices = dict((c, i) for i, c in enumerate(vocab))
indices_vocab = dict((i, c) for i, c in enumerate(vocab))

def sample(preds, temperature=1):
    preds = np.asarray(preds[0]).astype('float64')
    preds = np.log(preds) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)

seq = randint(1, 6)
dataX = []
dataY = []
for x, word in enumerate(content):
    for i in range(0, len(word) - seq, 1):
        seq_in = word[i:i + seq]
        seq_out = word[i + seq]
        dataX.append([vocab_indices[char] for char in seq_in])
        dataY.append(vocab_indices[seq_out])
n_patterns = len(dataX)

X = np.reshape(dataX, (n_patterns, seq, 1))
X = X / float(len(vocab))
y = np_utils.to_categorical(dataY)

model = Sequential()
model.add(LSTM(256, return_sequences=True, input_shape=(X.shape[1], X.shape[2])))
model.add(LSTM(256))
model.add(Dense(y.shape[1]))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.summary()

filepath = 'nn.hdf5'
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
model.fit(X, y, batch_size=128, epochs=10, verbose=2, callbacks=callbacks_list)

filename = 'nn.hdf5'
model.load_weights(filename)

final = []
for temp in [.5, .8, .1, 1.2]:
    j = 1
    while j <= 500:
        new = []
        word = content[randint(0, len(content) - 1)][:seq]
        for k in word[-seq:]:
            new.append(vocab_indices[k])
        try:
            for t in range(10):
                x = np.reshape(new, (1, seq, 1))
                x = x / float(len(vocab))
                preds = model.predict(x, verbose=2)
                word += indices_vocab[sample(preds, temp)]
                new = []
                for k in word[-seq:]:
                    new.append(vocab_indices[k])
                if word[-1] == '\n':
                    word = word.strip().replace('\n', '')
                    break
            if (len(word) > seq and len(word) >= 2) and word not in final:
                final.append(word.capitalize())
        except Exception:
            # skip candidates that hit characters outside the vocabulary
            pass
        j += 1

new_text = open("New Names.txt", "w")
for i in sorted(final):
    new_text.write(i.replace('\n', '') + '\n')
new_text.close()
mit
Python
d4a04d4a0fffd8dbb006d86504fc3593ae800cc6
add bitly shorten function in proper directory
grahamhayes/will,mike-love/will,woohgit/will,dmuntean/will,mike-love/will,dmuntean/will,ammartins/will,skoczen/will,dmuntean/will,jacobbridges/will,pcurry/will,wontonst/will,mvanbaak/will,grahamhayes/will,Ironykins/will,pcurry/will,mvanbaak/will,chillipeper/will,wontonst/will,pcurry/will,woohgit/will,grahamhayes/will,fredsmith/will,brandonsturgeon/will,brandonsturgeon/will,Regner/will,fredsmith/will,shadow7412/will,Regner/will,shadow7412/will,mvanbaak/will,ammartins/will,Ironykins/will,mike-love/will,chillipeper/will,fredsmith/will,brandonsturgeon/will,Regner/will,wontonst/will,jacobbridges/will,skoczen/will,woohgit/will,ammartins/will,Ironykins/will,jacobbridges/will,skoczen/will,chillipeper/will,shadow7412/will
will/plugins/productivity/bitly.py
will/plugins/productivity/bitly.py
# coding: utf-8
import bitly_api  # pip install bitly_api

from will.plugin import WillPlugin
from will.decorators import (respond_to, periodic, hear, randomly, route,
                             rendered_template, require_settings)
from will import settings

# BITLY_ACCESS_TOKEN = ' <get_access_token_from_bitly.com> '


class BitlyPlugin(WillPlugin):
    """Class for creating Bitly shortened URLs."""

    @respond_to("^bitly (?P<long_url>.*)$")
    @require_settings("BITLY_ACCESS_TOKEN",)
    def get_bitly_shorten_url(self, message, long_url, short_url=None):
        """Reply with a shortened URL fetched from bit.ly through its API."""
        # use oauth2 endpoints
        c = bitly_api.Connection(access_token=settings.BITLY_ACCESS_TOKEN)
        response = c.shorten(uri=long_url)
        short_url = response['url']
        self.reply("Shortened URL: %s" % short_url, message=message)
mit
Python
6922ad3922d187a3e05d339a49449292a1d7efd6
add Prototype pattern
JakubVojvoda/design-patterns-python
prototype/Prototype.py
prototype/Prototype.py
# # Python Design Patterns: Prototype # Author: Jakub Vojvoda [github.com/JakubVojvoda] # 2016 # # Source code is licensed under MIT License # (for more details see LICENSE) # import sys import copy # # Prototype # declares an interface for cloning itself # class Prototype: def clone(self): pass def getType(self): pass # # Concrete Prototypes # implement an operation for cloning itself # class ConcretePrototypeA(Prototype): def clone(self): return copy.deepcopy(self) def getType(self): return "type A" class ConcretePrototypeB(Prototype): def clone(self): return copy.deepcopy(self) def getType(self): return "type B" # # Client # creates a new object by asking a prototype to clone itself # class Client: def __init__(self): self._types = [ConcretePrototypeA(), ConcretePrototypeB()] def make(self, index): return self._types[index].clone() if __name__ == "__main__": client = Client() prototype = client.make(0) print(prototype.getType()) prototype = client.make(1) print(prototype.getType())
mit
Python
841487ab4d0e05fa6f0780cf39973072417ec701
Complete cherry-pick of PR#95
CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend
service/management/commands/start_celery.py
service/management/commands/start_celery.py
import os

from django.core.management.base import BaseCommand
from subprocess import call


class Command(BaseCommand):
    help = 'Custom manage.py command to start celery.'

    def handle(self, *args, **options):
        logfile = "celery_node.log"
        if not os.path.isfile(logfile):
            # create an empty log file; the with-block closes it automatically
            with open(logfile, 'w+'):
                pass
        call(("celery worker --app=atmosphere --loglevel=INFO -c 5 --logfile=%s" % logfile).split())
apache-2.0
Python
d1ee86414d45c571571d75434b8c2256b0120732
Add py solution for 563. Binary Tree Tilt
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
py/binary-tree-tilt.py
py/binary-tree-tilt.py
# Definition for a binary tree node. # class TreeNode(object): # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution(object): def findTilt(self, root): """ :type root: TreeNode :rtype: int """ return self.do_findTilt(root)[1] def do_findTilt(self, cur): if cur is None: return (0, 0) lsum, ltiltsum = self.do_findTilt(cur.left) rsum, rtiltsum = self.do_findTilt(cur.right) tilt = abs(lsum - rsum) return lsum + rsum + cur.val, ltiltsum + rtiltsum + tilt
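# Minimal sanity check (not part of the original commit), using the
# standard LeetCode TreeNode definition commented out above:
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    # Only the root has a non-zero tilt: |2 - 3| = 1.
    print(Solution().findTilt(root))  # expected: 1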
apache-2.0
Python
34bfea59b600f9dac457e2a16a812ce2fb768d15
Add graph.py to collect runtime data on workers and tasks (#8)
matyasselmeci/dask_condor,matyasselmeci/dask_condor
chtc/graph.py
chtc/graph.py
#!/usr/bin/env python from __future__ import print_function import csv import itertools import time from distributed import Client START_TIMEOUT = 900 # 15 min MAX_COLLECT_TIME = 86400 # 1 day def running_task_list(cli): return list(itertools.chain.from_iterable(cli.processing().values())) cli = Client('127.0.0.1:8786') print("Waiting for tasks to start running") timeout = time.time() + START_TIMEOUT while not cli.ncores(): time.sleep(5) if time.time() > timeout: raise Exception("workers never started") print("First worker connected. Starting data collection.") start_time = time.time() end_time = time.time() + MAX_COLLECT_TIME with open('graph.csv', 'wb') as outfile: writer = csv.writer(outfile) while cli.ncores() and time.time() < end_time: n_running_tasks = len(running_task_list(cli)) n_cores = sum(cli.ncores().values()) n_futures = len(cli.who_has().keys()) row = [time.time() - start_time, n_cores, n_running_tasks, n_futures] print("{0:>6.0f}s {1:>5d} cores {2:>5d} tasks {3:>5d} futures".format(*row)) writer.writerow(row) time.sleep(5) print("Done with data collection.")
apache-2.0
Python
925ff38344b5058ce196877e1fdcf79a1d1f6719
Add basic test for checking messages are received correctly
m2u/m2u
ue4/tests/test_messaging.py
ue4/tests/test_messaging.py
import pytest from m2u.ue4 import connection def test_send_message_size(): """Send a big message, larger than buffer size, so the server has to read multiple chunks. """ message = "TestMessageSize " + ("abcdefg" * 5000) connection.connect() result = connection.send_message(message) assert result == str(len(message)) connection.disconnect()
mit
Python
ff700e5d6fc5e0c5062f687110563d7f0312a3f0
Set up test suite to ensure server admin routes are added.
sheagcraig/sal,salopensource/sal,salopensource/sal,sheagcraig/sal,salopensource/sal,sheagcraig/sal,sheagcraig/sal,salopensource/sal
server/tests/test_admin.py
server/tests/test_admin.py
"""General functional tests for the API endpoints.""" from django.test import TestCase, Client # from django.urls import reverse from rest_framework import status from server.models import ApiKey, User # from api.v2.tests.tools import SalAPITestCase class AdminTest(TestCase): """Test the admin site is configured to have all expected views.""" admin_endpoints = { 'apikey', 'businessunit', 'condition', 'fact', 'historicalfact', 'installedupdate', 'machinedetailplugin', 'machinegroup', 'machine', 'pendingappleupdate', 'pendingupdate', 'pluginscriptrow', 'pluginscriptsubmission', 'plugin', 'report', 'salsetting', 'updatehistoryitem', 'updatehistory', 'userprofile'} def setUp(self): self.client = Client() self.user = User.objects.create(username='test') def test_no_access(self): """Test that unauthenticated requests redirected to login.""" for path in self.admin_endpoints: response = self.client.get('/admin/server/{}'.format(path)) # Redirect to login page. self.assertEqual(response.status_code, status.HTTP_301_MOVED_PERMANENTLY) def test_ro_access(self): """Test that ro requests are rejected. RO users should not have access to the admin site (unless they have `is_staff = True`. """ self.user.user_profile = 'RO' self.user.save() self.client.force_login(self.user) for path in self.admin_endpoints: url = '/admin/server/{}/'.format(path) response = self.client.get(url) msg = 'Failed for path: "{}"'.format(path) self.assertEqual(response.status_code, status.HTTP_302_FOUND, msg=msg) self.assertEqual(response.url, '/admin/login/?next=/admin/server/{}/'.format(path), msg=msg) def test_ga_access(self): """Ensure GA userprofile grants admin page access.""" self.user.user_profile = 'GA' self.user.save() self.client.force_login(self.user) for path in self.admin_endpoints: url = '/admin/server/{}/'.format(path) response = self.client.get(url, follow=True) msg = 'Failed for path: "{}"'.format(path) self.assertEqual(response.status_code, status.HTTP_200_OK, msg=msg)
apache-2.0
Python
2913d840b63746669ac5695bd244abd6db24fe5a
Create script that prepares LaGeR strings for use with a machine learning training algorithm
andresodio/lager,andresodio/lager,andresodio/lager,andresodio/lager
lager_ml/lager_training_prep.py
lager_ml/lager_training_prep.py
#!/usr/bin/env python3 # This program prepares LaGeR strings for use with a machine learning training # algorithm. # # It expands the string or set of strings to specific length (number of # features), then generates variants for each of those. Finally, it converts # the variants into numbers and adds the result to a dataset file. import sys from subprocess import call if (len(sys.argv) < 5): print("lager_training_prep [GESTURE_NAME] [GESTURE_LABEL] [NUM_FEATURES] [NUM_VARIANTS]") exit() gesture_name = sys.argv[1] gesture_label = sys.argv[2] num_features = sys.argv[3] num_variants = sys.argv[4] print("Gesture name: ", gesture_name) print("Gesture label: ", gesture_label) print("Number of features: ", num_features) print("Number of variants: ", num_variants) orig_gesture_filename = gesture_name + ".dat" gesture_expanded_filename = gesture_name + "_expanded.dat" gesture_variants_filename = gesture_name + "_expanded_variants.dat" gesture_numbers_filename = gesture_name + "_expanded_variants_numbers.csv" call(['./lager_expander.py', orig_gesture_filename, num_features]) call(['../lager_generator/lager_generator.py', gesture_expanded_filename, num_variants]) call(['./lager_file_to_numbers.py', gesture_variants_filename, gesture_label]) call('cat ' + gesture_numbers_filename + ' >>'+ ' dataset.csv', shell=True)
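# Example invocation (hypothetical gesture name and label shown):
#
#   ./lager_training_prep.py circle 1 20 100
#
# This expects circle.dat next to the script, runs the expander, generator
# and number-conversion helpers in turn, and appends the numeric variants
# to dataset.csv.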
mit
Python
352379690275e970693a06ed6981f530b6704354
Add index to Task.status
dropbox/changes,dropbox/changes,dropbox/changes,dropbox/changes
migrations/versions/181adec926e2_add_status_index_to_task.py
migrations/versions/181adec926e2_add_status_index_to_task.py
"""Add status index to task Revision ID: 181adec926e2 Revises: 43397e521791 Create Date: 2016-10-03 17:41:44.038137 """ # revision identifiers, used by Alembic. revision = '181adec926e2' down_revision = '43397e521791' from alembic import op def upgrade(): op.create_index('idx_task_status', 'task', ['status'], unique=False) def downgrade(): op.drop_index('id_task_status', table_name='task')
apache-2.0
Python
6ccf99966461bd8545654084584d58093dac03d5
Add missing version file
pyranges/pyrle,pyranges/pyrle
pyrle/version.py
pyrle/version.py
__version__ = "0.0.17"
mit
Python
f4e08d41d53cf74f8a53efeb7e238de6a98946cc
add script to find all referenced hashes
trshaffer/cvmfs,djw8605/cvmfs,alhowaidi/cvmfsNDN,DrDaveD/cvmfs,DrDaveD/cvmfs,MicBrain/cvmfs,djw8605/cvmfs,cvmfs/cvmfs,trshaffer/cvmfs,cvmfs-testing/cvmfs,trshaffer/cvmfs,djw8605/cvmfs,Moliholy/cvmfs,Gangbiao/cvmfs,cvmfs-testing/cvmfs,trshaffer/cvmfs,Gangbiao/cvmfs,alhowaidi/cvmfsNDN,Moliholy/cvmfs,reneme/cvmfs,reneme/cvmfs,cvmfs/cvmfs,DrDaveD/cvmfs,reneme/cvmfs,cvmfs/cvmfs,Moliholy/cvmfs,djw8605/cvmfs,cvmfs-testing/cvmfs,alhowaidi/cvmfsNDN,MicBrain/cvmfs,DrDaveD/cvmfs,Moliholy/cvmfs,Gangbiao/cvmfs,reneme/cvmfs,Moliholy/cvmfs,alhowaidi/cvmfsNDN,DrDaveD/cvmfs,MicBrain/cvmfs,cvmfs/cvmfs,reneme/cvmfs,cvmfs-testing/cvmfs,Gangbiao/cvmfs,DrDaveD/cvmfs,cvmfs/cvmfs,Gangbiao/cvmfs,MicBrain/cvmfs,cvmfs/cvmfs,djw8605/cvmfs,DrDaveD/cvmfs,alhowaidi/cvmfsNDN,cvmfs/cvmfs,trshaffer/cvmfs,MicBrain/cvmfs,cvmfs-testing/cvmfs
add-ons/tools/get_referenced_hashes.py
add-ons/tools/get_referenced_hashes.py
#!/usr/bin/env python import sys import cvmfs def usage(): print sys.argv[0] + " <local repo name | remote repo url> [root catalog]" print "This script walks the catalogs and generates a list of all referenced content hashes." # get referenced hashes from a single catalog (files, chunks, nested catalogs) def get_hashes_for_catalog(catalog): print >> sys.stderr, "Processing" , catalog.hash , catalog query = " SELECT DISTINCT \ lower(hex(hash)) \ FROM catalog \ WHERE hash != 0 \ UNION \ SELECT DISTINCT \ lower(hex(hash)) || 'P' \ FROM chunks \ WHERE hash != 0 \ UNION \ SELECT DISTINCT \ sha1 || 'C' \ FROM nested_catalogs;" return { res[0] for res in catalog.run_sql(query) } def get_hashes_for_catalog_tree(repo, root_catalog): hashes = { root_catalog.hash + "C" } for catalog in repo.catalogs(root_catalog): hashes = hashes | get_hashes_for_catalog(catalog) return hashes def get_hashes_for_revision(repo, root_hash = None): root_catalog = repo.retrieve_catalog(root_hash) if root_hash else repo.retrieve_root_catalog() return get_hashes_for_catalog_tree(repo, root_catalog) # check input values if len(sys.argv) != 2 and len(sys.argv) != 3: usage() sys.exit(1) # get input parameters repo_identifier = sys.argv[1] root_catalog_hash = sys.argv[2] if len(sys.argv) == 3 else None repo = cvmfs.open_repository(repo_identifier) hashes = get_hashes_for_revision(repo, root_catalog_hash) print '\n'.join(hashes)
bsd-3-clause
Python
df852b2ee81756fa62a98e425e156530333bf5a1
add migration to change order of participation choices
liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin
meinberlin/apps/plans/migrations/0033_change_order_participation_choices.py
meinberlin/apps/plans/migrations/0033_change_order_participation_choices.py
# -*- coding: utf-8 -*- # Generated by Django 1.11.18 on 2019-01-28 13:27 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('meinberlin_plans', '0032_rename_topic_field'), ] operations = [ migrations.AlterField( model_name='plan', name='participation', field=models.SmallIntegerField(choices=[(0, 'Yes'), (1, 'No'), (2, 'Still undecided')], verbose_name='Participation'), ), ]
agpl-3.0
Python
92debba4bb0b0064b865a53b40476effa4d09c78
Undo Framework example
satishgoda/learningqt,satishgoda/learningqt
pyside/demos/framework/undo/document.py
pyside/demos/framework/undo/document.py
from collections import namedtuple

from PySide.QtGui import QWidget, QPalette, QPainter
from PySide.QtCore import Qt, QRect

ShapeType = namedtuple('ShapeType', 'Rectangle Circle Triangle')(*range(3))


class Shape(object):
    def __init__(self, type=ShapeType.Rectangle, color=Qt.red, rect=QRect()):
        self._type = type
        self._color = color
        self._rect = rect

    @property
    def type(self):
        return self._type

    @property
    def color(self):
        return self._color

    @property
    def rect(self):
        return self._rect


class Document(QWidget):
    def __init__(self, parent=None):
        super(Document, self).__init__(parent)

        self._shapeList = []

        self.setAutoFillBackground(True)
        self.setBackgroundRole(QPalette.Base)

        pal = QPalette()
        pal.setColor(QPalette.HighlightedText, Qt.red)
        self.setPalette(pal)

    def paintEvent(self, event):
        paintRegion = event.region()
        painter = QPainter(self)

        for shape in self._shapeList:
            rect = shape.rect
            if not paintRegion.contains(rect):
                continue

            shapeType = shape.type
            painter.setBrush(shape.color)
            if shapeType == ShapeType.Rectangle:
                print "rectangle"
                painter.drawRect(rect)
            elif shapeType == ShapeType.Circle:
                print "circle"
                painter.drawEllipse(rect)


s1 = Shape(ShapeType.Rectangle, color=Qt.green, rect=QRect(0, 0, 100, 100))
s2 = Shape(ShapeType.Circle, rect=QRect(200, 200, 100, 100))

d = Document()
d._shapeList = [s1, s2]
d.show()
mit
Python
fe08242647962af0fdfab0ce34417b6a6079ed65
add another import now missing
drufat/sympy,drufat/sympy,chaffra/sympy,chaffra/sympy,postvakje/sympy,Titan-C/sympy,kaushik94/sympy,mafiya69/sympy,rahuldan/sympy,yashsharan/sympy,jerli/sympy,sampadsaha5/sympy,jaimahajan1997/sympy,aktech/sympy,madan96/sympy,jerli/sympy,jaimahajan1997/sympy,ChristinaZografou/sympy,postvakje/sympy,souravsingh/sympy,yashsharan/sympy,drufat/sympy,souravsingh/sympy,ChristinaZografou/sympy,mafiya69/sympy,sampadsaha5/sympy,skidzo/sympy,sampadsaha5/sympy,yashsharan/sympy,skidzo/sympy,rahuldan/sympy,hargup/sympy,kaushik94/sympy,rahuldan/sympy,jerli/sympy,aktech/sympy,hargup/sympy,madan96/sympy,mafiya69/sympy,postvakje/sympy,kaushik94/sympy,skidzo/sympy,jaimahajan1997/sympy,aktech/sympy,hargup/sympy,Titan-C/sympy,ChristinaZografou/sympy,Titan-C/sympy,chaffra/sympy,madan96/sympy,souravsingh/sympy
sympy/strategies/tests/test_traverse.py
sympy/strategies/tests/test_traverse.py
from sympy.strategies.traverse import (top_down, bottom_up, sall, top_down_once, bottom_up_once, basic_fns) from sympy.strategies.util import expr_fns from sympy import Basic, symbols, Symbol, S zero_symbols = lambda x: S.Zero if isinstance(x, Symbol) else x x,y,z = symbols('x,y,z') def test_sall(): zero_onelevel = sall(zero_symbols) assert zero_onelevel(Basic(x, y, Basic(x, z))) == \ Basic(0, 0, Basic(x, z)) def test_bottom_up(): _test_global_traversal(bottom_up) _test_stop_on_non_basics(bottom_up) def test_top_down(): _test_global_traversal(top_down) _test_stop_on_non_basics(top_down) def _test_global_traversal(trav): x,y,z = symbols('x,y,z') zero_all_symbols = trav(zero_symbols) assert zero_all_symbols(Basic(x, y, Basic(x, z))) == \ Basic(0, 0, Basic(0, 0)) def _test_stop_on_non_basics(trav): def add_one_if_can(expr): try: return expr + 1 except: return expr expr = Basic(1, 'a', Basic(2, 'b')) expected = Basic(2, 'a', Basic(3, 'b')) rl = trav(add_one_if_can) assert rl(expr) == expected class Basic2(Basic): pass rl = lambda x: Basic2(*x.args) if isinstance(x, Basic) else x def test_top_down_once(): top_rl = top_down_once(rl) assert top_rl(Basic(1, 2, Basic(3, 4))) == \ Basic2(1, 2, Basic(3, 4)) def test_bottom_up_once(): bottom_rl = bottom_up_once(rl) assert bottom_rl(Basic(1, 2, Basic(3, 4))) == \ Basic(1, 2, Basic2(3, 4)) def test_expr_fns(): from sympy.strategies.rl import rebuild from sympy import Add x, y = map(Symbol, 'xy') expr = x + y**3 e = bottom_up(lambda x: x + 1, expr_fns)(expr) b = bottom_up(lambda x: Basic.__new__(Add, x, 1), basic_fns)(expr) assert rebuild(b) == e
from sympy.strategies.traverse import (top_down, bottom_up, sall, top_down_once, bottom_up_once, expr_fns, basic_fns) from sympy import Basic, symbols, Symbol, S zero_symbols = lambda x: S.Zero if isinstance(x, Symbol) else x x,y,z = symbols('x,y,z') def test_sall(): zero_onelevel = sall(zero_symbols) assert zero_onelevel(Basic(x, y, Basic(x, z))) == \ Basic(0, 0, Basic(x, z)) def test_bottom_up(): _test_global_traversal(bottom_up) _test_stop_on_non_basics(bottom_up) def test_top_down(): _test_global_traversal(top_down) _test_stop_on_non_basics(top_down) def _test_global_traversal(trav): x,y,z = symbols('x,y,z') zero_all_symbols = trav(zero_symbols) assert zero_all_symbols(Basic(x, y, Basic(x, z))) == \ Basic(0, 0, Basic(0, 0)) def _test_stop_on_non_basics(trav): def add_one_if_can(expr): try: return expr + 1 except: return expr expr = Basic(1, 'a', Basic(2, 'b')) expected = Basic(2, 'a', Basic(3, 'b')) rl = trav(add_one_if_can) assert rl(expr) == expected class Basic2(Basic): pass rl = lambda x: Basic2(*x.args) if isinstance(x, Basic) else x def test_top_down_once(): top_rl = top_down_once(rl) assert top_rl(Basic(1, 2, Basic(3, 4))) == \ Basic2(1, 2, Basic(3, 4)) def test_bottom_up_once(): bottom_rl = bottom_up_once(rl) assert bottom_rl(Basic(1, 2, Basic(3, 4))) == \ Basic(1, 2, Basic2(3, 4)) def test_expr_fns(): from sympy.strategies.rl import rebuild from sympy import Add x, y = map(Symbol, 'xy') expr = x + y**3 e = bottom_up(lambda x: x + 1, expr_fns)(expr) b = bottom_up(lambda x: Basic.__new__(Add, x, 1), basic_fns)(expr) assert rebuild(b) == e
bsd-3-clause
Python
0c29b431a0f5ce9115d7acdcaaabbd27546949c6
Add test for contact success view.
cdriehuys/chmvh-website,cdriehuys/chmvh-website,cdriehuys/chmvh-website
chmvh_website/contact/tests/views/test_success_view.py
chmvh_website/contact/tests/views/test_success_view.py
from django.test import RequestFactory from django.urls import reverse from contact.views import SuccessView class TestSuccessView(object): """Test cases for the success view""" url = reverse('contact:success') def test_get(self, rf: RequestFactory): """Test sending a GET request to the view. Sending a GET request to the view should render the success page. """ request = rf.get(self.url) response = SuccessView.as_view()(request) assert response.status_code == 200 assert 'contact/success.html' in response.template_name
mit
Python
f66a60411b4e1cb30ac1fde78735ba38e99289cf
Create cfprefs.py
dataJAR/jnuc2016
cfprefs.py
cfprefs.py
#!/usr/bin/python import CoreFoundation domain = 'com.apple.appstore' key = 'restrict-store-require-admin-to-install' key_value = CoreFoundation.CFPreferencesCopyAppValue(key, domain) print 'Key Value = ', key_value key_forced = CoreFoundation.CFPreferencesAppValueIsForced(key, domain) print 'Key Forced = ', key_forced
apache-2.0
Python
e5fecce2693056ac53f7d34d00801829ea1094c3
add JPEG decoder CPU perf bench
yao-matrix/mProto,yao-matrix/mProto,yao-matrix/mProto,yao-matrix/mProto,yao-matrix/mProto
tools/jpegdec_perf/reader_perf_multi.py
tools/jpegdec_perf/reader_perf_multi.py
import cv2 import os from turbojpeg import TurboJPEG, TJPF_GRAY, TJSAMP_GRAY, TJFLAG_PROGRESSIVE import time import threading # specifying library path explicitly # jpeg = TurboJPEG(r'D:\turbojpeg.dll') # jpeg = TurboJPEG('/usr/lib64/libturbojpeg.so') # jpeg = TurboJPEG('/usr/local/lib/libturbojpeg.dylib') # using default library installation def decode(): jpeg = TurboJPEG() image_folder = '/home/matrix/data/val/' cnt = 0 time_sum = 0.0 for fname in sorted(os.listdir(image_folder)): fpath = os.path.join(image_folder, fname) # print(fpath) in_file = open(fpath, 'rb') jpg = in_file.read() cnt += 1 # (width, height, jpeg_subsample, jpeg_colorspace) = jpeg.decode_header(jpg) # print(width, height, jpeg_subsample, jpeg_colorspace) begin = time.time() * 1000 raw = jpeg.decode(jpg) end = time.time() * 1000 time_sum += end - begin in_file.close() print("image cnt: ", cnt) print("time per image is(ms):", time_sum / cnt) for i in range(52): print('thread %s is running...' % threading.current_thread().name) t = threading.Thread(target=decode, name='DecodeThread') t.start() # t.join() print('thread %s ended.' % threading.current_thread().name)
apache-2.0
Python
4e5c0ea499dd596d3719717166172113e7209d1e
check in script, authored by Joseph Bisch
Winetricks/winetricks,Winetricks/winetricks
src/github-api-releases.py
src/github-api-releases.py
# Homepage: https://github.com/josephbisch/test-releases-api/blob/master/github-api-releases.py
#
# Copyright
# Copyright (C) 2016 Joseph Bisch <joseph.bisch AT gmail.com>
#
# License
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import requests
import getpass
import json
import sys
import os
import ntpath
import magic

from urllib.parse import urljoin

GITHUB_API = 'https://api.github.com'


def check_status(res, j):
    if res.status_code >= 400:
        msg = j.get('message', 'UNDEFINED')
        print('ERROR: %s' % msg)
        return 1
    return 0


def create_release(owner, repo, tag, token):
    url = urljoin(GITHUB_API, '/'.join(['repos', owner, repo, 'releases']))
    headers = {'Authorization': token}
    data = {'tag_name': tag, 'name': tag, 'body': 'winetricks - %s' % tag}
    res = requests.post(url, auth=(owner, token), data=json.dumps(data), headers=headers)
    j = json.loads(res.text)
    if check_status(res, j):
        return 1
    return 0


def upload_asset(path, owner, repo, tag):
    token = os.environ['GITHUB_TOKEN']
    url = urljoin(GITHUB_API, '/'.join(['repos', owner, repo, 'releases', 'tags', tag]))
    res = requests.get(url)
    j = json.loads(res.text)
    if check_status(res, j):
        # release must not exist, creating release from tag
        if create_release(owner, repo, tag, token):
            return 0
        else:
            # Need to start over with uploading now that release is created
            # Return 1 to indicate we need to run upload_asset again
            return 1
    upload_url = j['upload_url']
    upload_url = upload_url.split('{')[0]
    fname = ntpath.basename(path)
    # read the asset as bytes so binary files survive the upload
    with open(path, 'rb') as f:
        contents = f.read()
    # ask python-magic for the MIME type (not the human-readable description)
    content_type = magic.from_file(path, mime=True)
    headers = {'Content-Type': content_type, 'Authorization': token}
    params = {'name': fname}
    res = requests.post(upload_url, data=contents, auth=(owner, token), headers=headers, params=params)
    j = json.loads(res.text)
    if check_status(res, j):
        return 0
    print('SUCCESS: %s uploaded' % fname)
    return 0


if __name__ == '__main__':
    path = sys.argv[1]
    owner = sys.argv[2]
    repo = sys.argv[3]
    tag = sys.argv[4]
    if not os.path.isabs(path):
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), path)
    ret = 1
    # Run upload_asset at least once.
    while ret:
        ret = upload_asset(path, owner, repo, tag)
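# Example invocation (all values below are hypothetical):
#
#   GITHUB_TOKEN=<token> python github-api-releases.py \
#       winetricks-20160109.tar.gz Winetricks winetricks 20160109
#
# i.e. <asset path> <owner> <repo> <tag>; the token is read from the
# GITHUB_TOKEN environment variable inside upload_asset.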
lgpl-2.1
Python
e10f6ebf9eba5cf734bbeead68a3b36f9db8dae8
add ridesharing
kinnou02/navitia,xlqian/navitia,Tisseo/navitia,pbougue/navitia,pbougue/navitia,Tisseo/navitia,xlqian/navitia,kinnou02/navitia,Tisseo/navitia,kinnou02/navitia,CanalTP/navitia,Tisseo/navitia,CanalTP/navitia,kinnou02/navitia,xlqian/navitia,pbougue/navitia,xlqian/navitia,xlqian/navitia,pbougue/navitia,CanalTP/navitia,CanalTP/navitia,Tisseo/navitia,CanalTP/navitia
source/jormungandr/jormungandr/street_network/ridesharing.py
source/jormungandr/jormungandr/street_network/ridesharing.py
# Copyright (c) 2001-2016, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
#     the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
#     powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
#     a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
import logging
import copy
import itertools

from jormungandr.street_network.street_network import AbstractStreetNetworkService, StreetNetworkPathType
from jormungandr import utils
from jormungandr.utils import get_pt_object_coord, SectionSorter
from navitiacommon import response_pb2


class Ridesharing(AbstractStreetNetworkService):
    """
    TODO:
    """

    def __init__(self, instance, service_url, modes=None, id=None, timeout=10, api_key=None, **kwargs):
        self.instance = instance
        self.modes = modes or []
        self.sn_system_id = id or 'ridesharing'

        config = kwargs.get('street_network', None)
        if 'service_url' not in config['args']:
            config['args'].update({'service_url': None})
        if 'instance' not in config['args']:
            config['args'].update({'instance': instance})
        config['args'].update({'modes': self.modes})

        self.street_network = utils.create_object(config)

    def status(self):
        return {'id': unicode(self.sn_system_id), 'class': self.__class__.__name__, 'modes': self.modes}

    def _direct_path(
        self, mode, pt_object_origin, pt_object_destination, fallback_extremity, request, direct_path_type
    ):
        # TODO: the ridesharing_speed is stored in car_no_park_speed;
        # a proper way to handle this is to override car_no_park_speed with the ridesharing_speed here
        # copy_request = copy.deepcopy(request)
        # copy_request["car_no_park_speed"] = copy_request["ridesharing_speed"]
        response = self.street_network._direct_path(
            mode, pt_object_origin, pt_object_destination, fallback_extremity, request, direct_path_type
        )
        if response:
            for journey in response.journeys:
                for section in journey.sections:
                    section.street_network.mode = response_pb2.Ridesharing
        return response

    def get_street_network_routing_matrix(
        self, origins, destinations, street_network_mode, max_duration, request, **kwargs
    ):
        # TODO: the ridesharing_speed is stored in car_no_park_speed;
        # a proper way to handle this is to override car_no_park_speed with the ridesharing_speed here
        # copy_request = copy.deepcopy(request)
        # copy_request["car_no_park_speed"] = copy_request["ridesharing_speed"]
        return self.street_network.get_street_network_routing_matrix(
            origins, destinations, street_network_mode, max_duration, request, **kwargs
        )

    def make_path_key(self, mode, orig_uri, dest_uri, streetnetwork_path_type, period_extremity):
        """
        :param orig_uri, dest_uri, mode: matters obviously
        :param streetnetwork_path_type: whether it's a fallback at the beginning or the end of the
            journey, or a direct path without PT; this also matters, especially for car (to know if
            we park before or after)
        :param period_extremity: is a PeriodExtremity (a datetime and its meaning on the fallback period)
        Nota: period_extremity is not taken into consideration so far because we assume that a direct
        path from A to B remains the same even if the departure times are different (no realtime)
        """
        return self.street_network.make_path_key(mode, orig_uri, dest_uri, streetnetwork_path_type, None)
agpl-3.0
Python
c650d64247d63d2af7a8168795e7edae5c9ef6ef
Add realtime chart plotting example
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
realtime-plot.py
realtime-plot.py
import time, random
import math
from collections import deque

start = time.time()


class RealtimePlot:
    def __init__(self, axes, max_entries=100):
        self.axis_x = deque(maxlen=max_entries)
        self.axis_y = deque(maxlen=max_entries)
        self.axes = axes
        self.max_entries = max_entries

        self.lineplot, = axes.plot([], [], "ro-")
        self.axes.set_autoscaley_on(True)

    def add(self, x, y):
        self.axis_x.append(x)
        self.axis_y.append(y)
        self.lineplot.set_data(self.axis_x, self.axis_y)
        self.axes.set_xlim(self.axis_x[0], self.axis_x[-1] + 1e-15)
        self.axes.relim()
        self.axes.autoscale_view()  # rescale the y-axis

    def animate(self, figure, callback, interval=50):
        import matplotlib.animation as animation

        def wrapper(frame_index):
            self.add(*callback(frame_index))
            self.axes.relim()
            self.axes.autoscale_view()  # rescale the y-axis
            return self.lineplot

        # keep a reference on self, otherwise the animation is garbage-collected
        self.animation = animation.FuncAnimation(figure, wrapper, interval=interval)


def main():
    from matplotlib import pyplot as plt

    fig, axes = plt.subplots()
    display = RealtimePlot(axes)
    display.animate(fig, lambda frame_index: (time.time() - start, random.random() * 100))
    plt.show()

    fig, axes = plt.subplots()
    display = RealtimePlot(axes)
    while True:
        display.add(time.time() - start, random.random() * 100)
        plt.pause(0.001)


if __name__ == "__main__":
    main()
mit
Python
8ae82037dde45019cae8912f45a36cf3a362c444
Revert "HAProxy uses milliseconds ..."
stackforge/python-openstacksdk,openstack/python-openstacksdk,dtroyer/python-openstacksdk,openstack/python-openstacksdk,stackforge/python-openstacksdk,dtroyer/python-openstacksdk,briancurtin/python-openstacksdk,briancurtin/python-openstacksdk
openstack/network/v2/health_monitor.py
openstack/network/v2/health_monitor.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network import network_service from openstack import resource2 as resource class HealthMonitor(resource.Resource): resource_key = 'healthmonitor' resources_key = 'healthmonitors' base_path = '/lbaas/healthmonitors' service = network_service.NetworkService() # capabilities allow_create = True allow_get = True allow_update = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'delay', 'expected_codes', 'http_method', 'max_retries', 'timeout', 'type', 'url_path', is_admin_state_up='adminstate_up', project_id='tenant_id', ) # Properties #: The time, in seconds, between sending probes to members. delay = resource.Body('delay') #: Expected HTTP codes for a passing HTTP(S) monitor. expected_codes = resource.Body('expected_codes') #: The HTTP method that the monitor uses for requests. http_method = resource.Body('http_method') #: The administrative state of the health monitor, which is up #: ``True`` or down ``False``. *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: Maximum consecutive health probe tries. max_retries = resource.Body('max_retries') #: Name of the health monitor. name = resource.Body('name') #: List of pools associated with this health monitor #: *Type: list of dicts which contain the pool IDs* pool_ids = resource.Body('pools', type=list) #: The ID of the project this health monitor is associated with. project_id = resource.Body('tenant_id') #: The maximum number of seconds for a monitor to wait for a #: connection to be established before it times out. This value must #: be less than the delay value. timeout = resource.Body('timeout') #: The type of probe sent by the load balancer to verify the member #: state, which is PING, TCP, HTTP, or HTTPS. type = resource.Body('type') #: Path portion of URI that will be probed if type is HTTP(S). url_path = resource.Body('url_path')
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack.network import network_service from openstack import resource2 as resource class HealthMonitor(resource.Resource): resource_key = 'healthmonitor' resources_key = 'healthmonitors' base_path = '/lbaas/healthmonitors' service = network_service.NetworkService() # capabilities allow_create = True allow_get = True allow_update = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( 'delay', 'expected_codes', 'http_method', 'max_retries', 'timeout', 'type', 'url_path', is_admin_state_up='adminstate_up', project_id='tenant_id', ) # Properties #: The time, in milliseconds, between sending probes to members. delay = resource.Body('delay') #: Expected HTTP codes for a passing HTTP(S) monitor. expected_codes = resource.Body('expected_codes') #: The HTTP method that the monitor uses for requests. http_method = resource.Body('http_method') #: The administrative state of the health monitor, which is up #: ``True`` or down ``False``. *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: Maximum consecutive health probe tries. max_retries = resource.Body('max_retries') #: Name of the health monitor. name = resource.Body('name') #: List of pools associated with this health monitor #: *Type: list of dicts which contain the pool IDs* pool_ids = resource.Body('pools', type=list) #: The ID of the project this health monitor is associated with. project_id = resource.Body('tenant_id') #: The maximum number of milliseconds for a monitor to wait for a #: connection to be established before it times out. This value must #: be less than the delay value. timeout = resource.Body('timeout') #: The type of probe sent by the load balancer to verify the member #: state, which is PING, TCP, HTTP, or HTTPS. type = resource.Body('type') #: Path portion of URI that will be probed if type is HTTP(S). url_path = resource.Body('url_path')
apache-2.0
Python
f7e4ca11c7bfc35bf0fd6becd2a5d5fdd2ca5ed5
Add a script to split data with partitions.
juckele/ddr-grader,juckele/ddr-grader
src/main/python/partition_data.py
src/main/python/partition_data.py
import csv; import random; import sys; in_file = str(sys.argv[1]) out_file = str(sys.argv[2]) num_partitions = int(sys.argv[3]) header = []; partitions = []; for i in range(num_partitions): partitions.append([]) # Load all the training rows row_num = 0; with open(in_file) as file: reader = csv.reader(file); header = reader.next(); for row in reader: partitions[row_num % num_partitions].append(row); row_num += 1; # Write test and train files for k partitions for i in range(num_partitions): train_rows = [] test_rows = partitions[i]; for j in range(num_partitions): if i != j: for row in partitions[j]: train_rows.append(row); with open(out_file+'_k'+str(i+1)+'_train.csv', 'wb') as ofile: writer = csv.writer(ofile) writer.writerow(header) for row in train_rows: writer.writerow(row) with open(out_file+'_k'+str(i+1)+'_test.csv', 'wb') as ofile: writer = csv.writer(ofile) writer.writerow(header) for row in test_rows: writer.writerow(row)
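# Example invocation (hypothetical file names), splitting train.csv into
# five cross-validation folds:
#
#   python partition_data.py train.csv folds 5
#
# This writes folds_k1_train.csv / folds_k1_test.csv through
# folds_k5_train.csv / folds_k5_test.csv, where each test file holds one
# partition and the matching train file holds the other four.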
mit
Python
dc76e7c085e7462d75567bf3d0228defb6bbbc58
Add tests for converter
stormaaja/csvconverter,stormaaja/csvconverter,stormaaja/csvconverter
tests/test_csv_converter.py
tests/test_csv_converter.py
import os import sys sys.path.insert(0, os.path.dirname(__file__)) sys.path.insert(1, os.path.dirname("..")) import unittest from csv_converter import CsvConverter class TestCsvConverter(unittest.TestCase): def test_parse_csv(self): converter = CsvConverter("tests/data/data_1.csv") converter.setSourceColumns("tuotekoodi", "qty") converter.read_file() def test_convert_row(self): converter = CsvConverter("") row = converter.convertRow({ "product_code": "some_code", "quantity": "50" }) self.assertEqual("some_code", row["product_code"]) with self.assertRaises(ValueError): row = converter.convertRow({ "product_code": "23", "quantity": "error" }) with self.assertRaises(ValueError): row = converter.convertRow({ "product_code": "", "quantity": "error" }) with self.assertRaises(ValueError): row = converter.convertRow({ "product_code": "sd", "quantity": "" }) if __name__ == '__main__': unittest.main()
mit
Python
645507ed9ec43b354880673fbc75afe169ef6697
Add test capturing bad implementation of contains handler.
jawilson/pmxbot,jawilson/pmxbot
tests/unit/test_handlers.py
tests/unit/test_handlers.py
from pmxbot import core def test_contains_always_match(): """ Contains handler should always match if no rate is specified. """ handler = core.ContainsHandler(name='#', func=None) assert handler.match('Tell me about #foo', channel='bar')
bsd-3-clause
Python
d61c42221774f36477b1288396f4e7e7337e905c
add data migration
eldarion/formly,eldarion/formly
formly/migrations/0012_fix_multi_text_answer_data.py
formly/migrations/0012_fix_multi_text_answer_data.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2018-01-23 13:46
from __future__ import unicode_literals

import json

from django.db import migrations


def migrate_data(apps, schema_editor):
    FieldResult = apps.get_model("formly", "FieldResult")

    # alias for Field.MULTIPLE_TEXT
    MULTIPLE_TEXT_TYPE = 8

    multiple_text_results = FieldResult.objects.filter(question__field_type=MULTIPLE_TEXT_TYPE)

    print("\n")
    if not multiple_text_results.exists():
        print("formly-data-migration: No multiple text results data found. Skipping data migration.")
        return

    print("formly-data-migration: Updating data on {} FieldResult instances".format(multiple_text_results.count()))
    for result in multiple_text_results:
        raw_answer = result.answer["answer"]
        if isinstance(raw_answer, unicode):
            try:
                answer = json.loads(raw_answer)
            except ValueError:
                # not valid JSON; wrap the raw string in a list instead
                answer = [raw_answer]
            result.answer["answer"] = answer
            result.save()
    print("formly-data-migration: Data update complete!")


class Migration(migrations.Migration):

    dependencies = [
        ("formly", "0011_field_mapping"),
    ]

    operations = [
        migrations.RunPython(migrate_data),
    ]
bsd-3-clause
Python
a1f864de0c5e71f0e9dc0ff4a23dc8101556832b
add new script
open-rdc/icart_mini,open-rdc/icart,open-rdc/icart_mini,open-rdc/icart
icart_mini_navigation/scripts/navigation_strategy.py
icart_mini_navigation/scripts/navigation_strategy.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

try:
    import roslib; roslib.load_manifest('rospeex_if')
except:
    pass

import rospy
import re

from rospeex_if import ROSpeexInterface
from std_msgs.msg import String

syscommand_pub = rospy.Publisher('syscommand', String, queue_size=10)

rospy.init_node('navigation_strategy', anonymous=True)
r = rospy.Rate(10)


class talk_node(object):
    def __init__(self):
        self._interface = ROSpeexInterface()

    def sr_response(self, message):
        # '走行' means "run/drive", '開始' means "start"
        run = re.compile('(?P<run>走行)').search(message)
        start = re.compile('(?P<start>開始)').search(message)

        print 'you said : %s' % message

        if run is not None and start is not None:
            # "Starting navigation."
            text = u'ナビゲーションを開始します。'
            robot_msg = 'start'
            rospy.loginfo(robot_msg)
            syscommand_pub.publish(robot_msg)

            print 'rospeex reply : %s' % text
            self._interface.say(text, 'ja', 'nict')

    def run(self):
        self._interface.init()
        self._interface.register_sr_response(self.sr_response)
        self._interface.set_spi_config(language='ja', engine='nict')
        rospy.spin()


if __name__ == '__main__':
    try:
        node = talk_node()
        node.run()
    except rospy.ROSInterruptException:
        pass
bsd-2-clause
Python
b3e9075e819402f93f7dc2e29b61e3e621ab7355
Add unit tests for avging imputations
eltonlaw/impyute
impy/imputations/tests/test_averaging_imputations.py
impy/imputations/tests/test_averaging_imputations.py
"""test_averaging_imputations.py""" import unittest import numpy as np from impy.imputations import mean_imputation from impy.imputations import mode_imputation from impy.imputations import median_imputation from impy.datasets import random_int class TestAveraging(unittest.TestCase): """ Tests for Averaging """ def setUp(self): self.data = random_int(missingness="complete") def test_mean_return_type(self): """Mean Imputation Return Type""" self.assertEqual(str(type(mean_imputation(self.data))), "<class 'numpy.ndarray'>") def test_mode_return_type(self): """Mode Imputation Return Type""" self.assertEqual(str(type(mode_imputation(self.data))), "<class 'numpy.ndarray'>") def test_median_return_type(self): """Median Imputation Return Type""" self.assertEqual(str(type(median_imputation(self.data))), "<class 'numpy.ndarray'>") def test_mean_fill(self): """ Mean Imputation Fill Complete Data(nothing should happen)""" actual = mean_imputation(self.data) self.assertTrue(np.array_equal(actual, self.data)) def test_mode_fill(self): """ Mode Imputation Fill Complete Data(nothing should happen)""" actual = mode_imputation(self.data) self.assertTrue(np.array_equal(actual, self.data)) def test_median_fill(self): """ Median Imputation Fill Complete Data(nothing should happen)""" actual = median_imputation(self.data) self.assertTrue(np.array_equal(actual, self.data)) if __name__ == "__main__": unittest.main()
mit
Python
dd784f9035f66fd7d4febb8e43a09353821312b9
add solution for Kth Largest Element in an Array
zhyu/leetcode,zhyu/leetcode
algorithms/KthLargestElementInAnArray/KthLargestElementInAnArray.py
algorithms/KthLargestElementInAnArray/KthLargestElementInAnArray.py
class Solution: # @param {integer[]} nums # @param {integer} k # @return {integer} def findKthLargest(self, nums, k): k = len(nums) - k def quickselect(st, ed): pivot = nums[ed] pos = st for i in xrange(st, ed): if nums[i] < pivot: nums[i], nums[pos] = nums[pos], nums[i] pos += 1 nums[pos], nums[ed] = nums[ed], nums[pos] if pos == k: return nums[pos] elif pos < k: return quickselect(pos+1, ed) else: return quickselect(st, pos-1) return quickselect(0, len(nums)-1)
mit
Python
d3afe17fa3f259e2b09f76e4b486e4cbf9e659c3
Create albumCoverFinder.py
btran29/yet-another-album-cover-finder
albumCoverFinder.py
albumCoverFinder.py
# albumCoverFinder - Brian Tran, [email protected]

# This program scans a tree of directories containing mp3 files. For
# each directory, it attempts to download the cover image from the
# Apple iTunes service. Subdirectories must be named <Artist>/<Album>
# and contain .mp3 files to be considered. The cover will be saved to
# "cover.jpg" in each directory.

# Usage example:
# albumCoverFinder.py <music directory>

import sys
import os
import shutil
import re
import urllib.request
import json
import tempfile

# For testing + possible future expansion with classes
defaults = {
    "artist": 'Jack Johnson',
    "album": 'In Between Dreams',
    "country": 'US',
    "media": 'music',
    "attribute": 'albumTerm',
    "base": 'https://itunes.apple.com/search?'
}

# Clean up album names via dictionary below
cleanup_table = {
    ' ': '+'
}


# Clean up album folder names for input
def clean_input(term):
    print("\n" + "Search Term: " + "\"" + term + "\"")
    # Replaces strings in folder names with keywords
    # in cleanup_table via regex
    pattern = re.compile('|'.join(cleanup_table.keys()))
    term = pattern.sub(lambda x: cleanup_table[x.group()], term)
    return term


# Generate url for apple api search
def gen_url(term):
    url = defaults["base"] + \
        'term=' + term + '&' + \
        'attribute=' + defaults["attribute"] + '&' + \
        'media=' + defaults["media"]
    print("URL Used: " + url)
    return url


# Connect to website and collect response
def collect_data(url):
    response = urllib.request.urlopen(url)
    # Convert the http response to utf-8
    string = response.read().decode('utf-8')
    data = json.loads(string)
    # returns dictionary object
    return data


# Parse data to get album cover url
def parse_data(data, artist):
    data = data['results']

    # Initialize key vars
    found = False
    album_art_url = None

    # Loop over results to find matching artist given album
    for result in data:
        if result['artistName'] == artist:
            found = True
            album_art_url = result['artworkUrl100']
            print("Album Art URL: " + album_art_url)
            break
    if found is False:
        print("No album/artist combination found.")
    return album_art_url


# Download album art
def download(album_art_url):
    img = urllib.request.urlopen(album_art_url)
    output = tempfile.mktemp(".jpg")
    # Enable writing
    o = open(output, "wb")
    o.write(img.read())
    o.close()
    return output


# Simplified method
def get_art(directory):
    # Get path values, artist, album
    final_path = directory + os.sep + "cover.jpg"
    values = directory.split(os.sep)
    artist = values[-2]
    album = values[-1]

    # Run through procedure
    url = gen_url(clean_input(album))
    data = collect_data(url)
    parsed_url = parse_data(data, artist)
    if parsed_url is None:
        # Nothing found for this directory, so nothing to download
        return
    dl_art = download(parsed_url)
    if dl_art is not None:
        # Copy file to location
        shutil.copyfile(dl_art, final_path)
        os.remove(dl_art)
        print("Saved to: " + final_path)


# Define usage
def usage(argv):
    print("Usage: " + argv[0] + " <music root directory>")
    sys.exit(1)


# Main method
def main(argv):
    if len(argv) < 2:
        usage(argv)
    source_directory = argv[1]
    print("Searching within: " + source_directory)

    # Obtain list of directories
    directories = [source_directory]
    for directory in directories:
        files = os.listdir(directory)
        for file in files:
            if os.path.isdir(os.path.join(directory, file)):
                directories.append(os.path.join(directory, file))

    # Travel through directories
    for directory in directories:
        files = os.listdir(directory)
        for file in files:
            # TODO: skip directories with cover.jpg already present
            # Only directories with mp3 files
            if file.endswith('.mp3'):
                # Get album art for this directory
                get_art(directory)
                break

    # TODO: try out os.walk
    # for root, dirs, files in os.walk(source_directory):
    #     for directory in dirs:
    #         for file in files:
    #             if file.endswith(".mp3"):
    #                 # Get album art for this directory
    #                 get_art(directory)


# Limits this python file to script functionality (vs a module)
if __name__ == "__main__":
    main(sys.argv)
mit
Python
aa411ddcd62b824c0bfe8660c795b71e6e6929ea
add reset command.
grue/django-axes,svenhertle/django-axes,django-pci/django-axes,SteveByerly/django-axes,nidhi-delhivery/django-axes,zoten/django-axes,SteveByerly/django-axes,jazzband/django-axes
axes/utils.py
axes/utils.py
from axes.models import AccessAttempt


def reset(ip=None, silent=False):
    if not ip:
        attempts = AccessAttempt.objects.all()

        if attempts:
            for attempt in attempts:
                attempt.delete()
        else:
            if not silent:
                print 'No attempts found.'
    else:
        try:
            attempt = AccessAttempt.objects.get(ip_address=ip)
        except AccessAttempt.DoesNotExist:
            if not silent:
                print 'No matching attempt found.'
        else:
            attempt.delete()
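# Example usage (hypothetical IP shown), e.g. from a Django shell:
#
#   from axes.utils import reset
#   reset()               # delete every recorded access attempt
#   reset(ip='10.0.0.1')  # delete the attempt recorded for one address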
mit
Python
ceb8a32637bc0fd9ab0517be7f025755e19ec2c7
add leetcode Excel Sheet Column Number
Fity/2code,Fity/2code,Fity/2code,Fity/2code,Fity/2code,Fity/2code
leetcode/ExcelSheetColumnNumber/solution.py
leetcode/ExcelSheetColumnNumber/solution.py
# -*- coding:utf-8 -*- class Solution: # @param s, a string # @return an integer def titleToNumber(self, s): col = 0 for c in s: col = col * 26 + ord(c) - ord('A') + 1 return col
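# Worked example (not part of the original commit): "AB" evaluates to
# (0 * 26 + 1) * 26 + 2 = 28, i.e. bijective base-26 with 'A' = 1.
if __name__ == '__main__':
    s = Solution()
    print(s.titleToNumber('A'))   # 1
    print(s.titleToNumber('Z'))   # 26
    print(s.titleToNumber('AB'))  # 28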
mit
Python