column        dtype          min   max
commit        stringlengths  40    40
old_file      stringlengths  4     264
new_file      stringlengths  4     264
old_contents  stringlengths  0     3.26k
new_contents  stringlengths  1     4.43k
subject       stringlengths  15    624
message       stringlengths  15    4.7k
lang          stringclasses  3 values
license       stringclasses  13 values
repos         stringlengths  5     91.5k
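The header above is a Hugging Face datasets-style column summary (name, dtype, then min/max string length or number of classes); the records that follow list each row's fields in that order. As a minimal sketch, assuming the dump is published as a `datasets`-loadable dataset (the repository id below is hypothetical), one record could be read like this:

from datasets import load_dataset

# Hypothetical repo id; the column names come from the header above.
ds = load_dataset("example/commit-messages", split="train")
row = ds[0]
print(row["commit"])                # 40-character commit SHA
print(row["new_file"])              # path of the file the commit touches
print(row["subject"])               # one-line commit subject
print(row["lang"], row["license"])  # one of 3 languages and 13 licenses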
3fcea684179da92e304e8eb2caafae80311e8507
app/__version__.py
app/__version__.py
#!/usr/bin/env python """ Change Log 1.3.0 Update all packages to current releases. Refactor to support Python 3.7 1.1.7 Update application logging to separate application events from those logged by the uwsgi servivce 1.1.6 Add email address detail for various authentication failures 1.1.5 Refactor _convert_email_uri(email) to properly handle a null email address. 1.1.4 Add code to convert plus signs located the the username portion of an email address to a '%2B'when the email address is embedded in a URL. 1.1.3 Added documentation around the user account registration process. """ __version__ = "1.3.0"
Move the application version number to a separate file per PEP 396.
Move the application version number to a separate file per PEP 396.
Python
mit
parallaxinc/Cloud-Session,parallaxinc/Cloud-Session
88a673c402b60e3212e2a60477a854b756ae5e9a
db/specific_event.py
db/specific_event.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#

import uuid

from db.common import Base
from db.common import session_scope


class SpecificEvent():

    @classmethod
    def find_by_event_id(cls, event_id):
        # retrieving table name for specific event
        table_name = cls.__tablename__
        # finding class associated with table name
        for c in Base._decl_class_registry.values():
            if hasattr(c, '__tablename__') and c.__tablename__ == table_name:
                break
        # retrieving specific event with the associated class type
        with session_scope() as session:
            try:
                specific_event = session.query(c).filter(
                    c.event_id == event_id
                ).one()
            except Exception:
                specific_event = None
            return specific_event

    def update(self, other):
        # copying each standard attribute value from other object to this one
        for attr in self.STANDARD_ATTRS:
            setattr(self, attr, getattr(other, attr))

    def __eq__(self, other):
        # comparing each standard attribute value (and event id) of this
        # object with the other one's; list concatenation is used here since
        # list.extend() returns None and would make the comparison vacuous
        return [self.event_id] + [
            getattr(self, attr) for attr in self.STANDARD_ATTRS
        ] == [other.event_id] + [
            getattr(other, attr) for attr in other.STANDARD_ATTRS
        ]

    def __ne__(self, other):
        return not self == other
Add initial definition of specific event item
Add initial definition of specific event item
Python
mit
leaffan/pynhldb
9aeebde15b5ad2d6526c9b62ab37cf0d890d167d
pbs/gen.py
pbs/gen.py
#!/usr/bin/env python3
#==============================================================================
# author          : Pavel Polishchuk
# date            : 19-08-2018
# version         :
# python_version  :
# copyright       : Pavel Polishchuk 2018
# license         :
#==============================================================================

import argparse
import os
from subprocess import Popen


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Generate files for fragment database.')
    parser.add_argument('-o', '--out', metavar='OUTPUT_DIR', required=True,
                        help='output dir to store files.')
    parser.add_argument('-k', '--keep_ids', required=True,
                        help='path to the file with mol ids to keep at SB generation.')

    args = vars(parser.parse_args())
    for o, v in args.items():
        if o == "out":
            output_dir = os.path.abspath(v)
        if o == "keep_ids":
            fname = os.path.abspath(v)

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    job_dir = os.path.join(output_dir, 'jobs')
    if not os.path.exists(job_dir):
        os.mkdir(job_dir)

    for r in [1, 2, 3]:
        pbs_name = os.path.join(job_dir, '%s_r%i.pbs' % (os.path.basename(output_dir), r))
        script = """
#!/usr/bin/env bash
#PBS -l select=1:ncpus=32
#PBS -k oe

RADIUS=%i

cd %s
source ~/anaconda3/bin/activate rdkit-1709

python3 ~/python/crem/frag_to_env_mp.py -i ~/imtm/crem/frags.txt -o r${RADIUS}.txt -k %s -r ${RADIUS} -c 32 -v
sort r${RADIUS}.txt | uniq -c > r${RADIUS}_c.txt
""" % (r, output_dir, fname)
        with open(pbs_name, "wt") as f:
            f.write(script)
        p = Popen(['qsub', pbs_name], encoding='utf8')
Add script to run PBS jobs to create fragment database
Add script to run PBS jobs to create fragment database
Python
bsd-3-clause
DrrDom/crem,DrrDom/crem
1208f86c8c5ba677bd6001442129d49f28c22764
test/dict_parameter_test.py
test/dict_parameter_test.py
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from helpers import unittest, in_parse

import luigi
import luigi.interface
import json
import collections


class DictParameterTask(luigi.Task):
    param = luigi.DictParameter()


class DictParameterTest(unittest.TestCase):

    _dict = collections.OrderedDict([('username', 'me'), ('password', 'secret')])

    def test_parse(self):
        d = luigi.DictParameter().parse(json.dumps(DictParameterTest._dict))
        self.assertEqual(d, DictParameterTest._dict)

    def test_serialize(self):
        d = luigi.DictParameter().serialize(DictParameterTest._dict)
        self.assertEqual(d, '{"username": "me", "password": "secret"}')

    def test_parse_interface(self):
        in_parse(["DictParameterTask", "--param",
                  '{"username": "me", "password": "secret"}'],
                 lambda task: self.assertEqual(task.param, DictParameterTest._dict))

    def test_serialize_task(self):
        t = DictParameterTask(DictParameterTest._dict)
        self.assertEqual(str(t), 'DictParameterTask(param={"username": "me", "password": "secret"})')

    def test_parse_invalid_input(self):
        self.assertRaises(ValueError, lambda: luigi.DictParameter().parse('{"invalid"}'))
Add test cases for DictParameter
Add test cases for DictParameter
Python
apache-2.0
jamesmcm/luigi,samepage-labs/luigi,jw0201/luigi,h3biomed/luigi,humanlongevity/luigi,riga/luigi,Wattpad/luigi,dlstadther/luigi,javrasya/luigi,edx/luigi,Magnetic/luigi,adaitche/luigi,foursquare/luigi,dlstadther/luigi,linsomniac/luigi,samuell/luigi,Houzz/luigi,Houzz/luigi,PeteW/luigi,ContextLogic/luigi,PeteW/luigi,ehdr/luigi,ivannotes/luigi,h3biomed/luigi,mbruggmann/luigi,mfcabrera/luigi,mfcabrera/luigi,rayrrr/luigi,casey-green/luigi,samuell/luigi,ivannotes/luigi,jamesmcm/luigi,ivannotes/luigi,jamesmcm/luigi,foursquare/luigi,lungetech/luigi,javrasya/luigi,Tarrasch/luigi,Magnetic/luigi,soxofaan/luigi,Tarrasch/luigi,Houzz/luigi,h3biomed/luigi,lungetech/luigi,soxofaan/luigi,rizzatti/luigi,humanlongevity/luigi,republic-analytics/luigi,ContextLogic/luigi,riga/luigi,rayrrr/luigi,dstandish/luigi,rizzatti/luigi,Magnetic/luigi,PeteW/luigi,soxofaan/luigi,riga/luigi,rayrrr/luigi,rizzatti/luigi,linsomniac/luigi,fabriziodemaria/luigi,samuell/luigi,humanlongevity/luigi,mbruggmann/luigi,Wattpad/luigi,adaitche/luigi,mfcabrera/luigi,foursquare/luigi,ehdr/luigi,h3biomed/luigi,ContextLogic/luigi,spotify/luigi,linsomniac/luigi,linsomniac/luigi,casey-green/luigi,mbruggmann/luigi,Houzz/luigi,dlstadther/luigi,stroykova/luigi,riga/luigi,samepage-labs/luigi,rayrrr/luigi,samuell/luigi,republic-analytics/luigi,soxofaan/luigi,lungetech/luigi,adaitche/luigi,jw0201/luigi,jw0201/luigi,rizzatti/luigi,fabriziodemaria/luigi,Tarrasch/luigi,Wattpad/luigi,jamesmcm/luigi,adaitche/luigi,lungetech/luigi,spotify/luigi,stroykova/luigi,dstandish/luigi,dlstadther/luigi,spotify/luigi,spotify/luigi,casey-green/luigi,dstandish/luigi,ehdr/luigi,samepage-labs/luigi,dstandish/luigi,republic-analytics/luigi,republic-analytics/luigi,edx/luigi,fabriziodemaria/luigi,jw0201/luigi,javrasya/luigi,samepage-labs/luigi,ContextLogic/luigi,humanlongevity/luigi,Magnetic/luigi,PeteW/luigi,mfcabrera/luigi,fabriziodemaria/luigi,stroykova/luigi,ehdr/luigi,javrasya/luigi,foursquare/luigi,edx/luigi,edx/luigi,ivannotes/luigi,Tarrasch/luigi,stroykova/luigi,mbruggmann/luigi,casey-green/luigi
ce929da100303a56ca5d1e4c5ca3982c314d8696
CodeFights/simpleComposition.py
CodeFights/simpleComposition.py
#!/usr/local/bin/python
# Code Fights Simple Composition Problem
from functools import reduce
import math


def compose(f, g):
    return lambda x: f(g(x))


def simpleComposition(f, g, x):
    return compose(eval(f), eval(g))(x)


# Generic composition of n functions:
def compose_n(*functions):
    return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)


def main():
    tests = [
        ["math.log10", "abs", -100, 2],
        ["math.sin", "math.cos", 34.4, math.sin(math.cos(34.4))],
        ["int", "lambda x: 1.0 * x / 22", 1000, 45],
        ["math.exp", "lambda x: x ** 0", -1000, math.e],
        ["lambda z: z", "lambda y: y", 239, 239]
    ]

    for t in tests:
        res = simpleComposition(t[0], t[1], t[2])
        ans = t[3]
        if ans == res:
            print("PASSED: simpleComposition({}, {}, {}) returned {}"
                  .format(t[0], t[1], t[2], res))
        else:
            print(("FAILED: simpleComposition({}, {}, {}) returned {},"
                   "answer: {}").format(t[0], t[1], t[2], res, ans))


if __name__ == '__main__':
    main()
Solve Code Fights simple composition problem
Solve Code Fights simple composition problem
Python
mit
HKuz/Test_Code
b58fc31236e0c2226d31f7c846cc6a6392c98d52
parsers/python/tests/test_single_reference.py
parsers/python/tests/test_single_reference.py
import unittest

from jsonasobj import as_json
from pyshexc.parser_impl.generate_shexj import parse

shex = """<http://a.example/S0> @<http://a.example/S1>
<http://a.example/S1> { <http://a.example/p1> . }"""

shexj = """{
  "type": "Schema",
  "shapes": [
    "http://a.example/S1",
    {
      "type": "Shape",
      "id": "http://a.example/S1",
      "expression": {
        "type": "TripleConstraint",
        "predicate": "http://a.example/p1"
      }
    }
  ]
}"""


class SingleReferenceTestCase(unittest.TestCase):
    """ Test to determine what this parser does with a single reference """
    def test_shex(self):
        schema = parse(shex)
        self.assertEqual(shexj, as_json(schema))


if __name__ == '__main__':
    unittest.main()
Add an experimental test for a single reference
Add an experimental test for a single reference
Python
mit
shexSpec/grammar,shexSpec/grammar,shexSpec/grammar
46f8389f79ad7aae6c038a2eef853eb0652349c7
examples/auto_update_example.py
examples/auto_update_example.py
from guizero import *
import random


def read_sensor():
    return random.randrange(3200, 5310, 10) / 100


def update_label():
    text.set(read_sensor())
    # recursive call
    text.after(1000, update_label)


if __name__ == '__main__':
    app = App(title='Sensor Display!', height=100, width=200, layout='grid')
    title = Text(app, 'Sensor value:', grid=[0, 0])
    text = Text(app, "xx", grid=[1, 0])
    text.after(1000, update_label)
    app.display()
Add example for auto update of a widget using .after()
Add example for auto update of a widget using .after() Thanks to @ukBaz and @jezdean, an example of the already implemented .after() function being used to update a widget automatically.
Python
bsd-3-clause
lawsie/guizero,lawsie/guizero,lawsie/guizero
8dbce56b1b595a761fdc29c8730ad5e11e40a203
php4dvd/test_searchfilm.py
php4dvd/test_searchfilm.py
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
import unittest


class searchFilm(unittest.TestCase):
    def setUp(self):
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(10)
        self.base_url = "http://hub.wart.ru/"
        self.verificationErrors = []
        self.accept_next_alert = True

    def test_searchfilm(self):
        driver = self.driver
        driver.get(self.base_url + "php4dvd/")
        driver.find_element_by_id("username").clear()
        driver.find_element_by_id("username").send_keys("admin")
        driver.find_element_by_name("password").clear()
        driver.find_element_by_name("password").send_keys("admin")
        driver.find_element_by_name("submit").click()
        # search a film which does present
        driver.find_element_by_id("q").clear()
        driver.find_element_by_id("q").send_keys(u"Вос")
        driver.find_element_by_id("q").send_keys(Keys.RETURN)
        results = driver.find_element_by_id("results")
        if not results.find_elements_by_class_name("title"):
            self.fail("Movie not found")
        # search a film which does not present
        driver.find_element_by_id("q").clear()
        driver.find_element_by_id("q").send_keys("Test film")
        driver.find_element_by_id("q").send_keys(Keys.RETURN)
        results = driver.find_element_by_id("results")
        if not results.find_elements_by_class_name("content"):
            self.fail("Movies found")
        # finish our tests
        driver.find_element_by_link_text("Log out").click()
        self.assertRegexpMatches(self.close_alert_and_get_its_text(),
                                 r"^Are you sure you want to log out[\s\S]$")

    def is_element_present(self, how, what):
        try:
            self.driver.find_element(by=how, value=what)
        except NoSuchElementException, e:
            return False
        return True

    def close_alert_and_get_its_text(self):
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            self.accept_next_alert = True

    def tearDown(self):
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)


if __name__ == "__main__":
    unittest.main()
Add a test for search cases.
Add a test for search cases.
Python
bsd-2-clause
bsamorodov/selenium-py-training-samorodov
7a51f5a4e4effee4891cddbb867f873ec15c5fab
Python/strings/sort_anagrams.py
Python/strings/sort_anagrams.py
'''
Sort an array of strings so that the anagrams are next to one another
Ex. 'abba', 'foo', 'bar', 'aabb' becomes:
'abba', 'aabb', 'foo', 'bar'
'''
from __future__ import print_function
from collections import OrderedDict


def collect_anagrams(str_arr):
    d = OrderedDict()
    for i, s in enumerate(str_arr):
        t = ''.join(sorted(s))
        if not d.get(t, None):
            d[t] = [i]
        else:
            d[t].append(i)

    # at this stage, the dictionary, d has the sorted
    # strings as the keys and their positions as the values
    # so we just iterate over it and recreate a list
    str_arr_sorted = []
    for pos in d.values():
        for p in pos:
            str_arr_sorted.append(str_arr[p])

    return str_arr_sorted


print(collect_anagrams(['abba', 'foo', 'bar', 'aabb']))
print(collect_anagrams(['foo', 'bar', 'abba', 'rab', 'aabb']))
print(collect_anagrams(['foo', 'bar', 'abba', 'rab', 'aabb', 'oof']))
Sort an array of strings so that the anagrams are grouped
Sort an array of strings so that the anagrams are grouped
Python
unlicense
amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning
cb159032856c4409187154cc0ec3d6ffae1fc4db
py/top-k-frequent-words.py
py/top-k-frequent-words.py
from collections import Counter
import heapq


class Neg():
    def __init__(self, x):
        self.x = x

    def __cmp__(self, other):
        return -cmp(self.x, other.x)


class Solution(object):
    def topKFrequent_nlogk(self, words, k):
        """
        :type words: List[str]
        :type k: int
        :rtype: List[str]
        """
        c = Counter(words)
        ary = [Neg((-cnt, w)) for w, cnt in c.iteritems()]
        heap = ary[:k]
        heapq.heapify(heap)
        for i in xrange(k, len(ary)):
            heapq.heappush(heap, ary[i])
            heapq.heappop(heap)
        heap.sort(key=lambda x: (x.x[0], x.x[1]))
        return [x.x[1] for x in heap]

    def topKFrequent_klogn(self, words, k):
        """
        :type words: List[str]
        :type k: int
        :rtype: List[str]
        """
        c = Counter(words)
        ary = [(-cnt, w) for w, cnt in c.iteritems()]
        return [x[1] for x in heapq.nsmallest(k, ary)]

    topKFrequent = topKFrequent_nlogk
Add py solution for 692. Top K Frequent Words
Add py solution for 692. Top K Frequent Words 692. Top K Frequent Words: https://leetcode.com/problems/top-k-frequent-words/ O(nlgk) and O(klgn) approaches
Python
apache-2.0
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
1a75c38f43f0857fcc1c0dfe594f719870ad3553
issue_id.py
issue_id.py
#!/bin/python
from __future__ import print_function, division

import argparse
import os
import os.path
import random
import string
import shutil

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="""
Merge new content into existing dataset, assigning safe unique keys.
File extensions will be lowercased.""",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        'new', metavar='DATASET_NEW', nargs=1, type=str,
        help='New dataset directory path')
    parser.add_argument(
        'existing', metavar='DATASET_EXISTING', nargs=1, type=str,
        help='Existing dataset directory path')
    args = parser.parse_args()

    if args.new == args.existing:
        raise SyntaxError('New and existing dataset cannot be the same')

    existing_ids = set(os.path.splitext(os.path.basename(path))[0]
                       for path in os.listdir(args.existing[0]))

    def random_id(n):
        charset = string.lowercase + string.digits
        return ''.join(random.choice(charset) for i in range(n))

    def issue_new_id():
        n = 1
        while True:
            for i in range(3):
                i = random_id(n)
                if i not in existing_ids:
                    existing_ids.add(i)
                    return i
            else:
                n += 1

    for path in os.listdir(args.new[0]):
        entry_id = issue_new_id()
        path_src = os.path.join(args.new[0], path)
        path_dst = os.path.join(args.existing[0],
                                entry_id + os.path.splitext(path)[1].lower())
        print('Copying from %s to %s' % (path_src, path_dst))
        shutil.copyfile(path_src, path_dst)
Add a utility to assign unique ids to files.
Add a utility to assign unique ids to files.
Python
mit
xanxys/shogi_recognizer,xanxys/shogi_recognizer
4fae632c55f2b74cc29dd443bc6c017b666b46f5
demo/amqp_clock.py
demo/amqp_clock.py
#!/usr/bin/env python """ AMQP Clock Fires off simple messages at one-minute intervals to a topic exchange named 'clock', with the topic of the message being the local time as 'year.month.date.dow.hour.minute', for example: '2007.11.26.1.12.33', where the dow (day of week) is 0 for Sunday, 1 for Monday, and so on (similar to Unix crontab). A consumer could then bind a queue to the routing key '#.0' for example to get a message at the beginning of each hour. 2007-11-26 Barry Pederson <[email protected]> """ from datetime import datetime from optparse import OptionParser from time import sleep import amqplib.client_0_8 as amqp Message = amqp.Message EXCHANGE_NAME = 'clock' TOPIC_PATTERN = '%Y.%m.%d.%w.%H.%M' # Python datetime.strftime() pattern def main(): parser = OptionParser() parser.add_option('--host', dest='host', help='AMQP server to connect to (default: %default)', default='localhost') parser.add_option('-u', '--userid', dest='userid', help='AMQP userid to authenticate as (default: %default)', default='guest') parser.add_option('-p', '--password', dest='password', help='AMQP password to authenticate with (default: %default)', default='guest') parser.add_option('--ssl', dest='ssl', action='store_true', help='Enable SSL with AMQP server (default: not enabled)', default=False) options, args = parser.parse_args() conn = amqp.Connection(options.host, options.userid, options.password) ch = conn.channel() ch.access_request('/data', write=True, active=True) ch.exchange_declare(EXCHANGE_NAME, type='topic') # Make sure our first message is close to the beginning # of a minute now = datetime.now() if now.second > 0: sleep(60 - now.second) while True: now = datetime.now() msg = Message(timestamp=now) topic = now.strftime(TOPIC_PATTERN) ch.basic_publish(msg, EXCHANGE_NAME, routing_key=topic) # Don't know how long the basic_publish took, so # grab the time again. now = datetime.now() sleep(60 - now.second) ch.close() conn.close() if __name__ == '__main__': main()
Add another demo program, one that spits out messages at regular intervals.
Add another demo program, one that spits out messages at regular intervals.
Python
lgpl-2.1
jonahbull/py-amqp,newvem/py-amqplib,yetone/py-amqp,smurfix/aio-py-amqp,smurfix/aio-py-amqp,dallasmarlow/py-amqp,dims/py-amqp,jonahbull/py-amqp,dallasmarlow/py-amqp,yetone/py-amqp,dims/py-amqp
0996f1c59dfca9c22d0e3d78598bd3edbce62696
dailyFileCopy.py
dailyFileCopy.py
import os
import time
import shutil
import glob


def reviewAndCopy(copy_from_directory, copy_to_directory):
    review_window_in_hours = 24
    _review_window_in_sec = review_window_in_hours * 3600
    os.chdir(copy_from_directory)
    text_files = getAllTxtFilesFromCurrentDirectory()
    files_with_age = createFileAgeDict(text_files)
    trimToOnlyModifiedInWindow(files_with_age, _review_window_in_sec)
    files_to_copy = filesDictToFileList(files_with_age)
    deleteAllFilesInDirectory(copy_to_directory)
    copyFilesToTargetDirectory(files_to_copy, copy_to_directory)


def getAllTxtFilesFromCurrentDirectory():
    return glob.glob('*.txt')


def createFileAgeDict(file_list):
    files_with_age = {}
    now = time.time()
    for each_file in file_list:
        files_with_age[each_file] = findAgeSinceChange(each_file, now)
    return files_with_age


def findAgeSinceChange(single_file, time_to_check_age_against):
    try:
        modify_time = os.path.getmtime(single_file)
        create_time = os.path.getctime(single_file)
        change_time = max(modify_time, create_time)
        age_since_change = time_to_check_age_against - change_time
        return age_since_change
    except WindowsError:
        print 'There was an error reading create/modify time from file ', single_file


def trimToOnlyModifiedInWindow(files_with_age, time_window):
    for each_file in files_with_age.keys():
        if files_with_age[each_file] > time_window:
            del files_with_age[each_file]
    return files_with_age


def filesDictToFileList(file_dict):
    return list(file_dict.keys())


def deleteAllFilesInDirectory(target_directory):
    current_directory = os.getcwd()
    os.chdir(target_directory)
    deleteAllFilesInCurrentDirectory()
    os.chdir(current_directory)


def deleteAllFilesInCurrentDirectory():
    current_directory = os.getcwd()
    for each_file in os.listdir(current_directory):
        try:
            os.remove(each_file)
        except WindowsError:
            print 'There was an error deleting file ', each_file


def copyFilesToTargetDirectory(files_to_copy, target_directory):
    for each_file in files_to_copy:
        try:
            shutil.copy2(each_file, target_directory)
        except WindowsError:
            print 'There was an error copying file ', each_file


if __name__ == '__main__':
    copy_from_directory = 'C:\\ReviewDaily'
    copy_to_directory = 'C:\\DailyTransfer'
    reviewAndCopy(copy_from_directory, copy_to_directory)
Add file copy utility script
Add file copy utility script
Python
mit
danielharada/fileCopyUtility
1d4960adcc307504ecd62e45cac21c69c3ac85a1
django_afip/migrations/0026_vat_conditions.py
django_afip/migrations/0026_vat_conditions.py
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('afip', '0025_receipt__default_currency'),
    ]

    operations = [
        migrations.AlterField(
            model_name='receiptpdf',
            name='vat_condition',
            field=models.CharField(choices=[('IVA Liberado - Ley Nº 19.640', 'IVA Liberado - Ley Nº 19.640'), ('Monotributista Social', 'Monotributista Social'), ('Consumidor Final', 'Consumidor Final'), ('IVA no alcanzado', 'IVA no alcanzado'), ('IVA Responsable Inscripto - Agente de Percepción', 'IVA Responsable Inscripto - Agente de Percepción'), ('Proveedor del Exterior', 'Proveedor del Exterior'), ('IVA Sujeto Exento', 'IVA Sujeto Exento'), ('Cliente del Exterior', 'Cliente del Exterior'), ('IVA Responsable Inscripto', 'IVA Responsable Inscripto'), ('Responsable Monotributo', 'Responsable Monotributo')], max_length=48, verbose_name='vat condition'),
        ),
        migrations.AlterField(
            model_name='taxpayerprofile',
            name='vat_condition',
            field=models.CharField(choices=[('IVA Liberado - Ley Nº 19.640', 'IVA Liberado - Ley Nº 19.640'), ('Monotributista Social', 'Monotributista Social'), ('Consumidor Final', 'Consumidor Final'), ('IVA no alcanzado', 'IVA no alcanzado'), ('IVA Responsable Inscripto - Agente de Percepción', 'IVA Responsable Inscripto - Agente de Percepción'), ('Proveedor del Exterior', 'Proveedor del Exterior'), ('IVA Sujeto Exento', 'IVA Sujeto Exento'), ('Cliente del Exterior', 'Cliente del Exterior'), ('IVA Responsable Inscripto', 'IVA Responsable Inscripto'), ('Responsable Monotributo', 'Responsable Monotributo')], max_length=48, verbose_name='vat condition'),
        ),
    ]
Add updated migration with vat_conditions
Add updated migration with vat_conditions
Python
isc
hobarrera/django-afip,hobarrera/django-afip
da514b74cce0e605e7fb6f98ff2280a1ba87323f
scripts/sqa_module_init.py
scripts/sqa_module_init.py
#!/usr/bin/env python
from __future__ import print_function
import os
import shutil
import argparse

parser = argparse.ArgumentParser(description='Setup SQA documentation for a MOOSE module.')
parser.add_argument('module', type=str, help='The module folder name')
args = parser.parse_args()

folder = args.module
title = folder.replace('_', ' ').title()
doc_location = os.path.join(os.getenv('MOOSE_DIR'), 'modules', folder, 'doc')

# Create YAML config
data = "directories:\n" \
       " - ${{MOOSE_DIR}}/modules/{}/test/tests\n" \
       "specs:\n" \
       " - tests\n".format(folder)
ymlfile = os.path.join(doc_location, 'sqa_{}.yml'.format(folder))
with open(ymlfile, 'w+') as fid:
    print('CREATE: {}'.format(ymlfile))
    fid.write(data)

# Copy and update app SQA files
src_dir = os.path.join(os.getenv('MOOSE_DIR'), 'modules', 'tensor_mechanics', 'doc',
                       'content', 'modules', 'tensor_mechanics', 'sqa')
dst_dir = os.path.join(os.getenv('MOOSE_DIR'), 'modules', folder, 'doc',
                       'content', 'modules', folder, 'sqa')
sqa_files = ['index.md', 'tensor_mechanics_sdd.md', 'tensor_mechanics_stp.md',
             'tensor_mechanics_rtm.md', 'tensor_mechanics_srs.md',
             'tensor_mechanics_vvr.md']

if not os.path.isdir(dst_dir):
    os.makedirs(dst_dir)

for fname in sqa_files:
    src = os.path.join(src_dir, fname)
    dst = os.path.join(dst_dir, fname.replace('tensor_mechanics', folder))
    print('COPY: {} -> {}'.format(src, dst))
    shutil.copyfile(src, dst)

    with open(dst, 'r') as fid:
        content = fid.read()
    content = content.replace('category=tensor_mechanics', 'category={}'.format(folder))
    content = content.replace('app=Tensor Mechanics', 'app={}'.format(title))
    with open(dst, 'w') as fid:
        print('UPDATE: {}'.format(dst))
        fid.write(content)
        print(content)
Add script for creating SQA docs in a module
Add script for creating SQA docs in a module (refs #13661)
Python
lgpl-2.1
permcody/moose,jessecarterMOOSE/moose,jessecarterMOOSE/moose,jessecarterMOOSE/moose,lindsayad/moose,dschwen/moose,jessecarterMOOSE/moose,sapitts/moose,harterj/moose,laagesen/moose,andrsd/moose,idaholab/moose,bwspenc/moose,lindsayad/moose,harterj/moose,permcody/moose,lindsayad/moose,SudiptaBiswas/moose,harterj/moose,dschwen/moose,milljm/moose,dschwen/moose,sapitts/moose,SudiptaBiswas/moose,laagesen/moose,jessecarterMOOSE/moose,SudiptaBiswas/moose,dschwen/moose,SudiptaBiswas/moose,harterj/moose,bwspenc/moose,sapitts/moose,idaholab/moose,lindsayad/moose,SudiptaBiswas/moose,nuclear-wizard/moose,idaholab/moose,laagesen/moose,andrsd/moose,milljm/moose,idaholab/moose,dschwen/moose,nuclear-wizard/moose,bwspenc/moose,andrsd/moose,nuclear-wizard/moose,bwspenc/moose,andrsd/moose,harterj/moose,milljm/moose,idaholab/moose,sapitts/moose,bwspenc/moose,sapitts/moose,milljm/moose,lindsayad/moose,laagesen/moose,nuclear-wizard/moose,laagesen/moose,andrsd/moose,permcody/moose,permcody/moose,milljm/moose
adf7234437c75d1a7c0b121f4b14676356df20e5
100_Same_Tree.py
100_Same_Tree.py
"""
https://leetcode.com/problems/same-tree/

Given two binary trees, write a function to check if they are equal or not.
Two binary trees are considered equal if they are structurally identical and
the nodes have the same value.
"""


# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class Solution(object):
    def isSameTree(self, p, q):
        """
        :type p: TreeNode
        :type q: TreeNode
        :rtype: bool
        """
        if not p and not q:
            return True
        elif p and q:
            return (p.val == q.val) and self.isSameTree(p.left, q.left) \
                and self.isSameTree(p.right, q.right)
        else:
            return False
Add solution 100. Same Tree.
Add solution 100. Same Tree.
Python
mit
wangyangkobe/leetcode,wangyangkobe/leetcode,wangyangkobe/leetcode,wangyangkobe/leetcode
4424959dd8bef2dfe709319bdf55b860ccc4971e
accounts/tests/tests_vbuserlist_page.py
accounts/tests/tests_vbuserlist_page.py
#! /usr/bin/env python

__author__ = 'Henri Buyse'

import pytest
import datetime

from django.contrib.auth.handlers.modwsgi import check_password
from django.contrib.auth.models import User
from django.test import Client

from accounts.models import VBUserProfile

key_expires = datetime.datetime.strftime(
    datetime.datetime.now() + datetime.timedelta(days=2), "%Y-%m-%d %H:%M:%S")


@pytest.mark.django_db
def test_vbuserdetail_page():
    c = Client()
    response = c.get('/users/')

    assert response.status_code == 200
Add a client test on the users list view
Add a client test on the users list view
Python
mit
hbuyse/VBTournaments,hbuyse/VBTournaments,hbuyse/VBTournaments
72ca20f34b9ef70cee930271fa698de187f97857
examples/simple_app.py
examples/simple_app.py
from flask_table import Table, Col, LinkCol
from flask import Flask

"""An example for creating a simple table within a working Flask app.

Our table has just two columns, one of which shows the name and is a
link to the item's page. The other shows the description.

"""

app = Flask(__name__)


class ItemTable(Table):
    name = LinkCol('Name', 'single_item',
                   url_kwargs=dict(id='id'), attr='name')
    description = Col('Description')


@app.route('/')
def index():
    items = Item.get_elements()
    table = ItemTable(items)

    # You would usually want to pass this out to a template with
    # render_template.
    return table.__html__()


@app.route('/item/<int:id>')
def single_item(id):
    element = Item.get_element_by_id(id)
    # Similarly, normally you would use render_template
    return '<h1>{}</h1><p>{}</p><hr><small>id: {}</small>'.format(
        element.name, element.description, element.id)


class Item(object):
    """ a little fake database """
    def __init__(self, id, name, description):
        self.id = id
        self.name = name
        self.description = description

    @classmethod
    def get_elements(cls):
        return [
            Item(1, 'Z', 'zzzzz'),
            Item(2, 'K', 'aaaaa'),
            Item(3, 'B', 'bbbbb')]

    @classmethod
    def get_element_by_id(cls, id):
        return [i for i in cls.get_elements() if i.id == id][0]


if __name__ == '__main__':
    app.run(debug=True)
Add example for simple table within a flask app
Add example for simple table within a flask app
Python
bsd-3-clause
plumdog/flask_table,plumdog/flask_table,plumdog/flask_table
b77b284c1ecbd599fec218d10068e419e0070994
src/roman_to_integer.py
src/roman_to_integer.py
class Solution(object):
    def romanToInt(self, s):
        """
        :type s: str
        :rtype: int
        """
        if not s:
            return 0
        amount = 0
        for i, c in enumerate(s):
            cur = self.romanTable(c)
            if i < len(s) - 1:
                nex = self.romanTable(s[i + 1])
                if cur >= nex:
                    amount = amount + cur
                else:
                    amount = amount - cur
            else:
                amount = amount + cur
        return amount

    def romanTable(self, c):
        return {
            'I': 1,
            'V': 5,
            'X': 10,
            'L': 50,
            'C': 100,
            'D': 500,
            'M': 1000,
        }.get(c, 0)


if __name__ == '__main__':
    s_list = ['MCMIV', 'MCMLIV', 'MCMXC', 'MMXIV', 'DCXXI']
    result_list = [1904, 1954, 1990, 2014, 621]
    success = True
    solution = Solution()
    for i in range(len(s_list)):
        result = solution.romanToInt(s_list[i])
        if result != result_list[i]:
            print s_list[i]
            print 'current', result
            print 'expected', result_list[i]
            success = False
    if success:
        print 'All passed'
Add Roman To Integer solution
Add Roman To Integer solution
Python
mit
chancyWu/leetcode
3bb9be0dd508a33d9063c8919d471ec39255aefb
load_and_plot.py
load_and_plot.py
import SixChannelReader as SCR
import matplotlib.pyplot as plt
import numpy as np


def main():
    """ open previously saved data and plot it.
    Convert raw 12-bit ADC data to voltage """
    filename = "TestData-2015-05-15-1306.pkl"
    SR = SCR.SerialDataLogger()
    t, C1, C2, C3, C4, C5, C6 = SR.load_data(filename)

    fig = plt.figure("Test load plot", figsize=(5, 12))
    plt.clf()
    ax1 = fig.add_subplot(611)
    ax2 = fig.add_subplot(612, sharex=ax1)
    ax3 = fig.add_subplot(613, sharex=ax1)
    ax4 = fig.add_subplot(614, sharex=ax1)
    ax5 = fig.add_subplot(615, sharex=ax1)
    ax6 = fig.add_subplot(616, sharex=ax1)

    ax1.plot(t, C1 * 3.3 / 4095)
    ax2.plot(t, C2 * 3.3 / 4095)
    ax3.plot(t, C3 * 3.3 / 4095)
    ax4.plot(t, C4 * 3.3 / 4095)
    ax5.plot(t, C5 * 3.3 / 4095)
    ax6.plot(t, C6 * 3.3 / 4095)

    ax1.set_xlim(t[0], t[-1])
    for ax in [ax1, ax2, ax3, ax4, ax5, ax6]:
        ax.set_ylim(0, 3.3)

    ax6.set_xlabel('Time (ms)')
    ax1.set_ylabel('A0 (V)')
    ax2.set_ylabel('A1 (V)')
    ax3.set_ylabel('A2 (V)')
    ax4.set_ylabel('A3 (V)')
    ax5.set_ylabel('A4 (V)')
    ax6.set_ylabel('A5 (V)')

    fig.tight_layout()


if __name__ == '__main__':
    main()
Load and plot python script
Load and plot python script
Python
mit
jameskeaveney/ArduinoDUE-Data-Logger,jameskeaveney/ArduinoDUE-Data-Logger
8ee65cbf390d5caae04498397f74cd8a64e7903e
email_auth/migrations/0003_auto_20151209_0746.py
email_auth/migrations/0003_auto_20151209_0746.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models
import email_auth.models


class Migration(migrations.Migration):

    dependencies = [
        ('email_auth', '0002_auto_20151011_1652'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='user',
            managers=[
                ('objects', email_auth.models.UserManager()),
            ],
        ),
        migrations.AlterField(
            model_name='user',
            name='groups',
            field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups'),
        ),
        migrations.AlterField(
            model_name='user',
            name='last_login',
            field=models.DateTimeField(null=True, verbose_name='last login', blank=True),
        ),
    ]
Add a necessary migration for email_auth
Add a necessary migration for email_auth
Python
bsd-3-clause
divio/django-shop,rfleschenberg/django-shop,jrief/django-shop,khchine5/django-shop,awesto/django-shop,rfleschenberg/django-shop,jrief/django-shop,awesto/django-shop,jrief/django-shop,divio/django-shop,divio/django-shop,nimbis/django-shop,khchine5/django-shop,rfleschenberg/django-shop,khchine5/django-shop,jrief/django-shop,nimbis/django-shop,nimbis/django-shop,awesto/django-shop,khchine5/django-shop,rfleschenberg/django-shop,nimbis/django-shop
2da04cccf32e5280e36966b2d9c93a643e7458e7
osf/migrations/0083_add_ember_waffle_flags.py
osf/migrations/0083_add_ember_waffle_flags.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-02 17:45
from __future__ import unicode_literals

from waffle.models import Flag
from django.db import migrations, IntegrityError, transaction

EMBER_WAFFLE_PAGES = [
    'completed_registration_form_detail',
    'dashboard',
    'draft_registration_form',
    'file_detail',
    'home_logged_out',
    'meeting_detail',
    'meetings',
    'my_projects',
    'prereg_onboarder',
    'project_analytics',
    'project_contributors',
    'project_detail',
    'project_files',
    'project_forks',
    'project_registrations',
    'project_settings',
    'project_wiki',
    'registration_detail',
    'search',
    'support',
    'user_profile',
    'user_settings'
]


def reverse_func(state, schema):
    pages = [format_ember_waffle_flag_name(page) for page in EMBER_WAFFLE_PAGES]
    Flag.objects.filter(name__in=pages).delete()
    return


def format_ember_waffle_flag_name(page):
    return '{}{}{}'.format('ember_', page, '_page')


def add_ember_waffle_flags(state, schema):
    """
    This migration adds some waffle flags for pages that are being emberized.
    Waffle flags are used for feature flipping, for example, showing an
    emberized page to one set of users, and the existing osf page for another
    set.

    By default, flags are given an everyone=False value, which overrides all
    other settings, making the flag False for everyone. Flag settings can be
    changed in the Django admin app.
    """
    for page in EMBER_WAFFLE_PAGES:
        try:
            with transaction.atomic():
                Flag.objects.create(name=format_ember_waffle_flag_name(page), everyone=False)
        except IntegrityError as e:
            # just in case someone has already added a flag with this name to the db
            pass
    return


class Migration(migrations.Migration):

    dependencies = [
        ('osf', '0082_merge_20180213_1502'),
    ]

    operations = [
        migrations.RunPython(add_ember_waffle_flags, reverse_func)
    ]
Add migration to add some ember waffle flags to the db for waffle pages with a default value of False. Can be changed in django app when ready.
Add migration to add some ember waffle flags to the db for waffle pages with a default value of False. Can be changed in django app when ready.
Python
apache-2.0
adlius/osf.io,mfraezz/osf.io,mattclark/osf.io,chennan47/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,caseyrollins/osf.io,sloria/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,adlius/osf.io,brianjgeiger/osf.io,binoculars/osf.io,cslzchen/osf.io,saradbowman/osf.io,Johnetordoff/osf.io,felliott/osf.io,chennan47/osf.io,aaxelb/osf.io,icereval/osf.io,erinspace/osf.io,saradbowman/osf.io,baylee-d/osf.io,erinspace/osf.io,aaxelb/osf.io,Johnetordoff/osf.io,adlius/osf.io,pattisdr/osf.io,mfraezz/osf.io,mfraezz/osf.io,icereval/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,cslzchen/osf.io,erinspace/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,chennan47/osf.io,mattclark/osf.io,pattisdr/osf.io,binoculars/osf.io,baylee-d/osf.io,caseyrollins/osf.io,HalcyonChimera/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,felliott/osf.io,sloria/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,sloria/osf.io,icereval/osf.io,mfraezz/osf.io,binoculars/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,pattisdr/osf.io,felliott/osf.io,aaxelb/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io
b5d5f377cf3d3b2b4459c36ba47e2e0f4a3125f4
mothermayi/colors.py
mothermayi/colors.py
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'


def green(text):
    return GREEN + text + ENDC


def red(text):
    return RED + text + ENDC
Add module for putting color in our text output
Add module for putting color in our text output
Python
mit
EliRibble/mothermayi
ee5b38c649b1a5b46ce5b53c179bde57b3a6e6f2
examples/customserverexample.py
examples/customserverexample.py
#!/usr/bin/env python3

from pythinclient.server import BasicThinServer


class CustomThinServer(BasicThinServer):
    def __init__(self, port=65000, is_daemon=False):
        super(CustomThinServer, self).__init__(port, is_daemon=is_daemon)
        # add custom server hooks
        self.add_hook('help', self.__server_help)
        self.add_hook('echo', self.__server_echo)
        self.add_hook('log', self.__server_log)

    def on_accept(self, conn, addr):
        """
        This is basically a copy of the on_accept from the BasicThinServer
        """
        # receive the message
        message = conn.recv(self.recv_size)
        # handle the message
        self.on_receive(message, conn, addr)
        # close the connection
        conn.close()

    def __server_help(self, msg, conn, addr):
        conn.send(
            """
            Available commands:
              help : shows this help
              echo : send a message back to the client
              log  : log a message to the server.log file
            """.encode('ascii'))

    def __server_echo(self, msg, conn, addr):
        conn.send((msg + '\n').encode('ascii'))

    def __server_log(self, msg, conn, addr):
        # write the given message to the logfile
        with open("server.log", "a+") as fp:
            fp.write(msg + '\n')


if __name__ == "__main__":
    from sys import argv
    daemon = True if "-d" in argv or "--daemon" in argv else False
    server = CustomThinServer(is_daemon=daemon)
    # start it up
    server.start()
Add a custom server example implementation. This is fully compatible with the client example.
Add a custom server example implementation. This is fully compatible with the client example.
Python
bsd-3-clause
alekratz/pythinclient
fe6d7b11ab87d9ea3a2b99aee17b9a371c55f162
examples/mhs_atmosphere_plot.py
examples/mhs_atmosphere_plot.py
# -*- coding: utf-8 -*- """ Created on Fri Jan 9 12:52:31 2015 @author: stuart """ import os import glob import yt model = 'spruit' datadir = os.path.expanduser('~/mhs_atmosphere/'+model+'/') files = glob.glob(datadir+'/*') files.sort() print(files) ds = yt.load(files[0]) slc = yt.SlicePlot(ds, fields='density_bg', normal='x') slc.save('~/yt.png')
Add a very basic yt plotting example
Add a very basic yt plotting example
Python
bsd-2-clause
SWAT-Sheffield/pysac,Cadair/pysac
d6d321fa99b9def1d85ff849fc13bbe17fa58510
featurex/tests/test_datasets.py
featurex/tests/test_datasets.py
from featurex.datasets.text import _load_datasets, fetch_dictionary
from unittest import TestCase
from pandas import DataFrame
import urllib2


class TestDatasets(TestCase):

    def test_dicts(self):
        """
        Check that all text dictionaries download successfully.
        """
        datasets = _load_datasets()
        for dataset in datasets.keys():
            try:
                data = fetch_dictionary(dataset, save=False)
            except:
                print("Dataset failed: {0}".format(dataset))
                data = None

                # Determine cause of error.
                try:
                    urllib2.urlopen(datasets[dataset]["url"])
                except urllib2.HTTPError, e:
                    print("HTTP Error: {0}".format(e.code))
                except urllib2.URLError, e:
                    print("URL Error: {0}".format(e.args))

            self.assertIsInstance(data, DataFrame)
Add test for text dictionaries.
Add test for text dictionaries.
Python
bsd-3-clause
tyarkoni/featureX,tyarkoni/pliers
18c40bdc02c5cd27c69edb394e040a3db3d75e05
bin/filter_pycapsule.py
bin/filter_pycapsule.py
#!/usr/bin/python3 """Filters Pycapsule error""" import fileinput import sys STRING = "RuntimeError: Object of type <class 'NamedArray'>" NUM_AFTER = 5 # How many lines after to delete NUM_BEFORE = 4 # How far before to delete output_lines = [] delete_count = 0 for line in fileinput.input(): if delete_count > 0: delete_count -= 1 elif STRING in line: output_lines = output_lines[:-NUM_BEFORE] delete_count = NUM_AFTER else: output_lines.append(line) # output_str = "\n".join(output_lines) sys.stdout.write(output_str)
Add filtering for bogus output
Add filtering for bogus output
Python
apache-2.0
ScienceStacks/BaseStack,ScienceStacks/BaseStack,ScienceStacks/BaseStack
d0aa398a0df17540c7d9160dadd48d3ab60e230e
core/tests/tests_client_home_page.py
core/tests/tests_client_home_page.py
#! /usr/bin/env python __author__ = "Henri Buyse" import pytest from django.test import Client def test_client_get_home_page(): c = Client() response = c.get('/') assert response.status_code == 200 @pytest.mark.django_db def test_logged_client_get_home_page(): c = Client() c.login(username='test', password='test') response = c.get('/') assert response.status_code == 200 def test_client_post_home_page(): c = Client() response = c.post('/', {'username': 'john', 'password': 'smith'}) assert response.status_code == 200 @pytest.mark.django_db def test_logged_client_post_home_page(): c = Client() c.login(username='test', password='test') response = c.post('/', {'username': 'john', 'password': 'smith'}) assert response.status_code == 200
Test home page as anonymous and logged user
Test home page as anonymous and logged user
Python
mit
hbuyse/VBTournaments,hbuyse/VBTournaments,hbuyse/VBTournaments
b49371bbcb756371296bfe309071f2b9579e5b99
examples/test_markers.py
examples/test_markers.py
""" These tests demonstrate pytest marker use for finding and running tests. Usage examples from this file: pytest -v -m marker_test_suite # Runs A, B, C, D pytest -v -m marker1 # Runs A pytest -v -m marker2 # Runs B, C pytest -v -m xkcd_code # Runs C pytest test_markers.py -v -m "not marker2" # Runs A, D (The "-v" will display the names of tests as they run.) (Add "--collect-only" to display names of tests without running them.) """ import pytest from seleniumbase import BaseCase @pytest.mark.marker_test_suite class MarkerTestSuite(BaseCase): @pytest.mark.marker1 def test_A(self): self.open("https://xkcd.com/1319/") self.assert_text("Automation", "div#ctitle") @pytest.mark.marker2 def test_B(self): self.open("https://www.xkcd.com/1700/") self.assert_text("New Bug", "div#ctitle") @pytest.mark.marker2 @pytest.mark.xkcd_code # Tests can have multiple markers def test_C(self): self.open("https://xkcd.com/844/") self.assert_text("Good Code", "div#ctitle") def test_D(self): self.open("https://xkcd.com/2021/") self.assert_text("Software Development", "div#ctitle")
Add test suite for demoing pytest markers
Add test suite for demoing pytest markers
Python
mit
mdmintz/seleniumspot,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/seleniumspot,seleniumbase/SeleniumBase,mdmintz/SeleniumBase
e06a766e082f168e0b89776355b622b980a0a735
locations/spiders/learning_experience.py
locations/spiders/learning_experience.py
# -*- coding: utf-8 -*-
import scrapy
import re

from locations.items import GeojsonPointItem


class TheLearningExperienceSpider(scrapy.Spider):
    name = "learning_experience"
    allowed_domains = ["thelearningexperience.com"]
    start_urls = (
        'https://thelearningexperience.com/our-centers/directory',
    )

    def parse(self, response):
        for loc_path in response.xpath('//a[@itemprop="url"]/@href'):
            yield scrapy.Request(
                response.urljoin(loc_path.extract()),
                callback=self.parse_location,
            )

    def parse_location(self, response):
        properties = {
            'name': response.xpath('//h1[@class="lp-yellow-text"]/text()').extract_first(),
            'addr:full': response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first(),
            'addr:city': response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first(),
            'addr:state': response.xpath('//span[@itemprop="addressRegion"]/text()').extract_first(),
            'addr:postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
            'phone': response.xpath('//a[@itemprop="telephone"]/text()').extract_first(),
            'opening_hours': response.xpath('//tr[@itemprop="openingHours"]/@datetime').extract_first(),
            'ref': response.request.url,
            'website': response.request.url,
        }

        lon_lat = [
            float(response.xpath('//meta[@name="place:location:longitude"]/@content').extract_first()),
            float(response.xpath('//meta[@name="place:location:latitude"]/@content').extract_first()),
        ]

        yield GeojsonPointItem(
            properties=properties,
            lon_lat=lon_lat,
        )
Add The Learning Experience spider
Add The Learning Experience spider
Python
mit
iandees/all-the-places,iandees/all-the-places,iandees/all-the-places
c44448e6e2fa846d4eea8b6d7d907168becdd1ad
runtests.py
runtests.py
#!/usr/bin/env python
import sys
import logging
from optparse import OptionParser

from tests.config import configure

logging.disable(logging.CRITICAL)


def run_tests(*test_args):
    from django_nose import NoseTestSuiteRunner
    test_runner = NoseTestSuiteRunner()
    if not test_args:
        test_args = ['tests']
    num_failures = test_runner.run_tests(test_args)
    if num_failures:
        sys.exit(num_failures)


if __name__ == '__main__':
    parser = OptionParser()
    __, args = parser.parse_args()

    # If no args, then use 'progressive' plugin to keep the screen real estate
    # used down to a minimum. Otherwise, use the spec plugin
    nose_args = ['-s', '-x',
                 '--with-progressive' if not args else '--with-spec']
    nose_args.extend([
        '--with-coverage', '--cover-package=oscar', '--cover-html',
        '--cover-html-dir=htmlcov'])
    configure(nose_args)
    run_tests(*args)
#!/usr/bin/env python
import sys
import logging
from optparse import OptionParser

from tests.config import configure

logging.disable(logging.CRITICAL)


def run_tests(*test_args):
    from django_nose import NoseTestSuiteRunner
    test_runner = NoseTestSuiteRunner()
    if not test_args:
        test_args = ['tests']
    num_failures = test_runner.run_tests(test_args)
    if num_failures:
        sys.exit(num_failures)


if __name__ == '__main__':
    parser = OptionParser()
    __, args = parser.parse_args()

    # If no args, then use 'progressive' plugin to keep the screen real estate
    # used down to a minimum. Otherwise, use the spec plugin
    nose_args = ['-s', '-x',
                 '--with-progressive' if not args else '--with-spec']
    #nose_args.extend([
    #    '--with-coverage', '--cover-package=oscar', '--cover-html',
    #    '--cover-html-dir=htmlcov'])
    configure(nose_args)
    run_tests(*args)
Remove coverage options from default test run
Remove coverage options from default test run These were getting annoying for normal runs.
Python
bsd-3-clause
faratro/django-oscar,WadeYuChen/django-oscar,django-oscar/django-oscar,jmt4/django-oscar,okfish/django-oscar,manevant/django-oscar,john-parton/django-oscar,jinnykoo/wuyisj,rocopartners/django-oscar,itbabu/django-oscar,makielab/django-oscar,saadatqadri/django-oscar,solarissmoke/django-oscar,sonofatailor/django-oscar,pdonadeo/django-oscar,ahmetdaglarbas/e-commerce,pdonadeo/django-oscar,pdonadeo/django-oscar,jmt4/django-oscar,okfish/django-oscar,manevant/django-oscar,monikasulik/django-oscar,jlmadurga/django-oscar,taedori81/django-oscar,kapari/django-oscar,ka7eh/django-oscar,bschuon/django-oscar,eddiep1101/django-oscar,mexeniz/django-oscar,DrOctogon/unwash_ecom,mexeniz/django-oscar,kapari/django-oscar,MatthewWilkes/django-oscar,nickpack/django-oscar,elliotthill/django-oscar,bschuon/django-oscar,jmt4/django-oscar,rocopartners/django-oscar,bnprk/django-oscar,WillisXChen/django-oscar,jinnykoo/christmas,mexeniz/django-oscar,kapari/django-oscar,Bogh/django-oscar,makielab/django-oscar,spartonia/django-oscar,ademuk/django-oscar,ahmetdaglarbas/e-commerce,pasqualguerrero/django-oscar,dongguangming/django-oscar,ka7eh/django-oscar,rocopartners/django-oscar,rocopartners/django-oscar,sonofatailor/django-oscar,anentropic/django-oscar,nickpack/django-oscar,saadatqadri/django-oscar,Bogh/django-oscar,elliotthill/django-oscar,vovanbo/django-oscar,faratro/django-oscar,amirrpp/django-oscar,spartonia/django-oscar,lijoantony/django-oscar,Idematica/django-oscar,pasqualguerrero/django-oscar,michaelkuty/django-oscar,vovanbo/django-oscar,kapt/django-oscar,thechampanurag/django-oscar,Bogh/django-oscar,jlmadurga/django-oscar,nfletton/django-oscar,okfish/django-oscar,kapari/django-oscar,sasha0/django-oscar,WillisXChen/django-oscar,lijoantony/django-oscar,lijoantony/django-oscar,ahmetdaglarbas/e-commerce,jinnykoo/wuyisj.com,solarissmoke/django-oscar,amirrpp/django-oscar,bnprk/django-oscar,WillisXChen/django-oscar,eddiep1101/django-oscar,marcoantoniooliveira/labweb,dongguangming/django-oscar,marcoantoniooliveira/labweb,machtfit/django-oscar,okfish/django-oscar,jinnykoo/wuyisj.com,okfish/django-oscar,dongguangming/django-oscar,binarydud/django-oscar
97b19e6f3705fc0b320f33b44476f7139833de9e
glowing-lines.py
glowing-lines.py
from PIL import Image, ImageDraw
import random

W = 500
im = Image.new('RGB', (W, W))

NCOLORS = 19
COLORS = []


def get_origin_point():
    return [random.randint(0, W - 1), random.randint(0, W - 1)]


def get_vector():
    # randint needs integer bounds
    return [random.randint(int(0.3 * W), int(0.6 * W)),
            random.randint(int(0.3 * W), int(0.6 * W))]


def draw_one_line(draw):
    op = get_origin_point()
    vec = get_vector()
    tu = tuple(op + vec)
    for i in range(NCOLORS):
        draw.line(tu, fill=COLORS[i], width=NCOLORS - i)


def draw_lines(draw):
    for i in range(30):
        draw_one_line(draw)


def init_colors(ncolors):
    v = 255.0
    for i in range(ncolors):
        COLORS.append((0, int(v), 0))
        v *= 0.80
    COLORS.reverse()


init_colors(NCOLORS)
draw = ImageDraw.Draw(im)
red = (255, 0, 0)
draw_lines(draw)
im.save('f.png')
Add crude script to draw glowing lines; currently the dark part hides other lines, making them look physical; should be additive
Add crude script to draw glowing lines; currently the dark part hides other lines, making them look physical; should be additive
Python
mit
redpig2/pilhacks
f0922bade498bcadf587c4b756060cf062fe68d4
tests/unit/test_rand.py
tests/unit/test_rand.py
'''
Test sorbic.utils.rand
'''
# import sorbic libs
import sorbic.utils.rand

# Import python libs
import unittest


class TestRand(unittest.TestCase):
    '''
    Cover db funcs
    '''
    def test_rand_hex_strs(self):
        '''
        Test database creation
        '''
        rands = []
        for _ in range(0, 100):
            r_1 = sorbic.utils.rand.rand_hex_str(24)
            self.assertEqual(24, len(r_1))
            rands.append(r_1)
        for n_1 in range(0, 100):
            for n_2 in range(0, 100):
                if n_1 == n_2:
                    continue
                self.assertNotEqual(rands[n_1], rands[n_2])

    def test_rand_raw_strs(self):
        '''
        Test database creation
        '''
        rands = []
        for _ in range(0, 100):
            r_1 = sorbic.utils.rand.rand_raw_str(24)
            self.assertEqual(24, len(r_1))
            rands.append(r_1)
        for n_1 in range(0, 100):
            for n_2 in range(0, 100):
                if n_1 == n_2:
                    continue
                self.assertNotEqual(rands[n_1], rands[n_2])
Add initial tests for the rand module
Add initial tests for the rand module
Python
apache-2.0
thatch45/sorbic,s0undt3ch/sorbic
f74b1615567d5cbf2cb00572cc14450ffd4b0c1c
test/requests/parametrized_test.py
test/requests/parametrized_test.py
import logging
import unittest

from elasticsearch import Elasticsearch, TransportError


class ParametrizedTest(unittest.TestCase):

    def __init__(self, methodName='runTest', gn2_url="http://localhost:5003",
                 es_url="localhost:9200"):
        super(ParametrizedTest, self).__init__(methodName=methodName)
        self.gn2_url = gn2_url
        self.es_url = es_url

    def setUp(self):
        self.es = Elasticsearch([self.es_url])
        self.es_cleanup = []

        es_logger = logging.getLogger("elasticsearch")
        es_logger.addHandler(
            logging.FileHandler("/tmp/es_TestRegistrationInfo.log"))
        es_trace_logger = logging.getLogger("elasticsearch.trace")
        es_trace_logger.addHandler(
            logging.FileHandler("/tmp/es_TestRegistrationTrace.log"))

    def tearDown(self):
        self.es.delete_by_query(
            index="users",
            doc_type="local",
            body={"query": {"match": {"email_address": "[email protected]"}}})
Create parametrized superclass for tests
Create parametrized superclass for tests * Since the tests require that some parameters be provided while running the tests, create a class that helps abstract away the details of retrieving and setting the expected parameters.
Python
agpl-3.0
DannyArends/genenetwork2,pjotrp/genenetwork2,zsloan/genenetwork2,DannyArends/genenetwork2,DannyArends/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,zsloan/genenetwork2,genenetwork/genenetwork2,genenetwork/genenetwork2,DannyArends/genenetwork2,genenetwork/genenetwork2,zsloan/genenetwork2,DannyArends/genenetwork2,DannyArends/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,genenetwork/genenetwork2,zsloan/genenetwork2
1475a095620d2c9ef47f8bd9ea4363907ff0067b
remove_duplicates_from_sorted_list_ii.py
remove_duplicates_from_sorted_list_ii.py
# Definition for singly-linked list.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    # @param head, a ListNode
    # @return a ListNode
    def deleteDuplicates(self, head):
        if None == head:
            return None
        header = ListNode(-1)
        prev = header
        header.next = head
        slow = head
        fast = head.next
        count = 0x00
        while None != fast:
            if slow.val == fast.val:
                fast = fast.next
                count += 1
                continue
            elif count > 0:
                prev.next = fast
                slow = fast
                count = 0
                fast = slow.next
            else:
                prev = slow
                slow = fast
                count = 0
                fast = slow.next
        if count > 0:
            prev.next = None
        return header.next


if __name__ == "__main__":
    s = Solution()
    head = ListNode(1)
    node_1 = ListNode(2)
    head.next = node_1
    node_2 = ListNode(3)
    node_1.next = node_2
    node_3 = ListNode(3)
    node_2.next = node_3
    node_4 = ListNode(4)
    node_3.next = node_4
    node_5 = ListNode(4)
    node_4.next = node_5
    node_6 = ListNode(5)
    node_5.next = node_6
    node_7 = ListNode(6)
    node_6.next = node_7

    head1 = s.deleteDuplicates(head)
    print '======================================='
    node = head1
    while None != node:
        print node.val
        node = node.next
Remove Duplicates from Sorted List II
Remove Duplicates from Sorted List II
Python
apache-2.0
don7hao/leetcode_oj,don7hao/leetcode_oj
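The solution above tracks a repeat counter; the same dummy-head idea is often written by skipping whole runs of equal values instead. A sketch of that equivalent formulation (not the committed code):

```python
def delete_duplicates(head):
    dummy = ListNode(-1)   # sentinel so the original head can be dropped
    dummy.next = head
    prev, cur = dummy, head
    while cur:
        # Advance cur to the end of a run of equal values.
        while cur.next and cur.next.val == cur.val:
            cur = cur.next
        if prev.next is cur:
            prev = cur            # run of length 1: keep the node
        else:
            prev.next = cur.next  # run of length > 1: unlink it all
        cur = cur.next
    return dummy.next
```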
8a9b4de36f35416874d10734ae1c08287ebd5c32
mrequests/examples/get_json.py
mrequests/examples/get_json.py
import mrequests as requests

host = 'http://localhost/'
url = host + "get"

r = requests.get(url, headers={"Accept": "application/json"})
print(r)
print(r.content)
print(r.text)
print(r.json())
r.close()
Add simple mrequests GET example
Add simple mrequests GET example

Signed-off-by: Christopher Arndt <[email protected]>
Python
mit
SpotlightKid/micropython-stm-lib
13b835d525f6576bfa047ce3001479d8b81f15b7
mistral/db/sqlalchemy/migration/alembic_migrations/versions/014_fix_past_scripts_discrepancies.py
mistral/db/sqlalchemy/migration/alembic_migrations/versions/014_fix_past_scripts_discrepancies.py
# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""fix_past_scripts_discrepancies

Revision ID: 014
Revises: 013
Create Date: 2016-08-07 13:12:34.958845

"""

# revision identifiers, used by Alembic.
revision = '014'
down_revision = '013'

from alembic import op
from sqlalchemy.dialects import mysql
from sqlalchemy.engine import reflection


def upgrade():
    inspect = reflection.Inspector.from_engine(op.get_bind())

    ct_unique_constraints = [
        uc['name'] for uc in inspect.get_unique_constraints('cron_triggers_v2')
    ]

    # unique constraint was added in 001, 002 and 003 with slight variations
    # without deleting the previous ones.
    # here we try to delete all three in case they exist
    if 'workflow_input_hash' in ct_unique_constraints:
        op.drop_index('workflow_input_hash', table_name='cron_triggers_v2')
    if 'workflow_input_hash_2' in ct_unique_constraints:
        op.drop_index('workflow_input_hash_2', table_name='cron_triggers_v2')
    if 'workflow_input_hash_3' in ct_unique_constraints:
        op.drop_index('workflow_input_hash_3', table_name='cron_triggers_v2')

    # create the correct latest unique constraint for table cron_triggers_v2
    op.create_unique_constraint(
        None,
        'cron_triggers_v2',
        [
            'workflow_input_hash', 'workflow_name', 'pattern',
            'project_id', 'workflow_params_hash', 'remaining_executions',
            'first_execution_time'
        ]
    )

    # column was added in 012. nullable value does not match today's model.
    op.alter_column(
        'event_triggers_v2',
        'workflow_id',
        existing_type=mysql.VARCHAR(length=36),
        nullable=True
    )

    # column was added in 010. nullable value does not match today's model
    op.alter_column(
        'resource_members_v2',
        'project_id',
        existing_type=mysql.VARCHAR(length=80),
        nullable=True
    )
Fix past migration scripts discrepancies
Fix past migration scripts discrepancies

It is important that running all migration scripts from first to last
will give the expected model. This fix is a first step. It fixes all
discrepancies from previous scripts.

There are still changes in the model that needs to have a migration
script written for them, but that is not handled now.

Change-Id: Ie4079eecef6766ed91c210ccefa058175e0f22d0
Python
apache-2.0
StackStorm/mistral,openstack/mistral,StackStorm/mistral,openstack/mistral
ee05b846612aa5c978949ff90f38290915983385
check-entropy.py
check-entropy.py
#!/usr/bin/env python

import sys
import os
import logging
import time

log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
mainHandler = logging.StreamHandler()
mainHandler.setFormatter(logging.Formatter('%(levelname)s %(asctime)s - %(module)s - %(funcName)s: %(message)s'))
log.addHandler(mainHandler)

PROC_ENTROPY_AVAIL = '/proc/sys/kernel/random/entropy_avail'


def print_entropy_avail():
    with open(PROC_ENTROPY_AVAIL, 'r') as entropy_avail:
        log.info('Entropy in pool: %s' % entropy_avail.readline())


def run_loop():
    try:
        while True:
            print_entropy_avail()
            time.sleep(1)
    except KeyboardInterrupt as e:
        log.debug('Exiting due to keyboard interrupt')
        sys.exit(0)


if __name__ == '__main__':
    run_loop()
Test tool for checking entropy available in a loop
Test tool for checking entropy available in a loop
Python
mit
infincia/TokenTools
8a029fb00892c8bf385dae76466ae1e211e27ca6
cohydra/test_profile.py
cohydra/test_profile.py
import tempfile
import unittest
import unittest.mock

from . import profile
from . import test_helper


@unittest.mock.patch.object(
    profile.Profile,
    'generate',
    autospec=True,
)
@unittest.mock.patch.object(
    profile.Profile,
    '__abstractmethods__',
    new=set(),
)
class TestProfile(unittest.TestCase):
    def setUp(self):
        self.dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.dir.cleanup()

    def test_generate_all(self, mock_generate):
        p = profile.Profile(self.dir.name, None)
        p0 = profile.Profile(self.dir.name, p)
        p00 = profile.Profile(self.dir.name, p0)
        p1 = profile.Profile(self.dir.name, p)
        p.generate_all()
        self.assertEqual(
            mock_generate.mock_calls,
            [unittest.mock.call(x) for x in (p, p0, p00, p1)])
Add basic test for profile.Profile.
Add basic test for profile.Profile. Addresses #3.
Python
apache-2.0
dseomn/cohydra
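The `__abstractmethods__` patch is what makes the abstract class instantiable inside the test. The same trick in isolation, as a minimal self-contained sketch:

```python
import abc
import unittest.mock


class Base(abc.ABC):
    @abc.abstractmethod
    def generate(self):
        raise NotImplementedError


# Base() normally raises TypeError; emptying the abstract-method set
# lets a test instantiate it without writing a throwaway subclass.
with unittest.mock.patch.object(Base, '__abstractmethods__', new=set()):
    instance = Base()  # succeeds while the patch is active
```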
7bf60d5ef1e6052044ebfedf1e2bf2dddc0940b8
python/getmonotime.py
python/getmonotime.py
import getopt, sys


def usage():
    # Minimal stub: the original script called usage() without defining
    # it, which would raise NameError on a bad option string.
    print 'usage: getmonotime.py [-S sippy_path]'
    sys.exit(1)


if __name__ == '__main__':
    sippy_path = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], 's:S:i:o:b')
    except getopt.GetoptError:
        usage()
    for o, a in opts:
        if o == '-S':
            sippy_path = a.strip()
            continue
    if sippy_path != None:
        sys.path.insert(0, sippy_path)
    from sippy.Time.clock_dtime import clock_getdtime, CLOCK_MONOTONIC
    print clock_getdtime(CLOCK_MONOTONIC)
Implement RTPP_LOG_TSTART and RTPP_LOG_TFORM="rel" env parameters to aid debugging.
Implement RTPP_LOG_TSTART and RTPP_LOG_TFORM="rel" env parameters to aid debugging.
Python
bsd-2-clause
sippy/rtp_cluster,sippy/rtp_cluster
5ee0f309521320f0cc91c61b112fd94c8415f37c
jinja2.py
jinja2.py
from __future__ import (division, absolute_import, print_function,
                        unicode_literals)

import jinja2


class PermissiveUndefined(jinja2.Undefined):
    def __getattr__(self, name):
        return PermissiveUndefined(name)

    def __getitem__(self, name):
        return PermissiveUndefined(name)

    def __call__(self, *args, **kwargs):
        return PermissiveUndefined()


class JSDict(dict):
    def __getitem__(self, item):
        try:
            return super(JSDict, self).__getitem__(item)
        except KeyError:
            return PermissiveUndefined(item)

    def __getattr__(self, name):
        return self[name]


class JSList(list):
    @property
    def length(self):
        return len(self)


# TODO(eitan): this won't work for dict and list literals inside expressions in
# the template
def js_data(obj):
    if type(obj) is dict:
        out = JSDict()
        for k, v in obj.iteritems():
            out[k] = js_data(v)
    elif type(obj) is list:
        out = JSList(js_data(item) for item in obj)
    else:
        out = obj
    return out
Add helpers to make Jinja2 more like Angular: PermissiveUndefined, JSDict, and JSList.
Add helpers to make Jinja2 more like Angular: PermissiveUndefined, JSDict, and JSList.
Python
mit
emosenkis/angular2tmpl
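A usage sketch of the helpers above: missing names render as blanks instead of raising, dicts gain attribute access, and lists gain `.length`, mimicking Angular template semantics. The template strings here are illustrative:

```python
env = jinja2.Environment(undefined=PermissiveUndefined)

context = js_data({'user': {'name': 'Ada'}, 'items': [1, 2, 3]})
tmpl = env.from_string('Hello {{ user.name }}, {{ items.length }} items')
print(tmpl.render(**context))  # -> Hello Ada, 3 items

# An entirely unknown name renders as an empty string instead of
# raising UndefinedError.
print(env.from_string('{{ nothing.here }}').render())  # -> (empty)
```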
05b90dab50281e9b1cb35575d35db5f45d2ba15a
connected_components.py
connected_components.py
def get_connected_components():
    # assume nodes labeled 1 to n
    # connected_components = []
    # for i in 1..n
    #     if i not yet explored
    #         connected_component = bfs(graph, node i)
    #         connected_components.append(connected_component)
    # return connected_components
Add pseudo for connected components
Add pseudo for connected components
Python
mit
stephtzhang/algorithms
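A runnable version of the pseudocode, using a queue-based BFS over an adjacency list; the `{node: [neighbors]}` representation is an assumption, since the pseudocode does not fix one:

```python
from collections import deque


def get_connected_components(graph):
    # `graph` is assumed to be {node: [neighbor, ...]} with nodes 1..n.
    explored = set()
    connected_components = []
    for i in graph:
        if i in explored:
            continue
        # BFS from i collects exactly one component.
        component, queue = [], deque([i])
        explored.add(i)
        while queue:
            node = queue.popleft()
            component.append(node)
            for neighbor in graph[node]:
                if neighbor not in explored:
                    explored.add(neighbor)
                    queue.append(neighbor)
        connected_components.append(component)
    return connected_components


# e.g. {1: [2], 2: [1], 3: []} -> [[1, 2], [3]]
```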
c09446f758f42fbf00866360e0760f1a0fae0ab7
tests/test_sso/test_azure_id_creation.py
tests/test_sso/test_azure_id_creation.py
from urllib.parse import urlparse

import pytest
from django.urls import reverse

from tests.utils import BaseViewTest


@pytest.mark.sso_mark
class AzureIdentityTest(BaseViewTest):
    def test_wrong_provider_raises_404(self):
        auth_path = reverse('oauth:create_identity', kwargs={'provider': 'undefined'})
        resp = self.client.get(auth_path)
        assert resp.status_code == 404

    def test_get_redirects_to_login(self):
        auth_path = reverse('oauth:create_identity', kwargs={'provider': 'azure'})
        resp = self.client.get(auth_path)
        redirect = urlparse(resp['Location'])
        assert resp.status_code == 302
        assert redirect.path == reverse('users:login')

    def test_flow(self):
        auth_path = reverse('oauth:create_identity', kwargs={'provider': 'azure'})
        resp = self.client.post(auth_path)
        assert resp.status_code == 302
        redirect = urlparse(resp['Location'])
        assert redirect.scheme == 'https'
        assert redirect.netloc == 'login.microsoft.com'
        assert redirect.path == '/None/oauth2/authorize'
Add azure id creation tests
Add azure id creation tests
Python
apache-2.0
polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon
62906d37cca8cde2617372f71881dc802f23d6b9
h2/frame_buffer.py
h2/frame_buffer.py
# -*- coding: utf-8 -*-
"""
h2/frame_buffer
~~~~~~~~~~~~~~~

A data structure that provides a way to iterate over a byte buffer in
terms of frames.
"""
from hyperframe.frame import Frame


class FrameBuffer(object):
    """
    This is a data structure that expects to act as a buffer for HTTP/2
    data that allows iteration in terms of H2 frames.
    """
    def __init__(self, server=False):
        self.data = b''
        self.preamble_len = (
            len(b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n') if server else 0
        )

    def add_data(self, data):
        """
        Add more data to the frame buffer.

        :param data: A bytestring containing the byte buffer.
        """
        if self.preamble_len:
            data_len = len(data)
            data = data[self.preamble_len:]
            self.preamble_len -= min(data_len, self.preamble_len)

        self.data += data

    def __iter__(self):
        return self

    def next(self):
        if len(self.data) < 9:
            raise StopIteration()

        f, length = Frame.parse_frame_header(self.data[:9])
        if len(self.data) < length + 9:
            raise StopIteration()

        f.parse_body(memoryview(self.data[9:9+length]))
        self.data = self.data[9+length:]
        return f
Define an iterable frame buffer.
Define an iterable frame buffer.
Python
mit
vladmunteanu/hyper-h2,vladmunteanu/hyper-h2,python-hyper/hyper-h2,bhavishyagopesh/hyper-h2,Kriechi/hyper-h2,python-hyper/hyper-h2,Kriechi/hyper-h2,mhils/hyper-h2
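A usage sketch: data arrives in arbitrary chunks, and iteration yields only the frames that are complete so far. The socket and handler names below are placeholders:

```python
buff = FrameBuffer(server=True)

while connection_open:               # placeholder loop condition
    buff.add_data(sock.recv(65535))  # placeholder socket read
    # Iteration stops cleanly at the first incomplete frame; leftover
    # bytes stay in the buffer for the next recv.
    for frame in buff:
        handle_frame(frame)          # placeholder dispatch
```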
595eaf19c1d3a89970b5ebe148f12a5df11807cc
run_helmholtz.py
run_helmholtz.py
from __future__ import absolute_import, print_function, division

from firedrake import *
from helmholtz import MixedHelmholtzProblem
from meshes import generate_2d_square_mesh

import matplotlib as plt


def run_helmholtz_resolution_test(degree, quadrilateral=False):
    """
    """
    params = {'mat_type': 'matfree',
              'ksp_type': 'preonly',
              'pc_type': 'python',
              'pc_python_type': 'firedrake.HybridizationPC',
              'hybridization_ksp_rtol': 1e-8,
              'hybridization_pc_type': 'lu',
              'hybridization_pc_factor_mat_solver_package': 'mumps',
              'hybridization_ksp_type': 'preonly',
              'hybridization_projector_tolerance': 1e-14}

    scalar_data = []
    flux_data = []
    for r in range(1, 10):
        mesh = generate_2d_square_mesh(r, quadrilateral=quadrilateral)
        problem = MixedHelmholtzProblem(mesh, degree)

        u, p = problem.solve(params)
        analytic_u, analytic_p = problem.analytic_solution()

        err_u = errornorm(u, analytic_u)
        err_p = errornorm(p, analytic_p)
        scalar_data.append(err_p)
        flux_data.append(err_u)

    return scalar_data, flux_data
Add module for helmholtz results
Add module for helmholtz results
Python
mit
thomasgibson/firedrake-hybridization
3ef4e68ae64a46f09103001f391b3d6a3d098e33
test/test_bezier_direct.py
test/test_bezier_direct.py
from __future__ import division

import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#

import cocos
from cocos.director import director
from cocos.actions import Bezier
from cocos.sprite import Sprite
import pyglet
from cocos import path


def direct_bezier(p0, p1, p2, p3):
    '''Given four points, returns a bezier path that go through them.

    It starts in p0, finish in p3, and pass through p1 and p2 in t=0.4
    and t=0.6 respectively.
    '''
    def _one_dim(p0xy, B1xy, B2xy, p3xy):
        '''Calculates the p1 and p2 to draw through B1 and B2 given p0 and p3.

        p0: P sub 0 of bezier, it's also B(0)
        B1: B(0.4)
        B2: B(0.6)
        p3: P sub 3 of bezier, it's also B(1)
        '''
        p2xy = (1.5 * B2xy - B1xy + 0.12 * p0xy - 0.26 * p3xy) / 0.36
        p1xy = (B2xy - 0.064 * p0xy - 0.432 * p2xy - 0.216 * p3xy) / 0.288
        return p1xy, p2xy

    bp1x, bp2x = _one_dim(p0[0], p1[0], p2[0], p3[0])
    bp1y, bp2y = _one_dim(p0[1], p1[1], p2[1], p3[1])
    bp1 = bp1x, bp1y
    bp2 = bp2x, bp2y
    bezier_path = path.Bezier(p0, p3, bp1, bp2)
    return bezier_path


class TestLayer(cocos.layer.Layer):
    def __init__(self):
        super(TestLayer, self).__init__()

        go_through = [(100, 300), (370, 330), (430, 270), (750, 550)]

        # visually spot through where it should go
        for pos in go_through:
            sprite = Sprite('fire.png')
            sprite.position = pos
            sprite.scale = .3
            self.add(sprite)

        # calculate the points
        bezier_path = direct_bezier(*go_through)

        sprite = Sprite('fire.png')
        sprite.scale = .3
        sprite.color = (0, 0, 255)
        self.add(sprite)
        sprite.do(Bezier(bezier_path, 5))


if __name__ == "__main__":
    director.init(width=800, height=600)
    test_layer = TestLayer()
    main_scene = cocos.scene.Scene(test_layer)
    director.run(main_scene)
Test using bezier going through 4 specific points
Test using bezier going through 4 specific points

git-svn-id: 5665c17dde288ce6190d85f4a2d6486351776710@869 f663ce52-ac46-0410-b8de-c1c220b0eb76
Python
bsd-3-clause
eevee/cocos2d-mirror
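The magic constants in `_one_dim` fall out of the cubic Bernstein form B(t) = (1-t)^3*P0 + 3t(1-t)^2*P1 + 3t^2(1-t)*P2 + t^3*P3, evaluated at t=0.4 and t=0.6 and solved for P1 and P2. A standalone check that the algebra really reproduces the targets:

```python
def solve_controls(p0, b1, b2, p3):
    # Same algebra as _one_dim above, written out standalone.
    p2 = (1.5 * b2 - b1 + 0.12 * p0 - 0.26 * p3) / 0.36
    p1 = (b2 - 0.064 * p0 - 0.432 * p2 - 0.216 * p3) / 0.288
    return p1, p2


def bezier_at(p0, p1, p2, p3, t):
    # Cubic Bernstein evaluation for one coordinate.
    s = 1 - t
    return s**3 * p0 + 3*t*s**2 * p1 + 3*t**2*s * p2 + t**3 * p3


# x-coordinates of the example: 100 -> 370 (t=0.4) -> 430 (t=0.6) -> 750
p1x, p2x = solve_controls(100, 370, 430, 750)
assert abs(bezier_at(100, p1x, p2x, 750, 0.4) - 370) < 1e-9
assert abs(bezier_at(100, p1x, p2x, 750, 0.6) - 430) < 1e-9
```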
b4a932eb8d99f9f4d29d3459c62e0cf81240fbdb
scripts/stats.py
scripts/stats.py
import os

import telegram

from leonard import Leonard

telegram_client = telegram.Bot(os.environ['BOT_TOKEN'])
bot = Leonard(telegram_client)
bot.collect_plugins()


def main():
    count = 0
    for key in bot.redis.scan_iter(match='user:*:registered'):
        count += 1
    print('Total users:', count)


if __name__ == '__main__':
    try:
        main()
    except Exception as e:
        bot.logger.error(e)
Add script for counting all users count
Add script for counting all users count
Python
mit
sevazhidkov/leonard
8f97a104bf988a277453bf70043829c45a394919
libvirt/libvirt_attach_device_rbd.py
libvirt/libvirt_attach_device_rbd.py
#!/usr/bin/env python
__author__ = 'weezhard'
__license__ = 'GPL'
__version__ = '1.0.0'

import sys
import argparse
import libvirt
import subprocess

parser = argparse.ArgumentParser()
parser.add_argument('-d', '--domain', help='Domain libvirt', required=True)
parser.add_argument('-p', '--pool', help='Pool ceph', required=True)
parser.add_argument('-i', '--image', help='Volume ceph', required=True)
parser.add_argument('-t', '--target', help='Device target', default='vdz')
args = parser.parse_args()


def getConnection():
    try:
        conn = libvirt.open("qemu:///system")
        return conn
    except libvirt.libvirtError, e:
        print e.get_error_message()
        sys.exit(1)


def delConnection(conn):
    try:
        conn.close()
    except libvirt.libvirtError, e:
        # Bug fix: the original called an undefined get_error_message();
        # report the error from the caught exception instead.
        print e.get_error_message()
        sys.exit(1)


def getSecretUUID(conn, client):
    for secret in conn.listAllSecrets():
        username, stype = secret.usageID().split()
        if username == client:
            uuid = secret.UUIDString()
    try:
        return uuid
    except NameError, e:
        # Bug fix: the original printed an undefined `name`; use `client`.
        print "No UUID for this client name : %s." % client
        print e
        sys.exit(1)


def attach_device(dom, uuid, pool, volume, dev):
    device = """\
<disk type='network' device='disk'>
  <driver name='qemu' type='raw'/>
  <auth username='libvirt'>
    <secret type='ceph' uuid="{uuid}"/>
  </auth>
  <source protocol='rbd' name="{pool}/{volume}">
    <host name='192.168.102.100' port='6789'/>
    <host name='192.168.102.101' port='6789'/>
    <host name='192.168.102.102' port='6789'/>
  </source>
  <target dev="{dev}" bus='virtio'/>
</disk>
"""
    device = device.format(uuid=uuid, pool=pool, volume=volume, dev=dev)
    dom.attachDevice(device)


if __name__ == '__main__':
    conn = getConnection()
    dom = conn.lookupByName(args.domain)
    # Bug fix: the original passed args.name, which the parser never
    # defines; use the secret username hardcoded in the device XML above.
    attach_device(dom, getSecretUUID(conn, 'libvirt'), args.pool, args.image, args.target)
    delConnection(conn)
Add script attach device rbd
Add script attach device rbd
Python
apache-2.0
skylost/heap,skylost/heap,skylost/heap
a20a1cc4ff34185fa1badf812f292c007f9f3d05
flexx/ui/examples/using_python_in_js.py
flexx/ui/examples/using_python_in_js.py
# doc-export: UsingPython
"""
This example demonstrates what things from Python land can be used in JS.

Flexx detects what names are used in the transpiled JS of a Model (or
Widget) class, and tries to look these up in the module, converting the
used objects if possible.

Check out the source of the generated page to see what Flexx did.

Note that once running, there is no interaction with the Python side, so
this example can be exported to standalone HTML.
"""

from flexx import event, app, ui

# Define a value. This can be used in JS as long as it can be serialized
# using JSON (None, bool, int, float, str, list, dict).
# The definition of this value is inside the JS version of this module.
info = dict(name='John', age=42)

# Import a value from another module. It's still just a value, and there is
# no way for Flexx to tell where it was defined, so on the JS side it is
# defined in *this* module just like info. This means that if you import
# and use a value in different modules, in JS these are different instances.
from sys import version

# Define a function (or a class). Provided that it's compatible with PyScript,
# you can just use this in the JS. Note that if this function used a value
# or a function, that would be converted too.
def poly(x, *coefs):
    degree = len(coefs) - 1
    y = 0
    for coef in coefs:
        y += coef * x ** degree
        degree -= 1
    return y

# Import a (PyScript-compatible) function from another module. In this case
# Flexx can tell where it was defined and put it in its own module. See
# the page source.
from html import escape


class UsingPython(ui.Widget):

    def init(self):
        self.label = ui.Label(wrap=0)

    class JS:

        def init(self):
            # A rather boring way to present the info. The point is that
            # we're using all sorts of Python stuff here, that is
            # automatically converted for us.
            lines = []
            lines.append('This JS was generated from Python ' + version)
            lines.append('Person %s is %i years old' % (info.name, info.age))
            lines.append('Evaling 4*x**2 + 5*x + 6 with x=4: ' + poly(4, 4, 5, 6))
            lines.append('... and with x=12: ' + poly(12, 4, 5, 6))
            lines.append('String with escaped html: ' + escape('html <tags>!'))
            lines.append('String with escaped html: ' + escape('Woezel & Pip'))
            self.label.text = '<br />'.join(lines)


if __name__ == '__main__':
    m = app.launch(UsingPython, 'firefox')
    app.run()
Add example for using Python in JS
Add example for using Python in JS
Python
bsd-2-clause
zoofIO/flexx,JohnLunzer/flexx,JohnLunzer/flexx,zoofIO/flexx,jrversteegh/flexx,JohnLunzer/flexx,jrversteegh/flexx
2eceebdd1eb052b81b6563e7bd58c275c4f74592
bin/check_rule_algs.py
bin/check_rule_algs.py
# Print list of rules with invalid algs

from api import config
from api.dao import APINotFoundException
from api.jobs.gears import get_gear_by_name

if __name__ == '__main__':
    for rule in config.db.project_rules.find({}):
        alg = rule.get('alg')
        if not alg:
            print 'Rule {} has no alg.'.format(rule['_id'])
        else:
            try:
                get_gear_by_name(alg)
            except APINotFoundException:
                print 'Rule {} with alg {} does not match any gear in the system'.format(rule['_id'], alg)
Add script to find malformed rules
Add script to find malformed rules
Python
mit
scitran/core,scitran/api,scitran/core,scitran/core,scitran/api,scitran/core
a715d344b72d598d06d8aaba4f82687e0657bd60
python/one-offs/list-to-json.py
python/one-offs/list-to-json.py
'''
One-off junk code to convert a list of programming languages with some
extra information in the comments to JSON
'''

import json
from collections import OrderedDict

# NOTE: `t` (the raw text, one language per line) and `names` (the full
# list of language names) are assumed to be pasted in before running this
# one-off snippet; they are not defined here.

languages = []
for line in t.split('\n'):
    language = OrderedDict()
    slashes = line.find('//')
    if slashes != -1:
        language_name, language_etc = line.split('//', 1)
    else:
        language_name = line
    language_name = language_name.strip()
    language_name = language_name[1:-2]
    language['name'] = language_name

    if slashes != -1:
        language_etc = language_etc.strip()
        if language_etc.startswith('http'):
            language['include'] = False
            language['url'] = language_etc
        elif language_etc.find('http') != -1:
            language_description, language_url = language_etc.split('http')
            if language_description.endswith('('):
                language_description = language_description[:-1]
            language['description'] = language_description.strip()
            language['include'] = False
            if language_url.endswith(')'):
                language_url = language_url[:-1]
            language['url'] = 'http' + language_url.strip()
        else:
            language['include'] = False

    languages.append(language)

language_names = []
for language in languages:
    language_names.append(language['name'])

all_languages = []
for name in names:
    if name in language_names:
        for language in languages:
            if language['name'] == name:
                all_languages.append(language)
                continue
    else:
        language = OrderedDict()
        language['name'] = name
        language['include'] = True
        all_languages.append(language)

outfile = open('languages.json', 'w')
outfile.write(json.dumps(all_languages, indent=4))
outfile.close()
Add one-off script to convert a list of programming languages to JSON
Add one-off script to convert a list of programming languages to JSON
Python
mit
bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile
6b1f487f2ceb64b6b024b9d959a8ed5a0cd4d713
tests/rules_tests/FromRuleComputeTest.py
tests/rules_tests/FromRuleComputeTest.py
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy

"""

from unittest import main, TestCase
from grammpy import Rule


class FromRuleComputeTest(TestCase):
    pass


if __name__ == '__main__':
    main()
Add file for Rule.rule tests
Add file for Rule.rule tests
Python
mit
PatrikValkovic/grammpy
76f868137d1ee98f148032269251116887db38d5
loader.py
loader.py
from interface import Marcotti
from models.config.local import LocalConfig
from etl import get_local_handles, ingest_feeds
from etl.csv import CSV_ETL_CLASSES

if __name__ == "__main__":
    settings = LocalConfig()
    marcotti = Marcotti(settings)
    with marcotti.create_session() as sess:
        for group in ['Overview', 'Personnel', 'Match']:
            for entity, datafile in settings.CSV_DATA[group]:
                if group in ['Overview', 'Personnel']:
                    if entity == 'Venues':
                        params = (sess, settings.VENUE_EFF_DATE)
                    else:
                        params = (sess,)
                else:
                    params = (sess, settings.COMPETITION_NAME, settings.SEASON_NAME)
                if CSV_ETL_CLASSES[group][entity] is list:
                    for etl_class in CSV_ETL_CLASSES[group][entity]:
                        ingest_feeds(get_local_handles, settings.CSV_DATA_DIR,
                                     datafile, etl_class(*params))
                else:
                    ingest_feeds(get_local_handles, settings.CSV_DATA_DIR,
                                 datafile, CSV_ETL_CLASSES[group][entity](*params))
Create script to execute Marcotti ETL
Create script to execute Marcotti ETL
Python
mit
soccermetrics/marcotti
bd717b8056a69ee7074a94b3234d840dd431dd1f
src/341_flatten_nested_list_iterator.py
src/341_flatten_nested_list_iterator.py
""" This is the interface that allows for creating nested lists. You should not implement it, or speculate about its implementation """ class NestedInteger(object): def isInteger(self): """ @return True if this NestedInteger holds a single integer, rather than a nested list. :rtype bool """ def getInteger(self): """ @return the single integer that this NestedInteger holds, if it holds a single integer Return None if this NestedInteger holds a nested list :rtype int """ def getList(self): """ @return the nested list that this NestedInteger holds, if it holds a nested list Return None if this NestedInteger holds a single integer :rtype List[NestedInteger] """ class NestedIterator(object): def __init__(self, nestedList): """ Initialize your data structure here. :type nestedList: List[NestedInteger] """ self.stack = [[nestedList, 0]] def next(self): """ :rtype: int """ nestedList, i = self.stack[-1] self.stack[-1][1] += 1 return nestedList[i].getInteger() def hasNext(self): """ :rtype: bool """ stk = self.stack while stk: nestedList, i = stk[-1] if i == len(nestedList): stk.pop() else: val = nestedList[i] if val.isInteger(): return True else: stk[-1][1] += 1 stk.append([val.getList(), 0]) return False # Your NestedIterator object will be instantiated and called as such: # i, v = NestedIterator(nestedList), [] # while i.hasNext(): v.append(i.next())
Use stack to solve the problem
Use stack to solve the problem
Python
apache-2.0
zhuxiang/LeetCode-Python
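A tiny self-contained harness for the iterator. Since LeetCode supplies the real NestedInteger, the stub below is an assumption that just wraps an int or a list:

```python
class FakeNestedInteger(object):
    # Minimal stand-in for LeetCode's NestedInteger interface.
    def __init__(self, value):
        self.value = value

    def isInteger(self):
        return isinstance(self.value, int)

    def getInteger(self):
        return self.value if self.isInteger() else None

    def getList(self):
        return None if self.isInteger() else self.value


def nest(x):
    # Recursively wrap plain ints/lists into FakeNestedInteger objects.
    if isinstance(x, int):
        return FakeNestedInteger(x)
    return FakeNestedInteger([nest(item) for item in x])


# [[1, 1], 2, [1, 1]] should flatten to [1, 1, 2, 1, 1].
i, v = NestedIterator(nest([[1, 1], 2, [1, 1]]).getList()), []
while i.hasNext():
    v.append(i.next())
print(v)
```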
466b8a8fb2bdf7ca8f74316fac3e483c3ba763b5
net/data/websocket/protocol-test_wsh.py
net/data/websocket/protocol-test_wsh.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import cgi

from mod_pywebsocket import msgutil


def web_socket_do_extra_handshake(request):
    r = request.ws_resource.split('?', 1)
    if len(r) == 1:
        return
    param = cgi.parse_qs(r[1])
    if 'protocol' in param:
        request.ws_protocol = param['protocol'][0]


def web_socket_transfer_data(request):
    msgutil.send_message(request, request.ws_protocol)
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import cgi

from mod_pywebsocket import msgutil


def web_socket_do_extra_handshake(request):
    r = request.ws_resource.split('?', 1)
    if len(r) == 1:
        return
    param = cgi.parse_qs(r[1])
    if 'protocol' in param:
        request.ws_protocol = param['protocol'][0]


def web_socket_transfer_data(request):
    msgutil.send_message(request, request.ws_protocol)
    # Wait for a close message.
    unused = request.ws_stream.receive_message()
Make Pepper WebSocket UtilityGetProtocol test less flaky by making the wsh wait for close
Make Pepper WebSocket UtilityGetProtocol test less flaky by making the wsh wait for close

Attempt to fix the flakiness by making sure the server handler doesn't
exit before the client closes.

BUG=389084
R=jgraettinger,yhirano

Review URL: https://codereview.chromium.org/410383003

git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@285500 0039d316-1c4b-4281-b951-d872f2087c98
Python
bsd-3-clause
TheTypoMaster/chromium-crosswalk,dednal/chromium.src,Pluto-tv/chromium-crosswalk,M4sse/chromium.src,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Jonekee/chromium.src,crosswalk-project/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,M4sse/chromium.src,M4sse/chromium.src,M4sse/chromium.src,krieger-od/nwjs_chromium.src,Just-D/chromium-1,markYoungH/chromium.src,markYoungH/chromium.src,ondra-novak/chromium.src,axinging/chromium-crosswalk,dushu1203/chromium.src,jaruba/chromium.src,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,M4sse/chromium.src,Just-D/chromium-1,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,dushu1203/chromium.src,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,dednal/chromium.src,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,littlstar/chromium.src,Jonekee/chromium.src,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,M4sse/chromium.src,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,ondra-novak/chromium.src,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,ltilve/chromium,dednal/chromium.src,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,dushu1203/chromium.src,markYoungH/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,Chilledheart/chromium,M4sse/chromium.src,Jonekee/chromium.src,crosswalk-project/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,Chilledheart/chromium,bright-sparks/chromium-spacewalk,Chilledheart/chromium,littlstar/chromium.src,jaruba/chromium.src,bright-sparks/chromium-spacewalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,markYoungH/chromium.src,chuan9/chromium-crosswalk,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,ltilve/chromium,Jonekee/chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,dednal/chromium.src,ltilve/chromium,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,M4sse/chromium.src,jaruba/chromium.src,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,ltilve/chromium,markYoungH/chromium.src,fujunwei/chromium-crosswalk,Chilledheart/chromium,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,ltilve/chromium,jaruba/chromium.src,littlstar/chromium.src,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,Chilledheart/chromium,fujunwei/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk,jaruba/chromium.src,dednal/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk,dushu1203/chromium.src,axinging/chromium-crosswalk,Jonekee/chromium.src,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,littlstar/chromium.src,bright-sparks/chromium-spacewalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,ltilve/chromium,ondra-novak/chromium.src,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,ltilve/chromium,axinging/chromium-crosswalk,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,dushu1203/chromium.src,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,dednal/chromium.src,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,dushu1203/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk,littlstar/chromium.src,Just-D/chromium-1,littlstar/chromium.src,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,littlstar/chromium.src,Just-D/chromium-1,Jonekee/chromium.src
de802c22865e4369cb938be7d7931b7e53374059
scripts/fix_nodes_with_no_creator.py
scripts/fix_nodes_with_no_creator.py
import logging
import sys

from django.db import transaction
from website.app import setup_django
setup_django()
from osf.models import AbstractNode
from scripts import utils as script_utils

logger = logging.getLogger(__name__)


def main():
    dry = '--dry' in sys.argv
    if not dry:
        # If we're not running in dry mode log everything to a file
        script_utils.add_file_logger(logger, __file__)
    with transaction.atomic():
        qs = AbstractNode.objects.filter(creator__isnull=True)
        logger.info('Found {} nodes with no creator'.format(qs.count()))
        for node in AbstractNode.objects.filter(creator__isnull=True):
            logger.info('Setting the creator for AbstractNode {} to the first contributor'.format(node._id))
            AbstractNode.objects.filter(id=node.id).update(creator=node.contributors.first())
        if dry:
            raise Exception('Abort Transaction - Dry Run')
    print('Done')


if __name__ == '__main__':
    main()
Add one-off script to fix nodes with no creator
Add one-off script to fix nodes with no creator

OSF-8571
Python
apache-2.0
brianjgeiger/osf.io,baylee-d/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,icereval/osf.io,chennan47/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,pattisdr/osf.io,leb2dg/osf.io,laurenrevere/osf.io,felliott/osf.io,aaxelb/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,baylee-d/osf.io,leb2dg/osf.io,binoculars/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,adlius/osf.io,mattclark/osf.io,caseyrollins/osf.io,laurenrevere/osf.io,adlius/osf.io,chennan47/osf.io,chennan47/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,mattclark/osf.io,erinspace/osf.io,TomBaxter/osf.io,CenterForOpenScience/osf.io,HalcyonChimera/osf.io,crcresearch/osf.io,crcresearch/osf.io,aaxelb/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,sloria/osf.io,erinspace/osf.io,HalcyonChimera/osf.io,saradbowman/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,felliott/osf.io,adlius/osf.io,baylee-d/osf.io,crcresearch/osf.io,mfraezz/osf.io,sloria/osf.io,pattisdr/osf.io,erinspace/osf.io,CenterForOpenScience/osf.io,mattclark/osf.io,icereval/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,leb2dg/osf.io,caseyrollins/osf.io,Johnetordoff/osf.io,cslzchen/osf.io,laurenrevere/osf.io,caseyrollins/osf.io,mfraezz/osf.io,felliott/osf.io,TomBaxter/osf.io,binoculars/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,icereval/osf.io,binoculars/osf.io,leb2dg/osf.io,sloria/osf.io,TomBaxter/osf.io,mfraezz/osf.io
cd2c0f6111221990c3838a64961d24208c310a1d
snippets/python/matplotlib-colors.py
snippets/python/matplotlib-colors.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import matplotlib


def create_color_list():
    color_names = matplotlib.colors.cnames
    color_list = []
    for key, value in color_names.items():
        ivalue = int(value[1:], 16)
        rvalue = int(value[1:3], 16)
        gvalue = int(value[3:5], 16)
        bvalue = int(value[5:], 16)
        color_list.append([key, value, ivalue, rvalue, gvalue, bvalue])
    return color_list


def sort_color_list(color_list, sort_index):
    return sorted(color_list, key=lambda c: c[sort_index])


def print_color_list(color_list, sort_order=None):
    if sort_order == 'alphabetical':
        sorted_list = sort_color_list(color_list, sort_index=0)
    elif sort_order == 'value':
        sorted_list = sort_color_list(color_list, sort_index=2)
    elif sort_order == 'red':
        sorted_list = sort_color_list(color_list, sort_index=3)
    elif sort_order == 'green':
        sorted_list = sort_color_list(color_list, sort_index=4)
    elif sort_order == 'blue':
        sorted_list = sort_color_list(color_list, sort_index=5)
    else:
        # No sort order
        for item in color_list:
            key, value, ivalue, r, g, b = item
            print('{0}: {1}'.format(value, key))
        return
    for item in sorted_list:
        key, value, ivalue, r, g, b = item
        print('{0}: {1}'.format(value, key))


if __name__ == "__main__":
    color_list = create_color_list()
    print_color_list(color_list, sort_order='alphabetical')
Add printing of color codes in matplotlib
Add printing of color codes in matplotlib
Python
apache-2.0
nathanielng/code-templates,nathanielng/code-templates,nathanielng/code-templates
23336c48a1dafcea47dab68b8915add1c7ff9f4f
src/sentry/api/serializers/rest_framework/origin.py
src/sentry/api/serializers/rest_framework/origin.py
from __future__ import absolute_import

from rest_framework import serializers

from sentry.utils.http import parse_uri_match


class OriginField(serializers.CharField):
    # Special case origins that don't fit the normal regex pattern, but are valid
    WHITELIST_ORIGINS = ('*')

    def from_native(self, data):
        rv = super(OriginField, self).from_native(data)
        if not rv:
            return
        if not self.is_valid_origin(rv):
            raise serializers.ValidationError('%r is not an acceptable domain' % rv)
        return rv

    def is_valid_origin(self, value):
        if value in self.WHITELIST_ORIGINS:
            return True

        bits = parse_uri_match(value)
        # ports are not supported on matching expressions (yet)
        if ':' in bits.domain:
            return False

        return True
from __future__ import absolute_import

from rest_framework import serializers

from sentry.utils.http import parse_uri_match


class OriginField(serializers.CharField):
    # Special case origins that don't fit the normal regex pattern, but are valid
    WHITELIST_ORIGINS = ('*')

    def from_native(self, data):
        rv = super(OriginField, self).from_native(data)
        if not rv:
            return
        if not self.is_valid_origin(rv):
            raise serializers.ValidationError('%s is not an acceptable domain' % rv)
        return rv

    def is_valid_origin(self, value):
        if value in self.WHITELIST_ORIGINS:
            return True

        bits = parse_uri_match(value)
        # ports are not supported on matching expressions (yet)
        if ':' in bits.domain:
            return False

        return True
Fix formatting of API error message
fix(api): Fix formatting of API error message
Python
bsd-3-clause
mvaled/sentry,gencer/sentry,gencer/sentry,ifduyue/sentry,ifduyue/sentry,ifduyue/sentry,mvaled/sentry,looker/sentry,beeftornado/sentry,looker/sentry,looker/sentry,beeftornado/sentry,mvaled/sentry,mvaled/sentry,beeftornado/sentry,gencer/sentry,ifduyue/sentry,looker/sentry,mvaled/sentry,mvaled/sentry,gencer/sentry,gencer/sentry,ifduyue/sentry,looker/sentry
2a0544bf399dbfbdb8e6d4ef4faf91e19a3c0a15
numba/cuda/tests/cudapy/test_warning.py
numba/cuda/tests/cudapy/test_warning.py
import numpy as np

from numba import cuda
from numba.cuda.testing import CUDATestCase, skip_on_cudasim
from numba.tests.support import override_config
from numba.core.errors import NumbaPerformanceWarning

import warnings


def numba_dist_cuda(a, b, dist):
    len = a.shape[0]
    for i in range(len):
        dist[i] = a[i] * b[i]


def numba_dist_cuda2(a, b, dist):
    len = a.shape[0]
    len2 = a.shape[1]
    for i in range(len):
        for j in range(len2):
            dist[i, j] = a[i, j] * b[i, j]


@skip_on_cudasim('Large data set causes slow execution in the simulator')
class TestCUDAWarnings(CUDATestCase):
    def test_inefficient_kernel(self):
        a = np.random.rand(1024 * 1024 * 32).astype('float32')
        b = np.random.rand(1024 * 1024 * 32).astype('float32')
        dist = np.zeros(a.shape[0]).astype('float32')

        sig = 'void(float32[:], float32[:], float32[:])'
        with override_config('CUDA_LOW_OCCUPANCY_WARNINGS', 1):
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always', NumbaPerformanceWarning)
                cuda_func = cuda.jit(sig)(numba_dist_cuda)
                cuda_func[1, 1](a, b, dist)

        self.assertEqual(w[0].category, NumbaPerformanceWarning)
        self.assertIn('Grid size', str(w[0].message))
        self.assertIn('2 * SM count', str(w[0].message))

    def test_efficient_kernel(self):
        a = np.random.rand(1024 * 1024 * 128).astype('float32').\
            reshape((1024 * 1024, 128))
        b = np.random.rand(1024 * 1024 * 128).astype('float32').\
            reshape((1024 * 1024, 128))
        dist = np.zeros_like(a)

        sig = 'void(float32[:, :], float32[:, :], float32[:, :])'
        with override_config('CUDA_LOW_OCCUPANCY_WARNINGS', 1):
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always', NumbaPerformanceWarning)
                cuda_func = cuda.jit(sig)(numba_dist_cuda2)
                cuda_func[256, 256](a, b, dist)

        self.assertEqual(len(w), 0)
Add new file to test kernel efficiency warnings
Add new file to test kernel efficiency warnings
Python
bsd-2-clause
cpcloud/numba,seibert/numba,seibert/numba,cpcloud/numba,cpcloud/numba,numba/numba,stuartarchibald/numba,seibert/numba,stuartarchibald/numba,cpcloud/numba,seibert/numba,IntelLabs/numba,IntelLabs/numba,seibert/numba,stonebig/numba,cpcloud/numba,stuartarchibald/numba,stonebig/numba,stuartarchibald/numba,stonebig/numba,numba/numba,numba/numba,IntelLabs/numba,numba/numba,stonebig/numba,IntelLabs/numba,stuartarchibald/numba,numba/numba,stonebig/numba,IntelLabs/numba
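For reference, the low-occupancy heuristic these tests exercise compares the launched grid against the device's streaming-multiprocessor count. A hedged sketch of sizing a grid to clear that bar; the device attribute name follows numba's device API and is an assumption here:

```python
from numba import cuda

device = cuda.get_current_device()
threads_per_block = 256
# Aim for at least 2 blocks per SM so the kernel is not flagged as
# under-occupied (mirrors the "2 * SM count" wording asserted above).
min_blocks = 2 * device.MULTIPROCESSOR_COUNT
n = 1024 * 1024 * 32
blocks = max(min_blocks, (n + threads_per_block - 1) // threads_per_block)
# kernel[blocks, threads_per_block](a, b, dist)  # launch-shape sketch
```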
512ab87b1786f8c3d003a62337eee75beff9d960
scripts/postag2file.py
scripts/postag2file.py
# Copyright 2018 Tomas Machalek <[email protected]>
# Copyright 2018 Charles University, Faculty of Arts,
#                Institute of the Czech National Corpus
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sqlite3
import sys

if __name__ == '__main__':
    db = sqlite3.connect(sys.argv[1])
    c = db.cursor()
    c.execute('SELECT value FROM postag ORDER BY value')
    for v in c.fetchall():
        print(v[0])
Add a helper script to dump tags to a file
Add a helper script to dump tags to a file
Python
apache-2.0
czcorpus/vert-tagextract,czcorpus/vert-tagextract
0721d6129da01c693d8a28c12d66e6b55d37f964
scripts/extract_pivots_from_model.py
scripts/extract_pivots_from_model.py
#!/usr/bin/env python
import sys

import numpy as np
import torch

from learn_pivots_tm import PivotLearnerModel, StraightThroughLayer


def main(args):
    if len(args) < 1:
        sys.stderr.write("Required arguments: <model file> [num pivots (100)]\n")
        sys.exit(-1)

    num_pivots = 100
    if len(args) > 1:
        num_pivots = int(args[1])

    model = torch.load(args[0])
    vec = np.abs(model.feature.input_layer.vector.data.cpu().numpy())
    inds = np.argsort(vec)
    pivot_inds = inds[0, -num_pivots:]
    pivot_inds.sort()
    for x in pivot_inds:
        print(x)


if __name__ == '__main__':
    main(sys.argv[1:])
Add code for extracting best pivot features from saved neural model.
Add code for extracting best pivot features from saved neural model.
Python
apache-2.0
tmills/uda,tmills/uda
4b25a7d34ea17c86c4b40a09f85898ea4769b22b
airflow/contrib/operators/file_to_gcs.py
airflow/contrib/operators/file_to_gcs.py
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator

import os


class FileToGoogleCloudStorageOperator(BaseOperator):
    """
    Uploads a file to Google Cloud Storage
    """

    def __init__(self,
                 src,
                 dst,
                 bucket,
                 google_cloud_storage_conn_id='google_cloud_storage_default',
                 mime_type='application/octet-stream',
                 delegate_to=None,
                 *args,
                 **kwargs):
        """
        :param src: Path to the local file
        :type src: string
        :param dst: Destination path within the specified bucket
        :type dst: string
        :param bucket: The bucket to upload to
        :type bucket: string
        :param google_cloud_storage_conn_id: The Airflow connection ID to upload with
        :type google_cloud_storage_conn_id: string
        :param mime_type: The mime-type string
        :type mime_type: string
        :param delegate_to: The account to impersonate, if any
        :type delegate_to: string
        """
        super(FileToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
        self.src = src
        self.dst = dst
        self.bucket = bucket
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.mime_type = mime_type
        self.delegate_to = delegate_to

    def execute(self, context):
        """
        Uploads the file to Google cloud storage
        """
        hook = GoogleCloudStorageHook(
            google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
            delegate_to=self.delegate_to)

        hook.upload(
            bucket=self.bucket,
            object=self.dst,
            mime_type=self.mime_type,
            filename=self.src)
Add file to GCS operator
[AIRFLOW-125] Add file to GCS operator

Adds an operator to upload a file to Google Cloud Storage. Used as follows:

```py
from airflow.contrib.operators.file_to_gcs import FileToGoogleCloudStorageOperator

gcs = FileToGoogleCloudStorageOperator(
    bucket='a-bucket-i-have-access-to-on-gcs',
    dag=dag,
    task_id='upload_stuff',
    google_cloud_storage_conn_id='an-airflow-bigquery-connection',
    src=os.path.join(os.path.dirname(__file__), 'csv/some_file.csv'),
    dst='project/some_file.csv')
```
Python
apache-2.0
dmitry-r/incubator-airflow,r39132/airflow,ledsusop/airflow,spektom/incubator-airflow,yk5/incubator-airflow,wndhydrnt/airflow,dmitry-r/incubator-airflow,subodhchhabra/airflow,juvoinc/airflow,zodiac/incubator-airflow,lyft/incubator-airflow,sdiazb/airflow,wndhydrnt/airflow,DEVELByte/incubator-airflow,adrpar/incubator-airflow,cfei18/incubator-airflow,wooga/airflow,ronfung/incubator-airflow,ProstoMaxim/incubator-airflow,jwi078/incubator-airflow,DinoCow/airflow,hamedhsn/incubator-airflow,andrewmchen/incubator-airflow,zack3241/incubator-airflow,wileeam/airflow,jesusfcr/airflow,gilt/incubator-airflow,jgao54/airflow,jiwang576/incubator-airflow,mistercrunch/airflow,KL-WLCR/incubator-airflow,airbnb/airflow,apache/incubator-airflow,DEVELByte/incubator-airflow,gritlogic/incubator-airflow,jesusfcr/airflow,N3da/incubator-airflow,rishibarve/incubator-airflow,NielsZeilemaker/incubator-airflow,jwi078/incubator-airflow,kerzhner/airflow,jhsenjaliya/incubator-airflow,zoyahav/incubator-airflow,DEVELByte/incubator-airflow,cfei18/incubator-airflow,wooga/airflow,ronfung/incubator-airflow,ProstoMaxim/incubator-airflow,jwi078/incubator-airflow,kerzhner/airflow,jhsenjaliya/incubator-airflow,dhuang/incubator-airflow,btallman/incubator-airflow,mtdewulf/incubator-airflow,apache/airflow,preete-dixit-ck/incubator-airflow,caseyching/incubator-airflow,apache/airflow,andrewmchen/incubator-airflow,mistercrunch/airflow,d-lee/airflow,owlabs/incubator-airflow,aminghadersohi/airflow,owlabs/incubator-airflow,gritlogic/incubator-airflow,sid88in/incubator-airflow,edgarRd/incubator-airflow,akosel/incubator-airflow,jwi078/incubator-airflow,airbnb/airflow,edgarRd/incubator-airflow,jgao54/airflow,DinoCow/airflow,forevernull/incubator-airflow,biln/airflow,stverhae/incubator-airflow,akosel/incubator-airflow,r39132/airflow,janczak10/incubator-airflow,wileeam/airflow,MortalViews/incubator-airflow,plypaul/airflow,Tagar/incubator-airflow,stverhae/incubator-airflow,mistercrunch/airflow,Tagar/incubator-airflow,saguziel/incubator-airflow,mrares/incubator-airflow,forevernull/incubator-airflow,biln/airflow,zodiac/incubator-airflow,jhsenjaliya/incubator-airflow,dhuang/incubator-airflow,adamhaney/airflow,alexvanboxel/airflow,forevernull/incubator-airflow,ty707/airflow,cfei18/incubator-airflow,wooga/airflow,dud225/incubator-airflow,dgies/incubator-airflow,Twistbioscience/incubator-airflow,sid88in/incubator-airflow,wndhydrnt/airflow,skudriashev/incubator-airflow,cademarkegard/airflow,mtagle/airflow,hamedhsn/incubator-airflow,artwr/airflow,jiwang576/incubator-airflow,yati-sagade/incubator-airflow,easytaxibr/airflow,OpringaoDoTurno/airflow,wolfier/incubator-airflow,fenglu-g/incubator-airflow,apache/airflow,d-lee/airflow,jlowin/airflow,mrkm4ntr/incubator-airflow,wooga/airflow,AllisonWang/incubator-airflow,mistercrunch/airflow,yiqingj/airflow,sid88in/incubator-airflow,criccomini/airflow,yiqingj/airflow,wooga/airflow,MortalViews/incubator-airflow,yk5/incubator-airflow,holygits/incubator-airflow,Acehaidrey/incubator-airflow,criccomini/airflow,juvoinc/airflow,nathanielvarona/airflow,zoyahav/incubator-airflow,OpringaoDoTurno/airflow,adamhaney/airflow,mtagle/airflow,dgies/incubator-airflow,lyft/incubator-airflow,adrpar/incubator-airflow,jwi078/incubator-airflow,preete-dixit-ck/incubator-airflow,malmiron/incubator-airflow,lxneng/incubator-airflow,mattuuh7/incubator-airflow,alexvanboxel/airflow,apache/incubator-airflow,mtdewulf/incubator-airflow,wndhydrnt/airflow,hgrif/incubator-airflow,plypaul/airflow,Twistbioscience/incubator-airflow,plypaul/airflow,RealImpactAnalytics/airflow,ty707/airflow,adamhaney/airflow,saguziel/incubator-airflow,owlabs/incubator-airflow,cjqian/incubator-airflow,kerzhner/airflow,gtoonstra/airflow,zoyahav/incubator-airflow,danielvdende/incubator-airflow,andyxhadji/incubator-airflow,jhsenjaliya/incubator-airflow,brandsoulmates/incubator-airflow,yiqingj/airflow,Acehaidrey/incubator-airflow,artwr/airflow,stverhae/incubator-airflow,ledsusop/airflow,AllisonWang/incubator-airflow,AllisonWang/incubator-airflow,NielsZeilemaker/incubator-airflow,dmitry-r/incubator-airflow,cfei18/incubator-airflow,adrpar/incubator-airflow,apache/airflow,MetrodataTeam/incubator-airflow,wolfier/incubator-airflow,d-lee/airflow,aminghadersohi/airflow,spektom/incubator-airflow,mylons/incubator-airflow,caseyching/incubator-airflow,jfantom/incubator-airflow,yk5/incubator-airflow,ProstoMaxim/incubator-airflow,biln/airflow,saguziel/incubator-airflow,zack3241/incubator-airflow,mylons/incubator-airflow,cademarkegard/airflow,aminghadersohi/airflow,Acehaidrey/incubator-airflow,jbhsieh/incubator-airflow,dud225/incubator-airflow,yiqingj/airflow,mtagle/airflow,btallman/incubator-airflow,easytaxibr/airflow,mtagle/airflow,Fokko/incubator-airflow,jiwang576/incubator-airflow,stverhae/incubator-airflow,alexvanboxel/airflow,sergiohgz/incubator-airflow,vijaysbhat/incubator-airflow,wolfier/incubator-airflow,zack3241/incubator-airflow,skudriashev/incubator-airflow,brandsoulmates/incubator-airflow,RealImpactAnalytics/airflow,fenglu-g/incubator-airflow,jiwang576/incubator-airflow,sekikn/incubator-airflow,OpringaoDoTurno/airflow,malmiron/incubator-airflow,CloverHealth/airflow,hamedhsn/incubator-airflow,forevernull/incubator-airflow,asnir/airflow,DEVELByte/incubator-airflow,easytaxibr/airflow,cfei18/incubator-airflow,danielvdende/incubator-airflow,subodhchhabra/airflow,sekikn/incubator-airflow,Acehaidrey/incubator-airflow,vineet-rh/incubator-airflow,holygits/incubator-airflow,malmiron/incubator-airflow,ronfung/incubator-airflow,mrares/incubator-airflow,btallman/incubator-airflow,vijaysbhat/incubator-airflow,mattuuh7/incubator-airflow,edgarRd/incubator-airflow,malmiron/incubator-airflow,mtdewulf/incubator-airflow,jfantom/incubator-airflow,yk5/incubator-airflow,janczak10/incubator-airflow,airbnb/airflow,caseyching/incubator-airflow,jfantom/incubator-airflow,edgarRd/incubator-airflow,jesusfcr/airflow,andyxhadji/incubator-airflow,ronfung/incubator-airflow,sdiazb/airflow,brandsoulmates/incubator-airflow,mrares/incubator-airflow,bolkedebruin/airflow,jgao54/airflow,mattuuh7/incubator-airflow,janczak10/incubator-airflow,hgrif/incubator-airflow,gtoonstra/airflow,sekikn/incubator-airflow,caseyching/incubator-airflow,OpringaoDoTurno/airflow,jhsenjaliya/incubator-airflow,DEVELByte/incubator-airflow,wolfier/incubator-airflow,saguziel/incubator-airflow,bolkedebruin/airflow,ronfung/incubator-airflow,Tagar/incubator-airflow,rishibarve/incubator-airflow,yati-sagade/incubator-airflow,adamhaney/airflow,mylons/incubator-airflow,nathanielvarona/airflow,lxneng/incubator-airflow,juvoinc/airflow,mrkm4ntr/incubator-airflow,lyft/incubator-airflow,jbhsieh/incubator-airflow,cademarkegard/airflow,dud225/incubator-airflow,dud225/incubator-airflow,sergiohgz/incubator-airflow,danielvdende/incubator-airflow,CloverHealth/airflow,dmitry-r/incubator-airflow,asnir/airflow,andrewmchen/incubator-airflow,cjqian/incubator-airflow,cfei18/incubator-airflow,wileeam/airflow,zodiac/incubator-airflow,mrkm4ntr/incubator-airflow,ledsusop/airflow,asnir/airflow,r39132/airflow,vijaysbhat/incubator-airflow,dgies/incubator-airflow,zack3241/incubator-airflow,DinoCow/airflow,vijaysbhat/incubator-airflow,artwr/airflow,dhuang/incubator-airflow,nathanielvarona/airflow,bolkedebruin/airflow,subodhchhabra/airflow,Fokko/incubator-airflow,gilt/incubator-airflow,DinoCow/airflow,jbhsieh/incubator-airflow,MetrodataTeam/incubator-airflow,jlowin/airflow,akosel/incubator-airflow,alexvanboxel/airflow
655f2ac31df0055c06005311820848cceaea4122
nonclass.py
nonclass.py
from os import system
from time import sleep
from msvcrt import getch
from threading import Thread

system('mode 50,30')

game_running = True
direction = 1
delay = 0.07


def clear():
    system('cls')


def controller():
    global game_running, direction, delay
    while game_running:
        key = ord(getch())
        if key == 27:
            game_running = False
        elif key == 224:
            key = ord(getch())
            if key == 72:
                direction = 0
            elif key == 80:
                direction = 1
            elif key == 77:
                direction = 2
            elif key == 75:
                direction = 3
        elif key == 102:
            delay -= 0.01
        elif key == 115:
            delay += 0.01


def main():
    global direction
    snake = 'S'
    snake_pos = [0, 0]
    screen = [' '*49 for i in range(30)]
    size = (30, 49)
    inputs = Thread(target=controller)
    inputs.start()

    def blit_snake(x2, y2):
        x1, y1 = snake_pos
        screen[x1] = screen[x1][:y1] + ' ' + screen[x1][y1+1:]
        screen[x2] = screen[x2][:y2] + snake + screen[x2][y2+1:]
        snake_pos[0], snake_pos[1] = x2, y2

    blit_snake(0, 0)
    while game_running:
        if direction == 0:
            blit_snake(snake_pos[0]-1, snake_pos[1])
        elif direction == 1:
            blit_snake(snake_pos[0]+1, snake_pos[1])
        elif direction == 2:
            blit_snake(snake_pos[0], snake_pos[1]+1)
        else:
            blit_snake(snake_pos[0], snake_pos[1]-1)
        clear()
        if snake_pos[0] == size[0]-1:
            direction = 0
        elif snake_pos[0] == 0:
            direction = 1
        elif snake_pos[1] == size[1]-1:
            direction = 3
        elif snake_pos[1] == 0:
            direction = 2
        for i in screen:
            print i,
        sleep(delay)


main()
Test file added to estimate performance of program between systems.
Test file added to estimate performance of program between systems.
Python
mit
thebongy/Snake
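A note on the key codes in controller() above: on Windows consoles msvcrt.getch() returns a prefix byte (224) for extended keys and the real code on a second read, so 72/80/77/75 are the scan codes for Up/Down/Right/Left, while 102 and 115 are plain ASCII 'f' and 's' (speed up / slow down).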
a8a85a4bf0185a43b492ab8e81b558128db392aa
scripts/slice_trace.py
scripts/slice_trace.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import argparse
import codecs
import collections
import json
import math
import os


def parse_args():
    description = """Slice JSON .trace file into smaller pieces."""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        'trace_file',
        help='Path to trace file.')
    parser.add_argument(
        '--slice_size',
        help='Size of slice in Mb.',
        default=230)
    parser.add_argument(
        '--output_name',
        help='Format of slices, {slice} is the slice index.',
        default='{orig}_{slice}.trace')
    parser.add_argument(
        '--context_events_count',
        help='Number of events to carry over between slices for context.',
        default=50)
    return parser.parse_args()


def dump_slice(args, slice_index, event_list):
    orig_file, _ = os.path.splitext(args.trace_file)
    slice_file_name = args.output_name.format(
        slice=slice_index, orig=orig_file)
    print('Writing slice to', slice_file_name)
    with codecs.open(slice_file_name, 'w', 'utf-8') as slice_file:
        json.dump(event_list, slice_file)


def main():
    args = parse_args()
    slice_size_mb = int(args.slice_size)
    context_events_count = int(args.context_events_count)
    trace_size_mb = os.path.getsize(args.trace_file) / (1024 * 1024)
    slice_ratio = min(1, slice_size_mb / float(trace_size_mb))
    assert slice_ratio > 0
    with codecs.open(args.trace_file, 'r', 'utf-8') as trace:
        data = json.load(trace)
    # Accurate "enough". The default slice size is less than 256MB.
    events_per_trace = int(len(data) * slice_ratio)
    print('Total events in trace file:', len(data),
          'Events per trace', events_per_trace)
    slice_index = 0
    start_index = 0
    while start_index < len(data):
        i = max(0, start_index - context_events_count)
        j = start_index + events_per_trace + context_events_count
        dump_slice(args, slice_index, data[i:j])
        slice_index += 1
        start_index += events_per_trace


if __name__ == '__main__':
    main()
Add a script to slice traces bigger than 256MB
Add a script to slice traces bigger than 256MB Summary: The chrome://tracing viewer doesn't support files larger than 256MB. To make it possible to look at traces from larger builds, we add this simple script that splits a bigger trace into smaller chunks. Test Plan: manual Reviewed By: k21 fbshipit-source-id: 8c35a3f
Python
apache-2.0
k21/buck,robbertvanginkel/buck,rmaz/buck,vschs007/buck,sdwilsh/buck,sdwilsh/buck,clonetwin26/buck,justinmuller/buck,justinmuller/buck,raviagarwal7/buck,SeleniumHQ/buck,robbertvanginkel/buck,k21/buck,k21/buck,SeleniumHQ/buck,kageiit/buck,darkforestzero/buck,robbertvanginkel/buck,SeleniumHQ/buck,LegNeato/buck,rmaz/buck,vschs007/buck,nguyentruongtho/buck,vschs007/buck,marcinkwiatkowski/buck,davido/buck,darkforestzero/buck,OkBuilds/buck,rmaz/buck,kageiit/buck,SeleniumHQ/buck,robbertvanginkel/buck,ilya-klyuchnikov/buck,justinmuller/buck,k21/buck,Addepar/buck,robbertvanginkel/buck,janicduplessis/buck,janicduplessis/buck,daedric/buck,janicduplessis/buck,dsyang/buck,OkBuilds/buck,janicduplessis/buck,ilya-klyuchnikov/buck,illicitonion/buck,shybovycha/buck,sdwilsh/buck,nguyentruongtho/buck,dsyang/buck,raviagarwal7/buck,ilya-klyuchnikov/buck,clonetwin26/buck,marcinkwiatkowski/buck,shybovycha/buck,SeleniumHQ/buck,marcinkwiatkowski/buck,brettwooldridge/buck,janicduplessis/buck,grumpyjames/buck,Addepar/buck,JoelMarcey/buck,illicitonion/buck,raviagarwal7/buck,darkforestzero/buck,daedric/buck,ilya-klyuchnikov/buck,shybovycha/buck,shs96c/buck,robbertvanginkel/buck,marcinkwiatkowski/buck,sdwilsh/buck,marcinkwiatkowski/buck,ilya-klyuchnikov/buck,illicitonion/buck,zhan-xiong/buck,SeleniumHQ/buck,rmaz/buck,davido/buck,justinmuller/buck,davido/buck,k21/buck,davido/buck,raviagarwal7/buck,Addepar/buck,sdwilsh/buck,vschs007/buck,Addepar/buck,Addepar/buck,ilya-klyuchnikov/buck,darkforestzero/buck,shs96c/buck,illicitonion/buck,justinmuller/buck,dsyang/buck,davido/buck,dsyang/buck,zpao/buck,raviagarwal7/buck,OkBuilds/buck,dsyang/buck,dsyang/buck,Addepar/buck,LegNeato/buck,daedric/buck,SeleniumHQ/buck,facebook/buck,zpao/buck,JoelMarcey/buck,shybovycha/buck,marcinkwiatkowski/buck,JoelMarcey/buck,marcinkwiatkowski/buck,vschs007/buck,grumpyjames/buck,ilya-klyuchnikov/buck,JoelMarcey/buck,marcinkwiatkowski/buck,JoelMarcey/buck,davido/buck,LegNeato/buck,OkBuilds/buck,dsyang/buck,davido/buck,daedric/buck,Addepar/buck,justinmuller/buck,zhan-xiong/buck,nguyentruongtho/buck,daedric/buck,romanoid/buck,shs96c/buck,illicitonion/buck,zhan-xiong/buck,vschs007/buck,darkforestzero/buck,darkforestzero/buck,marcinkwiatkowski/buck,daedric/buck,vschs007/buck,robbertvanginkel/buck,illicitonion/buck,nguyentruongtho/buck,raviagarwal7/buck,zpao/buck,LegNeato/buck,SeleniumHQ/buck,grumpyjames/buck,shybovycha/buck,brettwooldridge/buck,illicitonion/buck,clonetwin26/buck,shs96c/buck,romanoid/buck,Addepar/buck,marcinkwiatkowski/buck,kageiit/buck,daedric/buck,robbertvanginkel/buck,romanoid/buck,JoelMarcey/buck,dsyang/buck,justinmuller/buck,ilya-klyuchnikov/buck,shybovycha/buck,clonetwin26/buck,romanoid/buck,vschs007/buck,brettwooldridge/buck,zhan-xiong/buck,shybovycha/buck,rmaz/buck,grumpyjames/buck,JoelMarcey/buck,janicduplessis/buck,brettwooldridge/buck,LegNeato/buck,shs96c/buck,zpao/buck,robbertvanginkel/buck,justinmuller/buck,OkBuilds/buck,janicduplessis/buck,ilya-klyuchnikov/buck,sdwilsh/buck,OkBuilds/buck,illicitonion/buck,marcinkwiatkowski/buck,justinmuller/buck,kageiit/buck,darkforestzero/buck,sdwilsh/buck,janicduplessis/buck,Addepar/buck,daedric/buck,OkBuilds/buck,shybovycha/buck,romanoid/buck,k21/buck,facebook/buck,davido/buck,rmaz/buck,zhan-xiong/buck,shs96c/buck,sdwilsh/buck,JoelMarcey/buck,dsyang/buck,davido/buck,grumpyjames/buck,JoelMarcey/buck,sdwilsh/buck,clonetwin26/buck,shybovycha/buck,clonetwin26/buck,grumpyjames/buck,ilya-klyuchnikov/buck,robbertvanginkel/buck,shybovycha/buck,justinmuller/buck,marcinkwiatkowski/buck,daedric/buck,
Addepar/buck,vschs007/buck,OkBuilds/buck,brettwooldridge/buck,kageiit/buck,zhan-xiong/buck,zhan-xiong/buck,romanoid/buck,raviagarwal7/buck,dsyang/buck,romanoid/buck,rmaz/buck,SeleniumHQ/buck,rmaz/buck,rmaz/buck,JoelMarcey/buck,darkforestzero/buck,justinmuller/buck,brettwooldridge/buck,SeleniumHQ/buck,LegNeato/buck,LegNeato/buck,ilya-klyuchnikov/buck,facebook/buck,k21/buck,k21/buck,davido/buck,daedric/buck,LegNeato/buck,brettwooldridge/buck,vschs007/buck,zpao/buck,rmaz/buck,brettwooldridge/buck,zpao/buck,romanoid/buck,vschs007/buck,romanoid/buck,raviagarwal7/buck,Addepar/buck,OkBuilds/buck,Addepar/buck,JoelMarcey/buck,brettwooldridge/buck,nguyentruongtho/buck,raviagarwal7/buck,darkforestzero/buck,ilya-klyuchnikov/buck,raviagarwal7/buck,grumpyjames/buck,sdwilsh/buck,justinmuller/buck,LegNeato/buck,clonetwin26/buck,SeleniumHQ/buck,shs96c/buck,zpao/buck,brettwooldridge/buck,illicitonion/buck,brettwooldridge/buck,darkforestzero/buck,grumpyjames/buck,JoelMarcey/buck,robbertvanginkel/buck,vschs007/buck,zhan-xiong/buck,facebook/buck,nguyentruongtho/buck,SeleniumHQ/buck,shs96c/buck,clonetwin26/buck,zhan-xiong/buck,davido/buck,zhan-xiong/buck,brettwooldridge/buck,shybovycha/buck,k21/buck,shs96c/buck,shybovycha/buck,raviagarwal7/buck,grumpyjames/buck,vschs007/buck,darkforestzero/buck,davido/buck,clonetwin26/buck,dsyang/buck,k21/buck,OkBuilds/buck,clonetwin26/buck,janicduplessis/buck,zhan-xiong/buck,LegNeato/buck,dsyang/buck,clonetwin26/buck,JoelMarcey/buck,darkforestzero/buck,sdwilsh/buck,shs96c/buck,LegNeato/buck,brettwooldridge/buck,LegNeato/buck,janicduplessis/buck,facebook/buck,kageiit/buck,raviagarwal7/buck,shs96c/buck,grumpyjames/buck,OkBuilds/buck,romanoid/buck,k21/buck,janicduplessis/buck,OkBuilds/buck,daedric/buck,illicitonion/buck,clonetwin26/buck,marcinkwiatkowski/buck,romanoid/buck,OkBuilds/buck,LegNeato/buck,clonetwin26/buck,rmaz/buck,romanoid/buck,shs96c/buck,SeleniumHQ/buck,romanoid/buck,rmaz/buck,raviagarwal7/buck,illicitonion/buck,darkforestzero/buck,Addepar/buck,nguyentruongtho/buck,daedric/buck,illicitonion/buck,justinmuller/buck,k21/buck,daedric/buck,sdwilsh/buck,zhan-xiong/buck,sdwilsh/buck,grumpyjames/buck,facebook/buck,davido/buck,rmaz/buck,shs96c/buck,kageiit/buck,dsyang/buck,zhan-xiong/buck,k21/buck,ilya-klyuchnikov/buck,grumpyjames/buck,illicitonion/buck,janicduplessis/buck,robbertvanginkel/buck,facebook/buck,shybovycha/buck,robbertvanginkel/buck
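A rough usage sketch for the slicer above (the trace file name is hypothetical):

# Split big_build.trace into ~100 MB slices, carrying the default 50
# context events across each slice boundary:
#   python scripts/slice_trace.py big_build.trace --slice_size 100
# With the default --output_name pattern '{orig}_{slice}.trace' this
# writes big_build_0.trace, big_build_1.trace, ...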
2268cc13d4af71f1fbff4af26631013664fc1d93
pycroft/_compat.py
pycroft/_compat.py
# Copyright (c) 2015 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
import sys
import operator

PY2 = sys.version_info[0] == 2

if PY2:
    chr = unichr
    text_type = unicode
    string_types = (unicode, str)
    integer_types = (int, long)
    int_to_byte = chr
    xrange = xrange
    from future_builtins import filter as ifilter, map as imap, zip as izip
    from itertools import ifilterfalse, izip_longest
    iterkeys = operator.methodcaller("iterkeys")
    itervalues = operator.methodcaller("itervalues")
    iteritems = operator.methodcaller("iteritems")
    reduce = reduce
    from StringIO import StringIO
    from cStringIO import StringIO as BytesIO
    NativeStringIO = BytesIO
else:
    chr = chr
    text_type = str
    string_types = (str, )
    integer_types = (int, )
    xrange = range
    ifilter = filter
    from itertools import filterfalse, zip_longest
    ifilterfalse = filterfalse
    izip_longest = zip_longest
    imap = map
    izip = zip
    from functools import reduce
    iterkeys = operator.methodcaller("keys")
    itervalues = operator.methodcaller("values")
    iteritems = operator.methodcaller("items")
    from io import StringIO, BytesIO
    NativeStringIO = StringIO


def with_metaclass(meta, base=object):
    return meta("NewBase", (base,), {})
Add a Python 3 compatibility helper
Add a Python 3 compatibility helper
Python
apache-2.0
agdsn/pycroft,lukasjuhrich/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,lukasjuhrich/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,agdsn/pycroft,agdsn/pycroft
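A minimal usage sketch for the with_metaclass helper defined above (class names are illustrative):

from pycroft._compat import with_metaclass

class Meta(type):
    pass

class Example(with_metaclass(Meta, object)):
    pass

assert type(Example) is Meta  # identical spelling on Python 2 and 3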
8921f9a5d991cd6962d3d16f6e72d53d06a1e86f
rgbLed.py
rgbLed.py
'''
Dr Who Box: RGB Effects LED
'''
from __future__ import print_function
import RPi.GPIO as GPIO
import time
from multiprocessing import Process
import math

# Define PINS
RED = 23
GREEN = 24
BLUE = 26

# Use numbering based on P1 header
GPIO.setmode(GPIO.BOARD)
GPIO.setup(RED, GPIO.OUT, GPIO.HIGH)
GPIO.setup(GREEN, GPIO.OUT, GPIO.HIGH)
GPIO.setup(BLUE, GPIO.OUT, GPIO.HIGH)

def rgbLed():
    red = GPIO.PWM(RED, 100)
    green = GPIO.PWM(GREEN, 100)
    blue = GPIO.PWM(BLUE, 100)
    red.start(0)
    green.start(0)
    blue.start(0)

    values = [math.sin(x * math.pi / 180.0) for x in range(0, 181)]
    values = [100-int(100 * x ** 3) for x in values]
    rValues = values
    gValues = values[45:] + values[0:45]
    bValues = values[90:] + values[0:90]

    increasing = True
    count = 0
    delay = 0.025
    while True:
        red.ChangeDutyCycle(rValues[count])
        green.ChangeDutyCycle(gValues[count])
        blue.ChangeDutyCycle(bValues[count])
        if increasing:
            count += 1
        else:
            count -= 1
        if (count >= len(values) - 1):
            increasing = False
        elif (count <= 0):
            increasing = True
        time.sleep(delay)

# Loop forever...
try:
    p = Process(target=rgbLed)
    p.start()
    while True:
        time.sleep(1)
        print(time.asctime(), 'and python is running!')
except:
    GPIO.cleanup()
    p.terminate()
Add an RGB LED for initial experimentation.
Add an RGB LED for initial experimentation.
Python
mit
davidb24v/drwho
fe7e3ca713fa1351facd9821a29cae7838458baf
Python/handle_excel.py
Python/handle_excel.py
""" 使用场景: 有三个巨大的 Excel 文件(合计约一百三十万行)存放在 src_new 下面, 这个脚本能读取这三个 Excel 文件,将所有数据按第 5 列的值分别存放到 dst_new 里的不同的文件里去, dst 文件以数据第 5 列值命名 template_new.xlsx 是一个模板文件,只含表头 Python 3 """ import openpyxl import os import shutil base_path = '/Users/mazhuang/Downloads/excels/' src_path = base_path + 'src_new/' dst_path = base_path + 'dst_new/' template_file = base_path + 'template_new.xlsx' memory_dict = {} total_rows = 0 def read_from_file(file): global total_rows global memory_dict src_workbook = openpyxl.load_workbook(filename=file, read_only=True) sheet_names = src_workbook.sheetnames for sheet_name in sheet_names: src_sheet = src_workbook[sheet_name] count = 0 for row in src_sheet.rows: count += 1 if count == 1: continue total_rows += 1 sku_group = row[4].value if sku_group == '': print('存在数据物料组为空') sku_group = '物料组为空' if sku_group not in memory_dict: memory_dict[sku_group] = [] memory_dict[sku_group].append([cell.value for cell in row]) for file in os.listdir(src_path): if file.endswith('xlsx'): read_from_file(src_path + file) print('total rows: %d' % total_rows) dst_rows = 0 for key, value in memory_dict.items(): dst_rows += len(value) print('%s, %d' % (key, len(value))) dst_file = dst_path + key + '.xlsx' if not os.path.exists(dst_file): shutil.copy(template_file, dst_file) dst_workbook = openpyxl.load_workbook(filename=dst_file) for dst_sheet_name in dst_workbook.sheetnames: dst_sheet = dst_workbook[dst_sheet_name] for row in value: dst_sheet.append(row) dst_workbook.save(dst_file) print('sku groups: %d' % len(memory_dict)) print('dst rows: %d' % dst_rows)
Add handle excel script in python
Add handle excel script in python
Python
mit
mzlogin/snippets,mzlogin/snippets,mzlogin/snippets,mzlogin/snippets,mzlogin/snippets,mzlogin/snippets,mzlogin/snippets,mzlogin/snippets
70fa99f52436ac860de01eab311215ed1a7e24c4
app.py
app.py
import os

from flask import Flask, abort, send_file

app = Flask(__name__)


@app.route("/<path:filename>/")
def get_image(filename):
    if not os.path.isfile(filename):
        abort(404)
    return send_file(filename)

if __name__ == "__main__":
    app.run()
Add functionality: photo or 404
Add functionality: photo or 404
Python
mit
DictGet/ecce-homo,DictGet/ecce-homo
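A behavioural sketch of the route above, assuming the app runs on Flask's default local port (the file paths are hypothetical):

import requests

assert requests.get('http://localhost:5000/existing.jpg/').status_code == 200
assert requests.get('http://localhost:5000/missing.jpg/').status_code == 404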
45b3a5924047a841bb01ed079104533c8cc5c0ce
run.py
run.py
#!/usr/bin/env python3
from tas import make_schedule
from tabulate import tabulate

import os
import csv

if __name__ == '__main__':
    table = make_schedule()
    tabular = tabulate(list(table), tablefmt="plain")
    print(tabular)
Add a module that can be called from other scripts
Add a module that can be called from other scripts * Assume the program's output is used as another program's input (e.g. Conky).
Python
mit
azbshiri/tas
f6b22c3c7ff33d074b18cb7344263db65bb0f40f
codingame/medium/stock_exchange_losses.py
codingame/medium/stock_exchange_losses.py
def maxLoss(lst):
    up = lst[0]
    down = lst[0]
    mini = down - up
    for i, val in enumerate(lst[:-1]):
        # We are decreasing
        if (lst[i+1] < val):
            if val > up:
                up = val
                down = val
            if lst[i+1] < down:
                down = lst[i+1]
            if (down - up) < mini:
                mini = down - up
    return mini

# Number of values
n = int(raw_input())
values = [int(x) for x in raw_input().split()]
print maxLoss(values)
Add exercise Stock Exchange Losses
Add exercise Stock Exchange Losses
Python
mit
AntoineAugusti/katas,AntoineAugusti/katas,AntoineAugusti/katas
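A hand-checked example for maxLoss above: in the series 3 2 4 2 1 5 the worst drop runs from 4 down to 1, so the function returns -3.

assert maxLoss([3, 2, 4, 2, 1, 5]) == -3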
cd4879f4793924563bc27f251bd6a7af15c8ba3a
tests/test_msgbox.py
tests/test_msgbox.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-

"""
Test code for controlling QMessageBox format.
"""

import sys
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import (QApplication, QLabel, QWidget, QMessageBox,
                             QSpinBox, QLineEdit, QPushButton,
                             QHBoxLayout, QVBoxLayout)


class TestMsgBox(QWidget):
    """Application to demonstrate the Morse Trainer 'grouping' widget."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        lbl_font = QLabel('Font size', self)
        self.spb_fontsize = QSpinBox(self)
        self.spb_fontsize.setMinimum(1)
        self.spb_fontsize.setMaximum(20)

        lbl_msg = QLabel('Message', self)
        self.led_message = QLineEdit()

        btn_test = QPushButton('Test', self)

        hbox1 = QHBoxLayout()
        hbox1.addWidget(lbl_font)
        hbox1.addWidget(self.spb_fontsize)
        hbox1.addStretch()

        hbox2 = QHBoxLayout()
        hbox2.addWidget(lbl_msg)
        hbox2.addWidget(self.led_message)
        hbox2.addStretch()

        hbox3 = QHBoxLayout()
        hbox3.addStretch()
        hbox3.addWidget(btn_test)

        vbox = QVBoxLayout(self)
        self.setLayout(vbox)
        vbox.addLayout(hbox1)
        vbox.addLayout(hbox2)
        vbox.addLayout(hbox3)

        self.setGeometry(100, 100, 800, 300)
        self.setWindowTitle('Test of QMessageBox')
        self.show()

        btn_test.clicked.connect(self.test_msgbox)

    def test_msgbox(self):
        font_size = self.spb_fontsize.value()
        message = self.led_message.text()
        msg = ['<font size=%d>' % font_size,
               'font size=%d<br>' % font_size,
               message,
               '<br>',
               '</font>']
        msgbox = QMessageBox(self)
        msgbox.setText('Koch promotion')
        msgbox.setInformativeText(''.join(msg))
        msgbox.setStandardButtons(QMessageBox.Ok)
        msgbox.setDefaultButton(QMessageBox.Ok)
        msgbox.setMinimumWidth(800)
        msgbox.setMaximumWidth(800)
        msgbox.exec()
#        QMessageBox.information(self, 'Test', ''.join(msg), QMessageBox.Ok)

app = QApplication(sys.argv)
ex = TestMsgBox()
sys.exit(app.exec())
Test program for QMessageBox formatting
Test program for QMessageBox formatting
Python
mit
rzzzwilson/morse_trainer,rzzzwilson/morse_trainer
0c9dfa7f71cee8fe41aa6cdeb234a4c8ab89ea07
convert_ffos_sms_data_to_commhistory-tool_json.py
convert_ffos_sms_data_to_commhistory-tool_json.py
#!/usr/bin/python

from json import loads
from time import strftime, gmtime
from datetime import datetime, time

json_file = open('sms.txt')
commtool = open('import.json', 'w')

isfirst = True
commtool.write('[\n')

def escape(s):
    s = repr(s)
    s = s.replace('\\', '\\\\')
    s = s.replace("\\\\x", "\\x")
    s = s.replace('\'', '\\\'')
    s = s.replace('\n', '\\n')
    return s.decode('string_escape')[2:-1]

for line in json_file:
    # Remove IndexedDB key to get the plain JSON data
    line = line.split(':', 1)[1]
    sms_entry = loads(line)
    if sms_entry['type'] == 'mms':
        continue
    if not isfirst:
        commtool.write('\t},\n')
    isfirst = False
    commtool.write('\t{\n')
    commtool.write('\t\t"type": "' + sms_entry['type'] + '",\n')
    if sms_entry['sender']:
        commtool.write('\t\t"to": "' + sms_entry['sender'] + '",\n')
        direction = 'in'
    if sms_entry['receiver']:
        commtool.write('\t\t"to": "' + sms_entry['receiver'] + '",\n')
        direction = 'out'
    commtool.write('\t\t"events": [\n')
    commtool.write('\t\t\t{\n')
    commtool.write('\t\t\t\t"direction": "' + direction + '",\n')
    commtool.write('\t\t\t\t"date": "' + datetime.utcfromtimestamp(sms_entry['timestamp'] / 1000).strftime('%Y-%m-%dT%H:%M:%S') + '",\n')
    if sms_entry['read'] == False:
        commtool.write('\t\t\t\t"unread": true,\n')
    commtool.write('\t\t\t\t"text": "' + escape(sms_entry['body']) + '"\n')
    commtool.write('\t\t\t}\n')
    commtool.write('\t\t]\n')

commtool.write('\t}\n')
commtool.write(']\n')
Add Python version of commhistory-tool converter
Add Python version of commhistory-tool converter Doesn't really work due to non-ASCII character handling, but adding nevertheless for the sake of preservation...
Python
mpl-2.0
laenion/Firefox-OS-Data-Exporter
b0bedcbd293d21e5a08ecd1acbc11b3fe8c5b7e9
osf/migrations/0002_add_lower_index_to_tags.py
osf/migrations/0002_add_lower_index_to_tags.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-30 14:55
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('osf', '0001_initial'),
    ]

    operations = [
        migrations.RunSQL(
            [
                """
                CREATE INDEX lowercase_tag_index ON osf_tag (lower(name), system);
                """
            ],
            [
                """
                DROP INDEX IF EXISTS lowercase_tag_index RESTRICT;
                """
            ]
        )
    ]
Add lower index to tags
Add lower index to tags
Python
apache-2.0
TomBaxter/osf.io,brianjgeiger/osf.io,adlius/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,sloria/osf.io,adlius/osf.io,baylee-d/osf.io,cwisecarver/osf.io,CenterForOpenScience/osf.io,chennan47/osf.io,caneruguz/osf.io,crcresearch/osf.io,chrisseto/osf.io,CenterForOpenScience/osf.io,chrisseto/osf.io,brianjgeiger/osf.io,icereval/osf.io,sloria/osf.io,Nesiehr/osf.io,hmoco/osf.io,cslzchen/osf.io,cwisecarver/osf.io,crcresearch/osf.io,laurenrevere/osf.io,caneruguz/osf.io,binoculars/osf.io,pattisdr/osf.io,laurenrevere/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,caseyrollins/osf.io,aaxelb/osf.io,mfraezz/osf.io,saradbowman/osf.io,TomBaxter/osf.io,CenterForOpenScience/osf.io,cwisecarver/osf.io,baylee-d/osf.io,cslzchen/osf.io,laurenrevere/osf.io,mfraezz/osf.io,erinspace/osf.io,erinspace/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,mfraezz/osf.io,aaxelb/osf.io,sloria/osf.io,cslzchen/osf.io,cwisecarver/osf.io,chennan47/osf.io,aaxelb/osf.io,icereval/osf.io,felliott/osf.io,binoculars/osf.io,caneruguz/osf.io,Nesiehr/osf.io,binoculars/osf.io,felliott/osf.io,chrisseto/osf.io,leb2dg/osf.io,adlius/osf.io,Johnetordoff/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,mattclark/osf.io,TomBaxter/osf.io,Johnetordoff/osf.io,brianjgeiger/osf.io,Nesiehr/osf.io,icereval/osf.io,hmoco/osf.io,adlius/osf.io,brianjgeiger/osf.io,leb2dg/osf.io,chennan47/osf.io,HalcyonChimera/osf.io,caneruguz/osf.io,felliott/osf.io,crcresearch/osf.io,leb2dg/osf.io,caseyrollins/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,erinspace/osf.io,hmoco/osf.io,pattisdr/osf.io,mattclark/osf.io,baylee-d/osf.io,HalcyonChimera/osf.io,felliott/osf.io,saradbowman/osf.io,chrisseto/osf.io,Nesiehr/osf.io,hmoco/osf.io,pattisdr/osf.io,cslzchen/osf.io
9f58e46b18775bfa0f4bf23bf6b25a1e2488c6ae
scripts/migrate.py
scripts/migrate.py
"""Script for migrating ipythonblocks grid data from SQLite to Postgres""" import json import os from pathlib import Path import sqlalchemy as sa from sqlalchemy.orm import sessionmaker # The module in the ipythonblocks.org application code that contains # table definitions from app import models # SQLite DB related variables SQLITEDB = 'sqlite:///' + str(Path.home() / 'ipborg.db') SQLITE_ENGINE = sa.create_engine(str(SQLITEDB)) SQLITE_META = sa.MetaData(bind=SQLITE_ENGINE) # Postgres DB related variables DBURL = os.environ['DATABASE_URL'] # could be local or remote server PSQL_ENGINE = sa.create_engine(DBURL) SESSION = sessionmaker(bind=PSQL_ENGINE) # columns that are serialized JSON in the SQLite DB JSONIZE_KEYS = {'python_version', 'code_cells', 'grid_data'} # drop and recreate tables in the destination DB so we're always # starting fresh models.Base.metadata.drop_all(bind=PSQL_ENGINE) models.Base.metadata.create_all(bind=PSQL_ENGINE) def sqlite_row_to_sa_row(row, sa_cls): """ Convert a row from the SQLite DB (a SQLAlchemy RowProxy instance) into an ORM instance such as PublicGrid or SecretGrid (exact class provided by the sa_cls argument). This takes care of de-serializing the JSON data stored in the SQLite DB. """ d = dict(row) for key in JSONIZE_KEYS: d[key] = json.loads(d[key]) if d[key] else None return sa_cls(**d) def sqlite_table_to_sa_rows(table_name, sa_cls): """ Yields SQLAlchemy ORM instances of sa_cls from a SQLite table specified by table_name. """ table = sa.Table(table_name, SQLITE_META, autoload=True) results = SQLITE_ENGINE.execute(table.select()) for row in results: yield sqlite_row_to_sa_row(row, sa_cls) def migrate(): """ Trigger the reading from SQLite, transformation of JSON data, and writing to Postgres. """ session = SESSION() session.add_all(sqlite_table_to_sa_rows('public_grids', models.PublicGrid)) session.add_all(sqlite_table_to_sa_rows('secret_grids', models.SecretGrid)) session.commit() if __name__ == '__main__': migrate()
Revert "removing migration script now that it's done"
Revert "removing migration script now that it's done" This reverts commit dd32cbb400fb667d05b215a60fe3682da2c1cf2b.
Python
mit
jiffyclub/ipythonblocks.org,jiffyclub/ipythonblocks.org
ea8b0193845d794b9612b0e78dffa772c15fbca3
tests/test_utils.py
tests/test_utils.py
from datetime import datetime

from flexget.utils import json


class TestJson(object):

    def test_dt_encode(self):
        date_str = '2016-03-11T17:12:17Z'
        dt = datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%SZ')
        encoded_dt = json.dumps(dt, encode_datetime=True)
        assert encoded_dt == '"%s"' % date_str

    def test_dt_decode(self):
        date_str = '"2016-03-11T17:12:17Z"'
        dt = datetime.strptime(date_str, '"%Y-%m-%dT%H:%M:%SZ"')
        decoded_dt = json.loads(date_str, decode_datetime=True)
        assert dt == decoded_dt
Add test for flexget utils (json)
Add test for flexget utils (json)
Python
mit
JorisDeRieck/Flexget,antivirtel/Flexget,poulpito/Flexget,antivirtel/Flexget,tobinjt/Flexget,crawln45/Flexget,oxc/Flexget,drwyrm/Flexget,Pretagonist/Flexget,jawilson/Flexget,Danfocus/Flexget,sean797/Flexget,LynxyssCZ/Flexget,tarzasai/Flexget,tarzasai/Flexget,dsemi/Flexget,ianstalk/Flexget,OmgOhnoes/Flexget,LynxyssCZ/Flexget,ianstalk/Flexget,Danfocus/Flexget,malkavi/Flexget,tobinjt/Flexget,JorisDeRieck/Flexget,Pretagonist/Flexget,qvazzler/Flexget,jawilson/Flexget,poulpito/Flexget,jawilson/Flexget,antivirtel/Flexget,malkavi/Flexget,tobinjt/Flexget,gazpachoking/Flexget,oxc/Flexget,jacobmetrick/Flexget,qvazzler/Flexget,JorisDeRieck/Flexget,dsemi/Flexget,Flexget/Flexget,drwyrm/Flexget,sean797/Flexget,qk4l/Flexget,tobinjt/Flexget,Danfocus/Flexget,poulpito/Flexget,crawln45/Flexget,gazpachoking/Flexget,jacobmetrick/Flexget,Flexget/Flexget,drwyrm/Flexget,ianstalk/Flexget,crawln45/Flexget,qk4l/Flexget,tarzasai/Flexget,malkavi/Flexget,LynxyssCZ/Flexget,jawilson/Flexget,Flexget/Flexget,jacobmetrick/Flexget,malkavi/Flexget,LynxyssCZ/Flexget,qvazzler/Flexget,dsemi/Flexget,oxc/Flexget,Flexget/Flexget,sean797/Flexget,OmgOhnoes/Flexget,crawln45/Flexget,Pretagonist/Flexget,JorisDeRieck/Flexget,qk4l/Flexget,Danfocus/Flexget,OmgOhnoes/Flexget
ae19d2e0bf1c7f808cd102a78bde25548bcb88b8
warehouse/cli.py
warehouse/cli.py
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals

import warehouse.migrations.cli


__commands__ = {
    "migrate": warehouse.migrations.cli.__commands__,
}
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals

import werkzeug.serving

import warehouse.migrations.cli


class ServeCommand(object):

    def __call__(self, app, host, port, reloader, debugger):
        werkzeug.serving.run_simple(host, port, app,
            use_reloader=reloader,
            use_debugger=debugger,
        )

    def create_parser(self, parser):
        parser.add_argument("-H", "--host",
            default="localhost",
            help="The host to bind the server to, defaults to localhost",
        )
        parser.add_argument("-p", "--port",
            default=6000,
            type=int,
            help="The port to bind the server to, defaults to 6000",
        )
        parser.add_argument("--no-reload",
            default=True,
            action="store_false",
            dest="reloader",
            help="Disable automatic reloader",
        )
        parser.add_argument("--no-debugger",
            default=True,
            action="store_false",
            dest="debugger",
            help="Disable Werkzeug debugger",
        )


__commands__ = {
    "migrate": warehouse.migrations.cli.__commands__,
    "serve": ServeCommand(),
}
Add a command to run the built in werkzeug server
Add a command to run the built in werkzeug server
Python
apache-2.0
robhudson/warehouse,mattrobenolt/warehouse,techtonik/warehouse,robhudson/warehouse,mattrobenolt/warehouse,mattrobenolt/warehouse,techtonik/warehouse
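Assuming the surrounding CLI maps the __commands__ dict onto argparse subcommands (that wiring is not in this file), an invocation would look roughly like:

#   warehouse serve -H 0.0.0.0 -p 8080 --no-debugger
# which ends up calling
#   ServeCommand()(app, host='0.0.0.0', port=8080, reloader=True, debugger=False)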
73c676d3ab2c4c668209dc04537561c7a2e3cd49
ideascale/migrations/0011_author_sync.py
ideascale/migrations/0011_author_sync.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('ideascale', '0010_auto_20150513_1146'),
    ]

    operations = [
        migrations.AddField(
            model_name='author',
            name='sync',
            field=models.BooleanField(default=False),
        ),
    ]
Add sync field to Author model
Add sync field to Author model
Python
mit
rebearteta/social-ideation,joausaga/social-ideation,rebearteta/social-ideation,rebearteta/social-ideation,rebearteta/social-ideation,joausaga/social-ideation,joausaga/social-ideation,joausaga/social-ideation
c7fe8e2a186d094d49c7c9218ba63bd8a7cdd024
ProjectEuler/prob15.py
ProjectEuler/prob15.py
# projecteuler.net/problem=15

def main():
    answer = LatticePaths(20)
    print(answer)

def LatticePaths(x):
    x += 1  # each item represent not cell but line (for close lattice n+1)
    arr = [[0 for i in range(0, x)] for i in range(0, x)]
    arr[0][0] = 1
    for i in range(0, x):
        arr[0][i] = 1
        arr[i][0] = 1
    for diag in range(1, x+1):
        i = diag - 2
        j = 1
        for t in range(0, diag - 2):
            arr[i-t][j+t] = arr[i-t-1][j+t] + arr[i-t][j+t-1]
    j = 1
    for diag in reversed(range(1, x)):
        i = x-1
        for t in range(0, diag):
            arr[i-t][j+t] = arr[i-t-1][j+t] + arr[i-t][j+t-1]
        j += 1
    return arr[x-1][x-1]

if __name__ == '__main__':
    main()
Update script for problem 15 on Project Euler
Update script for problem 15 on Project Euler
Python
apache-2.0
yuriyshapovalov/Prototypes,yuriyshapovalov/Prototypes,yuriyshapovalov/Prototypes
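As a sanity check on the DP above: monotonic lattice paths across an n-by-n grid have the closed form C(2n, n), so LatticePaths(20) should agree with 137846528820.

from math import factorial

def lattice_paths_closed_form(n):
    # C(2n, n): choose which n of the 2n steps go right
    return factorial(2 * n) // (factorial(n) ** 2)

assert lattice_paths_closed_form(20) == 137846528820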
b588d426ec500db82995fa830298757f7a1afd52
scripts/test_uvfits_equal.py
scripts/test_uvfits_equal.py
#! /usr/bin/env python

import argparse
import os.path as op
from uvdata.uv import UVData

parser = argparse.ArgumentParser()
parser.add_argument('uvfits1',
                    help='name of first uvfits file.')
parser.add_argument('uvfits2',
                    help='name of second uvfits file to compare to first.')

args = parser.parse_args()

uvfits_file1 = args.uvfits1
if not op.isfile(uvfits_file1):
    raise IOError('There is no file named {}'.format(args.uvfits1))

uvfits_file2 = args.uvfits2
if not op.isfile(uvfits_file2):
    raise IOError('There is no file named {}'.format(args.uvfits2))

uv1 = UVData()
uv1.read_uvfits(uvfits_file1)

uv2 = UVData()
uv2.read_uvfits(uvfits_file2)

if uv1 == uv2:
    print('uvdata objects from files are equal')
else:
    print('uvdata objects from files are not equal')
Add little script to test 2 uvfits files for equality
Add little script to test 2 uvfits files for equality
Python
bsd-2-clause
HERA-Team/pyuvdata,HERA-Team/pyuvdata,HERA-Team/pyuvdata,HERA-Team/pyuvdata
3fcaa8d45c91b8aa03f7bdbdd94beace328a765c
tests/test_convert.py
tests/test_convert.py
import pytest  # type: ignore

from ppb_vector import Vector2
from utils import *


@pytest.mark.parametrize('vector_like', UNIT_VECTOR_LIKES)  # type: ignore
def test_convert_subclass(vector_like):
    class V(Vector2): pass

    # test_binop_vectorlike already checks the output value is correct
    assert isinstance(V.convert(vector_like), V)
Add a test for Vector2.convert
Add a test for Vector2.convert See https://github.com/ppb/ppb-vector/pull/91#discussion_r241956449
Python
artistic-2.0
ppb/ppb-vector,ppb/ppb-vector
6b25614cbdec4595cedd772ca5d405cfafec741d
ipynb.py
ipynb.py
"""ipynb.py -- helper functions for working with the IPython Notebook This software is licensed under the terms of the MIT License as follows: Copyright (c) 2013 Jessica B. Hamrick Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import IPython.core import sys import time def clear_output(delay=0.0): """Clears the output of a cell. Useful for animating things like progress counters. Parameters ---------- delay : number Seconds to delay before clearing output """ sys.stdout.flush() if delay: time.sleep(delay) IPython.core.display.clear_output()
Add file for ipython notebook snippets
Add file for ipython notebook snippets
Python
mit
jhamrick/python-snippets
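A sketch of the progress-counter pattern the docstring mentions, assuming it runs in a notebook cell with clear_output (above) in scope:

for i in range(10):
    clear_output(delay=0.1)  # wipe the previous count before reprinting
    print("processed {} of 10".format(i + 1))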
dd9e80cb13d41a6faf0fb1340ea7edc949f0fab7
dash/orgs/migrations/0012_auto_20150715_1816.py
dash/orgs/migrations/0012_auto_20150715_1816.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('orgs', '0011_auto_20150710_1612'),
    ]

    operations = [
        migrations.AlterField(
            model_name='invitation',
            name='email',
            field=models.EmailField(help_text='The email to which we send the invitation of the viewer', max_length=254, verbose_name='Email'),
        ),
    ]
Add missing migration for Invitation.email
Add missing migration for Invitation.email
Python
bsd-3-clause
rapidpro/dash,peterayeni/dash,rapidpro/dash,caktus/dash,caktus/dash,peterayeni/dash
4a67508786b9c28e930a4d6ce49001f6bb9be39d
dash/orgs/migrations/0025_auto_20180321_1520.py
dash/orgs/migrations/0025_auto_20180321_1520.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-21 15:20
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('orgs', '0024_populate_org_backend'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='orgbackend',
            unique_together=set([('org', 'slug')]),
        ),
    ]
Add unique-together constraint for org and slug in the org backend (migrations)
Add unique-together constraint for org and slug in the org backend (migrations)
Python
bsd-3-clause
rapidpro/dash,rapidpro/dash
28841d9a7077293b0befab23fb3d1183006edc89
lcapy/nettransform.py
lcapy/nettransform.py
"""This module performs network transformations. Copyright 2020 Michael Hayes, UCECE """ def Z_wye_to_delta(Z1, Z2, Z3): """Perform wye to delta transformation of three impedances. This is equivalent to a tee-pi transform or a star-mesh transform.""" N = Z1 * Z2 + Z1 * Z3 + Z2 * Z3 Za = N / Z1 Zb = N / Z2 Zc = N / Z3 return Za, Zb, Zc def Z_delta_to_wye(Za, Zb, Zc): """Perform wye to delta transformation of three impedances. This is equivalent to a pi-tee transform or a mesh-star transform.""" D = Za + Zb + Zc Z1 = Zb * Zc / D Z2 = Za * Zc / D Z3 = Za * Zb / D return Z1, Z2, Z3 def Y_delta_to_wye(Ya, Yb, Yc): """Perform wye to delta transformation of three admittances. This is equivalent to a tee-pi transform or a star-mesh transform.""" N = Ya * Yb + Ya * Yc + Yb * Yc Y1 = N / Ya Y2 = N / Yb Y3 = N / Yc return Y1, Y2, Y3 def Y_wye_to_delta(Y1, Y2, Y3): """Perform wye to delta transformation of three admittances. This is equivalent to a pi-tee transform or a mesh-star transform.""" D = Y1 + Y2 + Y3 Ya = Y2 * Y3 / D Yb = Y1 * Y3 / D Yc = Y1 * Y2 / D return Ya, Yb, Yv
Add wye-delta and delta-wye transformations
Add wye-delta and delta-wye transformations
Python
lgpl-2.1
mph-/lcapy
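A quick numeric spot check of the transforms above (plain floats here; in lcapy these would normally be symbolic impedances):

Z1, Z2, Z3 = 1.0, 2.0, 3.0
Za, Zb, Zc = Z_wye_to_delta(Z1, Z2, Z3)
roundtrip = Z_delta_to_wye(Za, Zb, Zc)
assert all(abs(a - b) < 1e-9 for a, b in zip(roundtrip, (Z1, Z2, Z3)))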
0ef0f3528dfd21ff608ea5e59980856e7f673817
longclaw/longclawshipping/fields.py
longclaw/longclawshipping/fields.py
from longclaw.longclawsettings.models import LongclawSettings
from longclaw.longclawshipping.models import ShippingRate

from django_countries import countries, fields


class CountryChoices(object):
    '''
    Helper class which returns a list of available countries based on
    the selected shipping options.

    If default_shipping_enabled is ``True`` in the longclaw settings, then
    all possible countries are returned. Otherwise only countries for which
    a ``ShippingRate`` has been declared are returned.
    '''
    def __init__(self, **kwargs):
        request = kwargs.get('request', None)
        self._all_countries = True
        if request:
            settings = LongclawSettings.for_site(request.site)
            self._all_countries = settings.default_shipping_enabled

    def __call__(self, *args, **kwargs):
        if self._all_countries:
            return countries
        else:
            return ShippingRate.objects.values_list('countries').distinct()


class ShippingCountryField(fields.CountryField):
    '''
    Country choice field whose choices are constrained by the
    configured shipping options.
    '''
    def __init__(self, *args, **kwargs):
        kwargs.update({
            'countries': CountryChoices(**kwargs)
        })
        super(ShippingCountryField, self).__init__(*args, **kwargs)
Add a country field for available shipping countries
Add a country field for available shipping countries
Python
mit
JamesRamm/longclaw,JamesRamm/longclaw,JamesRamm/longclaw,JamesRamm/longclaw
02e907a97eb1cb79b5c427e6153caf8ca0009058
tests/builder_tests.py
tests/builder_tests.py
import contextlib
import json
import os

from nose.tools import istest, assert_equal

from whack.tempdir import create_temporary_dir
from whack.files import sh_script_description, plain_file, read_file
from whack.sources import PackageSource
from whack.builder import build


@istest
def build_uses_params_as_environment_variables_in_build():
    with _package_source("echo $VERSION > $1/version", {}) as package_source:
        with create_temporary_dir() as target_dir:
            build(package_source, {"version": "42"}, target_dir)
            assert_equal("42\n", read_file(os.path.join(target_dir, "version")))


@contextlib.contextmanager
def _package_source(build_script, description):
    files = [
        plain_file("whack/whack.json", json.dumps(description)),
        sh_script_description("whack/build", build_script),
    ]
    with create_temporary_dir(files) as package_source_dir:
        yield PackageSource(package_source_dir)
Add basic test for build
Add basic test for build
Python
bsd-2-clause
mwilliamson/whack
b6f2325c153b499c3b79fdb813d80e5423e4919d
flexget/plugins/services/torrent_cache.py
flexget/plugins/services/torrent_cache.py
import logging
import re

from flexget.plugin import register_plugin, priority

log = logging.getLogger('torrent_cache')

MIRRORS = ['http://torrage.com/torrent/',
           'http://torcache.net/torrent/',
           'http://zoink.it/torrent/',
           'http://torrage.ws/torrent/']


class TorrentCache(object):
    """Adds urls to torrent cache sites to the urls list."""

    @priority(-255)
    def on_feed_filter(self, feed, config):
        for entry in feed.accepted:
            info_hash = None
            if entry['url'].startswith('magnet:'):
                info_hash_search = re.search('btih:([0-9a-f]+)', entry['url'], re.IGNORECASE)
                if info_hash_search:
                    info_hash = info_hash_search.group(1)
            elif entry.get('torrent_info_hash'):
                info_hash = entry['torrent_info_hash']
            if info_hash:
                entry.setdefault('urls', [entry['url']])
                entry['urls'].extend(host + info_hash.upper() + '.torrent' for host in MIRRORS)

register_plugin(TorrentCache, 'torrent_cache', api_ver=2, builtin=True)
import logging
import re
import random

from flexget.plugin import register_plugin, priority

log = logging.getLogger('torrent_cache')

MIRRORS = ['http://torrage.com/torrent/',
           # Now using a landing page instead of going directly to the torrent
           # TODO: May be fixable by setting the referer
           #'http://torcache.net/torrent/',
           'http://zoink.it/torrent/',
           'http://torrage.ws/torrent/']


class TorrentCache(object):
    """Adds urls to torrent cache sites to the urls list."""

    @priority(-255)
    def on_feed_filter(self, feed, config):
        for entry in feed.accepted:
            info_hash = None
            if entry['url'].startswith('magnet:'):
                info_hash_search = re.search('btih:([0-9a-f]+)', entry['url'], re.IGNORECASE)
                if info_hash_search:
                    info_hash = info_hash_search.group(1)
            elif entry.get('torrent_info_hash'):
                info_hash = entry['torrent_info_hash']
            if info_hash:
                # Add the mirrors in random order
                random.shuffle(MIRRORS)
                entry.setdefault('urls', [entry['url']])
                entry['urls'].extend(host + info_hash.upper() + '.torrent' for host in MIRRORS)

register_plugin(TorrentCache, 'torrent_cache', api_ver=2, builtin=True)
Disable torcache for now. Randomize order torrent cache mirrors are added to urls list.
Disable torcache for now. Randomize order torrent cache mirrors are added to urls list. git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@2874 3942dd89-8c5d-46d7-aeed-044bccf3e60c
Python
mit
tobinjt/Flexget,tarzasai/Flexget,jacobmetrick/Flexget,offbyone/Flexget,Flexget/Flexget,tvcsantos/Flexget,qk4l/Flexget,jawilson/Flexget,sean797/Flexget,oxc/Flexget,oxc/Flexget,asm0dey/Flexget,Flexget/Flexget,thalamus/Flexget,Danfocus/Flexget,crawln45/Flexget,v17al/Flexget,JorisDeRieck/Flexget,dsemi/Flexget,vfrc2/Flexget,crawln45/Flexget,ibrahimkarahan/Flexget,dsemi/Flexget,jawilson/Flexget,gazpachoking/Flexget,thalamus/Flexget,gazpachoking/Flexget,tsnoam/Flexget,LynxyssCZ/Flexget,JorisDeRieck/Flexget,tarzasai/Flexget,offbyone/Flexget,qvazzler/Flexget,xfouloux/Flexget,lildadou/Flexget,malkavi/Flexget,X-dark/Flexget,grrr2/Flexget,cvium/Flexget,drwyrm/Flexget,voriux/Flexget,ibrahimkarahan/Flexget,antivirtel/Flexget,grrr2/Flexget,voriux/Flexget,lildadou/Flexget,X-dark/Flexget,patsissons/Flexget,xfouloux/Flexget,crawln45/Flexget,Danfocus/Flexget,cvium/Flexget,vfrc2/Flexget,ratoaq2/Flexget,Pretagonist/Flexget,camon/Flexget,sean797/Flexget,jawilson/Flexget,xfouloux/Flexget,OmgOhnoes/Flexget,ianstalk/Flexget,v17al/Flexget,ZefQ/Flexget,qk4l/Flexget,tobinjt/Flexget,Pretagonist/Flexget,malkavi/Flexget,patsissons/Flexget,jawilson/Flexget,JorisDeRieck/Flexget,spencerjanssen/Flexget,vfrc2/Flexget,Danfocus/Flexget,poulpito/Flexget,LynxyssCZ/Flexget,antivirtel/Flexget,tobinjt/Flexget,ibrahimkarahan/Flexget,antivirtel/Flexget,ratoaq2/Flexget,qvazzler/Flexget,ianstalk/Flexget,drwyrm/Flexget,ZefQ/Flexget,tsnoam/Flexget,malkavi/Flexget,tobinjt/Flexget,malkavi/Flexget,ZefQ/Flexget,spencerjanssen/Flexget,jacobmetrick/Flexget,OmgOhnoes/Flexget,offbyone/Flexget,dsemi/Flexget,LynxyssCZ/Flexget,drwyrm/Flexget,poulpito/Flexget,Flexget/Flexget,patsissons/Flexget,OmgOhnoes/Flexget,camon/Flexget,tvcsantos/Flexget,asm0dey/Flexget,JorisDeRieck/Flexget,crawln45/Flexget,grrr2/Flexget,asm0dey/Flexget,ratoaq2/Flexget,lildadou/Flexget,X-dark/Flexget,poulpito/Flexget,Pretagonist/Flexget,ianstalk/Flexget,jacobmetrick/Flexget,LynxyssCZ/Flexget,sean797/Flexget,oxc/Flexget,cvium/Flexget,Flexget/Flexget,thalamus/Flexget,qvazzler/Flexget,tarzasai/Flexget,spencerjanssen/Flexget,v17al/Flexget,qk4l/Flexget,Danfocus/Flexget,tsnoam/Flexget
2a8f064733892b86c2041f3294d5efebd4b565d9
txircd/modules/extra/channelopaccess.py
txircd/modules/extra/channelopaccess.py
from twisted.plugin import IPlugin
from txircd.module_interface import IMode, IModuleData, Mode, ModuleData
from txircd.utils import ModeType
from zope.interface import implements

class ChannelOpAccess(ModuleData, Mode):
    implements(IPlugin, IModuleData, IMode)

    name = "ChannelOpAccess"
    affectedActions = {
        "checkchannellevel": 10,
        "checkexemptchanops": 10
    }

    def actions(self):
        return [ ("modeactioncheck-channel-W-checkchannellevel", 1, self.checkMode),
                 ("modeactioncheck-channel-W-checkexemptchanops", 1, self.checkMode) ]

    def channelModes(self):
        return [ ("W", ModeType.List, self) ]

    def checkMode(self, channel, checkType, paramChannel, user):
        if "W" not in channel.mode:
            return None
        for paramData in channel.mode["W"]:
            if paramData[0] == checkType:
                return paramData[0]
        return None

    def checkSet(self, channel, param):
        checkedParams = []
        for parameter in param.split(","):
            status, permissionType = parameter.split(":", 1)
            if status not in self.ircd.channelStatuses:
                continue
            checkedParams.append(parameter)
        return checkedParams

    def apply(self, actionType, channel, param, checkType, paramChannel, user):
        status, permissionType = param.split(":", 1)
        if permissionType != checkType:
            return None
        if status not in self.ircd.channelStatuses:
            return False
        # For security, we'll favor those that were restricting permissions
        # while a certain status was loaded.
        level = self.ircd.channelStatuses[status][1]
        return (channel.userRank(user) >= level)

chanAccess = ChannelOpAccess()
Allow channel ops to change the required level for specific permissions
Allow channel ops to change the required level for specific permissions
Python
bsd-3-clause
Heufneutje/txircd,ElementalAlchemist/txircd
a00822aeb17eabde80adf16c30498472d5775159
nclxd/tests/test_container_image.py
nclxd/tests/test_container_image.py
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import ddt
import mock

from nova import test

from nclxd.nova.virt.lxd import container_image
from nclxd import tests


@ddt.ddt
@mock.patch.multiple('nclxd.nova.virt.lxd.container_utils'
                     '.LXDContainerDirectories',
                     get_base_dir=mock.Mock(return_value='/fake/path'),
                     get_container_image=mock.Mock(
                         return_value='/fake/image/path'))
class LXDTestContainerImage(test.NoDBTestCase):

    def setUp(self):
        super(LXDTestContainerImage, self).setUp()
        self.container_image = container_image.LXDContainerImage()
        alias_patcher = mock.patch.object(self.container_image.lxd,
                                          'alias_list',
                                          return_value=['alias'])
        alias_patcher.start()
        self.addCleanup(alias_patcher.stop)

    def test_fetch_image_existing_alias(self):
        instance = tests.MockInstance()
        context = {}
        image_meta = {'name': 'alias'}
        self.assertEqual(None,
                         self.container_image.fetch_image(context, instance,
                                                          image_meta))

    @mock.patch('os.path.exists')
    @mock.patch('nova.openstack.common.fileutils.ensure_tree')
    @ddt.data(True, False)
    def test_fetch_image_existing_file(self, base_exists, mt, mo):
        mo.side_effect = [base_exists, True]
        instance = tests.MockInstance()
        context = {}
        image_meta = {'name': 'new_image'}
        self.assertEqual(None,
                         self.container_image.fetch_image(context, instance,
                                                          image_meta))
        if base_exists:
            self.assertFalse(mt.called)
        else:
            mt.assert_called_once_with('/fake/path')
        self.assertEqual([mock.call('/fake/path'),
                          mock.call('/fake/image/path')],
                         mo.call_args_list)
Add tests for existing image
Add tests for existing image
Python
apache-2.0
tpouyer/nova-lxd,Saviq/nova-compute-lxd,tpouyer/nova-lxd,Saviq/nova-compute-lxd
e2c52c768420357b43394df622a32629155c927e
tests/unit/test_cli.py
tests/unit/test_cli.py
from hamcrest import assert_that, is_

import pytest

from pyhttp.cli import parse_args


def describe_parse_args():

    def it_returns_parsed_arguments():
        args = parse_args(['-c', '100', 'http://example.com'])

        assert_that(args.concurrency, is_(100))
        assert_that(args.url, is_('http://example.com'))
Add cli arguments parser tests
Add cli arguments parser tests
Python
mit
tesonet/pyhttp
c699a331ed8976069731f6bc7f61871123810865
wafer/talks/migrations/0002_auto_20150813_2327.py
wafer/talks/migrations/0002_auto_20150813_2327.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('talks', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='talk',
            options={'permissions': (('view_all_talks', 'Can see all talks'),)},
        ),
    ]
Add migration for view_all_talks permission.
Add migration for view_all_talks permission.
Python
isc
CTPUG/wafer,CTPUG/wafer,CTPUG/wafer,CarlFK/wafer,CarlFK/wafer,CarlFK/wafer,CarlFK/wafer,CTPUG/wafer
9386120ee2d5e27374a53c10ca45bf7b6f0d2e6e
tests/test_listener.py
tests/test_listener.py
#!/usr/bin/env python
import pytest

import pg_bawler.core


@pytest.mark.asyncio
async def test_simple_listen():

    class NotificationListener(
        pg_bawler.core.BawlerBase,
        pg_bawler.core.ListenerMixin
    ):
        pass

    class NotificationSender(
        pg_bawler.core.BawlerBase,
        pg_bawler.core.SenderMixin
    ):
        pass

    connection_params = dict(
        dbname='pg_bawler_test',
        user='pg_bawler_test',
        host='postgres',
        password='postgres')

    nl = NotificationListener(connection_params=connection_params)
    ns = NotificationSender(connection_params=connection_params)

    payload = 'aaa'
    channel_name = 'pg_bawler_test'

    await nl.register_channel(channel='pg_bawler_test')
    await ns.send(channel=channel_name, payload=payload)

    notification = await nl.get_notification()
    assert notification.channel == channel_name
    assert notification.payload == payload
Add initial test for listener module
Add initial test for listener module
Python
bsd-3-clause
beezz/pg_bawler,beezz/pg_bawler
ad1121b941a694b7cb6a65e7e6bf4839147f7551
utils/verify_alerts.py
utils/verify_alerts.py
#!/usr/bin/env python

import os
import sys
from os.path import dirname, join, realpath
from optparse import OptionParser

# Get the current working directory of this file.
# http://stackoverflow.com/a/4060259/120999
__location__ = realpath(join(os.getcwd(), dirname(__file__)))

# Add the shared settings file to namespace.
sys.path.insert(0, join(__location__, '..', 'src'))
import settings

# Add the analyzer file to namespace.
sys.path.insert(0, join(__location__, '..', 'src', 'analyzer'))
from alerters import trigger_alert

parser = OptionParser()
parser.add_option("-t", "--trigger", dest="trigger", default=False,
                  help="Actually trigger the appropriate alerts (default is False)")
parser.add_option("-m", "--metric", dest="metric", default='skyline.horizon.queue_size',
                  help="Pass the metric to test (default is skyline.horizon.queue_size)")
(options, args) = parser.parse_args()

try:
    alerts_enabled = settings.ENABLE_ALERTS
    alerts = settings.ALERTS
except:
    print "Exception: Check your settings file for the existence of ENABLE_ALERTS and ALERTS"
    sys.exit()

print 'Verifying alerts for metric: "' + options.metric + '"'

# Send alerts
if alerts_enabled:
    print 'Alerts Enabled'
    for alert in alerts:
        print 'Checking metric name contains: "' + alert[0] + '" to send via ' + alert[1]
        if alert[0] in options.metric:
            print '...Trigger alert via ' + alert[1]
            if options.trigger:
                trigger_alert(alert, options.metric)
else:
    print 'Alerts are Disabled'
Add script to test/verify alert configuration
Add script to test/verify alert configuration We're looking to build out another alerter or two, and wanted a way to test our current alert configurations as well as be able to trigger alerts as we develop new alerters.
Python
mit
aelialper/skyline,hcxiong/skyline,loggly/skyline,triplekill/skyline,PaytmLabs/skyline,100star/skyline,CDKGlobal/skyline,hcxiong/skyline,loggly/skyline,sdgdsffdsfff/skyline,klynch/skyline,aelialper/skyline,triplekill/skyline,100star/skyline,sdgdsffdsfff/skyline,hcxiong/skyline,loggly/skyline,sdgdsffdsfff/skyline,PaytmLabs/skyline,pombredanne/skyline,aelialper/skyline,etsy/skyline,hcxiong/skyline,sdgdsffdsfff/skyline,PaytmLabs/skyline,etsy/skyline,triplekill/skyline,klynch/skyline,loggly/skyline,pombredanne/skyline,pombredanne/skyline,MyNameIsMeerkat/skyline,aelialper/skyline,etsy/skyline,CDKGlobal/skyline,etsy/skyline,klynch/skyline,PaytmLabs/skyline,triplekill/skyline,pombredanne/skyline,CDKGlobal/skyline,MyNameIsMeerkat/skyline,CDKGlobal/skyline
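Example invocations, assuming a populated settings file (the metric shown is the script's own default):

#   python utils/verify_alerts.py -m skyline.horizon.queue_size         # report only
#   python utils/verify_alerts.py -m skyline.horizon.queue_size -t True # actually fire alerts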
d195358bc5814301e91a97f5d2ae95ede4a72bbc
scripts/feature/warnings/simple_county.py
scripts/feature/warnings/simple_county.py
import matplotlib.colors as mpcolors
import matplotlib.cm as cm
import psycopg2
import numpy as np
from pyiem.plot import MapPlot

POSTGIS = psycopg2.connect(database='postgis', host='localhost',
                           user='nobody', port=5555)
pcursor = POSTGIS.cursor()

cmap = cm.get_cmap("jet")
cmap.set_under("#ffffff")
cmap.set_over("black")

m = MapPlot(sector='conus', axisbg='#EEEEEE',
            title='Hour of Day with Most Number of Severe T\'Storm Warnings Issued',
            subtitle='Hours presented are local to the NWS Office that issued the warning',
            cwas=True)

bins = np.arange(0, 25, 1)
norm = mpcolors.BoundaryNorm(bins, cmap.N)

pcursor.execute("""
    WITH data as (
        SELECT ugc, issue at time zone tzname as v
        from warnings w JOIN stations t
        ON (w.wfo = (case when length(t.id) = 4 then substr(t.id, 1, 3)
                     else t.id end))
        WHERE t.network = 'WFO' and phenomena = 'SV' and significance = 'W'
        and issue is not null),
    agg as (
        SELECT ugc, extract(hour from v) as hr, count(*) from data
        GROUP by ugc, hr),
    ranks as (
        SELECT ugc, hr, rank() OVER (PARTITION by ugc ORDER by count DESC)
        from agg)

    SELECT ugc, hr from ranks where rank = 1
""")

data = {}
for row in pcursor:
    data[row[0]] = float(row[1])

cl = ['Mid', '', '2 AM', '', '4 AM', '', '6 AM', '', '8 AM', '', '10 AM', '',
      'Noon', '', '2 PM', '', '4 PM', '', '6 PM', '', '8 PM', '', '10 PM', '']

m.fill_ugcs(data, bins, cmap=cmap, units='Hour of Day',
            clevstride=2, clevlabels=cl)
m.postprocess(filename='test.png')
Make some twitter plots of SVR,TOR freq hour
Make some twitter plots of SVR,TOR freq hour https://twitter.com/akrherz/status/703776738422050816 https://twitter.com/akrherz/status/703776599057895424
Python
mit
akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem
e3f97f30291a407a54a021d7e4a1805e75c1ec41
midonet/neutron/api.py
midonet/neutron/api.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (C) 2014 Midokura SARL.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from midonet.neutron.common import util
from neutron.api.v2 import base

CREATE = base.Controller.CREATE
DELETE = base.Controller.DELETE
LIST = base.Controller.LIST
SHOW = base.Controller.SHOW
UPDATE = base.Controller.UPDATE


@util.generate_methods(LIST, SHOW, CREATE, UPDATE, DELETE)
class TunnelzoneHandlerMixin(object):
    """The mixin of the request handler for the tunnel zones."""


@util.generate_methods(LIST, SHOW, CREATE, UPDATE, DELETE)
class TunnelzonehostHandlerMixin(object):
    """The mixin of the request handler for the tunnel zone hosts."""


class MidoNetApiMixin(TunnelzoneHandlerMixin,
                      TunnelzonehostHandlerMixin):
    """MidoNet REST API plugin."""
Add Tunnel Zone and Tunnel Zone Host API handlers
Add Tunnel Zone and Tunnel Zone Host API handlers This patch adds TunnelzoneHandlerMixin and TunnelzonehostHandlerMixin to the core plugin using generate_methods decorators. This patch also adds symbols for 'list', 'show', 'create', 'update' and 'delete'. String literals like these are easy to mistype, so the symbols let the static checker catch typos. Change-Id: Ia652c5afa0e00a358325b80d6c6dab27152ced56 Signed-off-by: Taku Fukushima <[email protected]>
Python
apache-2.0
midonet/python-neutron-plugin-midonet,midonet/python-neutron-plugin-midonet,JoeMido/networking-midonet,yamt/networking-midonet,midokura/python-neutron-plugin-midonet,yamt/networking-midonet,JoeMido/networking-midonet,midokura/python-neutron-plugin-midonet
b6bb33aae60239d882cd86640313ed9e9802a5cc
neutron/tests/unit/ml2/test_type_local.py
neutron/tests/unit/ml2/test_type_local.py
# Copyright (c) 2014 Thales Services SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.common import exceptions as exc
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_local
from neutron.tests import base


class LocalTypeTest(base.BaseTestCase):

    def setUp(self):
        super(LocalTypeTest, self).setUp()
        self.driver = type_local.LocalTypeDriver()
        self.session = None

    def test_validate_provider_segment(self):
        segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL}
        self.driver.validate_provider_segment(segment)

    def test_validate_provider_segment_with_unallowed_physical_network(self):
        segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL,
                   api.PHYSICAL_NETWORK: 'phys_net'}
        self.assertRaises(exc.InvalidInput,
                          self.driver.validate_provider_segment, segment)

    def test_validate_provider_segment_with_unallowed_segmentation_id(self):
        segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL,
                   api.SEGMENTATION_ID: 2}
        self.assertRaises(exc.InvalidInput,
                          self.driver.validate_provider_segment, segment)

    def test_reserve_provider_segment(self):
        segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL}
        self.driver.reserve_provider_segment(self.session, segment)
        self.driver.release_segment(self.session, segment)

    def test_allocate_tenant_segment(self):
        expected = {api.NETWORK_TYPE: p_const.TYPE_LOCAL}
        observed = self.driver.allocate_tenant_segment(self.session)
        self.assertEqual(expected, observed)
Add local type driver unittests
Add local type driver unittests Partial-Bug: #1269127 Change-Id: I5b34dc09128bcb879ea46be64cc5104eeefd4ab4
Python
apache-2.0
gkotton/vmware-nsx,gkotton/vmware-nsx
1edb86e0c92bb186fccd9e0179b0d8b6dcc27902
pygments/styles/igor.py
pygments/styles/igor.py
"""Pygments style that imitates the official coloring of the Igor Pro code editor."""

from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String


class IgorStyle(Style):
    default_style = ""
    styles = {
        Comment: 'italic #FF0000',
        Keyword: '#0000FF',
        Name.Function: '#C34E00',
        Name.Class: '#007575',
        String: '#009C00'
    }
Add custom style which imitates the official coloring
Add custom style which imitates the official coloring

--HG--
branch : igor-pro-changes-v2
Python
bsd-2-clause
kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial
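A Style subclass like IgorStyle plugs into the standard pygments pipeline by passing it as the formatter's style option. A minimal usage sketch — the snippet being highlighted and the choice of PythonLexer are arbitrary stand-ins (the matching Igor lexer lives on the same branch):

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from pygments.styles.igor import IgorStyle

# full=True makes the formatter emit a complete HTML page with the style's CSS inlined.
formatter = HtmlFormatter(style=IgorStyle, full=True)
print(highlight('print("hello")', PythonLexer(), formatter))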
a47b82f7feb18da55cf402e363508141764a180f
2014/round-1/labelmaker-v2.py
2014/round-1/labelmaker-v2.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-


def solve():
    # L is the label alphabet, N the 1-based label number.
    L, N = input().split(' ')
    N = int(N)
    result = ''
    # Bijective base-len(L) conversion: there is no zero digit, so
    # subtract one before taking each remainder.
    while N > 0:
        N -= 1
        result = L[N % len(L)] + result
        N //= len(L)
    return result


def main():
    T = int(input())
    for i in range(T):
        print('Case #{i}: {answer}'.format(i=i+1, answer=solve()))


if __name__ == '__main__':
    main()
Add solution v2 for Labelmaker.
Add solution v2 for Labelmaker.
Python
mit
changyuheng/hacker-cup-solutions
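The loop in solve() performs bijective base-len(L) conversion: labels have no zero digit, which is why one is subtracted before each remainder. A standalone demonstration, with the alphabet and label numbers hardcoded for illustration:

def label(letters, n):
    # Same algorithm as solve(), factored out for a quick check.
    result = ''
    while n > 0:
        n -= 1
        result = letters[n % len(letters)] + result
        n //= len(letters)
    return result

# With the alphabet 'AB' the labels run A, B, AA, AB, BA, BB, ...
for n in range(1, 7):
    print(n, label('AB', n))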
da5a3d5bf9d7a6130f05339f36798b3e5d93e7a2
scripts/prism_4km_monthly_modern.py
scripts/prism_4km_monthly_modern.py
#retriever """Retriever script for direct download of PRISM climate data""" from retriever.lib.templates import Script import urlparse class main(Script): def __init__(self, **kwargs): Script.__init__(self, **kwargs) self.name = "PRISM Climate Data" self.shortname = "PRISM" self.ref = "http://prism.oregonstate.edu/" self.urls = {"climate": "http://services.nacse.org/prism/data/public/4km/"} def get_file_names(self, clim_var, mval, year, month): """Create a list of all filenames in a given monthly data zip file """ file_extensions = ['bil', 'bil.aux.xml', 'hdr', 'info.txt', 'prj', 'stx', 'xml'] file_names = [] for extension in file_extensions: file_names.append("PRISM_{}_stable_4km{}_{}{}_bil.{}".format(clim_var, mval, year, month, extension)) return file_names def download(self, engine=None, debug=False): if engine.name != "Download Only": raise Exception("The PRISM dataset contains only non-tabular data files, and can only be used with the 'download only' engine.") Script.download(self, engine, debug) clim_vars = ['ppt', 'tmax', 'tmean', 'tmin'] years = range(1981, 2015) months = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12'] for clim_var in clim_vars: mval = "M3" if clim_var == 'ppt' else "M2" for year in years: for month in months: file_names = self.get_file_names(clim_var, mval, year, month) file_url = urlparse.urljoin(self.urls["climate"], "{}/{}{}".format(clim_var, year, month)) self.engine.download_files_from_archive(file_url, file_names, archive_prefix='PRISM_{}_'.format(clim_var)) self.engine.register_files(file_names) SCRIPT = main()
Add script for downloading the modern PRISM climate data
Add script for downloading the modern PRISM climate data

The modern PRISM data provides monthly raster precipitation and temperature measures for the United States from 1981 to present.
Python
mit
goelakash/retriever,davharris/retriever,goelakash/retriever,davharris/retriever,henrykironde/deletedret,embaldridge/retriever,embaldridge/retriever,embaldridge/retriever,henrykironde/deletedret,davharris/retriever
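The naming scheme the script assembles can be sanity-checked in isolation. A standalone sketch reproducing get_file_names — the sample output below is simply what the format string yields, not a verified listing from the PRISM server:

def get_file_names(clim_var, mval, year, month):
    # One file name per extension in each monthly zip archive.
    extensions = ['bil', 'bil.aux.xml', 'hdr', 'info.txt', 'prj', 'stx', 'xml']
    return ['PRISM_{}_stable_4km{}_{}{}_bil.{}'.format(clim_var, mval, year, month, ext)
            for ext in extensions]

print(get_file_names('ppt', 'M3', 1981, '01')[0])
# PRISM_ppt_stable_4kmM3_198101_bil.bil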
62513585437d2c86f8668eb26eebb6f8c5a1a656
Testiranje/Serijski.py
Testiranje/Serijski.py
'''
Created on Nov 24, 2013

@author: gregor
'''
from serial import Serial
from threading import Thread


def receiving(ser):
    # Echo every line read from the serial port.
    while True:
        print(ser.readline())


if __name__ == '__main__':
    # Fully explicit settings would look like:
    # ser = Serial(port='/dev/ttyACM1', baudrate=9600, bytesize=EIGHTBITS,
    #              parity=PARITY_NONE, stopbits=STOPBITS_ONE, timeout=0.1)
    ser = Serial('/dev/ttyACM1', 9600)
    Thread(target=receiving, args=(ser,)).start()
Test of reading the serial interface on Ubuntu. Reads from an Arduino UNO device.
Test of reading the serial interface on Ubuntu. Reads from an Arduino UNO device.
Python
mit
blazdivjak/rzpproject
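For comparison, the same read-in-a-thread pattern with a stop event so the reader can be shut down cleanly — a sketch under the assumption that the board enumerates as /dev/ttyACM0; the pyserial calls themselves are unchanged:

import threading
from serial import Serial

def receiving(ser, stop):
    while not stop.is_set():
        line = ser.readline()  # returns b'' when the timeout expires
        if line:
            print(line)

stop = threading.Event()
ser = Serial('/dev/ttyACM0', 9600, timeout=1)  # timeout keeps readline from blocking forever
worker = threading.Thread(target=receiving, args=(ser, stop))
worker.start()
try:
    worker.join()
except KeyboardInterrupt:
    stop.set()
    ser.close()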