commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---|
6855bfdc910c0c74743906f195f430817f2399b3 | Add rel-fra creation | moreymat/omw-graph,moreymat/omw-graph,moreymat/omw-graph | omwg/fra2tab.py | omwg/fra2tab.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Extract synset-word pairs from the WOLF (Wordnet Libre du Français)
# Remap 'b' to 'r'
# Some clean up (remove ' ()', '|fr.*')
import sys, re
import codecs, collections
### Change this!
wndata = "../wordnet/"
wnname = "WOLF (Wordnet Libre du Français)"
wnurl = "http://alpage.inria.fr/~sagot/wolf-en.html"
wnlang = "fra"
wnlicense = "CeCILL-C"
wnversion = "1.0b"
#
# header
#
outfile = "wn-data-%s.tab" % wnlang
o = codecs.open(outfile, "w", "utf-8" )
o.write("# %s\t%s\t%s\t%s\n" % (wnname, wnlang, wnurl, wnlicense))
#
# Data is in the file wolf-1.0b.xml
#<?xml version="1.0" encoding="utf-8"?>
#<!DOCTYPE WN SYSTEM "debvisdic-strict.dtd">
#<WN>
#<SYNSET><ILR type="near_antonym">eng-30-00002098-a</ILR><ILR type="be_in_state">eng-30-05200169-n</ILR><ILR type="be_in_state">eng-30-05616246-n</ILR><ILR type="eng_derivative">eng-30-05200169-n</ILR><ILR type="eng_derivative">eng-30-05616246-n</ILR><ID>eng-30-00001740-a</ID><SYNONYM><LITERAL lnote="2/2:fr.csbgen,fr.csen">comptable</LITERAL></SYNONYM><DEF>(usually followed by `to') having the necessary means or skill or know-how or authority to do something</DEF><USAGE>able to swim</USAGE><USAGE>she was able to program her computer</USAGE><USAGE>we were at last able to buy a car</USAGE><USAGE>able to get a grant for the project</USAGE><BCS>3</BCS><POS>a</POS></SYNSET>
synset = str()
lemma = str()
### need to do some cleanup, so store once to remove duplicates
wn = collections.defaultdict(set)
hyper = collections.defaultdict(list)
f = codecs.open(wndata + 'wolf-1.0b4.xml', 'rb', encoding='utf-8')
for l in f:
m = re.search(r'<ID>eng-30-(.*-[avnrb])<\/ID>',l)
if(m):
synset = m.group(1).strip().replace('-b', '-r')
i = re.finditer(r"<LITERAL[^>]*>([^<]+)<",l)
for m in i:
lemma = m.group(1).strip()
#lemma = re.sub(r'[ _]\(.*\)', ' ', lemma).strip()
#lemma = re.sub(r'\|fr.*$', '', lemma).strip()
if lemma != '_EMPTY_':
wn[synset].add(lemma)
i = re.finditer(r"<ILR type=\"hypernym\">([^<]+)<", l)
for m in i:
if lemma != '_EMPTY_':
parts = m.group(1).strip().replace('-b', '-r').split('-')
key = parts[2] + '-' + parts[3]
hyper[key].append(synset)
for synset in sorted(wn):
for lemma in wn[synset]:
o.write("%s\t%s:%s\t%s\n" % (synset, wnlang, 'lemma', lemma))
rels = open('rels-fra.csv', 'a')
for key in hyper.keys():
for g in wn[key]:
for g2 in hyper[key]:
for w in wn[g2]:
g = str(g)
w = str(w)
g = g.replace(" ", "_")
w = w.replace(" ", "_")
rels.write(str(key)+str(g)+'fra' + '\t' + str(g2)+str(w)+'fra' + '\t' + str(g) + '\t' + str(w) + '\tHYPER' + '\n')
rels.write(str(g2)+str(w)+'fra' + '\t' +str(key)+str(g)+'fra' + '\t' + str(w) + '\t' + str(g) + '\tHYPO' + '\n')
| mit | Python |
|
4731e99882d035a59555e5352311d00c4e122f09 | Print useful information about a GTFS feed | transitland/transitland-python-client,srthurman/transitland-python-client | onestop/info.py | onestop/info.py | """Provide useful information about a GTFS file."""
import argparse
import geohash
import gtfs
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='GTFS Information')
parser.add_argument('filename', help='GTFS File')
parser.add_argument('--debug', help='Show helpful debugging information', action='store_true')
args = parser.parse_args()
g = gtfs.GTFSReader(args.filename)
stops_centroid = g.stops_centroid()
stops_centroid_geohash = g.stops_geohash(debug=args.debug)
print "==== GTFS:", g.filename
print "Stops centroid:",stops_centroid
print "Stops centroid geohash:", geohash.encode(stops_centroid)
print "Stops centroid geohash with all stops in neighbors:", stops_centroid_geohash
| mit | Python |
|
335881f4644a6bb2b5f2abb5b193f39d304dbc71 | Fix user agent for the bnn_ sites | chilland/scraper,openeventdata/scraper | pages_scrape.py | pages_scrape.py | import logging
import requests
def scrape(url, extractor):
"""
Function to request and parse a given URL. Returns only the "relevant"
text.
Parameters
----------
url : String.
URL to request and parse.
extractor : Goose class instance.
An instance of Goose that allows for parsing of content.
Returns
-------
text : String.
Parsed text from the specified website.
meta : String.
Parsed meta description of an article. Usually equivalent to the
lede.
"""
logger = logging.getLogger('scraper_log')
try:
headers = {'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36"}
page = requests.get(url, headers=headers)
try:
article = extractor.extract(raw_html=page.content)
text = article.cleaned_text
meta = article.meta_description
return text, meta
#Generic error catching is bad
except Exception, e:
print 'There was an error. Check the log file for more information.'
logger.warning('Problem scraping URL: {}. {}.'.format(url, e))
except Exception, e:
print 'There was an error. Check the log file for more information.'
logger.warning('Problem requesting url: {}. {}'.format(url, e))
| import logging
import requests
def scrape(url, extractor):
"""
Function to request and parse a given URL. Returns only the "relevant"
text.
Parameters
----------
url : String.
URL to request and parse.
extractor : Goose class instance.
An instance of Goose that allows for parsing of content.
Returns
-------
text : String.
Parsed text from the specified website.
meta : String.
Parsed meta description of an article. Usually equivalent to the
lede.
"""
logger = logging.getLogger('scraper_log')
try:
page = requests.get(url)
try:
article = extractor.extract(raw_html=page.content)
text = article.cleaned_text
meta = article.meta_description
return text, meta
#Generic error catching is bad
except Exception, e:
print 'There was an error. Check the log file for more information.'
logger.warning('Problem scraping URL: {}. {}.'.format(url, e))
except Exception, e:
print 'There was an error. Check the log file for more information.'
logger.warning('Problem requesting url: {}. {}'.format(url, e))
| mit | Python |
dddf634f8445fac66aa25265c7f7e859dab4c000 | add test file for python | KeitaNakamura/highlighter.nvim | test/test.py | test/test.py | # Highlighter Demo
class Person:
def __init__(self, x):
self.x = x
def show(self):
print(self.x)
person = Person("Ken")
person.show()
| mit | Python |
|
abcbe6443492ba2f011dec0132a0afb3b8cc9b0b | Create __init__.py | terry-12345/hello-world | hello-world/__init__.py | hello-world/__init__.py | bsd-2-clause | Python |
||
8d36c444fe379b5901692485c2850e86ed714f89 | Add sql connection tester | stormaaja/csvconverter,stormaaja/csvconverter,stormaaja/csvconverter | sql_connection_test.py | sql_connection_test.py | import mysql.connector
import json
with open("config.json") as f:
config = json.load(f)
try:
conn = mysql.connector.connect(
user=config["database_connection"]["username"],
password=config["database_connection"]["password"],
host=config["database_connection"]["host"],
database=config["database_connection"]["database"])
cursor = conn.cursor()
cursor.close()
print "Connection success"
except mysql.connector.errors.ProgrammingError as err:
print "Error connecting to database: \n{}".format(err)
| mit | Python |
|
05741f17ffac95d66290d2ec705cbfb66fc74ff9 | Add dummpy documentation/stats/plot_sky_locations.py | gammapy/gamma-cat | documentation/stats/plot_sky_locations.py | documentation/stats/plot_sky_locations.py | from bokeh.plotting import figure, output_file, show
output_file("example.html")
x = [1, 2, 3, 4, 5]
y = [6, 7, 6, 4, 5]
p = figure(title="example", plot_width=300, plot_height=300)
p.line(x, y, line_width=2)
p.circle(x, y, size=10, fill_color="white")
show(p)
| bsd-3-clause | Python |
|
fe4b226b9b3d6fbc7be7d545c185ed7950f3a5fd | Add Python benchmark | stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib | lib/node_modules/@stdlib/math/base/dist/beta/logpdf/benchmark/python/benchmark.scipy.py | lib/node_modules/@stdlib/math/base/dist/beta/logpdf/benchmark/python/benchmark.scipy.py | #!/usr/bin/env python
"""Benchmark scipy.stats.beta.logpdf."""
import timeit
name = "beta:logpdf"
repeats = 3
iterations = 1000
def print_version():
"""Print the TAP version."""
print("TAP version 13")
def print_summary(total, passing):
"""Print the benchmark summary.
# Arguments
* `total`: total number of tests
* `passing`: number of passing tests
"""
print("#")
print("1.." + str(total)) # TAP plan
print("# total " + str(total))
print("# pass " + str(passing))
print("#")
print("# ok")
def print_results(elapsed):
"""Print benchmark results.
# Arguments
* `elapsed`: elapsed time (in seconds)
# Examples
``` python
python> print_results(0.131009101868)
```
"""
rate = iterations / elapsed
print(" ---")
print(" iterations: " + str(iterations))
print(" elapsed: " + str(elapsed))
print(" rate: " + str(rate))
print(" ...")
def benchmark():
"""Run the benchmark and print benchmark results."""
setup = "from scipy.stats import beta; from random import random;"
stmt = "y = beta.logpdf(random(), 100.56789, 55.54321)"
t = timeit.Timer(stmt, setup=setup)
print_version()
for i in xrange(repeats):
print("# python::" + name)
elapsed = t.timeit(number=iterations)
print_results(elapsed)
print("ok " + str(i+1) + " benchmark finished")
print_summary(repeats, repeats)
def main():
"""Run the benchmark."""
benchmark()
if __name__ == "__main__":
main()
| apache-2.0 | Python |
|
c36e390910b62e1ad27066a0be0450c81a6f87c6 | Add context manager for logging | DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python | d1_common_python/src/d1_common/logging_context.py | d1_common_python/src/d1_common/logging_context.py | # -*- coding: utf-8 -*-
"""Context manager that enables temporary changes in logging level.
Note: Not created by DataONE.
Source: https://docs.python.org/2/howto/logging-cookbook.html
"""
import logging
import sys
class LoggingContext(object):
def __init__(self, logger, level=None, handler=None, close=True):
self.logger = logger
self.level = level
self.handler = handler
self.close = close
def __enter__(self):
if self.level is not None:
self.old_level = self.logger.level
self.logger.setLevel(self.level)
if self.handler:
self.logger.addHandler(self.handler)
def __exit__(self, et, ev, tb):
if self.level is not None:
self.logger.setLevel(self.old_level)
if self.handler:
self.logger.removeHandler(self.handler)
if self.handler and self.close:
self.handler.close()
# implicit return of None => don't swallow exceptions
| apache-2.0 | Python |
|
4e36e520cb8fef8f07b545a3109e8507789e64bf | add tests, most are still stubbed out | mohierf/mod-ui-graphite,mohierf/mod-ui-graphite | tests/test.py | tests/test.py | import unittest
import urlparse
import sys
import os
import time
from datetime import datetime
FILE_PATH = os.path.dirname(os.path.realpath(__file__))
ROOT_PATH = os.path.abspath(os.path.join(FILE_PATH, '../'))
sys.path.append(ROOT_PATH)
from module.util import JSONTemplate
from module.graphite_utils import GraphStyle, graphite_time
class TestGraphiteTarget(unittest.TestCase):
pass
class TestGraphiteURL(unittest.TestCase):
pass
class TestGraphiteMetric(unittest.TestCase):
pass
class TestGraphiteTime(unittest.TestCase):
def test_unixtime_0(self):
self.assertEqual(graphite_time(0), '17:00_19691231')
def test_unixtime_now(self):
self.assertEqual(graphite_time(time.time()), datetime.now().strftime('%H:%M_%Y%m%d'))
def test_string(self):
self.assertEqual(graphite_time('test'), 'test')
class TestGraphiteStyle(unittest.TestCase):
def test_base(self):
style = GraphStyle()
style = urlparse.parse_qs(str(style))
self.assertEqual(style, {'width': ['586'], 'height': ['308'], 'fontSize': ['8']})
def test_width(self):
style = GraphStyle(width=10)
style = urlparse.parse_qs(str(style))
self.assertEqual(style, {'width': ['10'], 'height': ['308'], 'fontSize': ['8']})
with self.assertRaises(ValueError):
GraphStyle(width='test')
def test_height(self):
style = GraphStyle(height=7)
style = urlparse.parse_qs(str(style))
self.assertEqual(style, {'width': ['586'], 'height': ['7'], 'fontSize': ['8']})
with self.assertRaises(ValueError):
GraphStyle(height='test')
def test_font(self):
style = GraphStyle(font_size=16)
style = urlparse.parse_qs(str(style))
self.assertEqual(style, {'width': ['586'], 'height': ['308'], 'fontSize': ['16']})
with self.assertRaises(ValueError):
GraphStyle(font_size='test')
def test_line_style(self):
style = GraphStyle(line_style='connected')
style = urlparse.parse_qs(str(style))
self.assertEqual(style, {'width': ['586'], 'height': ['308'], 'fontSize': ['8'], 'lineMode': ['connected']})
class TestJSONTemplate(unittest.TestCase):
data = [
{
"width": 586,
"height": 308,
"title": "Response Time on {{host}}",
"min": 0,
"targets": [
{
"target": "legendValue(alias({{host}}.{{service}}.rta,\"Response Time\"),\"last\")"
}
]
},
{
"width": 586,
"height": 308,
"title": "Packet Loss Percentage on {{host}}",
"min": 0,
"max": 100,
"targets": [
{
"target": "legendValue(alias({{host}}.{{service}}.pl,\"Packet loss percentage\"),\"last\")"
}
]
}
]
def test_load_file_path(self):
file_path = os.path.join(ROOT_PATH, 'templates', 'graphite', 'check-host-alive.graph')
template = JSONTemplate(file_path)
self.assertEqual(template.data, self.data)
class TestGraphFactory(unittest.TestCase):
pass
if __name__ == '__main__':
unittest.main() | agpl-3.0 | Python |
|
6c7a927e2fc0a054470c2a87fa98d07e993657ac | Add tests | ordian/pydirectio,ordian/pydirectio | test/test.py | test/test.py | import os
import unittest
try:
import directio
except ImportError:
import sys
sys.exit("""
Please install directio:
take a look at directio/README""")
class TestDirectio(unittest.TestCase):
def setUp(self):
super(TestDirectio, self).setUp()
flags = os.O_RDWR | os.O_DIRECT | os.O_SYNC | os.O_CREAT | os.O_TRUNC
self.file = os.open('test.txt', flags, 0o666)
self.buffer = bytearray(512)
self.msg = b'It just works!'
self.buffer[:len(self.msg)] = self.msg
def tearDown(self):
super(TestDirectio, self).tearDown()
os.close(self.file)
def test_read_after_write(self):
# can write only immutable buffer, so we buffer wrap in bytes
written = directio.write(self.file, bytes(self.buffer))
self.assertEqual(written, len(self.buffer))
os.lseek(self.file, 0, os.SEEK_SET)
got = directio.read(self.file, len(self.buffer))
self.assertEqual(got, self.buffer)
def test_fails_to_write_not_multiple_of_512(self):
self.assertRaises(ValueError, directio.write, self.file, self.msg)
def test_fails_to_read_not_multiple_of_512(self):
os.lseek(self.file, 0, os.SEEK_SET)
self.assertRaises(ValueError, directio.read, self.file, 511)
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
45148b72cb69c49b2a6ef6e278f23d63328a7942 | Clean up and docs | kristofvanmoffaert/python-omniture,dancingcactus/python-omniture,kristofvanmoffaert/python-omniture,kristofvanmoffaert/python-omniture,dancingcactus/python-omniture,dancingcactus/python-omniture | testQuery.py | testQuery.py | #!/usr/bin/python
import unittest
import omniture
import sys
import os
import pprint
creds = {}
creds['username'] = os.environ['OMNITURE_USERNAME']
creds['secret'] = os.environ['OMNITURE_SECRET']
class QueryTest(unittest.TestCase):
def setUp(self):
self.analytics = omniture.authenticate(creds['username'], creds['secret'])
reportdef = self.analytics.suites[0].report
queue = []
queue.append(reportdef)
self.report = omniture.sync(queue)
def test_ranked(self):
basic_report = self.analytics.suites[0].report.element("page")
queue = []
queue.append(basic_report)
response = omniture.sync(queue)
for report in response:
self.assertEqual(report.elements[0].id, "page", "The element is wrong")
self.assertEqual(len(report.elements), 1, "There are too many elements")
self.assertEqual(report.type, "ranked", "This is the wrong type of report it should be ranked")
def test_report_run(self):
self.assertIsInstance(self.analytics.suites[0].report.run(), omniture.Report, "The run method doesn't work to create a report")
def test_bad_element(self):
self.assertRaises(KeyError,self.analytics.suites[0].report.element, "pages")
def test_overtime(self):
basic_report = self.analytics.suites[0].report.metric("orders").granularity("hour")
queue = []
queue.append(basic_report)
response = omniture.sync(queue)
def test_double_element(self):
basic_report = self.analytics.suites[0].report.element("page").element("browser")
queue = []
queue.append(basic_report)
response = omniture.sync(queue)
for report in response:
self.assertEqual(report.elements[0].id,"page", "The 1st element is wrong")
self.assertEqual(report.elements[1].id,"browser", "The 2nd element is wrong")
self.assertEqual(len(report.elements), 2, "The number of elements is wrong")
self.assertEqual(report.type, "ranked", "This is the wrong type of report it should be ranked")
def test_double_metric(self):
basic_report = self.analytics.suites[0].report.metric("pageviews").metric("visits")
queue = []
queue.append(basic_report)
response = omniture.sync(queue)
for report in response:
self.assertEqual(report.metrics[0].id,"pageviews", "The 1st element is wrong")
self.assertEqual(report.metrics[1].id,"visits", "The 2nd element is wrong")
self.assertEqual(len(report.metrics), 2, "The number of elements is wrong")
self.assertEqual(report.type, "overtime", "This is the wrong type of report it should be overtime")
def test_element_parameters(self):
"""Test the top and startingWith parameters
This isn't a conclusive test. Ideally it would run two reports and compare the results to make sure they are correct.
However, these tests need to be able to run on any report suite, and some report suites (like ones that are currently being
used) don't have 10 items in the page name report
"""
basic_report = self.analytics.suites[0].report.element("page", top=5, startingWith=5)
queue = []
queue.append(basic_report)
response = omniture.sync(queue)
for report in response:
self.assertEqual(report.elements['page'].id, "page" ,"The parameters might have screwed this up")
@unittest.skip("don't have this one done yet")
def test_anamoly_detection(self):
basic_report = self.analytics.suites[0].report.metric("pageviews").range('2014-05-1', '2014-05-07').anomaly_detection()
queue = []
queue.append(basic_report)
response = omniture.sync(queue)
for report in response:
self.assertEqual(report.metrics, "upper bound" ,"Anomaly Detection isn't working")
def test_sortBy(self):
""" Make sure sortBy gets put in report description """
basic_report = self.analytics.suites[0].report.element('page').metric('pageviews').metric('visits').sortBy('visits')
self.assertEqual(basic_report.raw['sortBy'], "visits")
def test_current_data(self):
""" Make sure the current data flag gets set correctly """
basic_report = self.analytics.suites[0].report.element('page').metric('pageviews').metric('visits').currentData()
self.assertEqual(basic_report.raw['currentData'], True)
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
96a3fc178c9da5a8f917378e40454a0702d746e5 | Initialize construction module | Nekroze/drydock,Nekroze/drydock | drydock/construction.py | drydock/construction.py | """DryDock container construction."""
def construct(spec):
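"""Construct the containers described by the given DryDock specification (stub; not yet implemented)."""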
pass | mit | Python |
|
4c33fe7a927cde83aa53374e9fcaedfa18e51e77 | Add function to delete collection | tracek/gee_asset_manager | utilities.py | utilities.py | def delete_collection(ee, id):
if 'users' not in id:
root_path_in_gee = ee.data.getAssetRoots()[0]['id']
id = root_path_in_gee + '/' + id
params = {'id': id}
items_in_collection = ee.data.getList(params)
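# Delete every child asset first, then remove the (now empty) collection itself.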
for item in items_in_collection:
ee.data.deleteAsset(item['id'])
ee.data.deleteAsset(id) | apache-2.0 | Python |
|
4d9d286ec96e834fcb9acf1f1f52876e81668996 | Test script | billingstack/python-fakturo-billingstack | tools/test.py | tools/test.py | from fakturo.billingstack.client import Client
client = Client('http://localhost:9090/billingstack', username='ekarlso', password='secret0')
merchants = client.merchant.list()
| apache-2.0 | Python |
|
583b520a6dada6e7a8bf984469fd6d2e9d8eaf28 | add general methods to instruments | rstoneback/pysat | pysat/instruments/methods/general.py | pysat/instruments/methods/general.py | # -*- coding: utf-8 -*-
"""Provides generalized routines for integrating instruments into pysat.
"""
from __future__ import absolute_import, division, print_function
import pandas as pds
import pysat
import logging
logger = logging.getLogger(__name__)
def list_files(tag=None, sat_id=None, data_path=None, format_str=None,
supported_tags=None, fake_daily_files_from_monthly=False,
two_digit_year_break=None):
"""Return a Pandas Series of every file for chosen satellite data.
This routine is intended to be used by pysat instrument modules supporting
a particular NASA CDAWeb dataset.
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are <tag strings>.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
supported_tags : (dict or NoneType)
keys are sat_id, each containing a dict keyed by tag
where the values file format template strings. (default=None)
fake_daily_files_from_monthly : bool
Some CDAWeb instrument data files are stored by month, interfering
with pysat's functionality of loading by day. This flag, when true,
appends daily dates to monthly files internally. These dates are
used by load routine in this module to provide data by day.
two_digit_year_break : int
If filenames only store two digits for the year, then
'1900' will be added for years >= two_digit_year_break
and '2000' will be added for years < two_digit_year_break.
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
Examples
--------
::
fname = 'cnofs_vefi_bfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf'
supported_tags = {'dc_b': fname}
list_files = functools.partial(nasa_cdaweb.list_files,
supported_tags=supported_tags)
fname = 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v01.cdf'
supported_tags = {'': fname}
list_files = functools.partial(mm_gen.list_files,
supported_tags=supported_tags)
"""
if data_path is not None:
if format_str is None:
try:
format_str = supported_tags[sat_id][tag]
except KeyError as estr:
raise ValueError('Unknown sat_id or tag: ' + str(estr))
out = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
if (not out.empty) and fake_daily_files_from_monthly:
out.loc[out.index[-1] + pds.DateOffset(months=1)
- pds.DateOffset(days=1)] = out.iloc[-1]
out = out.asfreq('D', 'pad')
out = out + '_' + out.index.strftime('%Y-%m-%d')
return out
return out
else:
estr = ''.join(('A directory must be passed to the loading routine ',
'for <Instrument Code>'))
raise ValueError(estr)
| bsd-3-clause | Python |
|
79a236133ea00fa1d1af99426380392fe51ec0f4 | Create iis_shortname.py | nanshihui/PocCollect,nanshihui/PocCollect | middileware/iis/iis_shortname.py | middileware/iis/iis_shortname.py | #!/usr/bin/env python
# encoding: utf-8
from t import T
import re
import urllib2,requests,urllib2,json,urlparse
requests.packages.urllib3.disable_warnings()
class P(T):
def __init__(self):
T.__init__(self)
def verify(self,head='',context='',ip='',port='',productname={},keywords='',hackinfo='',verify=False):
timeout=5
if int(port) == 443:
protocal = "https"
else:
protocal = "http"
target_url = protocal + "://"+ip+":"+str(port)
result = {}
result['result']=False
r=None
try:
# Keep the first response object so its body can be attached to the result on success
r = requests.get(url=target_url+'/*~1****/a.aspx',timeout=timeout,verify=verify,allow_redirects=False)
status_1 = r.status_code
status_2 = requests.get(url=target_url+'/l1j1e*~1****/a.aspx',timeout=timeout,verify=verify,allow_redirects=False).status_code
#print target_url
if status_1 == 404 and status_2 == 400:
result['result']=True
result['VerifyInfo'] = {}
result['VerifyInfo']['type']='iis short name Vulnerability'
result['VerifyInfo']['URL'] =target_url
result['VerifyInfo']['payload']= 'null'
result['VerifyInfo']['result'] = r.content
except Exception,e:
#print '[-]error',
print e
#pass
#print traceback.print_exc()
finally:
if r is not None:
r.close()
del r
return result
if __name__ == '__main__':
print P().verify(ip='cos.99.com',port='80')
| mit | Python |
|
50af4f518912f758e7961055342642c9d31832a0 | Create 6-pwm2.py | CamJam-EduKit/EduKit3 | Code/6-pwm2.py | Code/6-pwm2.py | # CamJam EduKit 3 - Robotics
# Worksheet 6 – Varying the speed of each motor with PWM
import RPi.GPIO as GPIO # Import the GPIO Library
import time # Import the Time library
# Set the GPIO modes
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Set variables for the GPIO motor pins
pinMotorAForwards = 10
pinMotorABackwards = 9
pinMotorBForwards = 8
pinMotorBBackwards = 7
# How many times to turn the pin on and off each second
Frequency = 20
# How long the pin stays on each cycle, as a percent
DutyCycleA = 30
DutyCycleB = 30
# Setting the duty cycle to 0 means the motors will not turn
Stop = 0
# Set the GPIO Pin mode to be Output
GPIO.setup(pinMotorAForwards, GPIO.OUT)
GPIO.setup(pinMotorABackwards, GPIO.OUT)
GPIO.setup(pinMotorBForwards, GPIO.OUT)
GPIO.setup(pinMotorBBackwards, GPIO.OUT)
# Set the GPIO to software PWM at 'Frequency' Hertz
pwmMotorAForwards = GPIO.PWM(pinMotorAForwards, Frequency)
pwmMotorABackwards = GPIO.PWM(pinMotorABackwards, Frequency)
pwmMotorBForwards = GPIO.PWM(pinMotorBForwards, Frequency)
pwmMotorBBackwards = GPIO.PWM(pinMotorBBackwards, Frequency)
# Start the software PWM with a duty cycle of 0 (i.e. not moving)
pwmMotorAForwards.start(Stop)
pwmMotorABackwards.start(Stop)
pwmMotorBForwards.start(Stop)
pwmMotorBBackwards.start(Stop)
# Turn all motors off
def StopMotors():
pwmMotorAForwards.ChangeDutyCycle(Stop)
pwmMotorABackwards.ChangeDutyCycle(Stop)
pwmMotorBForwards.ChangeDutyCycle(Stop)
pwmMotorBBackwards.ChangeDutyCycle(Stop)
# Turn both motors forwards
def Forwards():
pwmMotorAForwards.ChangeDutyCycle(DutyCycleA)
pwmMotorABackwards.ChangeDutyCycle(Stop)
pwmMotorBForwards.ChangeDutyCycle(DutyCycleB)
pwmMotorBBackwards.ChangeDutyCycle(Stop)
# Turn both motors backwards
def Backwards():
pwmMotorAForwards.ChangeDutyCycle(Stop)
pwmMotorABackwards.ChangeDutyCycle(DutyCycleA)
pwmMotorBForwards.ChangeDutyCycle(Stop)
pwmMotorBBackwards.ChangeDutyCycle(DutyCycleB)
# Turn left
def Left():
pwmMotorAForwards.ChangeDutyCycle(Stop)
pwmMotorABackwards.ChangeDutyCycle(DutyCycleA)
pwmMotorBForwards.ChangeDutyCycle(DutyCycleB)
pwmMotorBBackwards.ChangeDutyCycle(Stop)
# Turn Right
def Right():
pwmMotorAForwards.ChangeDutyCycle(DutyCycleA)
pwmMotorABackwards.ChangeDutyCycle(Stop)
pwmMotorBForwards.ChangeDutyCycle(Stop)
pwmMotorBBackwards.ChangeDutyCycle(DutyCycleB)
# Your code to control the robot goes below this line
Forwards()
time.sleep(1) # Pause for 1 second
Left()
time.sleep(0.5) # Pause for half a second
Forwards()
time.sleep(1)
Right()
time.sleep(0.5)
Backwards()
time.sleep(0.5)
StopMotors()
GPIO.cleanup()
| mit | Python |
|
2d25c2329a9ae4d084671ab99cf53290fe7547ab | add tests for cython script | adrn/streams,adrn/streams | streams/simulation/tests/test_integrate_lm10.py | streams/simulation/tests/test_integrate_lm10.py | # coding: utf-8
"""
Test the Cython integrate code
"""
from __future__ import absolute_import, unicode_literals, division, print_function
__author__ = "adrn <[email protected]>"
# Standard library
import os, sys
import glob
import time
# Third-party
import numpy as np
import pytest
import astropy.units as u
import matplotlib.pyplot as plt
from .._integrate_lm10 import lm10_acceleration, leapfrog_lm10
from ...potential import LawMajewski2010
from ...integrate import leapfrog
def test_cython_vs_python1():
r = np.random.random((100,3))
a = time.time()
for ii in range(10000):
lm10_acceleration(r, 2, 1.6, 1.6, 1.69, 0.121)
cython = (time.time() - a) / 10000.
lm10 = LawMajewski2010()
a = time.time()
for ii in range(10000):
lm10.acceleration_at(r)
pure_python = (time.time() - a) / 10000.
assert cython < pure_python
def test_cython_vs_python2():
r = np.random.random((100,3))
v = np.random.random((100,3))
t = np.arange(0, 7000, 10.)
a = time.time()
for ii in range(10):
leapfrog_lm10(r, v, 1.6, 1.6, 1.69, 0.121, t=t)
cython = (time.time() - a) / 10.
lm10 = LawMajewski2010()
a = time.time()
for ii in range(10):
leapfrog(lm10.acceleration_at, r, v, t)
pure_python = (time.time() - a) / 10.
print(cython, pure_python)
#assert cython < pure_python
| mit | Python |
|
af3333906125e9bde3cc5b3ebdb7209c25bcf6ff | Add pinger script | johan-andersson01/biblion,johan-andersson01/biblion,johan-andersson01/biblion,johan-andersson01/biblion | pinger.py | pinger.py | #!/usr/bin/python3
import requests
import datetime
import time
while True:
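# Ping only when the local hour is past 7, then sleep 29 minutes between requests.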
hour = datetime.datetime.now().hour
if hour > 7:
requests.get('https://biblion.se')
time.sleep(60*29) | mit | Python |
|
e140c21cd0b7d5b0e7cbe7895096476105d03f91 | Create update_sql.py | dichild/Searching,dichld/A-framework-of-search-engine-using-python-and-mysql | update_sql.py | update_sql.py | __author__ = 'userme865'
# ver 0.1
import MySQLdb
def update_db():
try: # start mysql and create the table on the first run
conn = MySQLdb.connect(host='localhost', user='root', passwd='', port=3306)
cur = conn.cursor()
conn.select_db('python')
cur.execute('DROP TABLE dataexchange')
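# Rebuild dataexchange: one row per word, with comma-joined pages and pagewords columns.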
cur.execute(
"CREATE TABLE dataexchange SELECT indexer.words, group_concat(indexer.pages ORDER BY indexer.words SEPARATOR ',') AS 'pages',group_concat(indexer.pagewords ORDER BY indexer.words SEPARATOR ',') AS 'pagewords' from indexer GROUP BY indexer.words")
cur.execute("DROP TABLE indexer")
cur.execute("CREATE TABLE indexer SELECT* FROM dataexchange")
conn.commit()
cur.close()
conn.close()
except MySQLdb.Error, e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
| mit | Python |
|
3921f1522851767444644d1dc3c126521476d9dc | add util script to help troll autoplot feature ideas | akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem | scripts/util/list_stale_autoplots.py | scripts/util/list_stale_autoplots.py | """Look into which autoplots have not been used in a while"""
import psycopg2
import re
import pandas as pd
QRE = re.compile("q=([0-9]+)")
pgconn = psycopg2.connect(database='mesosite', host='iemdb', user='nobody')
cursor = pgconn.cursor()
cursor.execute("""SELECT valid, appurl from feature WHERE appurl is not null
and appurl != ''
""")
q = {}
for row in cursor:
appurl = row[1]
valid = row[0]
if appurl.find("/plotting/auto/") != 0:
continue
tokens = QRE.findall(appurl)
if len(tokens) == 0:
print("appurl: %s valid: %s failed RE" % (appurl, valid))
continue
appid = int(tokens[0])
res = q.setdefault(appid, valid)
if res < valid:
q[appid] = valid
df = pd.DataFrame.from_dict(q, orient='index')
df.columns = ['valid']
df.sort_values(by='valid', inplace=True)
print df.head()
| mit | Python |
|
faa6872cf008171afa3db6687d23c1bcc9b6dbac | Add views to the main files | Etskh/TheDruid,Etskh/TheDruid,Etskh/TheDruid,Etskh/TheDruid | Druid/views.py | Druid/views.py | from django.shortcuts import render
from gfx.models import Material
from django.template import RequestContext
def home( request ):
rc = RequestContext(request)
return render( request, 'Druid/index.html', context_instance=rc ) | mit | Python |
|
fc23860b1adbf7c75dfd53dc213c24a65b455597 | Create ExtractData.py | anfederico/Stock-Talk | ExtractData.py | ExtractData.py | bsd-2-clause | Python |
||
ff3b36b4d64af54b6bd22f107a9d5dd5cf4f4473 | solve problem no.1152 | ruby3141/algo_solve,ruby3141/algo_solve,ruby3141/algo_solve | 1152/answer.py | 1152/answer.py | from sys import stdin
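# Problem 1152: count the words in one line of input (tokens assumed separated by single spaces).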
input = stdin.readline().strip()
if input == "":
print(0)
exit()
i = 1
for char in input:
if char == ' ':
i += 1
print(i) | mit | Python |
|
f5706084caca2c6f6235914cb70e79c16438e1a0 | Create OverlappingAMR.py | lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples | src/Python/CompositeData/OverlappingAMR.py | src/Python/CompositeData/OverlappingAMR.py | #!/usr/bin/env python
import vtk
def MakeScalars(dims, origin, spacing, scalars):
# Implicit function used to compute scalars
sphere = vtk.vtkSphere()
sphere.SetRadius(3)
sphere.SetCenter(5, 5, 5)
scalars.SetNumberOfTuples(dims[0]*dims[1]*dims[2])
for k in range(0, dims[2]):
z = origin[2] + spacing[2]*k
for j in range(0, dims[1]):
y = origin[1] + spacing[1]*j
for i in range(0,dims[0]):
x = origin[0] + spacing[0]*i
scalars.SetValue(k*dims[0]*dims[1] + j*dims[0] + i, sphere.EvaluateFunction(x, y, z))
def main():
# Create and populate the AMR dataset
# The dataset should look like
# Level 0
# uniform grid, dimensions 11, 11, 11, AMR box (0, 0, 0) - (9, 9, 9)
# Level 1 - refinement ratio : 2
# uniform grid, dimensions 11, 11, 11, AMR box (0, 0, 0) - (9, 9, 9)
# uniform grid, dimensions 11, 11, 11, AMR box (10, 10, 10) - (19, 19, 19)
# Use MakeScalars() above to fill the scalar arrays
amr = vtk.vtkOverlappingAMR()
blocksPerLevel = [1, 2]
amr.Initialize(2, blocksPerLevel)
origin = [0.0, 0.0, 0.0]
spacing = [1.0, 1.0, 1.0]
dims = [11, 11, 11]
ug1 = vtk.vtkUniformGrid()
# Geometry
ug1.SetOrigin(origin)
ug1.SetSpacing(spacing)
ug1.SetDimensions(dims)
# Data
scalars = vtk.vtkFloatArray()
ug1.GetPointData().SetScalars(scalars)
MakeScalars(dims, origin, spacing, scalars)
lo = [0, 0, 0]
hi = [9, 9, 9]
box1 = vtk.vtkAMRBox()
amr.SetAMRBox(0, 0, box1)
amr.SetDataSet(0, 0, ug1)
spacing2 = [0.5, 0.5, 0.5]
ug2 = vtk.vtkUniformGrid()
# Geometry
ug2.SetOrigin(origin)
ug2.SetSpacing(spacing2)
ug2.SetDimensions(dims)
# Data
scalars = vtk.vtkFloatArray()
ug2.GetPointData().SetScalars(scalars)
MakeScalars(dims, origin, spacing2, scalars)
lo2 = [0, 0, 0]
hi2 = [9, 9, 9]
box2 = vtk.vtkAMRBox()
amr.SetAMRBox(1, 0, box2)
amr.SetDataSet(1, 0, ug2)
origin3 = [5, 5, 5]
ug3 = vtk.vtkUniformGrid()
# Geometry
ug3.SetOrigin(origin3)
ug3.SetSpacing(spacing2)
ug3.SetDimensions(dims)
# Data
scalars = vtk.vtkFloatArray()
ug3.GetPointData().SetScalars(scalars)
MakeScalars(dims, origin3, spacing2, scalars)
lo3 = [10, 10, 10]
hi3 = [19, 19, 19]
box3 = vtk.vtkAMRBox()
amr.SetAMRBox(1, 1, box3)
amr.SetDataSet(1, 1, ug3)
amr.SetRefinementRatio(0, 2)
# Render the amr data here.
of = vtk.vtkOutlineFilter()
of.SetInputData(amr)
geomFilter = vtk.vtkCompositeDataGeometryFilter()
geomFilter.SetInputConnection(of.GetOutputPort())
# Create an iso-surface - at 10.
cf = vtk.vtkContourFilter()
cf.SetInputData(amr)
cf.SetNumberOfContours(1)
cf.SetValue(0, 10.0)
geomFilter2 = vtk.vtkCompositeDataGeometryFilter()
geomFilter2.SetInputConnection(cf.GetOutputPort())
# Create the render window, renderer, and interactor.
aren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(aren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Associate the geometry with a mapper and the mapper to an actor.
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(geomFilter.GetOutputPort())
actor1 = vtk.vtkActor()
actor1.SetMapper(mapper)
# Associate the geometry with a mapper and the mapper to an actor.
mapper2 = vtk.vtkPolyDataMapper()
mapper2.SetInputConnection(geomFilter2.GetOutputPort())
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper2)
# Add the actor to the renderer and start handling events.
aren.AddActor(actor1)
aren.AddActor(actor2)
renWin.Render()
iren.Start()
if __name__ == '__main__':
main()
| apache-2.0 | Python |
|
82bfe668b11ac76159f2a599734ba33c4ef57026 | Add another views_graph_service file | corpulentcoffee/globus-sdp,globus/globus-sample-data-portal | portal/views_graph_service.py | portal/views_graph_service.py | from flask import (flash, redirect, render_template, request,
session, url_for)
import requests
from portal import app, datasets
from portal.decorators import authenticated
from portal.utils import get_portal_tokens
@app.route('/graph', methods=['GET', 'POST'])
@authenticated
def graph():
if request.method == 'GET':
return render_template('graph.jinja2', datasets=datasets)
selected_ids = request.form.getlist('dataset')
selected_year = request.form.get('year')
if not (selected_ids and selected_year):
flash("Please select at least one dataset and a year to graph.")
return redirect(url_for('graph'))
service_token = get_portal_tokens()['service']
service_url = '{}/{}'.format(app.config['SERVICE_URL_BASE'], 'api/doit')
req_headers = dict(Authorization='Bearer {}'.format(service_token))
req_data = dict(datasets=selected_ids,
year=selected_year,
user_identity_id=session.get('primary_identity'),
user_identity_name=session.get('primary_username'))
resp = requests.post(service_url, headers=req_headers, data=req_data,
verify=False)
resp.raise_for_status()
resp_data = resp.json()
dest_ep = resp_data.get('dest_ep')
dest_path = resp_data.get('dest_path')
dest_name = resp_data.get('dest_name')
graph_count = resp_data.get('graph_count')
flash("%d-file SVG upload to %s on %s completed!" %
(graph_count, dest_path, dest_name))
return redirect(url_for('browse', endpoint_id=dest_ep,
endpoint_path=dest_path.lstrip('/')))
@app.route('/graph/clean-up', methods=['POST'])
@authenticated
def graph_cleanup():
service_token = get_portal_tokens()['service']
service_url = '{}/{}'.format(app.config['SERVICE_URL_BASE'], 'api/cleanup')
req_headers = dict(Authorization='Bearer {}'.format(service_token))
resp = requests.post(service_url,
headers=req_headers,
data=dict(
user_identity_name=session['primary_username']
),
verify=False)
resp.raise_for_status()
resp_data = resp.json()
task_id = resp_data['task_id']
msg = '{} ({}).'.format('Your existing processed graphs have been removed',
task_id)
flash(msg)
return redirect(url_for('graph'))
| unlicense | Python |
|
3684e8be098300006b09c6677a2805e10d623acd | Add GYP file tld_cleanup tool. | Crystalnix/house-of-life-chromium,yitian134/chromium,gavinp/chromium,adobe/chromium,adobe/chromium,gavinp/chromium,ropik/chromium,adobe/chromium,yitian134/chromium,Crystalnix/house-of-life-chromium,ropik/chromium,gavinp/chromium,Crystalnix/house-of-life-chromium,ropik/chromium,gavinp/chromium,gavinp/chromium,ropik/chromium,ropik/chromium,yitian134/chromium,ropik/chromium,yitian134/chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,adobe/chromium,adobe/chromium,adobe/chromium,yitian134/chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,gavinp/chromium,yitian134/chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,ropik/chromium,adobe/chromium,adobe/chromium,adobe/chromium,adobe/chromium,adobe/chromium,gavinp/chromium,yitian134/chromium,yitian134/chromium,gavinp/chromium,Crystalnix/house-of-life-chromium,gavinp/chromium,ropik/chromium,yitian134/chromium,gavinp/chromium,yitian134/chromium,ropik/chromium | net/tools/tld_cleanup/tld_cleanup.gyp | net/tools/tld_cleanup/tld_cleanup.gyp | # Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'includes': [
'../../../build/common.gypi',
],
'targets': [
{
'target_name': 'tld_cleanup',
'type': 'executable',
'dependencies': [
'../../../base/base.gyp:base',
'../../../build/temp_gyp/googleurl.gyp:googleurl',
],
'sources': [
'tld_cleanup.cc',
],
},
],
}
| bsd-3-clause | Python |
|
5f9c7d10957c7b0b0da46b031120fe2434315d0d | Test of new persistence layer. | seibert/blaze-core,seibert/blaze-core,seibert/blaze-core,seibert/blaze-core,seibert/blaze-core | ndtable/persistence/simple.py | ndtable/persistence/simple.py | from ndtable.carray import carray, cparams
from bloscpack import pack_list, unpack_file
from numpy import array, frombuffer
def test_simple():
filename = 'output'
# hackish, just experimenting!
arr = carray(xrange(10000)).chunks
ca = [bytes(chunk.viewof) for chunk in arr]
pack_list(ca, {}, filename, {'typesize': 8, 'clevel': 0, 'shuffle': False})
out_list, meta_info = unpack_file('output')
assert out_list[0] == ca[0]
assert out_list[1] == ca[1]
def test_compressed():
filename = 'output'
# hackish, just experimenting!
arr = carray(xrange(10000), cparams(clevel=5, shuffle=True)).chunks
ca = [bytes(chunk.viewof) for chunk in arr]
pack_list(ca, {}, filename, {'typesize': 8, 'clevel': 5, 'shuffle': True})
out_list, meta_info = unpack_file('output')
assert out_list[0] == ca[0]
assert out_list[1] == ca[1]
| bsd-2-clause | Python |
|
e6a4863d9663791fabc4bd6ccdf0ab45ba2a86eb | Add standalone benchmark runner | urschrei/lonlat_bng,urschrei/rust_bng,urschrei/lonlat_bng,urschrei/lonlat_bng,urschrei/rust_bng | remote_bench.py | remote_bench.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Standalone benchmark runner
"""
import cProfile
import pstats
import profile
import numpy as np
print("Running Rust and Pyproj benchmarks\n")
# calibrate
pr = profile.Profile()
calibration = np.mean([pr.calibrate(100000) for x in xrange(5)])
# add the bias
profile.Profile.bias = calibration
cProfile.run(open('benches/cprofile_rust.py', 'rb'), 'benches/output_stats_rust')
rust = pstats.Stats('benches/output_stats_rust')
cProfile.run(open('benches/cprofile_pyproj.py', 'rb'), 'benches/output_stats_pyproj')
pyproj_ = pstats.Stats('benches/output_stats_pyproj')
print("Rust Benchmark\n")
rust.sort_stats('cumulative').print_stats(5)
print("Pyproj Benchmark\n")
pyproj_.sort_stats('cumulative').print_stats(5)
| mit | Python |
|
b4042f23d02e77c45d772fe64ae5e98db8b5e4e4 | Add new package: re2 (#18302) | LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack | var/spack/repos/builtin/packages/re2/package.py | var/spack/repos/builtin/packages/re2/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Re2(CMakePackage):
"""RE2 is a fast, safe, thread-friendly alternative to backtracking
regular expression engines like those used in PCRE, Perl, and Python."""
homepage = "https://github.com/google/re2"
url = "https://github.com/google/re2/archive/2020-08-01.tar.gz"
version('2020-08-01', sha256='6f4c8514249cd65b9e85d3e6f4c35595809a63ad71c5d93083e4d1dcdf9e0cd6')
version('2020-04-01', sha256='98794bc5416326817498384a9c43cbb5a406bab8da9f84f83c39ecad43ed5cea')
| lgpl-2.1 | Python |
|
9c0bcd4e0317aa8b76ebbf3c9ecae82d1b90027d | Create initial night sensor code for Pi | wodiesan/senior_design_spring | night_sensor/night_feature.py | night_sensor/night_feature.py | """
@author: Sze "Ron" Chau
@e-mail: [email protected]
@source: https://github.com/wodiesan/sweet-skoomabot
@desc Night sensor-->RPi for Senior Design 1
"""
import logging
import os
import RPi.GPIO as GPIO
import serial
import subprocess
import sys
import threading
import time
import traceback
logger = logging.getLogger(__name__)
# GPIO pins. Uses the BCM numbering system based on RPi B+ board.
IR1 = 26
IR2 = 19
IR3 = 13
IR4 = 6
def init_serial():
"""Initialize the serial connection to the light sensor."""
ser = serial.Serial()
#ser.port = "\\.\COM4" # Windows
ser.port = "/dev/ttyUSB0" # Linux
ser.baudrate = 57600
try:
ser.open()
except Exception, e:
logger.info("Possible open serial port: " + str(e))
print 'Check the serial USB port.'
exit()
return ser
def init_leds():
"""Initial setup for light sensor and IR LEDs. Currently uses the BCM
numbering system based on RPi B+ board."""
GPIO.setmode(GPIO.BCM)
GPIO.setup(IR1, GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(IR2, GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(IR3, GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(IR4, GPIO.OUT, initial=GPIO.HIGH)
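# NOTE: 'warnings' (the thread target below) is assumed to be defined elsewhere in this module.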
thread = threading.Thread(target=warnings)
thread.daemon = False
thread.start()
return thread
| mit | Python |
|
b7e1e05bfe5aa7a8d91a4d8ee786e61b4aa7bd1b | Add ArrayQueue | xliiauo/leetcode,xliiauo/leetcode,xiao0720/leetcode,xliiauo/leetcode,xiao0720/leetcode | ArrayQueue.py | ArrayQueue.py | class ArrayQueue:
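"""Fixed-capacity FIFO queue backed by a circular array (no overflow/underflow checks)."""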
def __init__(self, max=10):
self._data = [None] * max
self._size = 0
self._front = 0
self._max = max
def enqueue(self, e):
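# Place the element at the logical tail, wrapping around the end of the backing array.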
self._data[(self._front + self._size) % self._max] = e
self._size += 1
def dequeue(self):
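# Take the front element, clear its slot, and advance the front index circularly.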
rst, self._data[self._front] = self._data[self._front], None
self._front = (self._front + 1) % self._max
self._size -= 1
return rst
def __len__(self):
return self._size | mit | Python |
|
55b6d19fc8c80e3d4ff7842f20d284879f5ea151 | Create BubbleSort.py | CindyDing1115/DataStruct_Sort | BubbleSort.py | BubbleSort.py | """
Bubble sort:
Naive version: starting from i = 0, compare i with every later element j = i + 1 and swap.
Then i = 1, and so on. The drawback: a value that was finally swapped to the front can be
pushed straight back toward the end by the smallest value at the tail of the sequence.
Updated version here: for each i, j walks backwards from length - 2, comparing j with j + 1
and swapping, so the smaller values bubbled toward the front are not disturbed -- see figure on p. 381.
"""
def bubble_sort(lists):
count = len(lists)
for i in range(0, count):
for j in range(i, count-1)[::-1]:
if lists[j] > lists[j+1]:
lists[j], lists[j+1] = lists[j+1], lists[j]
return lists
| epl-1.0 | Python |
|
17966b6af3039aa6d6308e1592c14527513c70c1 | apply oa start date from journals to relative update requests - script | DOAJ/doaj,DOAJ/doaj,DOAJ/doaj,DOAJ/doaj | portality/migrate/3053_oa_start_date_from_journals_to_urs/migrate.py | portality/migrate/3053_oa_start_date_from_journals_to_urs/migrate.py | """
This script applies the OA start date from each journal to its related update
request (the journal's current application), and writes a CSV report of the journals processed.
```
python migrate.py -o report.csv
```
"""
import csv
import esprit
from portality.core import es_connection
from portality.util import ipt_prefix
from portality import models
JOURNALS_WITH_OA_START_DATE = {
"query": {
"filtered": {
"filter": {
"exists" : {
"field" : "bibjson.oa_start"
}
},
"query": {
"match_all": {}
}
}
},
"size": 200000
}
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--out", help="output file path")
args = parser.parse_args()
if not args.out:
print("Please specify an output file path with the -o option")
parser.print_help()
exit()
conn = es_connection
with open(args.out, "w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(["ID", "OA Start Date", "Current Application ID", "Application found"])
for j in esprit.tasks.scroll(conn, ipt_prefix('journal'),
q=JOURNALS_WITH_OA_START_DATE,
page_size=100, keepalive='1m'):
journal = models.Journal(_source=j)
bibjson = journal.bibjson()
application_found = False
if journal.current_application is not None:
ur = models.Application.pull(journal.current_application)
if ur is not None:
application_found = True
urb = ur.bibjson()
urb.oa_start = bibjson.oa_start
ur.save()
try:
writer.writerow(
[journal.id, bibjson.oa_start, journal.current_application, application_found])
except AttributeError:
print("Error reading attributes for journal {0}".format(j['id']))
| apache-2.0 | Python |
|
ab50818c18b4275c205419c4c844bfc9ecb7a4c8 | add rename.py | jiangzhonghui/ToolScripts,jiangzhonghui/ToolScripts,jiangzhonghui/ToolScripts | FileUtils/rename.py | FileUtils/rename.py | import os
import sys
import re
dirname, filename = os.path.split(os.path.abspath(sys.argv[0]))
os.chdir(dirname)
fileList = os.listdir(dirname)
print dirname
name='edge_effect_'
for fileItem in fileList:
dotIndex = fileItem.rfind('.')
fileName = fileItem[: dotIndex]
fileExt = fileItem[dotIndex : ]
print fileName,fileExt
#m=re.search("[^qd]\w+",fileName)
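# Rename only files that lack the prefix, and skip this rename script itself.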
if fileName.find(name)<0 and fileName.find("rename")<0:
print "111"
os.rename(fileItem,name+fileName+fileExt)
pass
#print 'm.group:'m.group(0) | apache-2.0 | Python |
|
dc993796fc15e3670c8a702f43fcb9a5d9b4c84e | Add forgotten file. | astrobin/astrobin,astrobin/astrobin,astrobin/astrobin,astrobin/astrobin | astrobin_apps_donations/utils.py | astrobin_apps_donations/utils.py | from subscription.models import UserSubscription
def user_is_donor(user):
if user.is_authenticated:
return UserSubscription.objects.filter(user = user, subscription__name = 'AstroBin Donor').count() > 0
return False
| agpl-3.0 | Python |
|
e51f3869b4a047489b9bb1e4b88af0e0bdc3078b | Add a command to list all the documents. | maxking/paper-to-git,maxking/paper-to-git | paper_to_git/commands/list_command.py | paper_to_git/commands/list_command.py | """
List the Documents and Folders
"""
from paper_to_git.commands.base import BaseCommand
from paper_to_git.models import PaperDoc, PaperFolder
__all__ = [
'ListCommand',
]
class ListCommand(BaseCommand):
"""List the PaperDocs and Folders
"""
name = 'list'
def add(self, parser, command_parser):
self.parser = parser
command_parser.add_argument('-d', '--docs',
default=False, action='store_true',
help=("""\
List all the documents currently stored."""))
command_parser.add_argument('-fd', '--folders',
default=False, action='store_true',
help=("""List all folders in Dropbox Paper"""))
def process(self, args):
if args.docs:
for doc in PaperDoc.select():
print(doc)
if args.folders:
for folder in PaperFolder.select():
print(folder)
for doc in folder.docs:
print('|----{}'.format(doc))
if not (args.docs or args.folders):
print("Please provide atleast one of the --docs or --folders flags")
| apache-2.0 | Python |
|
a797de9014a3d466bb10e9bc318c3e2edec328be | add base for rendering widgets | VisTrails/VisTrails,Nikea/VisTrails,hjanime/VisTrails,minesense/VisTrails,hjanime/VisTrails,hjanime/VisTrails,VisTrails/VisTrails,minesense/VisTrails,Nikea/VisTrails,celiafish/VisTrails,celiafish/VisTrails,minesense/VisTrails,hjanime/VisTrails,VisTrails/VisTrails,hjanime/VisTrails,minesense/VisTrails,VisTrails/VisTrails,VisTrails/VisTrails,Nikea/VisTrails,Nikea/VisTrails,celiafish/VisTrails,minesense/VisTrails,celiafish/VisTrails | packages/SCIRun/renderbase.py | packages/SCIRun/renderbase.py | from core import system
from core.modules.module_registry import registry
from packages.spreadsheet.basic_widgets import SpreadsheetCell, CellLocation
class Render(SpreadsheetCell):
def compute(self):
pass
def registerRender():
registry.add_module(Render, abstract=True)
| bsd-3-clause | Python |
|
6a9fae290c8ce1618a7207efe669347b9503e3be | Add missing logparse file. | nyu-mll/spinn,nyu-mll/spinn,nyu-mll/spinn | python/spinn/util/logparse.py | python/spinn/util/logparse.py | """
Really easy log parsing.
"""
try:
from parse import *
except ImportError:
pass
import json
FMT_TRAIN = "Train-Format: "
FMT_TRAIN_EXTRA = "Train-Extra-Format: "
FMT_EVAL = "Eval-Format: "
FMT_EVAL_EXTRA = "Eval-Extra-Format: "
IS_TRAIN = "Acc:"
IS_TRAIN_EXTRA = "Train Extra:"
IS_EVAL = "Eval acc:"
IS_EVAL_EXTRA = "Eval Extra:"
START_TRAIN = "Step:"
START_TRAIN_EXTRA = "Train Extra:"
START_EVAL = "Step:"
START_EVAL_EXTRA = "Eval Extra:"
def get_format(filename, prefix):
with open(filename) as f:
for line in f:
if prefix in line:
return line[line.find(prefix) + len(prefix):].strip()
raise Exception("Format string not found.")
def get_json_data(filename, prefix):
with open(filename) as f:
for line in f:
if prefix in line:
data = line[line.find(prefix) + len(prefix):].strip()
return json.loads(data)
raise Exception("Format string not found.")
def parse_flags(filename):
PREFIX_FLAGS = "Flag Values:\n"
TERMINAL = "}\n"
data = ""
read_json = False
with open(filename) as f:
for line in f:
if read_json:
data += line
if TERMINAL in line:
break
if PREFIX_FLAGS in line:
read_json = True
return json.loads(data)
def is_train(line):
return line.find(FMT_TRAIN) < 0 and line.find(IS_TRAIN) >= 0
def is_train_extra(line):
return line.find(FMT_TRAIN_EXTRA) < 0 and line.find(IS_TRAIN_EXTRA) >= 0
def is_eval(line):
return line.find(FMT_EVAL) < 0 and line.find(IS_EVAL) >= 0
def is_eval_extra(line):
return line.find(FMT_EVAL_EXTRA) < 0 and line.find(IS_EVAL_EXTRA) >= 0
def read_file(filename):
flags = parse_flags(filename)
train_str, train_extra_str = get_format(filename, FMT_TRAIN), get_format(filename, FMT_TRAIN_EXTRA)
eval_str, eval_extra_str = get_format(filename, FMT_EVAL), get_format(filename, FMT_EVAL_EXTRA)
dtrain, dtrain_extra, deval, deval_extra = [], [], [], []
with open(filename) as f:
for line in f:
line = line.strip()
if is_train(line):
dtrain.append(parse(train_str, line[line.find(START_TRAIN):].strip()))
elif is_train_extra(line):
dtrain_extra.append(parse(train_extra_str, line[line.find(START_TRAIN_EXTRA):].strip()))
elif is_eval(line):
deval.append(parse(eval_str, line[line.find(START_EVAL):].strip()))
elif is_eval_extra(line):
deval_extra.append(parse(eval_extra_str, line[line.find(START_EVAL_EXTRA):].strip()))
return dtrain, dtrain_extra, deval, deval_extra, flags
if __name__ == '__main__':
import gflags
import sys
FLAGS = gflags.FLAGS
gflags.DEFINE_string("path", "scripts/sample.log", "")
FLAGS(sys.argv)
dtrain, dtrain_extra, deval, deval_extra, flags = read_file(FLAGS.path)
print "Flags:"
print "Model={model_type}\nLearning_Rate={learning_rate}".format(**flags)
print
print "Train:"
for d in dtrain:
print("Step: {} Acc: {}".format(d['step'], d['class_acc']))
print
print "Eval:"
for d in deval:
print("Step: {} Acc: {}".format(d['step'], d['class_acc']))
| mit | Python |
|
8b1e6b226d925d7f2ef4890463122ec8046aa07a | add test | anvanza/invenavi,anvanza/invenavi,anvanza/invenavi | sensor/test_compass.py | sensor/test_compass.py | #! /usr/bin/python
from Adafruit_LSM303 import LSM303
lsm = LSM303()
while 1:
print lsm.read() | mit | Python |
|
a5dbda3f429d0a1e6cb4fc28b2a620dc2b40fd59 | Resolve import dependency in consoleauth service | scripnichenko/nova,Tehsmash/nova,angdraug/nova,edulramirez/nova,mikalstill/nova,yatinkumbhare/openstack-nova,projectcalico/calico-nova,viggates/nova,devendermishrajio/nova_test_latest,varunarya10/nova_test_latest,alexandrucoman/vbox-nova-driver,sebrandon1/nova,JioCloud/nova,gooddata/openstack-nova,NeCTAR-RC/nova,gooddata/openstack-nova,mikalstill/nova,alexandrucoman/vbox-nova-driver,dims/nova,JioCloud/nova,zhimin711/nova,saleemjaveds/https-github.com-openstack-nova,yatinkumbhare/openstack-nova,projectcalico/calico-nova,cloudbase/nova-virtualbox,zhimin711/nova,CCI-MOC/nova,devendermishrajio/nova_test_latest,JianyuWang/nova,silenceli/nova,Juniper/nova,Juniper/nova,tangfeixiong/nova,CCI-MOC/nova,yosshy/nova,dims/nova,CEG-FYP-OpenStack/scheduler,tangfeixiong/nova,maelnor/nova,TwinkleChawla/nova,bgxavier/nova,takeshineshiro/nova,silenceli/nova,hanlind/nova,tianweizhang/nova,klmitch/nova,jianghuaw/nova,vmturbo/nova,yosshy/nova,Yusuke1987/openstack_template,bgxavier/nova,takeshineshiro/nova,MountainWei/nova,eayunstack/nova,jeffrey4l/nova,blueboxgroup/nova,saleemjaveds/https-github.com-openstack-nova,Yusuke1987/openstack_template,mahak/nova,Metaswitch/calico-nova,CEG-FYP-OpenStack/scheduler,maelnor/nova,blueboxgroup/nova,eayunstack/nova,jeffrey4l/nova,redhat-openstack/nova,MountainWei/nova,redhat-openstack/nova,Metaswitch/calico-nova,TwinkleChawla/nova,mikalstill/nova,hanlind/nova,tianweizhang/nova,klmitch/nova,jianghuaw/nova,vmturbo/nova,mahak/nova,Francis-Liu/animated-broccoli,mikalstill/nova,Francis-Liu/animated-broccoli,petrutlucian94/nova,watonyweng/nova,belmiromoreira/nova,mgagne/nova,Tehsmash/nova,scripnichenko/nova,petrutlucian94/nova,watonyweng/nova,belmiromoreira/nova,mgagne/nova,zaina/nova,orbitfp7/nova,vladikr/nova_drafts,zaina/nova,orbitfp7/nova,vladikr/nova_drafts,apporc/nova,apporc/nova,BeyondTheClouds/nova,BeyondTheClouds/nova,BeyondTheClouds/nova,adelina-t/nova,adelina-t/nova,kimjaejoong/nova,kimjaejoong/nova,nikesh-mahalka/nova,nikesh-mahalka/nova,alvarolopez/nova,alvarolopez/nova,devendermishrajio/nova,devendermishrajio/nova,whitepages/nova,whitepages/nova,bigswitch/nova,bigswitch/nova,phenoxim/nova,phenoxim/nova,eonpatapon/nova,eonpatapon/nova,gooddata/openstack-nova,gooddata/openstack-nova,JioCloud/nova_test_latest,JioCloud/nova_test_latest,vmturbo/nova,vmturbo/nova,openstack/nova,openstack/nova,openstack/nova,rahulunair/nova,rahulunair/nova,rahulunair/nova,sebrandon1/nova,sebrandon1/nova,cloudbase/nova,cloudbase/nova,cloudbase/nova,berrange/nova,berrange/nova,alaski/nova,alaski/nova,angdraug/nova,thomasem/nova,thomasem/nova,NeCTAR-RC/nova,CloudServer/nova,CloudServer/nova,Stavitsky/nova,Stavitsky/nova,shail2810/nova,shail2810/nova,rajalokan/nova,rajalokan/nova,rajalokan/nova,rajalokan/nova,mmnelemane/nova,mmnelemane/nova,alexandrucoman/vbox-nova-driver,badock/nova,badock/nova,virtualopensystems/nova,virtualopensystems/nova,tudorvio/nova,tudorvio/nova,barnsnake351/nova,barnsnake351/nova,edulramirez/nova,fnordahl/nova,scripnichenko/nova,double12gzh/nova,double12gzh/nova,dawnpower/nova,varunarya10/nova_test_latest,zzicewind/nova,zzicewind/nova,noironetworks/nova,noironetworks/nova,Juniper/nova,Juniper/nova,ted-gould/nova,ted-gould/nova,JioCloud/nova,viggates/nova,ruslanloman/nova,ruslanloman/nova,isyippee/nova,isyippee/nova,tealover/nova,klmitch/nova,felixma/nova,felixma/nova,j-carpentier/nova,j-carpentier/nova,akash1808/nova_test_latest,akash1808/nova_test_latest,iuliat/nova,iuliat/nova,raildo/nova,raildo/nova,jianghuaw/nova,jianghuaw/nova,Stavitsky/nova,cyx1231st/nova,cyx1231st/nova,fnordahl/nova,cloudbase/nova,tealover/nova,viggates/nova,ruslanloman/nova,akash1808/nova,akash1808/nova,LoHChina/nova,LoHChina/nova,dawnpower/nova,joker946/nova,joker946/nova,JianyuWang/nova | nova/cmd/consoleauth.py | nova/cmd/consoleauth.py | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VNC Console Proxy Server."""
import sys
from oslo.config import cfg
from nova import config
from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common.report import guru_meditation_report as gmr
from nova import service
from nova import version
CONF = cfg.CONF
def main():
config.parse_args(sys.argv)
logging.setup("nova")
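    # Register all nova objects up front; per the commit subject, this
    # resolves the consoleauth service's import dependency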
objects.register_all()
gmr.TextGuruMeditation.setup_autorun(version)
server = service.Service.create(binary='nova-consoleauth',
topic=CONF.consoleauth_topic)
service.serve(server)
service.wait()
| # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VNC Console Proxy Server."""
import sys
from oslo.config import cfg
from nova import config
from nova.openstack.common import log as logging
from nova.openstack.common.report import guru_meditation_report as gmr
from nova import service
from nova import version
CONF = cfg.CONF
def main():
config.parse_args(sys.argv)
logging.setup("nova")
gmr.TextGuruMeditation.setup_autorun(version)
server = service.Service.create(binary='nova-consoleauth',
topic=CONF.consoleauth_topic)
service.serve(server)
service.wait()
| apache-2.0 | Python |
23b2578fadd8a7ee0885e9956a10667d647acaf8 | add basic test for bist | cr1901/HDMI2USB-litex-firmware,cr1901/HDMI2USB-litex-firmware,mithro/HDMI2USB-litex-firmware,cr1901/HDMI2USB-litex-firmware,mithro/HDMI2USB-litex-firmware,cr1901/HDMI2USB-litex-firmware,mithro/HDMI2USB-litex-firmware,mithro/HDMI2USB-litex-firmware | test/test_bist.py | test/test_bist.py | #!/usr/bin/env python3
from litex.soc.tools.remote import RemoteClient
wb = RemoteClient(csr_data_width=8)
wb.open()
regs = wb.regs
# # #
test_size = 128*1024*1024
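# Generator pass: write the test pattern over the region and wait for completion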
regs.generator_reset.write(1)
regs.generator_reset.write(0)
regs.generator_base.write(0)
regs.generator_length.write((test_size*8)//128)
regs.generator_shoot.write(1)
while(not regs.generator_done.read()):
pass
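# Checker pass: read the region back and count mismatching words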
regs.checker_reset.write(1)
regs.checker_reset.write(0)
regs.checker_base.write(0)
regs.checker_length.write((test_size*8)//128)
regs.checker_shoot.write(1)
while(not regs.checker_done.read()):
pass
print("errors: {:d}".format(regs.checker_error_count.read()))
# # #
wb.close()
| bsd-2-clause | Python |
|
f070b3c9a97b16aebc8500af703ed713e170f519 | Fix Dask-on-Ray test: Python 3 dictionary .values() is a view, and is not indexable (#13945) | pcmoritz/ray-1,ray-project/ray,ray-project/ray,pcmoritz/ray-1,pcmoritz/ray-1,pcmoritz/ray-1,ray-project/ray,pcmoritz/ray-1,pcmoritz/ray-1,ray-project/ray,ray-project/ray,pcmoritz/ray-1,ray-project/ray,pcmoritz/ray-1,ray-project/ray,ray-project/ray | python/ray/tests/test_dask_scheduler.py | python/ray/tests/test_dask_scheduler.py | import dask
import numpy as np
import dask.array as da
import pytest
import ray
from ray.util.dask import ray_dask_get
def test_ray_dask_basic(ray_start_regular_shared):
@ray.remote
def stringify(x):
return "The answer is {}".format(x)
zero_id = ray.put(0)
def add(x, y):
# Can retrieve ray objects from inside Dask.
zero = ray.get(zero_id)
# Can call Ray methods from inside Dask.
return ray.get(stringify.remote(x + y + zero))
add = dask.delayed(add)
@ray.remote
def call_add():
z = add(2, 4)
# Can call Dask graphs from inside Ray.
return z.compute(scheduler=ray_dask_get)
ans = ray.get(call_add.remote())
assert ans == "The answer is 6", ans
def test_ray_dask_persist(ray_start_regular_shared):
arr = da.ones(5) + 2
result = arr.persist(scheduler=ray_dask_get)
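    # dict.values() is a view in Python 3 and not indexable, so take the
    # first value via iter()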
np.testing.assert_array_equal(
next(iter(result.dask.values())),
np.ones(5) + 2)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| import dask
import numpy as np
import dask.array as da
import pytest
import ray
from ray.util.dask import ray_dask_get
def test_ray_dask_basic(ray_start_regular_shared):
@ray.remote
def stringify(x):
return "The answer is {}".format(x)
zero_id = ray.put(0)
def add(x, y):
# Can retrieve ray objects from inside Dask.
zero = ray.get(zero_id)
# Can call Ray methods from inside Dask.
return ray.get(stringify.remote(x + y + zero))
add = dask.delayed(add)
@ray.remote
def call_add():
z = add(2, 4)
# Can call Dask graphs from inside Ray.
return z.compute(scheduler=ray_dask_get)
ans = ray.get(call_add.remote())
assert ans == "The answer is 6", ans
def test_ray_dask_persist(ray_start_regular_shared):
arr = da.ones(5) + 2
result = arr.persist(scheduler=ray_dask_get)
np.testing.assert_array_equal(result.dask.values()[0], np.ones(5) + 2)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| apache-2.0 | Python |
8e91c1fa76382f3b2568c425b41339f5597f9268 | Add bound and brake solver (initial raw implementation) | Cosiek/KombiVojager | solvers/BoundAndBrake.py | solvers/BoundAndBrake.py | #!/usr/bin/env python
# encoding: utf-8
from collections import deque
from copy import deepcopy
from itertools import permutations
from random import shuffle
from base_solver import BaseSolver
INF = float('inf')
class PartialSolution(object):
lower_bound = INF
upper_bound = INF
partial_route = []
done = False
def __init__(self, partial_route=[]):
self.partial_route = partial_route
def build(self, task, ancestor, next_stop):
self.partial_route = ancestor.partial_route[:]
self.partial_route.insert(-1, next_stop)
self.lower_bound = task.get_path_distance(self.partial_route)
upper_bound_route = (
self.partial_route[:-1] +
list(set(task.all_nodes.keys()) - set(self.partial_route)) +
[self.partial_route[-1],]
)
self.upper_bound = task.get_path_distance(upper_bound_route)
if self.lower_bound == self.upper_bound:
self.done = True
class BoundAndBrakeDepthFirstSearch(BaseSolver):
    deterministic = False # actually its distance is deterministic,
                          # but its running time isn't.
# helper
sort_key = lambda self, x: x.upper_bound
cycles = 0
def __init__(self, *args, **kwargs):
        super(BoundAndBrakeDepthFirstSearch, self).__init__(*args, **kwargs)
def run_search(self):
self.current_best = self.get_random_solution()
self.current_score = self.task.get_path_distance(self.current_best)
solution = PartialSolution([self.task.start.name, self.task.finish.name])
solution.lower_bound = self.current_score
self.best_upper = solution
self.to_check = deque([solution,])
self.traverse()
return self.current_best, self.current_score, self.cycles
def traverse(self):
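        # Depth-first branch and bound: pop the most recently added partial
        # route, extend it with each unused stop, and prune branches whose
        # lower bound can no longer beat the best complete route so far.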
while 1:
try:
solution = self.to_check.pop()
except IndexError:
# all solutions have been checked - this is the end
break
# check if this solution is still worth checking
if not (solution.lower_bound <= self.current_score
and solution.lower_bound < self.best_upper.upper_bound):
# if not, then continue...
continue
self.cycles += 1
partials = []
# iterate over unused stops...
for stop in (set(self.task.all_nodes.keys()) - set(solution.partial_route)):
# and create partial solutions
partial = PartialSolution()
partial.build(self.task, solution, stop)
# check if this is a full solution...
if partial.done:
# ... and if it is the best so far
if partial.lower_bound < self.current_score:
self.current_best = partial.partial_route
self.current_score = partial.lower_bound
# if solutions lower bound is lower then current_best, and lower
# then best partial solutions upper bound...
elif (partial.lower_bound < self.current_score
and partial.lower_bound < self.best_upper.upper_bound):
# ...then add it to the list of potential best solutions
partials.append(partial)
# otherwise - forget about it
else:
pass
partials.sort(key=self.sort_key)
self.to_check.extend(partials)
def get_random_solution(self):
route = [n.name for n in self.task.mid_nodes]
shuffle(route)
route = [self.task.start.name, ] + route
route.append(self.task.finish.name)
return route
| mit | Python |
|
5e008ac92016a092c1ce9c9590a79d72f4cf1cf6 | Initialize tests | kshvmdn/cobalt-uoft-python | tests/__main__.py | tests/__main__.py | import unittest
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
0e3effc3a7402d3b4c1b2c91539c4d1004c5b0e3 | Add test_traitscli.py | tkf/traitscli,tkf/traitscli | test_traitscli.py | test_traitscli.py | import unittest
from traits.api import Event, Callable, Type
from traitscli import TraitsCLIBase
from sample import SampleCLI
class TestingCLIBase(TraitsCLIBase):
def do_run(self):
# Get trait attribute names
names = self.class_trait_names(
# Avoid 'trait_added' and 'trait_modified'
# (See also `HasTraits.traits`):
trait_type=lambda t: not isinstance(t, Event))
self.attributes = dict((n, getattr(self, n)) for n in names)
class TestCaseBase(unittest.TestCase):
cliclass = None
"""Subclass of `TraitsCLIBase`."""
def assert_attributes(self, attributes, args=[]):
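        # Run the CLI with the given argv and compare the collected trait values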
ret = self.cliclass.cli(args)
self.assertEqual(ret.attributes, attributes)
class TestSampleCLI(TestCaseBase):
class cliclass(TestingCLIBase, SampleCLI):
pass
def test_empty_args(self):
self.assert_attributes(dict(
yes=False,
no=True,
fnum=0.0,
inum=0,
string='',
choice='a',
not_configurable_from_cli=False,
))
def test_full_args(self):
self.assert_attributes(
dict(
yes=True,
no=False,
fnum=0.2,
inum=2,
string='some string',
choice='b',
not_configurable_from_cli=False,
),
['--yes', '--no',
'--fnum', '0.2',
'--inum', '2',
'--string', 'some string',
'--choice', 'b',
])
def test_invalid_type_int(self):
self.assertRaises(SystemExit, self.cliclass.cli, ['--inum', 'x'])
def test_invalid_type_float(self):
self.assertRaises(SystemExit, self.cliclass.cli, ['--fnum', 'x'])
def test_invalid_type_enum(self):
self.assertRaises(SystemExit, self.cliclass.cli, ['--choice', 'x'])
class TestEvalType(TestCaseBase):
class cliclass(TestingCLIBase):
callable = Callable(config=True)
type = Type(config=True)
def test_full_args(self):
self.assert_attributes(
dict(
callable=id,
type=int,
),
['--callable', 'id',
'--type', 'int',
])
| bsd-3-clause | Python |
|
31622652980f603ddc308dff514eae65635eb318 | Add serializers to serialize Image to: - A PIL image (optionally resized) - A binary object (optionally resized) | comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django | app/grandchallenge/retina_api/serializers.py | app/grandchallenge/retina_api/serializers.py | from io import BytesIO
import SimpleITK as sitk
from PIL import Image as PILImage
from django.http import Http404
from rest_framework import serializers
class PILImageSerializer(serializers.BaseSerializer):
"""
Read-only serializer that returns a PIL image from a Image instance.
If "width" and "height" are passed as extra serializer content, the
PIL image will be resized to those dimensions.
"""
def to_representation(self, instance):
image_itk = instance.get_sitk_image()
if image_itk is None:
raise Http404
pil_image = self.convert_itk_to_pil(image_itk)
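        # Resize only when the serializer context supplies width/height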
try:
pil_image.thumbnail(
(self.context["width"], self.context["height"]),
PILImage.ANTIALIAS,
)
except KeyError:
pass
return pil_image
@staticmethod
def convert_itk_to_pil(image_itk):
depth = image_itk.GetDepth()
image_nparray = sitk.GetArrayFromImage(image_itk)
if depth > 0:
# Get center slice of image if 3D
image_nparray = image_nparray[depth // 2]
return PILImage.fromarray(image_nparray)
class BytesImageSerializer(PILImageSerializer):
"""
Read-only serializer that returns a BytesIO image from an Image instance.
"""
def to_representation(self, instance):
image_pil = super().to_representation(instance)
return self.create_thumbnail_as_bytes_io(image_pil)
@staticmethod
def create_thumbnail_as_bytes_io(image_pil):
buffer = BytesIO()
image_pil.save(buffer, format="png")
return buffer.getvalue()
| apache-2.0 | Python |
|
101a4c1288ddadbad6dbe0186adde3921ef2546f | add ctrl-c handler | DexterLB/bookrat,DexterLB/bookrat,DexterLB/bookrat,DexterLB/bookrat,DexterLB/bookrat | lib/ctrlc.py | lib/ctrlc.py | import sys
import time
import signal
class CtrlC:
pressed = False
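    # SIGINT handler: the first press only sets a flag so the main loop can
    # finish cleanly; a second press exits immediately.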
@classmethod
    def handle(cls, signum, frame):  # renamed to avoid shadowing the signal module
print('Ctrl-C pressed, will exit soon')
if cls.pressed:
print('Ctrl-C pressed twice. Committing violent suicide.')
sys.exit(1)
cls.pressed = True
signal.signal(signal.SIGINT, CtrlC.handle)
if __name__ == '__main__':
time.sleep(2)
if CtrlC.pressed:
print('yay')
time.sleep(2)
| mit | Python |
|
1298cf9c7a40ce73d46067035ded2318c62f7380 | Add simple tests for DrsSymbol and DrsIndexed | tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge | tests/drs_test.py | tests/drs_test.py | """Tests for drudge scripts."""
from sympy import Symbol, IndexedBase
from drudge.drs import DrsSymbol
from drudge.utils import sympy_key
#
# Unit tests for the utility classes and functions
# ------------------------------------------------
#
def test_basic_drs_symb():
"""Test the symbol class for basic operations.
"""
name = 'a'
ref = Symbol(name)
dict_ = {ref: 1}
symbs = [
DrsSymbol(None, name),
DrsSymbol([], name)
]
for i in symbs:
assert isinstance(i, DrsSymbol)
assert ref == i
assert i == ref
assert hash(ref) == hash(i)
assert dict_[i] == 1
assert sympy_key(ref) == sympy_key(i)
ref = Symbol(name + 'x')
for i in symbs:
assert ref != i
assert i != ref
assert hash(ref) != hash(i)
assert sympy_key(ref) != sympy_key(i)
def test_basic_drs_indexed():
"""Test basic properties of drudge script indexed object."""
base_name = 'a'
orig_base = IndexedBase(base_name)
for drudge in [None, []]:
matching_indices = [
(Symbol('x'), DrsSymbol(drudge, 'x')),
(
(Symbol('x'), Symbol('y')),
(DrsSymbol(drudge, 'x'), DrsSymbol(drudge, 'y'))
)
]
drs_base = DrsSymbol(drudge, base_name)
for orig_indices, drs_indices in matching_indices:
ref = orig_base[orig_indices]
for i in [
orig_base[drs_indices],
drs_base[orig_indices],
drs_base[drs_indices]
]:
assert ref == i
assert hash(ref) == hash(i)
assert sympy_key(ref) == sympy_key(i)
| mit | Python |
|
a412295b09481113d6f42565520d03ce8bfd36b8 | Create ECIScraper.py | mvineetmenon/ECIResultScraper | ECIScraper.py | ECIScraper.py | from bs4 import BeautifulSoup as bs
import httplib
class ECIScrapper:
def __init__(self, url):
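        # The URL arrives without a scheme (e.g. "host/path"); split the
        # host from the GET path for httplib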
self.url = url.split("/")[0]
self.getrequest = '/'.join(url.split('/')[1:])
print self.url, self.getrequest
self.connection = httplib.HTTPConnection(self.url)
self.connection.request("GET", '/'+self.getrequest)
self.response = self.connection.getresponse()
self.page = self.response.read()
self.soup = bs(self.page)
print self.soup.find_all('table', style="margin: auto; width: 100%; font-family: Verdana; border: solid 1px black;font-weight:lighter")
style = "margin: auto; width: 100%; font-family: Verdana; border: solid 1px black;font-weight:lighter"
def getData(self):
        print self.url, self.getrequest
if __name__=="__main__":
url = "eciresults.ap.nic.in/ConstituencywiseS2653.htm?ac=53"
ECIScrapper(url)
| mit | Python |
|
67018fa6dc38f0035b1ce17dee4a7840f37cab30 | Move documentation to Sphinx/RST | Juniper/libslax,Juniper/libslax,Juniper/libslax | doc/conf.py | doc/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# libslax documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 10 10:18:55 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'slax'
# General information about the project.
project = 'libslax'
copyright = '2017, Juniper Networks'
author = 'Phil Shafer'
default_role = 'code'
primary_domain = 'c'
smart_quotes = False
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.22.0'
# The full version, including alpha/beta/rc tags.
release = '0.22.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
"sidebarwidth": 320,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
alabaster_html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'libslax-manual'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'libslax.tex', 'libslax Documentation',
     'Phil Shafer', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'libslax', 'libslax Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'libslax', 'libslax Documentation',
     author, 'libslax', 'An implementation of the SLAX language, an alternate syntax for XSLT',
'Miscellaneous'),
]
| bsd-3-clause | Python |
|
1753de3492b76d9c13d72bde7f13c0f696499e3a | Add configuration of pytest with some fixtures related to tests based on fantasy example | vovanbo/aiohttp_json_api | tests/conftest.py | tests/conftest.py | import json
import socket
import uuid
import docker as libdocker
import pathlib
import invoke
import psycopg2
import pytest
import time
from jsonschema import Draft4Validator
DSN_FORMAT = 'postgresql://{user}:{password}@{host}:{port}/{dbname}'
@pytest.fixture(scope='session')
def session_id():
return str(uuid.uuid4())
@pytest.fixture(scope='session')
def docker():
return libdocker.APIClient()
@pytest.fixture(scope='session')
def unused_port():
def f():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('127.0.0.1', 0))
return s.getsockname()[1]
return f
@pytest.fixture(scope='session')
def here():
return pathlib.Path(__file__).parent
@pytest.yield_fixture(scope='session')
def pg_server(unused_port, session_id, docker):
docker_image = 'postgres:10-alpine'
database = 'example'
user = 'example'
password = 'somepassword'
port = unused_port()
host_config_options = {'port_bindings': {5432: port}}
host_config = dict(
tmpfs={'/var/lib/postgresql/data': ''},
**host_config_options
)
docker.pull(docker_image)
container = docker.create_container(
image=docker_image,
name=f'test-fantasy-example-{session_id}',
ports=[5432],
detach=True,
environment={
'POSTGRES_USER': user,
'POSTGRES_PASSWORD': password
},
host_config=docker.create_host_config(**host_config)
)
docker.start(container=container['Id'])
host = '0.0.0.0'
pg_params = dict(dbname=database,
user=user,
password=password,
host=host,
port=port,
connect_timeout=2)
delay = 0.001
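    # Poll until postgres accepts connections, doubling the delay each retry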
for i in range(20):
try:
conn = psycopg2.connect(**pg_params)
conn.close()
break
except psycopg2.Error:
time.sleep(delay)
delay *= 2
else:
pytest.fail("Cannot start postgres server")
inspection = docker.inspect_container(container['Id'])
container['host'] = inspection['NetworkSettings']['IPAddress']
container['port'] = 5432
container['pg_params'] = pg_params
yield container
docker.kill(container=container['Id'])
docker.remove_container(container['Id'])
@pytest.fixture(scope='session')
def pg_params(pg_server):
return dict(**pg_server['pg_params'])
@pytest.fixture(scope='session')
def populated_db(here, pg_params):
from examples.fantasy.tasks import populate_db
populate_db(
invoke.context.Context(),
data_folder=here.parent / 'examples' / 'fantasy' / 'fantasy-database',
dsn=DSN_FORMAT.format(**pg_params)
)
@pytest.fixture(scope='session')
def jsonapi_validator(here):
path = here / 'spec' / 'schema.dms'
with open(path) as fp:
schema = json.load(fp)
Draft4Validator.check_schema(schema)
return Draft4Validator(schema)
@pytest.fixture
async def fantasy_app(loop, pg_params, populated_db):
from examples.fantasy.main import init
return await init(DSN_FORMAT.format(**pg_params), loop=loop)
| mit | Python |
|
82617f295ed21c179bab6ad3c3c2af5c417f40ba | Install pandas and scipy from Anaconda as part of upgrade process. Provides final installation fix for burden testing code. #167 #191 | xuzetan/gemini,bpow/gemini,udp3f/gemini,bpow/gemini,xuzetan/gemini,bw2/gemini,bw2/gemini,brentp/gemini,arq5x/gemini,xuzetan/gemini,udp3f/gemini,bw2/gemini,bpow/gemini,brentp/gemini,bgruening/gemini,xuzetan/gemini,heuermh/gemini,arq5x/gemini,brentp/gemini,udp3f/gemini,bgruening/gemini,heuermh/gemini,bgruening/gemini,bgruening/gemini,udp3f/gemini,brentp/gemini,bw2/gemini,heuermh/gemini,heuermh/gemini,arq5x/gemini,arq5x/gemini,bpow/gemini | gemini/gemini_update.py | gemini/gemini_update.py | """Perform in-place updates of gemini and databases when installed into virtualenv.
"""
import os
import subprocess
import sys
import gemini.config
def release(parser, args):
"""Update gemini to the latest release, along with associated data files.
"""
url = "https://raw.github.com/arq5x/gemini/master/requirements.txt"
# update locally isolated python
pip_bin = os.path.join(os.path.dirname(sys.executable), "pip")
activate_bin = os.path.join(os.path.dirname(sys.executable), "activate")
conda_bin = os.path.join(os.path.dirname(sys.executable), "conda")
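    # Prefer a conda-based install; otherwise fall back to pip in a virtualenv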
if os.path.exists(conda_bin):
pkgs = ["cython", "distribute", "ipython", "nose", "numpy",
"pip", "pycrypto", "pyparsing", "pysam", "pyyaml",
"pyzmq", "pandas", "scipy"]
subprocess.check_call([conda_bin, "install", "--yes"] + pkgs)
elif os.path.exists(activate_bin):
subprocess.check_call([pip_bin, "install", "--upgrade", "distribute"])
else:
raise NotImplementedError("Can only upgrade gemini installed in anaconda or virtualenv")
# update libraries
#subprocess.check_call([pip_bin, "install", "-r", url])
# update datafiles
config = gemini.config.read_gemini_config()
install_script = os.path.join(os.path.dirname(__file__), "install-data.py")
subprocess.check_call([sys.executable, install_script, config["annotation_dir"]])
print "Gemini upgraded to latest version"
# update tests
test_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(pip_bin))),
"gemini")
if os.path.exists(test_dir) and os.path.exists(os.path.join(test_dir, "master-test.sh")):
os.chdir(test_dir)
subprocess.check_call(["git", "pull", "origin", "master"])
print "Run test suite with: cd %s && bash master-test.sh" % test_dir
| """Perform in-place updates of gemini and databases when installed into virtualenv.
"""
import os
import subprocess
import sys
import gemini.config
def release(parser, args):
"""Update gemini to the latest release, along with associated data files.
"""
url = "https://raw.github.com/arq5x/gemini/master/requirements.txt"
# update locally isolated python
pip_bin = os.path.join(os.path.dirname(sys.executable), "pip")
activate_bin = os.path.join(os.path.dirname(sys.executable), "activate")
conda_bin = os.path.join(os.path.dirname(sys.executable), "conda")
if os.path.exists(conda_bin):
pkgs = ["cython", "distribute", "ipython", "nose", "numpy",
"pip", "pycrypto", "pyparsing", "pysam", "pyyaml", "pyzmq"]
subprocess.check_call([conda_bin, "install", "--yes"] + pkgs)
elif os.path.exists(activate_bin):
subprocess.check_call([pip_bin, "install", "--upgrade", "distribute"])
else:
raise NotImplementedError("Can only upgrade gemini installed in anaconda or virtualenv")
# update libraries
#subprocess.check_call([pip_bin, "install", "-r", url])
# update datafiles
config = gemini.config.read_gemini_config()
install_script = os.path.join(os.path.dirname(__file__), "install-data.py")
subprocess.check_call([sys.executable, install_script, config["annotation_dir"]])
print "Gemini upgraded to latest version"
# update tests
test_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(pip_bin))),
"gemini")
if os.path.exists(test_dir) and os.path.exists(os.path.join(test_dir, "master-test.sh")):
os.chdir(test_dir)
subprocess.check_call(["git", "pull", "origin", "master"])
print "Run test suite with: cd %s && bash master-test.sh" % test_dir
| mit | Python |
41553e2c2a9ad7f2396e8492ce11d053c2fe5c7a | Add a console application template | dakside/pydemo,dakside/pydemo,letuananh/pydemo,letuananh/pydemo,dakside/pydemo | basic/template.py | basic/template.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
A template for writing Python application with MIT license.
Latest version can be found at https://github.com/letuananh/pydemo
References:
Python documentation:
https://docs.python.org/
argparse module:
https://docs.python.org/2/howto/argparse.html
PEP 257 - Python Docstring Conventions:
https://www.python.org/dev/peps/pep-0257/
@author: Le Tuan Anh <[email protected]>
'''
# Copyright (c) 2015, Le Tuan Anh <[email protected]>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
__author__ = "Le Tuan Anh <[email protected]>"
__copyright__ = "Copyright 2015, pydemo"
__credits__ = [ "Le Tuan Anh" ]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Le Tuan Anh"
__email__ = "<[email protected]>"
__status__ = "Prototype"
########################################################################
import sys
import os
import argparse
########################################################################
def echo(input_str):
print(input_str)
########################################################################
def main():
'''Main entry of this demo application.
'''
# It's easier to create a user-friendly console application by using argparse
# See reference at the top of this script
parser = argparse.ArgumentParser(description="Display a line of text.")
# Positional argument(s)
parser.add_argument('input', help='The string to be printed.')
# Optional argument(s)
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true")
group.add_argument("-q", "--quiet", action="store_true")
# Main script
if len(sys.argv) == 1:
# User didn't pass any value in, show help
parser.print_help()
else:
# Parse input arguments
args = parser.parse_args()
# Now do something ...
if args.verbose:
print("You have activated my talkative mode ...")
if args.input:
echo(args.input)
elif not args.quiet:
print("Eh, I have nothing to print (You can shut me up by passing in the option -q) ...")
if args.verbose:
print("Bye sweetie ...")
pass
if __name__ == "__main__":
main()
| mit | Python |
|
f69de6e6cf63f9b3770ffdf4da32ca2149006a2e | add fit test for record, test is renamed so nose doesn't run it | jasonmccampbell/scipy-refactor,lesserwhirls/scipy-cwt,scipy/scipy-svn,jasonmccampbell/scipy-refactor,lesserwhirls/scipy-cwt,lesserwhirls/scipy-cwt,scipy/scipy-svn,jasonmccampbell/scipy-refactor,jasonmccampbell/scipy-refactor,lesserwhirls/scipy-cwt,scipy/scipy-svn,scipy/scipy-svn | scipy/stats/tests/test_fit.py | scipy/stats/tests/test_fit.py | # NOTE: contains only one test, _est_cont_fit, that is renamed so that
# nose doesn't run it
# I put this here for the record and for the case when someone wants to
# verify the quality of fit
# with current parameters:
import numpy.testing as npt
import numpy as np
from scipy import stats
from test_continuous_basic import distcont
# this is not a proper statistical test for convergence, but only
# verifies that the estimate and true values don't differ by too much
n_repl1 = 1000 # sample size for first run
n_repl2 = 5000 # sample size for second run, if first run fails
thresh_percent = 0.25 # percent of true parameters for fail cut-off
thresh_min = 0.75 # minimum difference estimate - true to fail test
#distcont = [['genextreme', (3.3184017469423535,)]]
def test_cont_fit():
# this tests the closeness of the estimated parameters to the true
# parameters with fit method of continuous distributions
# Note: is slow, some distributions don't converge with sample size <= 10000
for distname, arg in distcont:
yield check_cont_fit, distname,arg
def check_cont_fit(distname,arg):
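    # Fit on a small sample first; if the estimates miss the threshold,
    # retry once with a larger sample before failing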
distfn = getattr(stats, distname)
rvs = distfn.rvs(size=n_repl1,*arg)
est = distfn.fit(rvs) #,*arg) # start with default values
truearg = np.hstack([arg,[0.0,1.0]])
diff = est-truearg
txt = ''
diffthreshold = np.max(np.vstack([truearg*thresh_percent,
np.ones(distfn.numargs+2)*thresh_min]),0)
# threshold for location
diffthreshold[-2] = np.max([np.abs(rvs.mean())*thresh_percent,thresh_min])
if np.any(np.isnan(est)):
raise AssertionError, 'nan returned in fit'
else:
if np.any((np.abs(diff) - diffthreshold) > 0.0):
## txt = 'WARNING - diff too large with small sample'
## print 'parameter diff =', diff - diffthreshold, txt
rvs = np.concatenate([rvs,distfn.rvs(size=n_repl2-n_repl1,*arg)])
est = distfn.fit(rvs) #,*arg)
truearg = np.hstack([arg,[0.0,1.0]])
diff = est-truearg
if np.any((np.abs(diff) - diffthreshold) > 0.0):
txt = 'parameter: %s\n' % str(truearg)
txt += 'estimated: %s\n' % str(est)
txt += 'diff : %s\n' % str(diff)
raise AssertionError, 'fit not very good in %s\n' % distfn.name + txt
if __name__ == "__main__":
import nose
#nose.run(argv=['', __file__])
nose.runmodule(argv=[__file__,'-s'], exit=False)
| bsd-3-clause | Python |
|
6908f6cb06ed1d15510bc51780d4109f5bdb7423 | Add cs2cs_test.py to excercise the cs2cs binary via subprocess | schwehr/gdal-autotest2,schwehr/gdal-autotest2 | python/third_party/proj/cs2cs_test.py | python/third_party/proj/cs2cs_test.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the Proj cs2cs command line application."""
import os
import subprocess
import unittest
from pyglib import flags
from pyglib import resources
FLAGS = flags.FLAGS
class Cs2CsTest(unittest.TestCase):
def setUp(self):
self.cs2cs = os.path.join(resources.GetARootDirWithAllResources(),
'third_party/proj4/cs2cs')
def testHelp(self):
cmd = [self.cs2cs]
result = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
self.assertIn('usage:', result)
def testList(self):
cmd = [self.cs2cs, '-l']
result = subprocess.check_output(cmd)
self.assertIn('wintri : Winkel Tripel', result)
def testListLowerP(self):
cmd = [self.cs2cs, '-lp']
result = subprocess.check_output(cmd)
self.assertIn('wink2 : Winkel II', result)
def testListP(self):
# Detailed list
cmd = [self.cs2cs, '-lP']
result = subprocess.check_output(cmd)
self.assertIn('PCyl', result)
def testListEqual(self):
# Detailed list
cmd = [self.cs2cs, '-l=ups']
result = subprocess.check_output(cmd)
self.assertIn('Universal Polar Stereographic', result)
self.assertIn('Azi', result)
self.assertNotIn('PCyl', result)
self.assertNotIn('wintri', result)
def testListEllipsoidIdentifiers(self):
cmd = [self.cs2cs, '-le']
result = subprocess.check_output(cmd)
self.assertIn('evrst30', result)
self.assertIn('a=6377276.345', result)
self.assertIn('rf=300.8017', result)
self.assertIn('Everest 1830', result)
def testListUnits(self):
cmd = [self.cs2cs, '-lu']
result = subprocess.check_output(cmd)
self.assertIn('ch', result)
self.assertIn('20.1168', result)
self.assertIn('International Chain', result)
def testListDatums(self):
cmd = [self.cs2cs, '-ld']
result = subprocess.check_output(cmd)
self.assertIn('NAD27', result)
self.assertIn('clrk66', result)
self.assertIn('conus', result)
def testTransform(self):
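        # Round-trip a NAD83 geographic coordinate into NAD27 UTM zone 10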
cmd = [self.cs2cs, '+proj=latlong', '+datum=NAD83',
'+to', '+proj=utm', '+zone=10', '+datum=NAD27', '-r']
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Pass in latitude longitude to transform to UTM.
stdout, _ = proc.communicate('45d15\'33.1" 111.5W\n')
result = [float(val) for val in stdout.replace('\t', ' ').split(' ')]
self.assertEqual(len(result), 3)
self.assertAlmostEqual(result[0], 1402285.98, delta=0.001)
self.assertAlmostEqual(result[1], 5076292.42)
self.assertAlmostEqual(result[2], 0.0)
# TODO(schwehr): Add more tests
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python |
|
a911d8720ad7dd8bfff2fa4230e1a4cef1a232f5 | add logistic | nicepear/machine-learning | logRegres.py | logRegres.py | '''
Created on Oct 27, 2015
Logistic Regression Working Module
@author: Gu
'''
from numpy import *
def loadDataSet():
dataMat = []; labelMat = []
fr = open('testSet.txt')
for line in fr.readlines():
lineArr = line.strip().split()
dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
labelMat.append(int(lineArr[2]))
return dataMat,labelMat
def sigmoid(inX):
return 1.0/(1+exp(-inX))
def gradAscent(dataMatIn, classLabels):
dataMatrix = mat(dataMatIn) #convert to NumPy matrix
labelMat = mat(classLabels).transpose() #convert to NumPy matrix
m,n = shape(dataMatrix)
alpha = 0.001
maxCycles = 500
weights = ones((n,1))
for k in range(maxCycles): #heavy on matrix operations
h = sigmoid(dataMatrix*weights) #matrix mult
error = (labelMat - h) #vector subtraction
weights = weights + alpha * dataMatrix.transpose()* error #matrix mult
return weights
def plotBestFit(weights):
import matplotlib.pyplot as plt
dataMat,labelMat=loadDataSet()
dataArr = array(dataMat)
n = shape(dataArr)[0]
xcord1 = []; ycord1 = []
xcord2 = []; ycord2 = []
for i in range(n):
if int(labelMat[i])== 1:
xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])
else:
xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
ax.scatter(xcord2, ycord2, s=30, c='green')
x = arange(-3.0, 3.0, 0.1)
y = (-weights[0]-weights[1]*x)/weights[2]
ax.plot(x, y)
plt.xlabel('X1'); plt.ylabel('X2');
plt.show()
def stocGradAscent0(dataMatrix, classLabels):
m,n = shape(dataMatrix)
alpha = 0.01
weights = ones(n) #initialize to all ones
for i in range(m):
h = sigmoid(sum(dataMatrix[i]*weights))
error = classLabels[i] - h
weights = weights + alpha * error * dataMatrix[i]
return weights
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
m,n = shape(dataMatrix)
weights = ones(n) #initialize to all ones
for j in range(numIter):
dataIndex = range(m)
for i in range(m):
            alpha = 4/(1.0+j+i)+0.0001    #alpha decreases with iteration, does not
randIndex = int(random.uniform(0,len(dataIndex)))#go to 0 because of the constant
h = sigmoid(sum(dataMatrix[randIndex]*weights))
error = classLabels[randIndex] - h
weights = weights + alpha * error * dataMatrix[randIndex]
del(dataIndex[randIndex])
return weights
def classifyVector(inX, weights):
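    # Predict class 1 when the sigmoid of the weighted sum exceeds 0.5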
prob = sigmoid(sum(inX*weights))
if prob > 0.5: return 1.0
else: return 0.0
def colicTest():
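    # Train logistic regression on the horse colic training set and report
    # the error rate on the held-out test set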
frTrain = open('horseColicTraining.txt'); frTest = open('horseColicTest.txt')
trainingSet = []; trainingLabels = []
for line in frTrain.readlines():
currLine = line.strip().split('\t')
lineArr =[]
for i in range(21):
lineArr.append(float(currLine[i]))
trainingSet.append(lineArr)
trainingLabels.append(float(currLine[21]))
trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 1000)
errorCount = 0; numTestVec = 0.0
for line in frTest.readlines():
numTestVec += 1.0
currLine = line.strip().split('\t')
lineArr =[]
for i in range(21):
lineArr.append(float(currLine[i]))
if int(classifyVector(array(lineArr), trainWeights))!= int(currLine[21]):
errorCount += 1
errorRate = (float(errorCount)/numTestVec)
print ("the error rate of this test is: %f" % errorRate)
return errorRate
def multiTest():
numTests = 10; errorSum=0.0
for k in range(numTests):
errorSum += colicTest()
print ("after %d iterations the average error rate is: %f" % (numTests, errorSum/float(numTests)))
weights=ones((3,1))
print(weights)
| mit | Python |
|
7720fbc1d8a81430c38598fd96b95d8b4da4a74c | fix a bug about can not import ChineseAnalyzer with change tab to 4 wihte spaces under PEP8 | wtl-zju/jieba,dennisit/jieba,lcwrey/jieba,WangYueFt/jieba,sx4452/jieba,Yinzo/jieba,Acanzyh/jieba,jango2015/jieba,insflow/jieba,HUANG-YI-CHEN/jieba,bladehd/jieba,GaussDing/jieba,beni55/jieba,liqueur/jieba,lcwrey/jieba,xuerenlv/jieba,PegasusWang/jieba,fbukevin/jieba,polossk/jieba,ZhangYet/jieba,tigerneil/jieba,didoteebin/jieba,kevan/jieba,Ph0enixxx/jieba,wfxiang08/jieba,chenshiyang/jieba,laixintao/jieba,w15971597/jieba,DotNetAge/jieba,lsqtongxin/jieba,JsNoNo/jieba,GuillaumeSalha/jieba,swkyer/jieba,oxmcvusd/jieba,visaxin/jieba,PoisonBOx/jieba,felixonmars/jieba,leisurelicht/jieba,xuecai/jieba,mujiansu/jieba,XDF-server/jieba,ywang2014/jieba,xiao26/jieba,gasongjian/jieba,nervenXC/jieba,abandons/jieba,wangg12/jieba,mavarick/jieba,digoal/jieba,hncg/jieba,dong-y/jieba,liufuqiang/jieba,zhoulingjun/jieba,fxsjy/jieba,bearlin/jieba,jayfans3/jieba,gumblex/jieba,t-k-/jieba,a9261/jieba,witcxc/jieba,implus/jieba,popbones/jieba,ycchuang/jieba,Liaoqinchao/jieba | jieba/analyse/__init__.py | jieba/analyse/__init__.py | import jieba
import os
try:
from analyzer import ChineseAnalyzer
except ImportError:
pass
_curpath=os.path.normpath( os.path.join( os.getcwd(), os.path.dirname(__file__) ) )
f_name = os.path.join(_curpath,"idf.txt")
content = open(f_name,'rb').read().decode('utf-8')
idf_freq = {}
lines = content.split('\n')
for line in lines:
word,freq = line.split(' ')
idf_freq[word] = float(freq)
median_idf = sorted(idf_freq.values())[len(idf_freq)/2]
stop_words= set([
"the","of","is","and","to","in","that","we","for","an","are","by","be","as","on","with","can","if","from","which","you","it","this","then","at","have","all","not","one","has","or","that"
])
def extract_tags(sentence,topK=20):
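    # Rank words by TF-IDF: in-sentence term frequency times corpus IDF,
    # falling back to the median IDF for words missing from idf.txt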
words = jieba.cut(sentence)
freq = {}
for w in words:
if len(w.strip())<2: continue
if w.lower() in stop_words: continue
freq[w]=freq.get(w,0.0)+1.0
total = sum(freq.values())
freq = [(k,v/total) for k,v in freq.iteritems()]
tf_idf_list = [(v * idf_freq.get(k,median_idf),k) for k,v in freq]
st_list = sorted(tf_idf_list,reverse=True)
top_tuples= st_list[:topK]
tags = [a[1] for a in top_tuples]
return tags
| import jieba
import os
try:
from analyzer import ChineseAnalyzer
except ImportError:
pass
_curpath=os.path.normpath( os.path.join( os.getcwd(), os.path.dirname(__file__) ) )
f_name = os.path.join(_curpath,"idf.txt")
content = open(f_name,'rb').read().decode('utf-8')
idf_freq = {}
lines = content.split('\n')
for line in lines:
word,freq = line.split(' ')
idf_freq[word] = float(freq)
median_idf = sorted(idf_freq.values())[len(idf_freq)/2]
stop_words= set([
"the","of","is","and","to","in","that","we","for","an","are","by","be","as","on","with","can","if","from","which","you","it","this","then","at","have","all","not","one","has","or","that"
])
def extract_tags(sentence,topK=20):
words = jieba.cut(sentence)
freq = {}
for w in words:
if len(w.strip())<2: continue
if w.lower() in stop_words: continue
freq[w]=freq.get(w,0.0)+1.0
total = sum(freq.values())
freq = [(k,v/total) for k,v in freq.iteritems()]
tf_idf_list = [(v * idf_freq.get(k,median_idf),k) for k,v in freq]
st_list = sorted(tf_idf_list,reverse=True)
top_tuples= st_list[:topK]
tags = [a[1] for a in top_tuples]
return tags
| mit | Python |
ec96ce58076ba5aa54abeb423937a629cbe1e3d5 | Work in progress | Commonists/DonationsLogParser,Commonists/DonationsLogParser | logparser.py | logparser.py | #!/usr/bin/python
""" Log parser. """
from HTMLParser import HTMLParser
import urllib
class DailyParser(HTMLParser):
"""
HTML parser for the donations log of Wikimedia France
Attributes:
status (int): status variable of the parser.
donations (list data.Donation): list of donations read.
"""
START_PARSER = 0
FOUND_DONATION_TABLE = 1
READ_HOURS = 2
READ_DONATOR = 3
READ_DONATION = 4
END_OF_DONATION_TABLE = 5
def __init__(self):
        HTMLParser.__init__(self)  # HTMLParser is old-style in py2, so super() would fail
self.status = DailyParser.START_PARSER
self.donations = []
def handle_starttag(self, tag, attrs):
pass
def handle_endtag(self, tag):
pass
def handle_data(self, data):
pass
class LogParser:
def __init__(self):
self.parser = DailyParser()
@staticmethod
def daypage(day):
""" Returns the page content containing the donations from a specific
day.
Args:
day (datetime.date): day to fetch donation.
Returns:
str: page content with the donation of the day specified as args.
"""
        url_args = day.strftime("%Y-%m-%d")
url = "https://dons.wikimedia.fr/journal/%s" % url_args
return urllib.urlopen(url).read()
def fetchday(self, day):
""" Returns donations from a day. """
day_content = self.daypage(day)
self.parser.feed(day_content)
| mit | Python |
|
162b82b64d319e0c854c08b3bd2e412ab5e67d97 | add pytables testing file | cowlicks/blaze,scls19fr/blaze,alexmojaki/blaze,maxalbert/blaze,jcrist/blaze,cpcloud/blaze,cpcloud/blaze,mrocklin/blaze,xlhtc007/blaze,cowlicks/blaze,mrocklin/blaze,dwillmer/blaze,xlhtc007/blaze,jdmcbr/blaze,caseyclements/blaze,jdmcbr/blaze,ChinaQuants/blaze,LiaoPan/blaze,dwillmer/blaze,ChinaQuants/blaze,scls19fr/blaze,ContinuumIO/blaze,maxalbert/blaze,alexmojaki/blaze,ContinuumIO/blaze,nkhuyu/blaze,nkhuyu/blaze,jcrist/blaze,LiaoPan/blaze,caseyclements/blaze | blaze/compute/tests/test_pytables_compute.py | blaze/compute/tests/test_pytables_compute.py | from __future__ import absolute_import, division, print_function
import pytest
tables = pytest.importorskip('tables')
import numpy as np
import tempfile
from contextlib import contextmanager
import os
from blaze.compute.core import compute
from blaze.compute.pytables import *
from blaze.compute.numpy import *
from blaze.expr.table import *
from blaze.compatibility import xfail
t = TableSymbol('t', '{id: int, name: string, amount: int}')
x = np.array([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
dtype=[('id', '<i8'), ('name', 'S7'), ('amount', '<i8')])
@contextmanager
def data():
filename = tempfile.mktemp()
f = tables.open_file(filename, 'w')
d = f.createTable('/', 'title', x)
yield d
d.close()
f.close()
os.remove(filename)
def eq(a, b):
return (a == b).all()
def test_table():
with data() as d:
assert compute(t, d) == d
def test_projection():
with data() as d:
assert eq(compute(t['name'], d), x['name'])
@xfail(reason="ColumnWise not yet supported")
def test_eq():
with data() as d:
assert eq(compute(t['amount'] == 100, d),
x['amount'] == 100)
def test_selection():
with data() as d:
assert eq(compute(t[t['amount'] == 100], d), x[x['amount'] == 0])
assert eq(compute(t[t['amount'] < 0], d), x[x['amount'] < 0])
@xfail(reason="ColumnWise not yet supported")
def test_arithmetic():
with data() as d:
assert eq(compute(t['amount'] + t['id'], d),
x['amount'] + x['id'])
assert eq(compute(t['amount'] * t['id'], d),
x['amount'] * x['id'])
assert eq(compute(t['amount'] % t['id'], d),
x['amount'] % x['id'])
def test_Reductions():
with data() as d:
assert compute(t['amount'].count(), d) == len(x['amount'])
@xfail(reason="TODO: sorting could work if on indexed column")
def test_sort():
with data() as d:
assert eq(compute(t.sort('amount'), d),
np.sort(x, order='amount'))
assert eq(compute(t.sort('amount', ascending=False), d),
np.sort(x, order='amount')[::-1])
assert eq(compute(t.sort(['amount', 'id']), d),
np.sort(x, order=['amount', 'id']))
def test_head():
with data() as d:
assert eq(compute(t.head(2), d),
x[:2])
| bsd-3-clause | Python |
|
67df732067847af15e41b8eed05137b6ab2bb6d2 | add __version__ (forgot to commit) | marcelm/cutadapt,Chris7/cutadapt | libcutadapt/__init__.py | libcutadapt/__init__.py | __version__ = '0.9.2'
| mit | Python |
|
188d583caea0e640f41e400839552fe593154eda | Set 2, challenge 9 completed. | walshman23/Cryptopals | set2/crypto9.py | set2/crypto9.py | #!/usr/local/bin/python
__author__ = 'Walshman23'
import sys
sys.path.insert(1, "../common") # Want to locate modules in our 'common' directory
# A block cipher transforms a fixed-sized block (usually 8 or 16 bytes) of plaintext into ciphertext.
# But we almost never want to transform a single block; we encrypt irregularly-sized messages.
#
# One way we account for irregularly-sized messages is by padding, creating a plaintext that is an even
# multiple of the blocksize. The most popular padding scheme is called PKCS#7.
#
# So: pad any block to a specific block length, by appending the number of bytes of padding to the end of the block.
# For instance,
#
# "YELLOW SUBMARINE"
#
# ... padded to 20 bytes would be:
#
# "YELLOW SUBMARINE\x04\x04\x04\x04"
# Get block from stdin
# Use 16 as block size
blocksize=16
buf = sys.stdin.read()
# Bytes needed to reach the next block boundary (0 if already aligned;
# strict PKCS#7 would append a full block of padding in that case)
if len(buf) % blocksize == 0:
    padlen = 0
else:
    padlen = blocksize - (len(buf) % blocksize)
sys.stdout.write(buf)
if padlen != 0:
sys.stdout.write(chr(padlen)*padlen)
| bsd-3-clause | Python |
|
ed33a8dc90468f2873a4a581c22027f10d9393d4 | Add Wordpress_2_Instances testcase | dims/heat,redhat-openstack/heat,cryptickp/heat,rickerc/heat_audit,openstack/heat,cwolferh/heat-scratch,maestro-hybrid-cloud/heat,Triv90/Heat,cryptickp/heat,miguelgrinberg/heat,varunarya10/heat,varunarya10/heat,JioCloud/heat,takeshineshiro/heat,openstack/heat,steveb/heat,Triv90/Heat,miguelgrinberg/heat,jasondunsmore/heat,dims/heat,pratikmallya/heat,rickerc/heat_audit,jasondunsmore/heat,rh-s/heat,cwolferh/heat-scratch,pshchelo/heat,citrix-openstack-build/heat,steveb/heat,citrix-openstack-build/heat,rh-s/heat,ntt-sic/heat,dragorosson/heat,takeshineshiro/heat,pshchelo/heat,NeCTAR-RC/heat,gonzolino/heat,pratikmallya/heat,redhat-openstack/heat,maestro-hybrid-cloud/heat,ntt-sic/heat,noironetworks/heat,rdo-management/heat,NeCTAR-RC/heat,JioCloud/heat,dragorosson/heat,Triv90/Heat,rdo-management/heat,noironetworks/heat,srznew/heat,gonzolino/heat,srznew/heat | heat/tests/functional/test_WordPress_2_Intances.py | heat/tests/functional/test_WordPress_2_Intances.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import util
import verify
import nose
from nose.plugins.attrib import attr
from heat.common import context
from heat.engine import manager
import unittest
import os
@attr(speed='slow')
@attr(tag=['func', 'wordpress', '2instance', 'ebs',
'WordPress_2_Instances.template'])
class WordPress2Instances(unittest.TestCase):
def setUp(self):
template = 'WordPress_2_Instances.template'
stack_paramstr = ';'.join(['InstanceType=m1.xlarge',
'DBUsername=dbuser',
'DBPassword=' + os.environ['OS_PASSWORD']])
self.stack = util.Stack(template, 'F17', 'x86_64', 'cfntools',
stack_paramstr)
self.DatabaseServer = util.Instance('DatabaseServer')
self.DatabaseServer.check_cfntools()
self.DatabaseServer.wait_for_provisioning()
self.WebServer = util.Instance('WebServer')
self.WebServer.check_cfntools()
self.WebServer.wait_for_provisioning()
def test_instance(self):
# ensure wordpress was installed
self.assertTrue(self.WebServer.file_present
('/etc/wordpress/wp-config.php'))
print "Wordpress installation detected"
# Verify the output URL parses as expected, ie check that
# the wordpress installation is operational
stack_url = self.stack.get_stack_output("WebsiteURL")
print "Got stack output WebsiteURL=%s, verifying" % stack_url
ver = verify.VerifyStack()
self.assertTrue(ver.verify_wordpress(stack_url))
self.stack.cleanup()
| apache-2.0 | Python |
|
fb7bc8af34f3ed375d30b43655366e6368080e76 | Create Import_Libraries.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/INMOOV/Config/ExtraConfig/Import_Libraries.py | home/INMOOV/Config/ExtraConfig/Import_Libraries.py | from java.lang import String
from org.myrobotlab.net import BareBonesBrowserLaunch
from datetime import datetime
from subprocess import Popen, PIPE
#######################
import threading
import time
import random
import urllib, urllib2
import json
import io
import itertools
import textwrap
import codecs
import socket
import os
import shutil
import hashlib
import subprocess
import csv
| apache-2.0 | Python |
|
4de971725601ed5f630ec103ad01cf5c624ad866 | Add the occupancy sensor_class (#3176) | shaftoe/home-assistant,rohitranjan1991/home-assistant,ct-23/home-assistant,philipbl/home-assistant,varunr047/homefile,JshWright/home-assistant,rohitranjan1991/home-assistant,ct-23/home-assistant,DavidLP/home-assistant,xifle/home-assistant,LinuxChristian/home-assistant,tboyce1/home-assistant,Duoxilian/home-assistant,philipbl/home-assistant,ma314smith/home-assistant,sdague/home-assistant,srcLurker/home-assistant,HydrelioxGitHub/home-assistant,auduny/home-assistant,jamespcole/home-assistant,alexmogavero/home-assistant,happyleavesaoc/home-assistant,DavidLP/home-assistant,LinuxChristian/home-assistant,eagleamon/home-assistant,happyleavesaoc/home-assistant,FreekingDean/home-assistant,pschmitt/home-assistant,Smart-Torvy/torvy-home-assistant,LinuxChristian/home-assistant,tboyce1/home-assistant,miniconfig/home-assistant,jamespcole/home-assistant,auduny/home-assistant,jabesq/home-assistant,tchellomello/home-assistant,xifle/home-assistant,Teagan42/home-assistant,Smart-Torvy/torvy-home-assistant,stefan-jonasson/home-assistant,kyvinh/home-assistant,HydrelioxGitHub/home-assistant,molobrakos/home-assistant,joopert/home-assistant,PetePriority/home-assistant,sdague/home-assistant,oandrew/home-assistant,JshWright/home-assistant,Teagan42/home-assistant,Smart-Torvy/torvy-home-assistant,stefan-jonasson/home-assistant,sander76/home-assistant,happyleavesaoc/home-assistant,jaharkes/home-assistant,nugget/home-assistant,toddeye/home-assistant,oandrew/home-assistant,JshWright/home-assistant,kennedyshead/home-assistant,philipbl/home-assistant,MungoRae/home-assistant,dmeulen/home-assistant,Zac-HD/home-assistant,molobrakos/home-assistant,lukas-hetzenecker/home-assistant,ct-23/home-assistant,PetePriority/home-assistant,jamespcole/home-assistant,eagleamon/home-assistant,kennedyshead/home-assistant,titilambert/home-assistant,mezz64/home-assistant,joopert/home-assistant,oandrew/home-assistant,wandor/home-assistant,robjohnson189/home-assistant,nugget/home-assistant,HydrelioxGitHub/home-assistant,jaharkes/home-assistant,robjohnson189/home-assistant,kyvinh/home-assistant,persandstrom/home-assistant,jabesq/home-assistant,ewandor/home-assistant,Danielhiversen/home-assistant,shaftoe/home-assistant,mezz64/home-assistant,robjohnson189/home-assistant,nugget/home-assistant,HydrelioxGitHub/home-assistant,MartinHjelmare/home-assistant,rohitranjan1991/home-assistant,mKeRix/home-assistant,jaharkes/home-assistant,keerts/home-assistant,tboyce021/home-assistant,partofthething/home-assistant,miniconfig/home-assistant,aequitas/home-assistant,nugget/home-assistant,Duoxilian/home-assistant,mKeRix/home-assistant,stefan-jonasson/home-assistant,MungoRae/home-assistant,GenericStudent/home-assistant,tinloaf/home-assistant,aronsky/home-assistant,srcLurker/home-assistant,balloob/home-assistant,w1ll1am23/home-assistant,adrienbrault/home-assistant,shaftoe/home-assistant,aronsky/home-assistant,kyvinh/home-assistant,florianholzapfel/home-assistant,FreekingDean/home-assistant,postlund/home-assistant,Duoxilian/home-assistant,GenericStudent/home-assistant,oandrew/home-assistant,tchellomello/home-assistant,soldag/home-assistant,PetePriority/home-assistant,jamespcole/home-assistant,pschmitt/home-assistant,JshWright/home-assistant,leoc/home-assistant,tinloaf/home-assistant,Zac-HD/home-assistant,Smart-Torvy/torvy-home-assistant,leppa/home-assistant,home-assistant/home-assistant,hexxter/home-assistant,MungoRae/home-assistant,molobrakos/home-assistant,jnewland/home-assistant,leppa/home-assistant,open-homeautomation/home-assistant,auduny/home-assistant,ma314smith/home-assistant,qedi-r/home-assistant,keerts/home-assistant,ma314smith/home-assistant,xifle/home-assistant,balloob/home-assistant,tinloaf/home-assistant,morphis/home-assistant,ct-23/home-assistant,florianholzapfel/home-assistant,betrisey/home-assistant,stefan-jonasson/home-assistant,jawilson/home-assistant,xifle/home-assistant,aequitas/home-assistant | homeassistant/components/binary_sensor/__init__.py | homeassistant/components/binary_sensor/__init__.py | """
Component to interface with binary sensors.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/binary_sensor/
"""
import logging
import voluptuous as vol
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.const import (STATE_ON, STATE_OFF)
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
DOMAIN = 'binary_sensor'
SCAN_INTERVAL = 30
ENTITY_ID_FORMAT = DOMAIN + '.{}'
SENSOR_CLASSES = [
None, # Generic on/off
'cold', # On means cold (or too cold)
'connectivity', # On means connection present, Off = no connection
'gas', # CO, CO2, etc.
'heat', # On means hot (or too hot)
'light', # Lightness threshold
'moisture', # Specifically a wetness sensor
'motion', # Motion sensor
'moving', # On means moving, Off means stopped
'occupancy', # On means occupied, Off means not occupied
'opening', # Door, window, etc.
'power', # Power, over-current, etc
'safety', # Generic on=unsafe, off=safe
'smoke', # Smoke detector
'sound', # On means sound detected, Off means no sound
'vibration', # On means vibration detected, Off means no vibration
]
SENSOR_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(SENSOR_CLASSES))
def setup(hass, config):
"""Track states and offer events for binary sensors."""
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
component.setup(config)
return True
# pylint: disable=no-self-use
class BinarySensorDevice(Entity):
"""Represent a binary sensor."""
@property
def is_on(self):
"""Return True if the binary sensor is on."""
return None
@property
def state(self):
"""Return the state of the binary sensor."""
return STATE_ON if self.is_on else STATE_OFF
@property
def sensor_class(self):
"""Return the class of this sensor, from SENSOR_CLASSES."""
return None
@property
def state_attributes(self):
"""Return device specific state attributes."""
attr = {}
if self.sensor_class is not None:
attr['sensor_class'] = self.sensor_class
return attr
| """
Component to interface with binary sensors.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/binary_sensor/
"""
import logging
import voluptuous as vol
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.const import (STATE_ON, STATE_OFF)
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
DOMAIN = 'binary_sensor'
SCAN_INTERVAL = 30
ENTITY_ID_FORMAT = DOMAIN + '.{}'
SENSOR_CLASSES = [
None, # Generic on/off
'cold', # On means cold (or too cold)
'connectivity', # On means connection present, Off = no connection
'gas', # CO, CO2, etc.
'heat', # On means hot (or too hot)
'light', # Lightness threshold
'moisture', # Specifically a wetness sensor
'motion', # Motion sensor
'moving', # On means moving, Off means stopped
'opening', # Door, window, etc.
'power', # Power, over-current, etc
'safety', # Generic on=unsafe, off=safe
'smoke', # Smoke detector
'sound', # On means sound detected, Off means no sound
'vibration', # On means vibration detected, Off means no vibration
]
SENSOR_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(SENSOR_CLASSES))
def setup(hass, config):
"""Track states and offer events for binary sensors."""
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
component.setup(config)
return True
# pylint: disable=no-self-use
class BinarySensorDevice(Entity):
"""Represent a binary sensor."""
@property
def is_on(self):
"""Return True if the binary sensor is on."""
return None
@property
def state(self):
"""Return the state of the binary sensor."""
return STATE_ON if self.is_on else STATE_OFF
@property
def sensor_class(self):
"""Return the class of this sensor, from SENSOR_CLASSES."""
return None
@property
def state_attributes(self):
"""Return device specific state attributes."""
attr = {}
if self.sensor_class is not None:
attr['sensor_class'] = self.sensor_class
return attr
| mit | Python |
6f0b5a0dc44269d9e72f3698317604d90d6cecf3 | add script for migrate user mailchimp | mattclark/osf.io,cslzchen/osf.io,saradbowman/osf.io,erinspace/osf.io,brianjgeiger/osf.io,adlius/osf.io,felliott/osf.io,leb2dg/osf.io,leb2dg/osf.io,icereval/osf.io,aaxelb/osf.io,erinspace/osf.io,pattisdr/osf.io,TomBaxter/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,adlius/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,chennan47/osf.io,pattisdr/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,mattclark/osf.io,mattclark/osf.io,caseyrollins/osf.io,sloria/osf.io,HalcyonChimera/osf.io,chennan47/osf.io,caseyrollins/osf.io,binoculars/osf.io,Johnetordoff/osf.io,Johnetordoff/osf.io,felliott/osf.io,pattisdr/osf.io,CenterForOpenScience/osf.io,sloria/osf.io,binoculars/osf.io,aaxelb/osf.io,TomBaxter/osf.io,caseyrollins/osf.io,brianjgeiger/osf.io,saradbowman/osf.io,mfraezz/osf.io,laurenrevere/osf.io,mfraezz/osf.io,laurenrevere/osf.io,HalcyonChimera/osf.io,Johnetordoff/osf.io,sloria/osf.io,chennan47/osf.io,brianjgeiger/osf.io,erinspace/osf.io,leb2dg/osf.io,adlius/osf.io,cslzchen/osf.io,cslzchen/osf.io,icereval/osf.io,icereval/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,adlius/osf.io,HalcyonChimera/osf.io,laurenrevere/osf.io,TomBaxter/osf.io,mfraezz/osf.io,baylee-d/osf.io,baylee-d/osf.io,leb2dg/osf.io,CenterForOpenScience/osf.io,binoculars/osf.io | scripts/fix_user_mailchimp.py | scripts/fix_user_mailchimp.py | import logging
import sys
from datetime import datetime
from django.db import transaction
from django.utils import timezone
from website.app import setup_django
setup_django()
from osf.models import OSFUser
from scripts import utils as script_utils
from website.mailchimp_utils import subscribe_mailchimp
from website import settings
logger = logging.getLogger(__name__)
def main():
dry = '--dry' in sys.argv
if not dry:
# If we're not running in dry mode log everything to a file
script_utils.add_file_logger(logger, __file__)
with transaction.atomic():
start_time = datetime.strptime('2017-12-20 08:25:25', '%Y-%m-%d %H:%M:%S')
start_time = start_time.replace(tzinfo=timezone.now().tzinfo)
end_time = datetime.strptime('2017-12-20 18:05:00', '%Y-%m-%d %H:%M:%S')
end_time = end_time.replace(tzinfo=timezone.now().tzinfo)
users = OSFUser.objects.filter(is_registered=True, date_disabled__isnull=True, date_registered__range=[start_time, end_time])
if not dry:
for user in users:
subscribe_mailchimp(settings.MAILCHIMP_GENERAL_LIST, user._id)
logger.info('User {} has been subscribed to OSF general mailing list'.format(user._id))
logger.info('{} users have been subscribed to OSF general mailing list'.format(users.count()))
if dry:
raise Exception('Abort Transaction - Dry Run')
print('Done')
if __name__ == '__main__':
main()
| apache-2.0 | Python |
|
9571acd941cb7ecac96676ead87c43fadda3e74f | Create TimeUpload.py | GallaghG/Piditarod,GallaghG/Piditarod | TimeUpload.py | TimeUpload.py | from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
import time
import csv
timeID='0B9ffTjUEqeFEZ28zdTRhMlJlY0k'
for i in range(10):
#get the curret time
date_time=time.asctime()
date_time_split=date_time.split(' ') #gives a list with the date and time components
time_only=date_time_split[3] # gives just the current time
date_only = str(date_time_split[1] + ' ' + date_time_split[2]+' ' +date_time_split[4])
#get the current csv from the GDrive and append the date and time and upload the new file to Gdrive
gauth = GoogleAuth()
# Try to load saved client credentials
gauth.LoadCredentialsFile("mycreds.txt")
if gauth.credentials is None:
# Authenticate if they're not there
gauth.LocalWebserverAuth()
elif gauth.access_token_expired:
# Refresh them if expired
gauth.Refresh()
else:
# Initialize the saved creds
gauth.Authorize()
# Save the current credentials to a file
gauth.SaveCredentialsFile("mycreds.txt")
drive = GoogleDrive(gauth)
#Download the prior file that we will append the new data to
current=drive.CreateFile({'id': timeID})
current.GetContentFile('current.csv')
#delete the prior data file to keep these files from accumulating on the GDrive
#current.DeleteFile(timeID)
with open('current.csv', 'a') as csvfile:
fieldnames = ['Time', 'Date']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({'Time': time_only, 'Date': date_only})
csvfile.close()
file1 = drive.CreateFile({'title':'time.csv', 'id': timeID}) #open a new file on the GDrive
file1.SetContentFile('current.csv') #sets the file content to the CSV file created above from the working directory
file1.Upload() #upload the file
timeID=file1['id']
    time.sleep(30) #pause for 30 seconds
| mit | Python |
|
7c6bbe3860e7cce0f464dc0d95683de3c5ca57a5 | Add test of `ResNet50FeatureProducer()` | dave-lab41/pelops,d-grossman/pelops,Lab41/pelops,dave-lab41/pelops,d-grossman/pelops,Lab41/pelops | testci/test_resnet50_feature.py | testci/test_resnet50_feature.py | from PIL import Image
import collections
import datetime
import numpy as np
import pytest
from pelops.features.resnet50 import ResNet50FeatureProducer
@pytest.fixture
def img_data():
DATA = [[[ 0, 0, 0],
[255, 255, 255],
[ 0, 0, 0]],
[[255, 255, 255],
[ 0, 0, 0],
[255, 255, 255]],
[[ 0, 0, 0],
[255, 255, 255],
[ 0, 0, 0]]]
return np.array(DATA, dtype=np.uint8)
@pytest.fixture
def chip_producer(img_data):
Chip = collections.namedtuple("Chip", ["filepath", "car_id", "cam_id", "time", "img_data", "misc"])
CHIPS = (
# filepath, car_id, cam_id, time, img_data, misc
("car1_cam1.png", 1, 1, datetime.datetime(2016, 10, 1, 0, 1, 2, microsecond=100), img_data, {}),
)
chip_producer = {"chips": {}}
for filepath, car_id, cam_id, time, img_data, misc in CHIPS:
chip = Chip(filepath, car_id, cam_id, time, img_data, misc)
chip_producer["chips"][filepath] = chip
return chip_producer
@pytest.fixture
def feature_producer(chip_producer):
res = ResNet50FeatureProducer(chip_producer)
return res
def test_features(feature_producer, chip_producer):
for _, chip in chip_producer["chips"].items():
features = feature_producer.produce_features(chip)
assert features.shape == (1, 2048)
assert np.sum(features) != 0
def test_preprocess_image(feature_producer, img_data):
img = Image.fromarray(img_data)
img_resized = feature_producer.preprocess_image(img, 224, 224)
assert img_resized.shape == (1, 224, 224, 3)
| apache-2.0 | Python |
|
d8a3f92a06971ba6fe24f71914a466ff91f00f5f | Create WikiBot3.5.py | COLAMAroro/WikiBot | WikiBot3.5.py | WikiBot3.5.py | import discord
import wikipedia
token = "Mjg3NjU2MjM1MjU0NDE1MzYx.C-5xKQ.khJ9dPouM9783FMA0Ht-92XkS6A"
language = "en"
client = discord.Client()
@client.event
async def on_ready():
print("Bot is ready")
print(client.user.name)
print(client.user.id)
@client.event
async def on_server_join(server):
await client.send_message(server.default_channel, "Oi, i'm the WikiBot! https://en.wikipedia.org/wiki/Main_Page")
@client.event
async def on_message(message):
if message.channel.is_private and message.author.id != client.user.id:
await printout(message, message.content)
else:
ping = "<@" + client.user.id + ">"
if message.content.startswith(ping):
print("I'm called!")
toretract = len(ping)
query = message.content[toretract:]
if query[0] == " ":
query = query[1:]
print("Query = " + query)
await printout(message, query)
async def printout(message, query):
wikipage = None
lookup = True
print("printout")
try:
wikipage = wikipedia.page(query)
print("I found directly")
except wikipedia.exceptions.PageError:
print("Can't access by default. Trying to search")
except Exception:
lookup = False
if wikipage is None and lookup:
        # wikipedia.suggest() returns a suggested title string (or None), not a
        # page object, so resolve it to a page before building the embed below
        suggestion = wikipedia.suggest(query)
        if suggestion:
            try:
                wikipage = wikipedia.page(suggestion)
            except Exception:
                wikipage = None
if wikipage is None and lookup:
await client.send_message(message.channel, "Sorry, cannot find " + query + " :v")
elif not lookup:
await client.send_message(message.channel, "Something went wrong. Try to be more specific in search, or maybe I can't reach Wikipedia")
else:
imglist = wikipage.images
if len(imglist) == 0:
em = discord.Embed(title=wikipage.title, description=wikipedia.summary(query, sentences=2), colour=0x2DAAED, url=wikipage.url)
else:
em = discord.Embed(title=wikipage.title, description=wikipedia.summary(query, sentences=2), colour=0x2DAAED, url=wikipage.url, image=imglist[0])
em.set_author(name=client.user.name, icon_url="https://wikibot.rondier.io")
await client.send_message(message.channel, embed=em)
await client.send_message(message.channel, "More at " + wikipage.url)
client.run(token)
| bsd-3-clause | Python |
|
3ef6866b39601dfafa10895a69c5d348a77ded3e | add test for eject and eject_all | missionpinball/mpf,missionpinball/mpf | mpf/tests/test_BallDevice_SmartVirtual.py | mpf/tests/test_BallDevice_SmartVirtual.py | from mpf.tests.MpfTestCase import MpfTestCase
class TestBallDeviceSmartVirtual(MpfTestCase):
def getConfigFile(self):
return 'test_ball_device.yaml'
def getMachinePath(self):
return 'tests/machine_files/ball_device/'
def get_platform(self):
return 'smart_virtual'
def test_eject(self):
# add initial balls to trough
self.hit_switch_and_run("s_ball_switch1", 1)
self.hit_switch_and_run("s_ball_switch2", 1)
self.assertEqual(2, self.machine.ball_devices.test_trough.balls)
self.assertEqual(2, self.machine.ball_devices.test_trough.available_balls)
# call eject
self.machine.ball_devices.test_trough.eject()
self.assertEqual(2, self.machine.ball_devices.test_trough.balls)
self.assertEqual(1, self.machine.ball_devices.test_trough.available_balls)
# one ball should be gone
self.advance_time_and_run(30)
self.assertEqual(1, self.machine.ball_devices.test_trough.balls)
self.assertEqual(1, self.machine.ball_devices.test_trough.available_balls)
def test_eject_all(self):
# add initial balls to trough
self.hit_switch_and_run("s_ball_switch1", 1)
self.hit_switch_and_run("s_ball_switch2", 1)
self.assertEqual(2, self.machine.ball_devices.test_trough.balls)
self.assertEqual(2, self.machine.ball_devices.test_trough.available_balls)
# call eject_all
self.machine.ball_devices.test_trough.eject_all()
self.advance_time_and_run(30)
# all balls should be gone
self.assertEqual(0, self.machine.ball_devices.test_trough.balls)
self.assertEqual(0, self.machine.ball_devices.test_trough.available_balls)
| mit | Python |
|
104fcfc4eed7f3233d329602283093c7f86484c3 | add development server | rollandf/codeandtalk.com,szabgab/codeandtalk.com,szabgab/codeandtalk.com,szabgab/codeandtalk.com,rollandf/codeandtalk.com,mhorvvitz/codeandtalk.com,szabgab/codeandtalk.com,mhorvvitz/codeandtalk.com,shaylavi/codeandtalk.com,mhorvvitz/codeandtalk.com,rollandf/codeandtalk.com,rollandf/codeandtalk.com,shaylavi/codeandtalk.com,shaylavi/codeandtalk.com | server.py | server.py | from http.server import HTTPServer, BaseHTTPRequestHandler
class StaticServer(BaseHTTPRequestHandler):
def do_GET(self):
root = 'html'
#print(self.path)
if self.path == '/':
filename = root + '/index.html'
else:
filename = root + self.path
self.send_response(200)
if filename[-4:] == '.css':
self.send_header('Content-type', 'text/css')
elif filename[-5:] == '.json':
            self.send_header('Content-type', 'application/json')
elif filename[-3:] == '.js':
self.send_header('Content-type', 'application/javascript')
elif filename[-4:] == '.ico':
self.send_header('Content-type', 'image/x-icon')
else:
self.send_header('Content-type', 'text/html')
self.end_headers()
with open(filename, 'rb') as fh:
html = fh.read()
#html = bytes(html, 'utf8')
self.wfile.write(html)
def run(server_class=HTTPServer, handler_class=StaticServer, port=8000):
server_address = ('', port)
httpd = server_class(server_address, handler_class)
print('Starting httpd on port {}'.format(port))
httpd.serve_forever()
run()
# vim: expandtab
| apache-2.0 | Python |
|
e88ba0984f3e6045b407342fa7231887142380e2 | Add migration to create roles | dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq | corehq/apps/accounting/migrations/0031_create_report_builder_roles.py | corehq/apps/accounting/migrations/0031_create_report_builder_roles.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from corehq.apps.hqadmin.management.commands.cchq_prbac_bootstrap import cchq_prbac_bootstrap
from corehq.sql_db.operations import HqRunPython
class Migration(migrations.Migration):
dependencies = [
('accounting', '0030_remove_softwareplan_visibility_trial_internal'),
]
operations = [
HqRunPython(cchq_prbac_bootstrap),
]
| bsd-3-clause | Python |
|
df34c1a07fa6029efbd4df41cbd2009ac5031aca | Create matrixAlg.py | ghevcoul/matrixAlg | matrixAlg.py | matrixAlg.py | #!/usr/bin/python
#####################################
# Written by Gavin Heverly-Coulson
# Email: gavin <at> quantumgeranium.com
#####################################
# A set of matrix algebra functions for performing
# basic matrix algebra operations.
#
# Tested with Python 2.6/2.7
#
# This work is licensed under a Simplified BSD License
# Copyright (c) 2014, Gavin Heverly-Coulson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
# Print a nicely formatted matrix
def printMat(mat):
newStr = ""
for i in range(len(mat)):
for j in range(len(mat[0])):
newStr = newStr + str(mat[i][j]) + " "
newStr += "\n"
print newStr
# Calculates the determinant of a 3x3 matrix, using the 2x2 sub-matrices method
def det3(mat):
return ( ( mat[0][0]*det2([[mat[1][1], mat[1][2]], [mat[2][1], mat[2][2]]]) ) - ( mat[0][1]*det2([[mat[1][0], mat[1][2]], [mat[2][0], mat[2][2]]]) ) + (mat[0][2]*det2([[mat[1][0], mat[1][1]], [mat[2][0], mat[2][1]]])) )
# Calculates the determinant of a 2x2 matrix
def det2(mat):
return ((mat[0][0]*mat[1][1]) - (mat[0][1]*mat[1][0]))
# Calculates the transpose of a matrix
# Works for arbitrary NxM size
def transpose(mat):
cols = len(mat) # number of rows in mat
rows = len(mat[0]) # number of columns in mat
transMat = [x[:] for x in [[None]*cols]*rows] # cols, rows
for a in range(rows):
for b in range(cols):
transMat[a][b] = mat[b][a]
return transMat
# Calculates the dot product of two vectors, A and B
def dotProduct(A, B):
counter = 0
product = 0
while counter < len(A):
product = product + (A[counter] * B[counter])
counter += 1
return product
# Calculates the length of a vector
def vectLength(A):
sumSquares = 0
for i in A:
sumSquares = sumSquares + (i**2)
return math.sqrt(sumSquares)
# Multiplies two matrices (A and B) and returns the result
def matMult(A, B):
if len(A[0]) != len(B):
print "Matrix dimensions don't match!\nA has {0} columns and B has {1} rows.".format(len(A[0]), len(B))
else:
newMat = [[0.0 for cols in range(len(B[0]))] for rows in range(len(A))]
for i in range(len(A)):
for j in range(len(B[0])):
for k in range(len(B)):
newMat[i][j] += A[i][k]*B[k][j]
return newMat
# Converts a given matrix (not necessarily square) to
# reduced row echelon form
def toRedRowEchelon(mat):
colPos = 0
rows = len(mat)
cols = len(mat[0])
for r in range(rows):
if colPos >= cols:
return mat
i = r
while mat[i][colPos] == 0.0:
i += 1
if i == rows:
i = r
colPos += 1
if colPos == cols:
return mat
mat[i], mat[r] = mat[r], mat[i] # swap rows i and r
lv = mat[r][colPos]
mat[r] = [mrx / lv for mrx in mat[r]]
for i in range(rows):
if i != r:
lv = mat[i][colPos]
mat[i] = [iv - lv * rv for rv, iv in zip(mat[r], mat[i])]
colPos += 1
return mat
# Finds the inverse of a given matrix
def invMat(mat):
matDim = len(mat)
idenMat = [[0.0 for col in range(matDim)] for row in range(matDim)]
for i in range(matDim):
idenMat[i][i] = 1.0
newMat = [None] * matDim
for i in range(matDim):
newMat[i] = mat[i] + idenMat[i]
solvedMat = toRedRowEchelon(newMat)
invertMat = [None] * matDim
for i in range(matDim):
invertMat[i] = solvedMat[i][-1*matDim:]
return invertMat
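# Illustrative usage sketch (editor's addition, not part of the original
# module): a quick sanity check of a few of the routines above.
if __name__ == "__main__":
    A = [[4.0, 7.0], [2.0, 6.0]]
    printMat(A)
    print det2(A)                    # determinant: 10.0
    printMat(transpose(A))
    printMat(matMult(A, invMat(A)))  # A times its inverse ~ identity matrix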
| bsd-2-clause | Python |
|
7b7ec9cdd1f0ed213608a5c309702e49e44b36e2 | Add simple test. | tsotetsi/django-seed | tests/integration/test_smoke.py | tests/integration/test_smoke.py | from django.test import TestCase
URLS_PUBLIC = [
"/",
]
class SimpleTests(TestCase):
def test_urls(self):
for url in URLS_PUBLIC:
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| mit | Python |
|
c44001ec697faf7552764f91e52fa927056b1538 | Add solution for problem 31 | cifvts/PyEuler | euler031.py | euler031.py | #!/usr/bin/python
LIMIT = 200
coins = [1, 2, 5, 10, 20, 50, 100, 200]
def rec_count(total, step):
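    # Count combinations of coins summing to LIMIT; only allowing coins
    # >= the previous step keeps coin values non-decreasing along a path,
    # so every combination is counted exactly once.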
if total == LIMIT:
return 1
if total > LIMIT:
return 0
c = 0
for x in coins:
if x < step:
continue
c += rec_count(total + x, x)
return c
count = 0
for x in coins:
count += rec_count(x, x)
print(count)
| mit | Python |
|
84c5bfa0252814c5797cf7f20b04808dafa9e1fa | Create MergeIntervals_001.py | Chasego/cod,Chasego/codi,Chasego/codirit,cc13ny/algo,Chasego/codirit,Chasego/codi,Chasego/cod,cc13ny/algo,Chasego/codi,cc13ny/Allin,Chasego/codirit,Chasego/codirit,cc13ny/Allin,cc13ny/Allin,cc13ny/Allin,cc13ny/Allin,cc13ny/algo,Chasego/codi,cc13ny/algo,Chasego/cod,cc13ny/algo,Chasego/cod,Chasego/cod,Chasego/codi,Chasego/codirit | leetcode/056-Merge-Intervals/MergeIntervals_001.py | leetcode/056-Merge-Intervals/MergeIntervals_001.py | # Definition for an interval.
# class Interval:
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution:
# @param {Interval[]} intervals
# @return {Interval[]}
def sortmeg(self, intervals):
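        # Sort intervals by start value: argsort the collected start values,
        # then rebuild the interval list in that order.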
ls = []
for i in intervals:
ls.append(i.start)
idx = sorted(range(len(ls)),key=lambda x:ls[x])
sortedintv = []
for i in idx:
sortedintv.append(intervals[i])
return sortedintv
def merge(self, intervals):
if len(intervals) < 2:
return intervals
intervals = self.sortmeg(intervals)
p = 0
while p + 1 <= len(intervals) - 1:
if intervals[p+1].start <= intervals[p].end:
if intervals[p+1].end > intervals[p].end:
intervals[p].end = intervals[p+1].end
del intervals[p+1]
else:
p += 1
return intervals
| mit | Python |
|
8471516294d5b28a81cae73db591ae712f44bc01 | Add failing cairo test | lazka/pgi,lazka/pgi | tests/pygobject/test_structs.py | tests/pygobject/test_structs.py | # Copyright 2013 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
import unittest
from gi.repository import Gtk
from tests import is_gi
class StructTest(unittest.TestCase):
@unittest.skipUnless(is_gi, "FIXME")
def test_foreign_cairo(self):
window = Gtk.OffscreenWindow()
area = Gtk.DrawingArea()
window.add(area)
def foo(area, context):
self.assertTrue(hasattr(context, "set_source_rgb"))
area.connect("draw", foo)
window.show_all()
while Gtk.events_pending():
Gtk.main_iteration()
window.destroy()
| lgpl-2.1 | Python |
|
c46e6d170f4d641c3bb5045a701c7810d77f28a6 | add update-version script | trustathsh/ifmapcli,trustathsh/ifmapcli | update-version.py | update-version.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import xml.etree.ElementTree as et
NS = "http://maven.apache.org/POM/4.0.0"
POM_NS = "{http://maven.apache.org/POM/4.0.0}"
def getModuleNames(mainPom):
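    # Parse the aggregator pom and return the <module> directory names
    # declared in its <modules> block.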
pom = et.parse(mainPom)
modules = pom.findall("./{ns}modules/{ns}module".format(ns=POM_NS))
return map(lambda element: element.text, modules)
def updateVersionInModule(module, newVersion):
pomPath = os.path.join(module, "pom.xml")
modulePom = et.parse(pomPath)
parentVersion = modulePom.find("./{ns}parent/{ns}version".format(ns=POM_NS))
parentVersion.text = newVersion
modulePom.write(pomPath, xml_declaration=False, encoding="utf-8", method="xml")
if __name__ == '__main__':
et.register_namespace('', NS)
parser = argparse.ArgumentParser(description='Update parent version in all submodules.')
parser.add_argument('version', help='the new parent version')
args = parser.parse_args()
allModules = getModuleNames("pom.xml")
for module in allModules:
updateVersionInModule(module, args.version) | apache-2.0 | Python |
|
0bc4d105bd649ed9e174b26b5017572f08fd5c2f | Write unit tests for physics library | cnlohr/bridgesim,cnlohr/bridgesim,cnlohr/bridgesim,cnlohr/bridgesim | src/server/test_physics.py | src/server/test_physics.py | #!/usr/bin/env python
import unittest
from physics import *
class TestNVectors(unittest.TestCase):
def setUp(self):
self.v11 = NVector(1, 1)
self.v34 = NVector(3, 4)
self.v10 = NVector(10, 0)
self.vneg = NVector(-2, -2)
def test_dimensionality(self):
"""Test counting of number of dimensionality"""
self.assertEqual(self.v11.dimensionality(), 2)
def test_magnitude(self):
"""Test magnitude calculation"""
self.assertEqual(self.v34.magnitude(), 5)
def test_norm(self):
"""Test unit vector calculation"""
self.assertEqual(self.v10.norm(), NVector(1, 0))
self.assertEqual(self.v11.norm(),
NVector(0.7071067811865475, 0.7071067811865475))
def test_init(self):
"""Check initialization"""
self.assertEqual(self.v11.dimensions, (1, 1))
self.assertEqual(self.v34.dimensions, (3, 4))
self.assertEqual(self.vneg.dimensions, (-2, -2))
def test_equality(self):
"""Check equality between vectors"""
self.assertEqual(NVector(5, 5), NVector(5, 5))
self.assertNotEqual(NVector(3, 4), NVector(4, 3))
def test_neg(self):
"""Check negation"""
self.assertEqual(NVector(1, -1), -NVector(-1, 1))
self.assertNotEqual(NVector(10, 5), -NVector(10, 5))
def test_truth(self):
"""Check truth values"""
self.assertFalse(NVector(0, 0, 0, 0))
self.assertTrue(NVector(1, 0))
self.assertTrue(NVector(-10, -20, -30))
def test_addition(self):
"""Check vector addition"""
self.assertEqual(NVector(3, 2, 1, 0) + NVector(0, 1, 2, 3),
NVector(3, 3, 3, 3))
# Make sure some exceptions are raised.
with self.assertRaises(DimensionalityError):
NVector(2, 2) + NVector(3, 3, 3)
with self.assertRaises(TypeError):
NVector(1, 1) + 10
def test_subtraction(self):
"""Check vector subtraction"""
self.assertEqual(NVector(3, 2, 1, 0) - NVector(0, 1, 2, 3),
NVector(3, 1, -1, -3))
# Make sure some exceptions are raised.
with self.assertRaises(DimensionalityError):
NVector(2, 2) - NVector(3, 3, 3)
with self.assertRaises(TypeError):
NVector(1, 1) - 10
def test_multiplication(self):
"""Check vector and scalar multiplication"""
self.assertEqual(NVector(4, 2) * 10, NVector(40, 20))
self.assertEqual(2 * NVector(1, 1), NVector(2, 2))
self.assertEqual(NVector(3, 3) * NVector(2, 2), NVector(6, 6))
# Make sure some exceptions are raised.
with self.assertRaises(DimensionalityError):
NVector(1) * NVector(2, 2)
def test_division(self):
"""Check vector and scalar true and floor division"""
self.assertEqual(NVector(5, 5) / NVector(2, 2), NVector(2.5, 2.5))
self.assertEqual(NVector(5, 5) // NVector(2, 2), NVector(2, 2))
self.assertEqual(NVector(5, 5) / 2, NVector(2.5, 2.5))
self.assertEqual(NVector(5, 5) // 2, NVector(2, 2))
with self.assertRaises(DimensionalityError):
NVector(3, 3, 3) / NVector(2, 2)
with self.assertRaises(DimensionalityError):
NVector(3, 3, 3) // NVector(2, 2)
with self.assertRaises(TypeError):
5 / NVector(1, 1)
with self.assertRaises(TypeError):
5 // NVector(1, 1)
def test_stringy(self):
"""Test string formatting"""
self.assertEqual(str(NVector(1, 1)), "<1.000000, 1.000000>")
if __name__ == "__main__":
unittest.main()
| mit | Python |
|
b9cf46407eea6df9bb3fef5eb3103c7353b249a9 | solve problem 11 | edpark13/euler | problem11.py | problem11.py | def largest_grid_product(grid):
max = float("-inf")
for i in xrange(0, len(grid)):
for j in xrange(0, len(grid[i]) - 3):
productx = grid[i][j] * grid[i][j+1] * grid[i][j+2] * grid[i][j+3]
producty = grid[j][i] * grid[j+1][i] * grid[j+2][i] * grid[j+3][i]
if productx > max:
max = productx
            if producty > max:
max = producty
    # check all four-in-a-row diagonals (down-right and down-left), not just
    # the main diagonal
    for i in xrange(0, len(grid) - 3):
        for j in xrange(0, len(grid[i]) - 3):
            productd = grid[i][j] * grid[i+1][j+1] * grid[i+2][j+2] * grid[i+3][j+3]
            if productd > max: max = productd
        for j in xrange(3, len(grid[i])):
            producta = grid[i][j] * grid[i+1][j-1] * grid[i+2][j-2] * grid[i+3][j-3]
            if producta > max: max = producta
print max
if __name__ == '__main__':
L = []
L.append("08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08")
L.append("49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00")
L.append("81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65")
L.append("52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91")
L.append("22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80")
L.append("24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50")
L.append("32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70")
L.append("67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21")
L.append("24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72")
L.append("21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95")
L.append("78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92")
L.append("16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57")
L.append("86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58")
L.append("19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40")
L.append("04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66")
L.append("88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69")
L.append("04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36")
L.append("20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16")
L.append("20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54")
L.append("01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48")
M = [i.split() for i in L]
M = [[int(j) for j in i] for i in M]
largest_grid_product(M)
| mit | Python |
|
1d3719bcd03b92d04efae10933928f953d95c7a4 | Add a simple basicmap python example | SunGuo/learning-spark,asarraf/learning-spark,SunGuo/learning-spark,jaehyuk/learning-spark,asarraf/learning-spark,negokaz/learning-spark,kod3r/learning-spark,diogoaurelio/learning-spark,databricks/learning-spark,zaxliu/learning-spark,JerryTseng/learning-spark,anjuncc/learning-spark-examples,gaoxuesong/learning-spark,huixiang/learning-spark,mmirolim/learning-spark,jaehyuk/learning-spark,XiaoqingWang/learning-spark,mohitsh/learning-spark,bhagatsingh/learning-spark,databricks/learning-spark,mmirolim/learning-spark,jaehyuk/learning-spark,kpraveen420/learning-spark,zaxliu/learning-spark,SunGuo/learning-spark,coursera4ashok/learning-spark,jindalcastle/learning-spark,huixiang/learning-spark,DINESHKUMARMURUGAN/learning-spark,diogoaurelio/learning-spark,ellis429/learning-spark-examples,DINESHKUMARMURUGAN/learning-spark,holdenk/learning-spark-examples,kod3r/learning-spark,anjuncc/learning-spark-examples,noprom/learning-spark,GatsbyNewton/learning-spark,GatsbyNewton/learning-spark,DINESHKUMARMURUGAN/learning-spark,SunGuo/learning-spark,qingkaikong/learning-spark-examples,bhagatsingh/learning-spark,zaxliu/learning-spark,ellis429/learning-spark,databricks/learning-spark,mohitsh/learning-spark,mohitsh/learning-spark,kpraveen420/learning-spark,holdenk/learning-spark-examples,kod3r/learning-spark,UsterNes/learning-spark,concerned3rdparty/learning-spark,junwucs/learning-spark,coursera4ashok/learning-spark,jindalcastle/learning-spark,ellis429/learning-spark-examples,feynman0825/learning-spark,UsterNes/learning-spark,JerryTseng/learning-spark,ellis429/learning-spark-examples,dsdinter/learning-spark-examples,baokunguo/learning-spark-examples,tengteng/learning-spark,gaoxuesong/learning-spark,gaoxuesong/learning-spark,UsterNes/learning-spark,XiaoqingWang/learning-spark,kpraveen420/learning-spark,tengteng/learning-spark,negokaz/learning-spark,junwucs/learning-spark,concerned3rdparty/learning-spark,jindalcastle/learning-spark,feynman0825/learning-spark,dsdinter/learning-spark-examples,anjuncc/learning-spark-examples,diogoaurelio/learning-spark,bhagatsingh/learning-spark,DINESHKUMARMURUGAN/learning-spark,DINESHKUMARMURUGAN/learning-spark,ellis429/learning-spark-examples,feynman0825/learning-spark,dsdinter/learning-spark-examples,anjuncc/learning-spark-examples,ellis429/learning-spark-examples,noprom/learning-spark,shimizust/learning-spark,huydx/learning-spark,ramyasrigangula/learning-spark,dsdinter/learning-spark-examples,mmirolim/learning-spark,NBSW/learning-spark,noprom/learning-spark,rex1100/learning-spark,holdenk/learning-spark-examples,huydx/learning-spark,huixiang/learning-spark,kpraveen420/learning-spark,noprom/learning-spark,XiaoqingWang/learning-spark,negokaz/learning-spark,XiaoqingWang/learning-spark,qingkaikong/learning-spark-examples,kod3r/learning-spark,junwucs/learning-spark,ellis429/learning-spark,feynman0825/learning-spark,NBSW/learning-spark,bhagatsingh/learning-spark,jindalcastle/learning-spark,NBSW/learning-spark,coursera4ashok/learning-spark,concerned3rdparty/learning-spark,shimizust/learning-spark,NBSW/learning-spark,jindalcastle/learning-spark,dsdinter/learning-spark-examples,UsterNes/learning-spark,diogoaurelio/learning-spark,obinsanni/learning-spark,tengteng/learning-spark,huixiang/learning-spark,huydx/learning-spark,zaxliu/learning-spark,qingkaikong/learning-spark-examples,huydx/learning-spark,ramyasrigangula/learning-spark,junwucs/learning-spark,jindalcastle/learning-spark,databricks/learning-spark,ramyasrigangula/learning-spark,jaehyuk/learning-spark,qingkaikong/learning-spark-examples,ellis429/learning-spark,feynman0825/learning-spark,shimizust/learning-spark,asarraf/learning-spark,rex1100/learning-spark,jaehyuk/learning-spark,zaxliu/learning-spark,shimizust/learning-spark,UsterNes/learning-spark,ramyasrigangula/learning-spark,mmirolim/learning-spark,obinsanni/learning-spark,JerryTseng/learning-spark,kod3r/learning-spark,mohitsh/learning-spark,baokunguo/learning-spark-examples,databricks/learning-spark,huixiang/learning-spark,junwucs/learning-spark,rex1100/learning-spark,GatsbyNewton/learning-spark,asarraf/learning-spark,JerryTseng/learning-spark | src/python/BasicMap.py | src/python/BasicMap.py | """
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> b = sc.parallelize([1, 2, 3, 4])
>>> sorted(basicSquare(b).collect())
[1, 4, 9, 16]
"""
import sys
from pyspark import SparkContext
def basicSquare(nums):
"""Square the numbers"""
return nums.map(lambda x: x * x)
if __name__ == "__main__":
master = "local"
    if len(sys.argv) == 2:
master = sys.argv[1]
sc = SparkContext(master, "BasicMap")
nums = sc.parallelize([1, 2, 3, 4])
    output = basicSquare(nums).collect()
for num in output:
print "%i " % (num)
| mit | Python |
|
41220718d0e9a32fc9e95d55acdb989b2f87563f | Add @job tasks | RyanBalfanz/django-smsish | smsish/tasks.py | smsish/tasks.py | import django_rq
from rq.decorators import job
DEFAULT_QUEUE_NAME = "default"
DEFAULT_REDIS_CONNECTION = django_rq.get_connection()
@job(DEFAULT_QUEUE_NAME, connection=DEFAULT_REDIS_CONNECTION)
def send_sms(*args, **kwargs):
from smsish.sms import send_sms as _send_sms
return _send_sms(*args, **kwargs)
@job(DEFAULT_QUEUE_NAME, connection=DEFAULT_REDIS_CONNECTION)
def send_mass_sms(*args, **kwargs):
from smsish.sms import send_mass_sms as _send_mass_sms
return _send_mass_sms(*args, **kwargs)
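# Illustrative usage sketch (editor's addition): rq's @job decorator keeps the
# function directly callable and also attaches a .delay() helper that enqueues
# it on the default queue for a worker, e.g.:
#     send_sms('Hello', '+15550100', ['+15550199'])        # run synchronously
#     send_sms.delay('Hello', '+15550100', ['+15550199'])  # enqueue for worker
# (the argument names/values above are placeholders, not part of this module)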
| mit | Python |
|
6ee145c7af7084f228ee48754ef2a0bfc37c5946 | Add missing hooks.py module | ColinDuquesnoy/pyqt_distutils | pyqt_distutils/hooks.py | pyqt_distutils/hooks.py | """
A pyqt-distutils hook is a python function that is called after the
compilation of a ui script to let you customise its content. E.g. you
might want to write a hook to change the translate function used or replace
the PyQt imports by your owns if you're using a shim,...
The hook function is a simple python function which must take a single
argument: the path to the generated python script.
Hooks are exposed as setuptools entrypoint using :attr:`ENTRYPOINT` as the
entrypoint key. E.g., in your setup.py::
setup(
...,
entry_points={
'pyqt_distutils_hooks': [
'hook_name = package_name.module_name:function_name']
},
...)
There is a "hooks" config key where you can list the hooks
that you want to run on all your ui/qrc scripts. E.g.::
{
"files": [
["forms/*.ui", "foo_gui/forms/"],
["resources/*.qrc", "foo_gui/forms/"]
],
"pyrcc": "pyrcc5",
"pyrcc_options": "",
"pyuic": "pyuic5",
"pyuic_options": "--from-imports",
"hooks": ["gettext", "spam", "eggs"]
}
At the moment, we provide one builtin hook: **gettext**. This hook let you
use ``gettext.gettext`` instead of ``QCoreApplication.translate``.
"""
import pkg_resources
import traceback
#: Name of the entrypoint to use in setup.py
ENTRYPOINT = 'pyqt_distutils_hooks'
def load_hooks():
"""
Load the exposed hooks.
Returns a dict of hooks where the keys are the name of the hook and the
values are the actual hook functions.
"""
hooks = {}
for entrypoint in pkg_resources.iter_entry_points(ENTRYPOINT):
name = str(entrypoint).split('=')[0].strip()
try:
hook = entrypoint.load()
except Exception:
traceback.print_exc()
else:
hooks[name] = hook
return hooks
def hook(ui_file_path):
"""
This is the prototype of a hook function.
"""
pass
GETTEXT_REPLACEMENT = ''' import gettext
def _translate(_, string):
return gettext.gettext(string)
'''
def gettext(ui_file_path):
"""
Let you use gettext instead of the Qt tools for l18n
"""
with open(ui_file_path, 'r') as fin:
content = fin.read()
with open(ui_file_path, 'w') as fout:
fout.write(content.replace(
' _translate = QtCore.QCoreApplication.translate',
GETTEXT_REPLACEMENT))
| mit | Python |
|
ff79343cb1feda5259244199b4f0d503da401f24 | Create quick_sort_iterativo.py | jeffmorais/estrutura-de-dados | quick_sort_iterativo.py | quick_sort_iterativo.py | import unittest
def _quick_recursivo(seq, inicio, final):
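    # Recursive quicksort over seq[inicio:final+1]: partition around the last
    # element as pivot, swap the pivot into place, then sort both halves.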
if inicio >= final:
return seq
indice_pivot = final
pivot = seq[indice_pivot]
i_esquerda = inicio
i_direita = final - 1
while i_esquerda<=i_direita:
while i_esquerda<=i_direita and seq[i_esquerda]<=pivot:
i_esquerda=i_esquerda+1
while i_esquerda<=i_direita and seq[i_direita]>=pivot:
i_direita=i_direita-1
if i_esquerda<i_direita:
aux=seq[i_esquerda]
seq[i_esquerda]=seq[i_direita]
seq[i_direita]=aux
aux=seq[i_esquerda]
seq[i_esquerda]=seq[final]
seq[final]=aux
_quick_recursivo(seq, inicio, i_esquerda - 1)
_quick_recursivo(seq, i_esquerda + 1, final)
return seq
def quick_sort(seq):
return _quick_recursivo(seq, 0, len(seq) - 1)
class OrdenacaoTestes(unittest.TestCase):
def teste_lista_vazia(self):
self.assertListEqual([], quick_sort([]))
def teste_lista_unitaria(self):
self.assertListEqual([1], quick_sort([1]))
def teste_lista_binaria(self):
self.assertListEqual([1, 2], quick_sort([2, 1]))
def teste_lista_desordenada(self):
self.assertListEqual([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], quick_sort([9, 7, 1, 8, 5, 3, 6, 4, 2, 0]))
def teste_lista_com_elementos_repetidos(self):
self.assertListEqual([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9], quick_sort([9, 7, 1, 8, 5, 3, 6, 4, 2, 0, 9, 9]))
def teste_lista_so_com_elementos_repetidos(self):
self.assertListEqual([9, 9, 9], quick_sort([9, 9, 9]))
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
8543b2bf12c25163be62a8d44b48d32396f3ac9b | Add source. | ADEHJKNTV/GraphingSolver | solver.py | solver.py | #!usr/bin/env python3
import sys, time
from tkinter import messagebox, Tk
game_w, game_h = 50, 30 # total width and height of the game board in game coordinates
formula_mode = "axis"
from pymouse import PyMouse, PyMouseEvent
from pykeyboard import PyKeyboard, PyKeyboardEvent
m = PyMouse()
k = PyKeyboard()
class PointMouseSelector(PyMouseEvent):
def __init__(self):
PyMouseEvent.__init__(self)
self.x, self.y = None, None
def click(self, x, y, button, press):
if press: return # only handle button up events
if button == 1: # left click
print("selecting", x, y)
self.x, self.y = x, y
self.stop()
elif button == 2: # right click
self.stop()
def select_point():
S = PointMouseSelector()
try: S.run()
except: pass
return (S.x, S.y)
def calculate_formula_axis(point_list):
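    # Emits one closed-form expression for the whole path: each segment's line
    # equation is gated by (sign(x-x1)-sign(x-x2))/2, which equals 1 between
    # the segment's endpoints and 0 elsewhere, so the gated terms simply sum.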
    sorted_points = sorted(point_list, key=lambda x: x[0])
    start = sorted_points[0]
x1, y1 = 0, 0
result = ""
normalize = lambda x: str(x) if "-" in str(x) else "+" + str(x)
    for point in sorted_points[1:]:
x2, y2 = point[0] - start[0], point[1] - start[1]
if x2 == x1: # jump discontinuity, skip to get a jump
pass
else:
slope = (y2 - y1) / (x2 - x1)
result += "+(sign(x{0})-sign(x{1}))*({2}*x{3})/2".format(normalize(-x1), normalize(-x2), str(round(-slope, 3)), normalize(round(-(y1 - slope * x1), 3))) # add a line segment with correct slope
x1, y1 = x2, y2
result = result[1:] + "+0.5*sin(800*x)" # remove the leading plus sign
return result
def calculate_formula_graphwar(point_list):
    sorted_points = sorted(point_list, key=lambda x: x[0])
    start = sorted_points[0]
x1, y1 = start[0], 0
result = ""
normalize = lambda x: str(x) if "-" in str(x) else "+" + str(x)
    for point in sorted_points[1:]:
x2, y2 = point[0], point[1] - start[1]
if x2 == x1: # jump discontinuity, skip to get a jump
raise Exception("bad thing happen")
else:
slope = (y2 - y1) / (x2 - x1)
result += "+(1/(1+exp(-1000*(x{0})))-1/(1+exp(-1000*(x{1}))))*({2}*x{3})".format(normalize(round(-x1)), normalize(round(-x2)), str(round(-slope, 3)), normalize(round(-(y1 - slope * x1), 3))) # add a line segment with correct slope
x1, y1 = x2, y2
result = result[1:] + "+0.1*sin(60*x)" # remove the leading plus sign
return result
messagebox.showinfo("Select Point", "Press OK and left click on the top left corner and then the bottom right corner of the game axes.")
top_left = select_point()
if top_left[0] == None: sys.exit()
bottom_right = select_point()
if bottom_right[0] == None: sys.exit()
scale_w, scale_h = (bottom_right[0] - top_left[0]) / game_w, (bottom_right[1] - top_left[1]) / game_h
print("window size", bottom_right[0] - top_left[0], bottom_right[1] - top_left[1])
while True:
messagebox.showinfo("Game Start", "Press OK and right click path points when your turn starts, starting with the player. Right click on the formula entry box to complete.")
# get start point
start = select_point()
start = (start[0] - top_left[0], start[1] - top_left[1])
if start[0] == None: sys.exit()
# get path points
points = [(start[0] / scale_w - game_w / 2, start[1] / scale_h - game_h / 2)]
current_x = start[0]
while True:
point = select_point()
if point[0] == None: break # completed
point = (point[0] - top_left[0], point[1] - top_left[1])
if point[0] <= current_x: # left or same as current one, which means jump down
points.append((current_x / scale_w - game_w / 2, point[1] / scale_h - game_h / 2))
else: # normal line segment
points.append((point[0] / scale_w - game_w / 2, point[1] / scale_h - game_h / 2))
current_x = point[0]
print("selected points ", points)
if formula_mode == "axis": # axisthgame style formulas
formula = calculate_formula_axis(points)
elif formula_mode == "graphwar": # graphwar style formulas
formula = calculate_formula_graphwar(points)
else: raise Exception("bad thing happen")
print(formula)
try:
import win32clipboard
win32clipboard.OpenClipboard()
win32clipboard.EmptyClipboard()
win32clipboard.SetClipboardText(formula, win32clipboard.CF_TEXT)
win32clipboard.CloseClipboard()
except ImportError: pass
| mit | Python |
|
4324eaf427731db3943cf130e42e29509bdbd4df | Fix for Python 3 | pv/asv,qwhelan/asv,mdboom/asv,giltis/asv,airspeed-velocity/asv,spacetelescope/asv,pv/asv,airspeed-velocity/asv,qwhelan/asv,giltis/asv,airspeed-velocity/asv,ericdill/asv,mdboom/asv,pv/asv,edisongustavo/asv,spacetelescope/asv,edisongustavo/asv,pv/asv,cpcloud/asv,spacetelescope/asv,waylonflinn/asv,airspeed-velocity/asv,waylonflinn/asv,spacetelescope/asv,qwhelan/asv,edisongustavo/asv,waylonflinn/asv,mdboom/asv,mdboom/asv,giltis/asv,ericdill/asv,ericdill/asv,cpcloud/asv,ericdill/asv,cpcloud/asv,qwhelan/asv | asv/config.py | asv/config.py | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
from . import util
class Config(object):
"""
Manages the configuration for a benchmark project.
"""
api_version = 1
def __init__(self):
self.project = "project"
self.project_url = "#"
self.repo = None
self.pythons = ["{0[0]}.{0[1]}".format(sys.version_info)]
self.matrix = {}
self.env_dir = "env"
self.benchmark_dir = "benchmarks"
self.results_dir = "results"
self.html_dir = "html"
self.show_commit_url = "#"
self.hash_length = 8
@classmethod
def load(cls, path=None):
"""
Load a configuration from a file. If no file is provided,
defaults to `asv.conf.json`.
"""
if not path:
path = "asv.conf.json"
if not os.path.exists(path):
raise RuntimeError("Config file {0} not found.".format(path))
conf = Config()
d = util.load_json(path, cls.api_version)
conf.__dict__.update(d)
if not getattr(conf, "repo", None):
raise ValueError(
"No repo specified in {0} config file.".format(path))
return conf
@classmethod
def update(cls, path=None):
if not path:
path = "asv.conf.json"
if not os.path.exists(path):
raise RuntimeError("Config file {0} not found.".format(path))
util.update_json(cls, path, cls.api_version)
| # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
from . import util
class Config(object):
"""
Manages the configuration for a benchmark project.
"""
api_version = 1
def __init__(self):
self.project = "project"
self.project_url = "#"
self.repo = None
self.pythons = ["{0.major}.{0.minor}".format(sys.version_info)]
self.matrix = {}
self.env_dir = "env"
self.benchmark_dir = "benchmarks"
self.results_dir = "results"
self.html_dir = "html"
self.show_commit_url = "#"
self.hash_length = 8
@classmethod
def load(cls, path=None):
"""
Load a configuration from a file. If no file is provided,
defaults to `asv.conf.json`.
"""
if not path:
path = "asv.conf.json"
if not os.path.exists(path):
raise RuntimeError("Config file {0} not found.".format(path))
conf = Config()
d = util.load_json(path, cls.api_version)
conf.__dict__.update(d)
if not getattr(conf, "repo", None):
raise ValueError(
"No repo specified in {0} config file.".format(path))
return conf
@classmethod
def update(cls, path=None):
if not path:
path = "asv.conf.json"
if not os.path.exists(path):
raise RuntimeError("Config file {0} not found.".format(path))
util.update_json(cls, path, cls.api_version)
| bsd-3-clause | Python |
0eb579b00c7e42813d45aa841df3f42607db0a7e | add thermoengineTest | pierrelb/RMG-Py,pierrelb/RMG-Py | rmgpy/thermo/thermoengineTest.py | rmgpy/thermo/thermoengineTest.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains unit tests of the rmgpy.parallel module.
"""
import os
import sys
import unittest
import random
from external.wip import work_in_progress
from rmgpy import settings
from rmgpy.data.rmg import RMGDatabase
from rmgpy.rmg.main import RMG
from rmgpy.scoop_framework.framework import TestScoopCommon
from rmgpy.species import Species
from rmgpy.thermo.thermoengine import submit
try:
from scoop import futures, _control, shared
except ImportError, e:
import logging as logging
logging.debug("Could not properly import SCOOP.")
def load():
tearDown()
rmg = RMG()#for solvent
database = RMGDatabase()
database.loadThermo(os.path.join(settings['database.directory'], 'thermo'))
database.loadTransport(os.path.join(settings['database.directory'], 'transport'))
database.loadSolvation(os.path.join(settings['database.directory'], 'solvation'))
def tearDown():
"""
Reset the loaded database
"""
import rmgpy.data.rmg
rmgpy.data.rmg.database = None
def funcSubmit():
"""
Test that we can submit a number of species.
"""
load()
spcs = [
Species().fromSMILES('C'),\
Species().fromSMILES('CC'), \
Species().fromSMILES('CCC')
]
for spc in spcs:
submit(spc)
return True
def funcGet():
"""
Test if we can retrieve thermo of species even before we have submitted them explicitly.
"""
load()
spcs = [
Species().fromSMILES('C'),
Species().fromSMILES('CC'), \
Species().fromSMILES('CCC')
]
output = []
for spc in spcs:
data = spc.getThermoData()
output.append((spc, data))
for spc, data in output:
if not data:
return False
return True
def funcSubmitGet():
"""
Test if we can retrieve thermo of species after submitting some of them.
"""
load()
spcs = [
Species().fromSMILES('C'),\
Species().fromSMILES('CC'), \
Species().fromSMILES('CCC')
]
for spc in spcs:
submit(spc)
absent = Species().fromSMILES('[CH3]')
data = absent.getThermoData()
if not data: return False
present = Species().fromSMILES('CC')
data = present.getThermoData()
if not data: return False
random.shuffle(spcs)
for spc in spcs:
data = spc.getThermoData()
if not data: return False
return True
@work_in_progress
class AsyncThermoTest(TestScoopCommon):
def __init__(self, *args, **kwargs):
# Parent initialization
super(self.__class__, self).__init__(*args, **kwargs)
# Only setup the scoop framework once, and not in every test method:
super(self.__class__, self).setUp()
@unittest.skipUnless(sys.platform.startswith("linux"),
"test currently only runs on linux")
def testSubmit(self):
"""
Test that we can submit a request to generate
thermo/transport for a number of species.
"""
result = futures._startup(funcSubmit)
self.assertEquals(result, True)
@unittest.skipUnless(sys.platform.startswith("linux"),
"test currently only runs on linux")
def testGet(self):
"""
Test that we can get the data of a number of species.
"""
result = futures._startup(funcGet)
self.assertEquals(result, True)
if __name__ == '__main__' and os.environ.get('IS_ORIGIN', "1") == "1":
unittest.main()
| mit | Python |
|
6a47c684012b98679c9274ca4087958c725a1fa7 | support extensions in tests | evansde77/dockerstache,evansde77/dockerstache,evansde77/dockerstache | test/unit/dockerstache_tests.py | test/unit/dockerstache_tests.py | #!/usr/bin/env python
"""
dockerstache module test coverage for API calls
"""
import os
import tempfile
import json
import unittest
import mock
from dockerstache.dockerstache import run
class RunAPITests(unittest.TestCase):
"""tests for run API call"""
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.defaults = os.path.join(self.tempdir, 'defaults.json')
self.context = os.path.join(self.tempdir, 'context.json')
self.dotfile = os.path.join(self.tempdir, '.dockerstache')
with open(self.defaults, 'w') as handle:
json.dump(
{"defaults": {"value1": 1, "value2": 2}, "default_value": 99},
handle
)
with open(self.context, 'w') as handle:
json.dump(
{
"defaults": {"value2": 100},
"context": {"value3": 3, "value4": 4}
},
handle
)
with open(self.dotfile, 'w') as handle:
json.dump(
{
"context": self.context,
"defaults": self.defaults
},
handle
)
self.opts = {}
self.opts['input'] = self.tempdir
self.opts['output'] = None
self.opts['context'] = None
self.opts['defaults'] = None
def tearDown(self):
"""cleanup test data """
if os.path.exists(self.tempdir):
os.system("rm -rf {}".format(self.tempdir))
@mock.patch('dockerstache.dockerstache.process_templates')
def test_run(self, mock_process):
"""test run method"""
run(**self.opts)
self.failUnless(mock_process.called)
@mock.patch('dockerstache.dockerstache.process_templates')
def test_run_extend_context(self, mock_process):
"""test run method with extras for context"""
extend = {'extensions': {'extras': 'values'}}
self.opts['extend_context'] = extend
run(**self.opts)
self.failUnless(mock_process.called)
context = mock_process.call_args[0][2]
self.failUnless('extensions' in context)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python |
|
3618ce5749517c7757a04f0c08a74275e8e82b69 | Create fasttext.py | botlabio/autonomio,botlabio/autonomio | fasttext.py | fasttext.py | from __future__ import print_function
import numpy as np
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Embedding
from keras.layers import GlobalAveragePooling1D
from keras.datasets import imdb
class FastText:
'''
Takes in pandas dataframe with at least two columns where one
is the dependent variable, and one is text.
EXAMPLE USE:
FastText(data,var)
    If there is more than one possible dependent variable in the dataframe,
    you can run the model for any of them.
'''
def __init__(self,data,var):
self.data = data
self.var = var
self.null = self._configuration()
self.null = self._get_cube()
self.null = self._padding()
self.model = self._build_model()
def _configuration(self):
self.max_features = 125000
self.maxlen = 800
self.batch_size = 16
self.embedding_dims = 20
self.epochs = 2
return "NULL"
def _get_cube(self):
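        # Cube is assumed to be provided elsewhere in the autonomio
        # package; here it supplies the train/test split used below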
o = Cube(self.data,self.var)
self.x_train = o.x_train
self.y_train = o.y_train
self.x_test = o.x_test
self.y_test = o.y_test
return 'NULL'
def create_ngram_set(self,input_list, ngram_value=2):
return set(zip(*[input_list[i:] for i in range(ngram_value)]))
def add_ngram(self,sequences, token_indice, ngram_range=2):
new_sequences = []
for input_list in sequences:
new_list = input_list[:]
for i in range(len(new_list) - ngram_range + 1):
for ngram_value in range(2, ngram_range + 1):
ngram = tuple(new_list[i:i + ngram_value])
if ngram in token_indice:
new_list.append(token_indice[ngram])
new_sequences.append(new_list)
return new_sequences
def _padding(self):
self.x_train = sequence.pad_sequences(self.x_train, maxlen=self.maxlen)
self.x_test = sequence.pad_sequences(self.x_test, maxlen=self.maxlen)
return 'NULL'
def _build_model(self):
model = Sequential()
model.add(Embedding(self.max_features, # efficient embedding layer which maps
self.embedding_dims, # vocab indices into embedding_dims dimensions
input_length=self.maxlen))
model.add(GlobalAveragePooling1D()) # avg the embeddings of all words in the document
model.add(Dense(1, activation='hard_sigmoid')) # project onto a single unit
# output layer, and squash it
model.compile(loss='binary_crossentropy',
optimizer='adagrad',
metrics=['accuracy'])
model.fit(self.x_train, self.y_train,
batch_size=self.batch_size,
epochs=self.epochs,
validation_data=(self.x_test, self.y_test))
return model
| mit | Python |
|
d23b83f8052f1ca5a988b05c3893b884eb3be6cc | Add link.py | dustalov/watset,dustalov/watset | misc/link.py | misc/link.py | #!/usr/bin/env python
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
import argparse
import csv
import sys
import itertools
from collections import defaultdict, Counter
from math import log
from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics.pairwise import cosine_similarity as sim
from operator import itemgetter
from multiprocessing import Pool, cpu_count
parser = argparse.ArgumentParser()
parser.add_argument('--synsets', required=True)
parser.add_argument('--isas', required=True)
parser.add_argument('-k', nargs='?', type=int, default=6)
args = vars(parser.parse_args())
synsets, index, lexicon = {}, defaultdict(lambda: set()), set()
with open(args['synsets']) as f:
reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
synsets[int(row[0])] = [word.lower() for word in row[2].split(', ') if word]
for word in synsets[int(row[0])]:
index[word].add(int(row[0]))
lexicon.update(synsets[int(row[0])])
isas = defaultdict(lambda: set())
with open(args['isas']) as f:
reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for hyponym, hypernym in reader:
if hyponym in lexicon and hypernym in lexicon:
isas[hyponym].add(hypernym)
idf, D = defaultdict(lambda: 0), .0
for words in synsets.values():
hypernyms = [isas[word] for word in words if word in isas]
if not hypernyms:
continue
for hypernym in set.union(*hypernyms):
idf[hypernym] += 1
D += 1
idf = {hypernym: log(D / df) for hypernym, df in idf.items()}
def tf(w, words):
return float(Counter(words)[w])
def tfidf(w, words):
return tf(w, words) * idf.get(w, 1.)
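# Build a tf-idf weighted hypernym context vector for every synset that
# has at least one hypernym in the is-a relation.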
hctx = {}
for id, words in synsets.items():
hypernyms = list(itertools.chain(*(isas[word] for word in words if word in isas)))
if not hypernyms:
continue
hctx[id] = {word: tfidf(word, hypernyms) for word in hypernyms}
v = DictVectorizer().fit(hctx.values())
def emit(id):
hypernyms, vector, hsenses = hctx[id], v.transform(hctx[id]), {}
for hypernym in hypernyms:
candidates = {hid: synsets[hid] for hid in index[hypernym]}
if not candidates:
continue
candidates = {hid: {word: tfidf(word, words) for word in words} for hid, words in candidates.items()}
candidates = {hid: sim(vector, v.transform(words)) for hid, words in candidates.items()}
hid, cosine = max(candidates.items(), key=itemgetter(1))
if cosine > 0:
hsenses[(hypernym, hid)] = cosine
hsenses = dict(dict(sorted(hsenses.items(), key=itemgetter(1), reverse=True)[:args['k']]).keys())
return (id, hsenses)
i = 0
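# Disambiguate the hypernyms of every synset in parallel, one worker
# process per CPU core.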
with Pool(cpu_count()) as pool:
for id, hsenses in pool.imap_unordered(emit, hctx):
i += 1
print('%d\t%s' % (id, ', '.join(('%s#%d' % e for e in hsenses.items()))))
if i % 1000 == 0:
print('%d entries out of %d done.' % (i, len(hctx)), file=sys.stderr, flush=True)
if len(hctx) % 1000 != 0:
print('%d entries out of %d done.' % (len(hctx), len(hctx)), file=sys.stderr, flush=True)
| mit | Python |
|
7655e376696a04aa1c3596274861515953f592e8 | Add profiling script for savings code | ebmdatalab/openprescribing,ebmdatalab/openprescribing,annapowellsmith/openpresc,annapowellsmith/openpresc,ebmdatalab/openprescribing,annapowellsmith/openpresc,ebmdatalab/openprescribing,annapowellsmith/openpresc | openprescribing/frontend/price_per_unit/profile.py | openprescribing/frontend/price_per_unit/profile.py | """
Basic profiling code for working out where we're spending our time
Invoke with:
./manage.py shell -c 'from frontend.price_per_unit.profile import profile; profile()'
"""
from cProfile import Profile
import datetime
import time
from .savings import get_all_savings_for_orgs
def test():
get_all_savings_for_orgs("2019-11-01", "ccg", ["99C"])
# get_all_savings_for_orgs("2019-11-01", "all_standard_practices", [None])
def profile():
num_attempts = 5
attempts = []
for _ in range(num_attempts):
profiler = Profile()
start = time.time()
profiler.runcall(test)
duration = time.time() - start
attempts.append((duration, profiler))
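    # sort by duration so the fastest (least noisy) run comes first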
attempts.sort()
profile_file = "profile.{}.prof".format(
datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
attempts[0][1].dump_stats(profile_file)
print(
"{}s (best of {}), profile saved as: {}".format(
attempts[0][0], num_attempts, profile_file
)
)
| mit | Python |
|
40caa4c9b720388207e338ffde3cd7f2d85cdf0d | add a single script to perform formatting of base log files | aliclark/irctail,aliclark/irctail | base-format.py | base-format.py | #!/usr/bin/python
from __future__ import print_function
import sys
import re
import datetime
import ircformatlib as il
timeformat_format = '%H:%M:%S'
timeformat_formatlen = 8
timeformat_filler = ' ' * timeformat_formatlen
def timeformat(time):
try:
x = int(time)
dt = datetime.datetime.fromtimestamp(round(x / 1000.0))
return dt.strftime(timeformat_format)
except:
return timeformat_filler
def colorized_newstate():
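    # per-category color state (as consumed by ircformatlib): allocated
    # colors, usage counters, and a matcher for re-colorizing known
    # names when they appear inside message text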
return { 'maxlen': 0, 'hits': {}, 'counts': {}, 'allocated': {},
'textmatcher': {} }
def colorized_text(state, text, leadstr=''):
state['maxlen'] = il.getmaxlen(leadstr + text, state['maxlen'])
color = il.getcolor(text, state['allocated'], state['counts'],
state['hits'])
il.uplogs(color, state['hits'])
return (il.getmaxpad(leadstr + text, state['maxlen']) + leadstr +
color + text + il.clearseq)
chanformat_state = colorized_newstate()
def chanformat(channel):
if not channel:
return ''
return colorized_text(chanformat_state, channel)
nameformat_state = colorized_newstate()
def nameformat(name):
leadstr = ''
for lead in ('--- ', '* '):
if name.startswith(lead):
leadstr = lead
name = name[len(lead):]
break
for perm in ('@', '+', '%', '*'):
if name.startswith(perm):
leadstr += perm
name = name[len(perm):]
break
return colorized_text(nameformat_state, name, leadstr)
def textformat(text):
return il.text_colorize(il.text_colorize(text,
chanformat_state['textmatcher'],
chanformat_state['allocated']),
nameformat_state['textmatcher'],
nameformat_state['allocated'])
def combine_parts(channel, time, name, text):
tcsep = ''
if time and channel:
tcsep = ' '
return time + tcsep + channel + ' ' + name + ' ' + text
def main():
try:
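        # input lines are tab-separated: [channel\t]timestamp\tname\ttext,
        # with the leading channel field optional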
m = re.compile(r'(([^\t]+)\t)?([^\t]+)\t([^\t]+)\t([^\t]+)')
line = sys.stdin.readline()
while line:
r = m.match(line)
if r:
line = combine_parts(chanformat(r.group(2)),
timeformat(r.group(3)),
nameformat(r.group(4)),
textformat(r.group(5)))
else:
line = textformat(line)
print(line, end='')
sys.stdout.flush()
line = sys.stdin.readline()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
| isc | Python |
|
2766e8797515497e5569b31696416db68641c9b4 | Extend MediaRemovalMixin to move media files on updates | matus-stehlik/roots,rtrembecky/roots,matus-stehlik/glowing-batman,tbabej/roots,rtrembecky/roots,rtrembecky/roots,matus-stehlik/roots,matus-stehlik/roots,tbabej/roots,matus-stehlik/glowing-batman,tbabej/roots | base/models.py | base/models.py | import os
from django.conf import settings
class MediaRemovalMixin(object):
"""
Removes all files associated with the model, as returned by the
get_media_files() method.
"""
# Models that use this mixin need to override this method
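    # and return an iterable of file paths relative to MEDIA_ROOT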
def get_media_files(self):
return
def delete(self, *args, **kwargs):
for media_file in self.get_media_files():
path = settings.MEDIA_ROOT + media_file
if os.path.exists(path):
os.remove(path)
return super(MediaRemovalMixin, self).delete(*args, **kwargs)
def save(self, *args, **kwargs):
if self.pk:
# Primary key exists, object is being edited
old_object = self.__class__.objects.get(pk=self.pk)
path_pairs = zip(old_object.get_media_files(),
self.get_media_files())
# Move each associated file to its new location
for (old_path, new_path) in path_pairs:
full_old_path = settings.MEDIA_ROOT + old_path
full_new_path = settings.MEDIA_ROOT + new_path
if old_path != new_path and os.path.exists(full_old_path):
os.rename(full_old_path, full_new_path)
return super(MediaRemovalMixin, self).save(*args, **kwargs)
| import os
from django.conf import settings
class MediaRemovalMixin(object):
"""
Removes all files associated with the model, as returned by the
get_media_files() method.
"""
# Models that use this mixin need to override this method
def get_media_files(self):
return
def delete(self):
for media_file in self.get_media_files():
path = settings.MEDIA_ROOT + media_file
if os.path.exists(path):
os.remove(path)
return super(MediaRemovalMixin, self).delete()
| mit | Python |
24c642063ffcb3313545b2e1ba3abbb62aa98437 | Add cuit validator to utils module | coyotevz/nobix-app | nbs/utils/validators.py | nbs/utils/validators.py | # -*- coding: utf-8 -*-
def validate_cuit(cuit):
"from: http://python.org.ar/pyar/Recetario/ValidarCuit by Mariano Reingart"
# validaciones minimas
    if len(cuit) != 13 or cuit[2] != "-" or cuit[11] != "-":
return False
base = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2]
cuit = cuit.replace("-", "")
# calculo digito verificador
aux = 0
for i in range(10):
        aux += int(cuit[i]) * base[i]
aux = 11 - (aux - (int(aux/11) * 11))
if aux == 11:
aux = 0
if aux == 10:
aux = 9
return aux == int(cuit[10])
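# Usage sketch (check digit computed with the weights above):
#   validate_cuit("20-12345678-6")  # -> True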
| mit | Python |
|
7274f9286bd267970c286954e9d21e601af30cb7 | Create messenger.py | wanglei6111/-Python- | messenger.py | messenger.py | # -*- coding: utf-8 -*-
import requests
apiurl = 'your API URL'
apiheaders = {'U-ApiKey': 'your API key'}
code = "your dynamic auth code"
response = requests.get(apiurl, params={"media_id":'gh_3fc78df4c9d2',"auth_code":code, "scene":1,"device_no":1,"location":'jia'})
json = response.json()
print(json)
| apache-2.0 | Python |
|
620ad7f4dc5ed9403f468f592b99a22a92d22072 | make python -m i3configger work | obestwalter/i3configger | i3configger/__main__.py | i3configger/__main__.py | import i3configger.main
if __name__ == "__main__":
i3configger.main.main()
| mit | Python |
|
ad2178a8973ce2de55611321c0b7b57b1488fc6b | move utilities in a private module | masci/django-appengine-toolkit,masci/django-appengine-toolkit,masci/django-appengine-toolkit | appengine_toolkit/management/commands/_utils.py | appengine_toolkit/management/commands/_utils.py | import pkg_resources
import os
class RequirementNotFoundError(Exception):
pass
def collect_dependency_paths(package_name):
"""
    Recursively collect the filesystem paths of the top-level modules
    provided by `package_name` and by every distribution it requires.
"""
deps = []
try:
dist = pkg_resources.get_distribution(package_name)
except ValueError:
message = "Distribution '{}' not found.".format(package_name)
raise RequirementNotFoundError(message)
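    # top_level.txt lists the importable top-level names provided by
    # the distribution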
if dist.has_metadata('top_level.txt'):
for line in dist.get_metadata('top_level.txt').split():
deps.append(os.path.join(dist.location, line))
for req in dist.requires():
deps.extend(collect_dependency_paths(req.project_name))
return deps
| bsd-3-clause | Python |
|
79b99968d7c9e728efe05f8c962bdda5c9d56559 | Add LDAP authentication plugin | ul-fmf/projekt-tomo,ul-fmf/projekt-tomo,ul-fmf/projekt-tomo,matijapretnar/projekt-tomo,matijapretnar/projekt-tomo,matijapretnar/projekt-tomo,matijapretnar/projekt-tomo,ul-fmf/projekt-tomo,matijapretnar/projekt-tomo,ul-fmf/projekt-tomo,ul-fmf/projekt-tomo | web/utils/auth.py | web/utils/auth.py | # http://www.djangosnippets.org/snippets/501/
from django.contrib.auth.models import User
from django.conf import settings
import ldap
class ActiveDirectoryBackend:
supports_object_permissions = False
supports_anonymous_user = False
supports_inactive_user = False
def authenticate(self, username=None, password=None):
if username:
username = username.lower()
if not self.is_valid(username, password):
return None
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
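            # first login: fetch the user's attributes from Active
            # Directory and create a matching local Django user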
l = ldap.initialize(settings.AD_LDAP_URL)
binddn = '%s@%s' % (username, settings.AD_NT4_DOMAIN)
l.simple_bind_s(binddn, password)
result = l.search_ext_s(settings.AD_SEARCH_DN, ldap.SCOPE_SUBTREE,
'sAMAccountName=%s' % username, settings.AD_SEARCH_FIELDS)[0][1]
l.unbind_s()
# givenName == First Name
if 'givenName' in result:
first_name = result['givenName'][0]
else:
first_name = None
# sn == Last Name (Surname)
if 'sn' in result:
last_name = result['sn'][0]
else:
last_name = None
# mail == Email Address
if 'mail' in result:
email = result['mail'][0]
else:
email = None
user = User(username=username, first_name=first_name, last_name=last_name, email=email)
user.is_staff = False
user.is_superuser = False
user.set_password(password)
user.save()
return user
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
def is_valid(self, username=None, password=None):
# Disallowing null or blank string as password
# as per comment: http://www.djangosnippets.org/snippets/501/#c868
if password is None or password == '':
return False
if username:
username = username.lower()
binddn = '%s@%s' % (username, settings.AD_NT4_DOMAIN)
try:
l = ldap.initialize(settings.AD_LDAP_URL)
l.simple_bind_s(binddn, password)
l.unbind_s()
return True
except ldap.LDAPError:
return False
| agpl-3.0 | Python |
|
6f8460b10827a9877fd0c3f0d45a01e7b2d42014 | Create ios.py | wizardofozzie/pybitcointools | bitcoin/ios.py | bitcoin/ios.py | import ecdsa
import binascii
import hashlib
import struct
from bitcoin.main import *
from bitcoin.pyspecials import *
# https://gist.github.com/b22e178cff75c4b432a8
# Returns byte string value, not hex string
def varint(n):
if n < 0xfd:
return struct.pack('<B', n)
elif n < 0xffff:
return struct.pack('<cH', '\xfd', n)
elif n < 0xffffffff:
return struct.pack('<cL', '\xfe', n)
else:
return struct.pack('<cQ', '\xff', n)
# Takes and returns byte string value, not hex string
def varstr(s):
return varint(len(s)) + s
def privtopub(s):
# accepts hex encoded (sec) key, returns hex pubkey
sk = ecdsa.SigningKey.from_string(s.decode('hex'), curve=ecdsa.SECP256k1)
#vk = sk.verifying_key
    return '04' + binascii.hexlify(sk.verifying_key.to_string()) # TODO: add compressed func
# Input is a hex-encoded, DER-encoded signature
# Output is a 64-byte hex-encoded signature
def derSigToHexSig(s):
s, junk = ecdsa.der.remove_sequence(s.decode('hex'))
if junk != '':
print 'JUNK', junk.encode('hex')
assert(junk == '')
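    # the DER sequence contains the two signature integers (r, s)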
x, s = ecdsa.der.remove_integer(s)
y, s = ecdsa.der.remove_integer(s)
return '%064x%064x' % (x, y)
def readyRawTx(rawtx, scriptpubkey, hashcode=1):
# takes rawtx and inserts scriptpubkey into scriptsig and appends '01000000'
    # str.replace returns a new string, so the result must be reassigned
    rawtx = rawtx.replace('00ffffffff', scriptpubkey + 'ffffffff')
    return rawtx + binascii.hexlify(struct.pack('<L', hashcode))
def signTx(rawtx, privkey, spk, hashcode=1):
# rawtx = unsigned Tx w/ scriptPubKey in ScriptSig and '01000000' appended
rawtx = readyRawTx(rawtx, spk, hashcode=hashcode)
s256 = hashlib.sha256(hashlib.sha256(rawtx.decode('hex')).digest()).digest()
sk = ecdsa.SigningKey.from_string(privkey.decode('hex'), curve=ecdsa.SECP256k1)
sig = sk.sign_digest(s256, sigencode=ecdsa.util.sigencode_der) + '\01' # 01 is hashtype
pubKey = privtopub(privkey)
scriptSig = varstr(sig).encode('hex') + varstr(pubKey.decode('hex')).encode('hex')
return scriptSig
def privkey_to_pubkey(privkey):
f = get_privkey_format(privkey)
privkey = decode_privkey(privkey, f)
if privkey >= N:
raise Exception("Invalid privkey")
if f in ['bin', 'bin_compressed', 'hex', 'hex_compressed', 'decimal']:
try:
return encode_pubkey(fast_multiply(G, privkey), f)
except RuntimeError:
            assert f == 'hex'
import bitcoin.ios as ios
return ios.privtopub(privkey)
else:
        try:
            return encode_pubkey(fast_multiply(G, privkey), f.replace('wif', 'hex'))
except RuntimeError:
assert f in ('hex', 'wif')
import bitcoin.ios as ios
return ios.privtopub(privkey)
# SIG = '47304402202c2e1a746c556546f2c959e92f2d0bd2678274823cc55e11628284e4a13016f80220797e716835f9dbcddb752cd0115a970a022ea6f2d8edafff6e087f928e41baac014104392b964e911955ed50e4e368a9476bc3f9dcc134280e15636430eb91145dab739f0d68b82cf33003379d885a0b212ac95e9cddfd2d391807934d25995468bc55'
#if __name__ == '__main__':
# unittest.main()
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.