commit
stringlengths 40
40
| subject
stringlengths 4
1.73k
| repos
stringlengths 5
127k
| old_file
stringlengths 2
751
| new_file
stringlengths 2
751
| new_contents
stringlengths 1
8.98k
| old_contents
stringlengths 0
6.59k
| license
stringclasses 13
values | lang
stringclasses 23
values |
---|---|---|---|---|---|---|---|---|
2dff474fe7723ebc7d7559fc77791924532d58db | reorder imports for pep8 | zeroSteiner/boltons,mgaitan/boltons,kevinastone/boltons,markrwilliams/boltons,siemens/boltons,suranap/boltons,doublereedkurt/boltons,neuropil/boltons | boltons/timeutils.py | boltons/timeutils.py | # -*- coding: utf-8 -*-
import bisect
import datetime
from datetime import timedelta
from strutils import cardinalize
def total_seconds(td):
    """Pure-Python equivalent of Python 2.7's ``timedelta.total_seconds()``.

    Accepts a timedelta object and returns the total number of seconds it
    spans, as a float.

    >>> td = datetime.timedelta(days=4, seconds=33)
    >>> total_seconds(td)
    345633.0
    """
    micros_per_second = 1000000.0
    # Collapse days into seconds first, then fold in the microsecond part.
    whole_seconds = (td.days * 86400) + td.seconds  # 86400 = 24 * 60 * 60
    return ((whole_seconds * micros_per_second) + td.microseconds) / micros_per_second
# (multiplier, unit delta, unit name): the threshold at which each unit
# becomes the display unit is multiplier * unit delta (0s for 'second',
# 60 days for 'month', 365 days for 'year', ...).
_BOUNDS = [(0, timedelta(seconds=1), 'second'),
           (1, timedelta(seconds=60), 'minute'),
           (1, timedelta(seconds=3600), 'hour'),
           (1, timedelta(days=1), 'day'),
           (1, timedelta(days=7), 'week'),
           (2, timedelta(days=30), 'month'),
           (1, timedelta(days=365), 'year')]
# Fold the multiplier in: the first tuple element becomes the absolute
# timedelta threshold for that unit.
_BOUNDS = [(b[0] * b[1], b[1], b[2]) for b in _BOUNDS]
# Parallel, sorted list of thresholds for bisect lookups.
_BOUND_DELTAS = [b[0] for b in _BOUNDS]
def decimal_relative_time(d, other=None, ndigits=0):
    """Return ``(scaled_delta, unit_name)`` describing how far ``d`` lies
    from ``other`` (default: UTC now).  Positive values are in the past,
    negative in the future.

    >>> now = datetime.datetime.utcnow()
    >>> decimal_relative_time(now - timedelta(days=1, seconds=3600), now)
    (1.0, 'day')
    >>> decimal_relative_time(now - timedelta(seconds=0.002), now, ndigits=5)
    (0.002, 'seconds')
    >>> '%g %s' % _
    '0.002 seconds'
    """
    if other is None:
        other = datetime.datetime.utcnow()
    delta = other - d
    delta_seconds = total_seconds(delta)
    # Pick the largest unit whose threshold the absolute delta has reached.
    unit_idx = bisect.bisect(_BOUND_DELTAS, abs(delta)) - 1
    _, unit_delta, unit_name = _BOUNDS[unit_idx]
    scaled = round(delta_seconds / total_seconds(unit_delta), ndigits)
    return scaled, cardinalize(unit_name, abs(scaled))
def relative_time(d, other=None, ndigits=0):
    """Return a human-readable phrase for how far ``d`` lies from ``other``.

    >>> now = datetime.datetime.utcnow()
    >>> relative_time(now, ndigits=1)
    '0 seconds ago'
    >>> relative_time(now - timedelta(days=1, seconds=36000), ndigits=1)
    '1.4 days ago'
    >>> relative_time(now + timedelta(days=7), now, ndigits=1)
    '1 week from now'
    """
    value, unit = decimal_relative_time(d, other, ndigits)
    # decimal_relative_time() returns negative values for future datetimes.
    direction = 'from now' if value < 0 else 'ago'
    return '%g %s %s' % (abs(value), unit, direction)
| # -*- coding: utf-8 -*-
import datetime
from datetime import timedelta
from strutils import cardinalize
def total_seconds(td):
    """\
    A pure-Python implementation of Python 2.7's timedelta.total_seconds().
    Accepts a timedelta object, returns number of total seconds.
    >>> td = datetime.timedelta(days=4, seconds=33)
    >>> total_seconds(td)
    345633.0
    """
    a_milli = 1000000.0  # microseconds per second (despite the name)
    td_ds = td.seconds + (td.days * 86400) # 24 * 60 * 60
    # Total microseconds, then back down to float seconds.
    td_micro = td.microseconds + (td_ds * a_milli)
    return td_micro / a_milli
import bisect
# (multiplier, unit delta, unit name): the threshold at which each unit
# becomes the display unit is multiplier * unit delta.
_BOUNDS = [(0, timedelta(seconds=1), 'second'),
           (1, timedelta(seconds=60), 'minute'),
           (1, timedelta(seconds=3600), 'hour'),
           (1, timedelta(days=1), 'day'),
           (1, timedelta(days=7), 'week'),
           (2, timedelta(days=30), 'month'),
           (1, timedelta(days=365), 'year')]
# Fold the multiplier in: the first tuple element becomes the absolute
# timedelta threshold for that unit.
_BOUNDS = [(b[0] * b[1], b[1], b[2]) for b in _BOUNDS]
# Parallel, sorted list of thresholds for bisect lookups.
_BOUND_DELTAS = [b[0] for b in _BOUNDS]
def decimal_relative_time(d, other=None, ndigits=0):
    """\
    Return (scaled delta, unit name) for how far ``d`` lies from ``other``
    (default: UTC now); positive values are in the past.

    >>> now = datetime.datetime.utcnow()
    >>> decimal_relative_time(now - timedelta(days=1, seconds=3600), now)
    (1.0, 'day')
    >>> decimal_relative_time(now - timedelta(seconds=0.002), now, ndigits=5)
    (0.002, 'seconds')
    >>> '%g %s' % _
    '0.002 seconds'
    """
    if other is None:
        other = datetime.datetime.utcnow()
    diff = other - d
    diff_seconds = total_seconds(diff)
    abs_diff = abs(diff)
    # Largest unit whose threshold abs_diff has reached (bisect never
    # returns 0 here because _BOUND_DELTAS[0] is timedelta(0)).
    b_idx = bisect.bisect(_BOUND_DELTAS, abs_diff) - 1
    bbound, bunit, bname = _BOUNDS[b_idx]
    #f_diff, f_mod = divmod(diff_seconds, total_seconds(bunit))
    f_diff = diff_seconds / total_seconds(bunit)
    rounded_diff = round(f_diff, ndigits)
    return rounded_diff, cardinalize(bname, abs(rounded_diff))
def relative_time(d, other=None, ndigits=0):
    """\
    Return a human-readable phrase for how far ``d`` lies from ``other``.

    >>> now = datetime.datetime.utcnow()
    >>> relative_time(now, ndigits=1)
    '0 seconds ago'
    >>> relative_time(now - timedelta(days=1, seconds=36000), ndigits=1)
    '1.4 days ago'
    >>> relative_time(now + timedelta(days=7), now, ndigits=1)
    '1 week from now'
    """
    drt, unit = decimal_relative_time(d, other, ndigits)
    # Negative values from decimal_relative_time() mean the future.
    phrase = 'ago'
    if drt < 0:
        phrase = 'from now'
    return '%g %s %s' % (abs(drt), unit, phrase)
| bsd-3-clause | Python |
c86835059c6fcc657290382e743922b14e7e7656 | add server | kartikanand/kartikanand.github.io,kartikanand/kartikanand.github.io | server.py | server.py | from flask import Flask, request
# WSGI application instance.
app = Flask(__name__)

@app.route('/')
def root():
    # Log the request's JSON body (None when no JSON was sent).
    print(request.json)
    return "hi"

if __name__ == '__main__':
    # Development server only; not suitable for production.
    app.run(debug=True, port=5000)
| mit | Python |
|
a4bc16a375dc30e37034993bd07d3014f3b936e1 | Fix corrupt abstract field data | pferreir/indico,mic4ael/indico,ThiefMaster/indico,mvidalgarcia/indico,DirkHoffmann/indico,indico/indico,DirkHoffmann/indico,mvidalgarcia/indico,indico/indico,pferreir/indico,indico/indico,ThiefMaster/indico,OmeGak/indico,DirkHoffmann/indico,indico/indico,mic4ael/indico,mic4ael/indico,mic4ael/indico,ThiefMaster/indico,OmeGak/indico,DirkHoffmann/indico,pferreir/indico,OmeGak/indico,mvidalgarcia/indico,OmeGak/indico,ThiefMaster/indico,mvidalgarcia/indico,pferreir/indico | migrations/versions/201610041721_8b5ab7da2d5_fix_corrupt_abstract_field_data.py | migrations/versions/201610041721_8b5ab7da2d5_fix_corrupt_abstract_field_data.py | """Fix corrupt abstract field data
Revision ID: 8b5ab7da2d5
Revises: 52d970fb6a74
Create Date: 2016-10-04 17:21:19.186125
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '8b5ab7da2d5'
down_revision = '52d970fb6a74'
def upgrade():
    """Replace corrupt ``'{}'`` abstract-field values with proper empties."""
    # We don't want any dicts in abstract field values...
    # Single choice fields with no value should be `null`, text fields should be empty
    op.execute('''
        UPDATE event_abstracts.abstract_field_values fv
        SET data = 'null'::json
        FROM events.contribution_fields cf
        WHERE data::jsonb = '{}'::jsonb AND cf.id = fv.contribution_field_id AND cf.field_type = 'single_choice';
        UPDATE event_abstracts.abstract_field_values fv
        SET data = '""'::json
        FROM events.contribution_fields cf
        WHERE data::jsonb = '{}'::jsonb AND cf.id = fv.contribution_field_id AND cf.field_type = 'text';
    ''')
def downgrade():
    # Data fix is not reversible: the original corrupt '{}' values are gone.
    pass
| mit | Python |
|
402911310eee757a0dd238466f11477c98c0748b | Add NARR solar radiation point sampler | akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem | scripts/coop/narr_solarrad.py | scripts/coop/narr_solarrad.py | """
Sample the NARR solar radiation analysis into estimated values for the
COOP point archive
1 langley is 41840.00 J m-2 is 41840.00 W s m-2 is 11.622 W hr m-2
So 1000 W m-2 x 3600 is 3,600,000 W s m-2 is 86 langleys
"""
import netCDF4
import datetime
import pyproj
import numpy
import iemdb
import sys
COOP = iemdb.connect('coop', bypass=True)
ccursor = COOP.cursor()
ccursor2 = COOP.cursor()
P4326 = pyproj.Proj(init="epsg:4326")
LCC = pyproj.Proj("+lon_0=-107.0 +y_0=0.0 +R=6367470.21484 +proj=lcc +x_0=0.0 +units=m +lat_2=50.0 +lat_1=50.0 +lat_0=50.0")
def get_gp(xc, yc, x, y):
    """Return the 2x2 grid cell surrounding point ``(x, y)``.

    ``xc`` and ``yc`` are the 1-D axis coordinate arrays of the grid.
    Returns three parallel 4-element lists: x indices, y indices, and the
    distance from (x, y) to each corner point.  The first corner is the
    nearest grid point; the others step into the quadrant containing the
    query point.
    """
    xidx = (numpy.abs(xc - x)).argmin()
    yidx = (numpy.abs(yc - y)).argmin()
    # Step direction: toward the quadrant the query point falls in.
    movex = 1 if (x - xc[xidx]) >= 0 else -1
    movey = 1 if (y - yc[yidx]) >= 0 else -1
    gridx = [xidx, xidx + movex, xidx + movex, xidx]
    gridy = [yidx, yidx, yidx + movey, yidx + movey]
    distance = [((y - yc[gy]) ** 2 + (x - xc[gx]) ** 2) ** 0.5
                for gx, gy in zip(gridx, gridy)]
    return gridx, gridy, distance
def do( date ):
    """ Process for a given date
    6z file has 6z to 9z data
    """
    # NARR files are 3-hourly; accumulate from this date's 6z file through
    # the next day's 3z file (8 files = 24 hours of radiation).
    sts = date.replace(hour=6) # 6z
    ets = sts + datetime.timedelta(days=1)
    now = sts
    interval = datetime.timedelta(hours=3)
    while now < ets:
        fn = now.strftime("/mesonet/ARCHIVE/data/%Y/%m/%d/model/NARR/"+
                          "rad_%Y%m%d%H00.nc")
        nc = netCDF4.Dataset( fn )
        rad = nc.variables['Downward_shortwave_radiation_flux'][0,:,:]
        if now == sts:
            # Grid coordinates only need to be read once.
            xc = nc.variables['x'][:] * 1000.0 # convert to meters
            yc = nc.variables['y'][:] * 1000.0 # convert to meters
            total = rad * 10800.0 # 3 hr rad to total rad
        else:
            total += (rad * 10800.0)
        nc.close()
        now += interval
    ccursor.execute("""
    SELECT station, x(geom), y(geom) from alldata a JOIN stations t on
    (a.station = t.id) where day = %s
    """, (date.strftime("%Y-%m-%d"), ))
    for row in ccursor:
        # Inverse-distance-weighted average of the four NARR grid points
        # surrounding the station.
        (x,y) = pyproj.transform(P4326, LCC, row[1], row[2])
        (gridxs, gridys, distances) = get_gp(xc, yc, x, y)
        z0 = total[gridys[0], gridxs[0]]
        z1 = total[gridys[1], gridxs[1]]
        z2 = total[gridys[2], gridxs[2]]
        z3 = total[gridys[3], gridxs[3]]
        val = ((z0/distances[0] + z1/distances[1] + z2/distances[2]
                + z3/distances[3]) / (1./distances[0] + 1./distances[1] +
                                      1./distances[2] + 1./distances[3] ))
        # J m-2 -> langleys; 1 langley = 41840 J m-2 (see module docstring).
        langleys = val / 41840.0
        if langleys < 0:
            print 'WHOA! Negative RAD: %.2f, station: %s' % (langleys, row[0])
            continue
        # NOTE(review): the table name is built by string interpolation from
        # the station id (first two chars = state); the id comes from our own
        # database, but parameterizing would be safer.
        ccursor2.execute("""
        UPDATE alldata_"""+ row[0][:2] +""" SET narr_srad = %s WHERE
        day = %s and station = %s
        """, (langleys, date.strftime("%Y-%m-%d"), row[0]))
# Run for the date given on the command line as: YYYY MM DD.
do( datetime.datetime(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3])) )
ccursor2.close()
COOP.commit()
COOP.close()
|
e4e572925e987fba59c3421a80d9bc247e04026d | add scraper of NDBC metadata | akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem | scripts/dbutil/scrape_ndbc.py | scripts/dbutil/scrape_ndbc.py | """See if we can get metadata dynmically from NDBC
var currentstnlat = 29.789;
var currentstnlng = -90.42;
var currentstnname = '8762482 - West Bank 1, Bayou Gauche, LA';
<b>Site elevation:</b> sea level<br />
"""
import requests
import psycopg2
from pyiem.reference import nwsli2country, nwsli2state
OUTPUT = open('insert.sql', 'w')
def compute_network(nwsli):
    """Map an NWSLI identifier to ``(country, state, IEM network id)``.

    The lookup key is the identifier's trailing characters (after the
    three-character site prefix).  Returns ``(None, None, None)`` when the
    state cannot be determined.
    """
    suffix = nwsli[3:]
    country = nwsli2country.get(suffix)
    state = nwsli2state.get(suffix)
    if state is not None:
        # US networks omit the country from the network id.
        if country == 'US':
            return "US", state, "%s_DCP" % (state,)
        return country, state, "%s_%s_DCP" % (country, state)
    print(("Failure to compute state for nwsli: %s [country:%s, state:%s]"
           ) % (nwsli, country, state))
    return None, None, None
def do(nwsli):
    """Scrape the NDBC station page for one NWSLI and append an INSERT
    statement to the insert.sql output file."""
    uri = "http://www.ndbc.noaa.gov/station_page.php?station=%s" % (nwsli,)
    req = requests.get(uri)
    if req.status_code != 200:
        print("do(%s) failed with status code: %s" % (nwsli, req.status_code))
        return
    html = req.content
    meta = {'elevation': -999}
    # Pull lat/lon/name out of the page's inline javascript variables and
    # the "Site elevation" marker.
    for line in html.split("\n"):
        if line.strip().startswith("var currentstn"):
            tokens = line.strip().split()
            meta[tokens[1]] = " ".join(tokens[3:]).replace(
                '"', "").replace(";", "").replace("'", "")
        if line.find("<b>Site elevation:</b>") > -1:
            elev = line.strip().replace(
                "<b>Site elevation:</b>",
                "").replace("<br />", "").replace("above mean sea level",
                                                  "").strip()
            meta['elevation'] = (float(elev.replace("m", ""))
                                 if elev != 'sea level' else 0)
    if 'currentstnlng' not in meta:
        print("Failure to scrape: %s" % (nwsli,))
        return
    # Station name arrives as "ID - Name"; swap to "Name - ID".
    # NOTE(review): a station name containing additional hyphens would be
    # truncated here -- confirm against real NDBC names.
    tokens = meta['currentstnname'].split("-")
    name = "%s - %s" % (tokens[1].strip(), tokens[0].strip())
    country, state, network = compute_network(nwsli)
    if network is None:
        return
    lon = float(meta['currentstnlng'])
    lat = float(meta['currentstnlat'])
    # NOTE(review): values are interpolated straight into the SQL text; a
    # name containing a single quote would corrupt insert.sql.
    sql = """INSERT into stations(id, name, network, country, state,
    plot_name, elevation, online, metasite, geom) VALUES ('%s', '%s', '%s',
    '%s', '%s', '%s', %s, 't', 'f', 'SRID=4326;POINT(%s %s)');
    """ % (nwsli, name, network, country, state, name, meta['elevation'], lon,
           lat)
    OUTPUT.write(sql)
def main():
    """Scrape every unknown NWSLI that has produced an OSO product."""
    pgconn = psycopg2.connect(database='hads', host='iemdb', user='nobody')
    cursor = pgconn.cursor()
    cursor.execute("""SELECT distinct nwsli from unknown where
    product ~* 'OSO' ORDER by nwsli""")
    for row in cursor:
        do(row[0])
    OUTPUT.close()

if __name__ == '__main__':
    main()
| mit | Python |
|
ef745ed086ebd8e77e158c89b577c77296630320 | Add solution for 118 pascals triangle | comicxmz001/LeetCode,comicxmz001/LeetCode | Python/118_Pascals_Triangle.py | Python/118_Pascals_Triangle.py | class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
res = [[1],[1,1]]
if numRows == 0:
return []
elif numRows == 1:
return [[1]]
else:
old = [1,1]
for i in xrange(numRows-2):
temp = [1]
for j in xrange(len(old)-1):
temp.append(old[j]+old[j+1])
temp.append(1)
res.append(temp)
old = temp
return res
if __name__ == '__main__':
    # Python 2 style smoke test (print statement).
    print Solution().generate(6)
| mit | Python |
|
af7abc0fc476f7c048790fc8b378ac1af8ae8b33 | Create top-k-frequent-words.py | kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015 | Python/top-k-frequent-words.py | Python/top-k-frequent-words.py | # Time: O(n + klogk) on average
# Space: O(n)
# Given a non-empty list of words, return the k most frequent elements.
#
# Your answer should be sorted by frequency from highest to lowest.
# If two words have the same frequency, then the word with the lower alphabetical order comes first.
#
# Example 1:
# Input: ["i", "love", "leetcode", "i", "love", "coding"], k = 2
# Output: ["i", "love"]
# Explanation: "i" and "love" are the two most frequent words.
# Note that "i" comes before "love" due to a lower alphabetical order.
# Example 2:
# Input: ["the", "day", "is", "sunny", "the", "the", "the", "sunny", "is", "is"], k = 4
# Output: ["the", "is", "sunny", "day"]
# Explanation: "the", "is", "sunny" and "day" are the four most frequent words,
# with the number of occurrence being 4, 3, 2 and 1 respectively.
# Note:
# You may assume k is always valid, 1 ≤ k ≤ number of unique elements.
# Input words contain only lowercase letters.
#
# Follow up:
# Try to solve it in O(n log k) time and O(n) extra space.
# Can you solve it in O(n) time with only O(k) extra space?
import collections
from random import randint
class Solution(object):
    def topKFrequent(self, words, k):
        """Return the k most frequent words, most frequent first; ties are
        broken by ascending alphabetical order.

        :type words: List[str]
        :type k: int
        :rtype: List[str]
        """
        # Count word frequencies.
        counts = collections.defaultdict(int)
        for word in words:
            counts[word] += 1
        # Pair each word with its negated count so that ascending tuple
        # order means "more frequent first, then alphabetical".
        # (iteritems() was Python-2-only; items() behaves the same here.)
        p = [(-count, word) for word, count in counts.items()]
        # Quickselect the k smallest pairs into p[:k] in O(n) average time,
        # then sort just those k in O(k log k).  The stray debug print of
        # sorted_p has been removed.
        self.kthElement(p, k)
        return [word for _, word in sorted(p[:k])]

    def kthElement(self, nums, k):  # O(n) on average
        """Partially reorder ``nums`` so its k smallest items occupy nums[:k]."""
        def partition_around_pivot(left, right, pivot_idx, nums):
            # Lomuto-style partition; returns the pivot's final index.
            pivot_value = nums[pivot_idx]
            new_pivot_idx = left
            nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
            for i in range(left, right):
                if nums[i] < pivot_value:
                    nums[i], nums[new_pivot_idx] = nums[new_pivot_idx], nums[i]
                    new_pivot_idx += 1
            nums[right], nums[new_pivot_idx] = nums[new_pivot_idx], nums[right]
            return new_pivot_idx

        left, right = 0, len(nums) - 1
        while left <= right:
            pivot_idx = randint(left, right)
            new_pivot_idx = partition_around_pivot(left, right, pivot_idx, nums)
            if new_pivot_idx == k - 1:
                return
            elif new_pivot_idx > k - 1:
                right = new_pivot_idx - 1
            else:  # new_pivot_idx < k - 1.
                left = new_pivot_idx + 1
|
60841e5b5a5f7e89c986fa202633ccf1a0f35315 | Add main module | othieno/geotagx-tool-validator | src/validator.py | src/validator.py | # -*- coding: utf-8 -*-
#
# This module is part of the GeoTag-X project validator tool.
#
# Author: Jeremy Othieno ([email protected])
#
# Copyright (c) 2016 UNITAR/UNOSAT
#
# The MIT License (MIT)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
def main():
    """Executes the application.

    Parses the process's command-line arguments and hands the parsed
    result to ``run``; returns whatever ``run`` returns.
    """
    import sys
    parser = _get_argparser()
    return run(parser.parse_args(sys.argv[1:]))
def run(arguments):
    """Executes the application with the specified command-line arguments.

    Args:
        arguments: The parsed arguments object produced by ``main`` via
            ``argparse`` (the historical docstring said a list of strings,
            but ``main`` passes the ``parse_args`` result).

    Raises:
        NotImplementedError: Always; this is still a stub.
    """
    raise NotImplementedError()
def _version():
    """Returns the tool's version string.

    Imports lazily; note the implicit relative import of ``__init__`` is
    Python-2-only syntax.
    """
    from __init__ import __version__
    return "GeoTag-X Project Validator v%s, Copyright (C) 2016 UNITAR/UNOSAT." % __version__
def _get_argparser():
    """Constructs the application's command-line argument parser.

    Returns:
        argparse.ArgumentParser: A command-line argument parser instance.

    Raises:
        NotImplementedError: Always; this is still a stub.
    """
    raise NotImplementedError()
| mit | Python |
|
344ee4f5aafa19271a428d171f14b52d26a3f588 | Create solver.py | ThatClyde/Simple_sudoku_solver | solver.py | solver.py | from models import Table
from utils import sector_counter, clearscreen
#start with blank screen
clearscreen()
# building the blank sudoku table
sudoku = Table()
# Having the user enter the sudoku puzzle
sudoku.get_table()
print("This is your sudoku puzzle:")
print(sudoku)
num = 1
row = 0
col = 0
counter = 0
max_tries = 1000
# This will loop through while the puzzle isn't solved, or until it's reached the maximum tries.
# Strategy: "hidden single" -- if a number fits in exactly one cell of a
# sector, place it there.
while sudoku.puzzle_has_blanks() and counter < max_tries:
    # NOTE(review): range(10) also tries num == 0; presumably harmless
    # since 0 marks a blank -- confirm against the Table model.
    for num in range(10):
        # this will cause it to iterate through the sectors in the grid
        for sector_id in range(9):
            #setting the number of flagged/possible spots to 0
            sudoku.flagged_spots = 0
            # the massive if statements that looks at a box in the puzzle to determine if those things are all true.
            for number_in_block,row,col in sudoku.iter_sector(sector_id):
                if (sudoku.current_box_is_blank(row,col)
                        and sudoku.num_is_not_in_sector(row, col, num)
                        and sudoku.num_is_not_in_row(row, col, num)
                        and sudoku.num_is_not_in_col(row, col, num)):
                    # if all are true, it flags that spot as a possible solution, and records it.
                    sudoku.flagged_spots += 1
                    sudoku.flag_num = num
                    sudoku.flag_row = row
                    sudoku.flag_col = col
                    # NOTE(review): recorded but never used afterwards.
                    number_that_was_in_block = number_in_block
                    # print("I'm flagging {},{}, for number: {} which is in sector {}, and this is the {} flag.".format(row,col,num,sector_id,sudoku.flagged_spots))
            # prior to going to the next number, if only one flag has been created in the section, the spot must be good, so it updates the table.
            if sudoku.flagged_spots == 1:
                sudoku.table[sudoku.flag_row][sudoku.flag_col] = sudoku.flag_num
                print("Putting {} in sector {} at {} row {} col.".format(num, sector_id+1, sudoku.flag_row+1, sudoku.flag_col+1))
    counter +=1

if counter == max_tries:
    print ("The solver took {} passes at it, and this is the best if could do:".format(counter))
else:
    print("Here is your solved puzzle! It took {} passes.".format(counter))
print(sudoku)
| mit | Python |
|
8752c36c89e3b2a6b012761d1b24183391245fea | Create Node.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | service/Node.py | service/Node.py | #########################################
# Node.py
# description: embedded node js
# categories: [programming]
# possibly more info @: http://myrobotlab.org/service/Node
#########################################
# start the service
# Runtime is presumably injected by the MyRobotLab script environment
# (not imported in this file).
node = Runtime.start("node","Node")
| apache-2.0 | Python |
|
3874a618fa30787b48578430d8abcdc29549102d | solve problem no.1991 | ruby3141/algo_solve,ruby3141/algo_solve,ruby3141/algo_solve | 01xxx/1991/answer.py | 01xxx/1991/answer.py | from typing import Dict
class Node:
    """Binary tree node holding a single-character label; traversals print
    the labels with no separators or trailing newline."""

    def __init__(self, value):
        self.value: str = value
        self.left: Node = None
        self.right: Node = None

    def preorder_traversal(self):
        # Root first, then left subtree, then right subtree.
        print(self.value, end='')
        for child in (self.left, self.right):
            if child:
                child.preorder_traversal()

    def inorder_traversal(self):
        # Left subtree, then root, then right subtree.
        if self.left:
            self.left.inorder_traversal()
        print(self.value, end='')
        if self.right:
            self.right.inorder_traversal()

    def postorder_traversal(self):
        # Both subtrees first, root last.
        for child in (self.left, self.right):
            if child:
                child.postorder_traversal()
        print(self.value, end='')
# One shared Node per label; nodes are created on first mention so a child
# may appear in the input before its own definition line.
Nodes: Dict[str, Node] = {}

N: int = int(input())
value:str
left:str
right:str
for _ in range(N):
    # Each line: "<node> <left child> <right child>", '.' meaning no child.
    value, left, right = input().split()
    if value not in Nodes:
        Nodes[value] = Node(value)
    if left != '.':
        if left not in Nodes:
            Nodes[left] = Node(left)
        Nodes[value].left = Nodes[left]
    if right != '.':
        if right not in Nodes:
            Nodes[right] = Node(right)
        Nodes[value].right = Nodes[right]

# Assumes the root is always 'A' (per the BOJ 1991 problem statement).
Nodes['A'].preorder_traversal()
print()
Nodes['A'].inorder_traversal()
print()
Nodes['A'].postorder_traversal()
|
f77b45b06f88912d154a5fd5b04d69780618110b | Fix migration [WAL-616] | opennode/nodeconductor-openstack | src/nodeconductor_openstack/openstack_tenant/migrations/0024_add_backup_size.py | src/nodeconductor_openstack/openstack_tenant/migrations/0024_add_backup_size.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from nodeconductor_openstack.openstack_tenant.models import Backup
def add_backup_size_to_metadata(apps, schema_editor):
    """Copy each backup's instance size into its metadata under 'size'."""
    # NOTE(review): uses the live Backup model import rather than
    # apps.get_model(), so this migration depends on the current model
    # definition -- fragile if the model changes later.
    for backup in Backup.objects.iterator():
        backup.metadata['size'] = backup.instance.size
        backup.save()
class Migration(migrations.Migration):
    # Pure data migration; runs after the external-IP removal migration.
    dependencies = [
        ('openstack_tenant', '0023_remove_instance_external_ip'),
    ]

    operations = [
        # No reverse_code supplied, so this migration is irreversible.
        migrations.RunPython(add_backup_size_to_metadata),
    ]
| mit | Python |
|
41d6401780b63a6d835ad48a40df183d6748c99a | add moe plotter utility | bouhlelma/smt,relf/smt,SMTorg/smt,bouhlelma/smt,relf/smt,SMTorg/smt | smt/extensions/moe_plotter.py | smt/extensions/moe_plotter.py | import six
import numpy as np
from matplotlib import colors
import matplotlib.pyplot as plt
class MOEPlotter(object):
    """Matplotlib visualisations for a mixture-of-experts (MOE) clustering."""

    def __init__(self, moe, xlimits):
        # moe: fitted MOE object exposing ``cluster`` (a Gaussian mixture)
        #      and the membership helper ``_proba_cluster``.
        # xlimits: array[ndim, 2] of input-space bounds.
        self.moe = moe
        self.xlimits = xlimits

    ################################################################################
    def plot_cluster(self, x_, y_):
        """
        Plot the Gaussian mixture cluster map and the training samples
        coloured by their hard cluster assignment (1D and 2D inputs only).

        Parameters:
        -----------
        x_: array_like
            Input training samples
        y_: array_like
            Output training samples
        """
        GMM=self.moe.cluster
        xlim = self.xlimits
        # Nothing meaningful to plot with a single component.
        if GMM.n_components > 1:
            colors_ = list(six.iteritems(colors.cnames))
            dim = xlim.shape[0]
            weight = GMM.weights_
            # NOTE(review): ``covars_`` is the pre-0.18 scikit-learn
            # attribute name; ``mean`` and ``cov`` are never used below.
            mean = GMM.means_
            cov = GMM.covars_
            prob_ = self.moe._proba_cluster(x_)
            # Hard assignment: index of the most probable cluster per sample.
            sort = np.apply_along_axis(np.argmax, 1, prob_)
            if dim == 1:
                # Membership probability curves over the input range.
                fig = plt.figure()
                x = np.linspace(xlim[0, 0], xlim[0, 1])
                prob = self.moe._proba_cluster(x)
                for i in range(len(weight)):
                    plt.plot(x, prob[:, i], ls='--')
                plt.xlabel('Input Values')
                plt.ylabel('Membership probabilities')
                plt.title('Cluster Map')
                # Training samples coloured by hard cluster assignment.
                fig = plt.figure()
                for i in range(len(sort)):
                    color_ind = int(((len(colors_) - 1) / sort.max()) * sort[i])
                    color = colors_[color_ind][0]
                    plt.plot(x_[i], y_[i], c=color, marker='o')
                plt.xlabel('Input Values')
                plt.ylabel('Output Values')
                plt.title('Samples with clusters')
            if dim == 2:
                # Evaluate memberships on a 20x20 grid of the input domain.
                # NOTE(review): np.array(zip(...)) relies on Python 2's
                # list-returning zip; under Python 3 this needs list(zip(...)).
                x0 = np.linspace(xlim[0, 0], xlim[0, 1], 20)
                x1 = np.linspace(xlim[1, 0], xlim[1, 1], 20)
                xv, yv = np.meshgrid(x0, x1)
                x = np.array(zip(xv.reshape((-1,)), yv.reshape((-1,))))
                prob = self.moe._proba_cluster(x)
                # 3D membership surfaces (projection='3d' presumably relies
                # on mpl_toolkits.mplot3d being imported elsewhere -- verify).
                fig = plt.figure()
                ax1 = fig.add_subplot(111, projection='3d')
                for i in range(len(weight)):
                    color = colors_[int(((len(colors_) - 1) / len(weight)) * i)][0]
                    ax1.plot_trisurf(x[:, 0], x[:, 1], prob[:, i], alpha=0.4, linewidth=0,
                                     color=color)
                plt.title('Cluster Map 3D')
                # 2D contour outline of each cluster.
                fig1 = plt.figure()
                for i in range(len(weight)):
                    color = colors_[int(((len(colors_) - 1) / len(weight)) * i)][0]
                    plt.tricontour(x[:, 0], x[:, 1], prob[:, i], 1, colors=color, linewidths=3)
                plt.title('Cluster Map 2D')
                # Training samples in 3D, coloured by hard assignment.
                fig = plt.figure()
                ax2 = fig.add_subplot(111, projection='3d')
                for i in range(len(sort)):
                    color = colors_[int(((len(colors_) - 1) / sort.max()) * sort[i])][0]
                    ax2.scatter(x_[i][0], x_[i][1], y_[i], c=color)
                plt.title('Samples with clusters')
                plt.show()
| bsd-3-clause | Python |
|
393735aaf76b6ddf773a06a72f0872334e56557e | add litgtk.py file | mit-dci/lit,mit-dci/lit,mit-dci/lit,mit-dci/lit | cmd/litgtk/litgtk.py | cmd/litgtk/litgtk.py | #!/usr/bin/env python
import json
import socket  # used for the raw RPC socket below; was missing

import websocket # `pip install websocket-client`

import pygtk
pygtk.require('2.0')
import gtk
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # global for socket connection
def getBal():
    """Query the lit node for the wallet balance (satoshis).

    NOTE(review): a single recv() is assumed to return one complete JSON
    response -- usually fine on localhost, but not guaranteed by TCP.
    """
    rpcCmd = {
        "method": "LitRPC.Bal",
        "params": [{
        }]
    }
    rpcCmd.update({"jsonrpc": "2.0", "id": "93"})
    print(json.dumps(rpcCmd))
    s.sendall(json.dumps(rpcCmd))
    r = json.loads(s.recv(8000000))
    print(r)
    return r["result"]["TotalScore"]
def getAdr():
    """Return the wallet's most recent receive address.

    ``NumToMake: 0`` asks the node to list existing addresses without
    generating new ones.
    """
    rpcCmd = {
        "method": "LitRPC.Address",
        "params": [{
            "NumToMake": 0,
        }]
    }
    rpcCmd.update({"jsonrpc": "2.0", "id": "94"})
    print(json.dumps(rpcCmd))
    s.sendall(json.dumps(rpcCmd))
    r = json.loads(s.recv(8000000))
    print(r)
    # Last entry in PreviousAddresses is the newest address.
    n = len(r["result"]["PreviousAddresses"]) -1
    return r["result"]["PreviousAddresses"][n] #[len(r["result"]["PreviousAddresses"]-1)]
def prSend(adr, amt):
    """Send ``amt`` satoshis to address ``adr``; return a status string
    (either the txid or the RPC error message)."""
    rpcCmd = {
        "method": "LitRPC.Send",
        "params": [{"DestAddrs": [adr,],"Amts": [amt,]}]
    }
    rpcCmd.update({"jsonrpc": "2.0", "id": "95"})
    print(json.dumps(rpcCmd))
    s.sendall(json.dumps(rpcCmd))
    r = json.loads(s.recv(8000000))
    print(r)
    if r["error"] != None:
        return "send error: " + r["error"]
    return "Sent. TXID: " + r["result"]["Txids"][0]
class lndrpcui:
    """Minimal GTK 2 wallet window backed by the lit JSON-RPC socket."""

    def dialogg(self, widget, adrWidget, amtWidget):
        # Send coins, then pop a modal dialog with the txid (or error text).
        txid = prSend(adrWidget.get_text(), amtWidget.get_value_as_int())
        d = gtk.MessageDialog(
            type=gtk.MESSAGE_INFO, buttons=gtk.BUTTONS_OK,message_format=txid)
        d.run()
        d.destroy()

    def gBal(self, widget, balWidget, rcvadrWidget):
        # Refresh both the balance label and the current receive address.
        bal = getBal()
        balWidget.set_text("Balance: " + "{:,}".format(bal) + " (" + str(bal/100000000.0) + "BTC)")
        adr = getAdr()
        rcvadrWidget.set_text(adr)

    def __init__(self):
        # Build the whole window: receive-address frame, balance frame with
        # a refresh button, a send form (address + amount in satoshis), and
        # a quit button.
        window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window = window
        window.connect("destroy", lambda w: gtk.main_quit())
        window.set_title("lit-gtk")
        main_vbox = gtk.VBox(False, 5)
        main_vbox.set_border_width(10)
        window.add(main_vbox)
        rcvFrame = gtk.Frame("Receive Address")
        main_vbox.pack_start(rcvFrame, True, False, 0)
        #~ recvHbox
        rcvhbox = gtk.HBox(False, 0)
        rcvhbox.set_border_width(5)
        rcvFrame.add(rcvhbox)
        rcvLabel = gtk.Label("receive address here")
        rcvLabel.set_selectable(True)
        rcvhbox.pack_start(rcvLabel, False, False, 5)
        balFrame = gtk.Frame("balance")
        main_vbox.pack_start(balFrame, True, False, 0)
        #~ balHbox
        balhbox = gtk.HBox(False, 0)
        balhbox.set_border_width(5)
        balFrame.add(balhbox)
        balLabel = gtk.Label("balance here")
        refreshButton = gtk.Button("Refresh")
        refreshButton.connect("clicked", self.gBal, balLabel, rcvLabel)
        balhbox.pack_start(refreshButton, False, False, 5)
        balhbox.pack_end(balLabel, False, False, 5)
        #~ adr / amt vbox
        frame = gtk.Frame("send coins (satoshis)")
        main_vbox.pack_start(frame, True, False, 0)
        vbox = gtk.VBox(False, 0)
        vbox.set_border_width(5)
        frame.add(vbox)
        #~ adr / amt hbox
        hbox = gtk.HBox(False, 0)
        vbox.pack_start(hbox, False, False, 5)
        sendButton = gtk.Button("Send")
        vbox.pack_start(sendButton, False, False, 5)
        #~ adrVbox
        adrVbox = gtk.VBox(False, 0)
        hbox.pack_start(adrVbox, True, True, 5)
        adrLabel = gtk.Label("send to address")
        adrLabel.set_alignment(0, 1)
        adrVbox.pack_start(adrLabel, False, False, 0)
        adrEntry = gtk.Entry(50)
        adrEntry.set_size_request(500, -1)
        adrVbox.pack_start(adrEntry, True, True, 0)
        #~ amtVbox
        amtVbox = gtk.VBox(False, 0)
        hbox.pack_start(amtVbox, False, False, 5)
        label = gtk.Label("amount")
        label.set_alignment(0, 1)
        amtVbox.pack_start(label, False, False, 0)
        # NOTE(review): looks like Adjustment(value, lower, upper, step) --
        # value 0 is below lower bound 1000000; confirm against pygtk docs.
        adj = gtk.Adjustment(0, 1000000, 100000000.0, 1.0)
        sendamtSpinner = gtk.SpinButton(adj, 1.0, 0)
        sendamtSpinner.set_wrap(False)
        #~ sendamtSpinner.set_size_request(100, -1)
        amtVbox.pack_start(sendamtSpinner, False, False, 0)
        #~ sendButton.connect("clicked", lambda w: prSend(adrEntry, sendamtSpinner))
        sendButton.connect("clicked", self.dialogg, adrEntry, sendamtSpinner)
        quitButton = gtk.Button("Quit")
        quitButton.connect("clicked", lambda w: gtk.main_quit())
        buttonBox = gtk.HBox(False, 0)
        buttonBox.pack_start(quitButton, False, False, 5)
        main_vbox.pack_start(buttonBox, False, False, 5)
        window.show_all()
def main():
    """Connect to the local lit RPC socket and enter the GTK main loop."""
    s.connect(("127.0.0.1", 8001))
    gtk.main()
    return 0

if __name__ == "__main__":
    # Build the UI first, then connect and run the event loop.
    lndrpcui()
    main()
| mit | Python |
|
398a6d23266e52436e6b8efd9d7ab053f490eb45 | add a lib to support requests with retries | JiscPER/magnificent-octopus,JiscPER/magnificent-octopus,JiscPER/magnificent-octopus | octopus/lib/requests_get_with_retries.py | octopus/lib/requests_get_with_retries.py | import requests
from time import sleep
def http_get_with_backoff_retries(url, max_retries=5, timeout=30):
    """GET ``url`` with exponential back-off retries on transient failures.

    Makes one initial attempt plus up to ``max_retries`` retries, sleeping
    2**n seconds between attempts (2, 4, 8, ...).  Retries on timeouts and
    connection errors (the original retried only timeouts, so a dropped
    connection failed immediately; it also slept once more *after* the
    final failed attempt, which this version avoids).

    Returns the ``requests.Response`` on success, or ``None`` if ``url``
    is falsy or every attempt failed.
    """
    if not url:
        return None
    for attempt in range(max_retries + 1):
        try:
            return requests.get(url, timeout=timeout)
        except (requests.exceptions.Timeout,
                requests.exceptions.ConnectionError):
            # Back off before the next try, but not after the last one.
            if attempt < max_retries:
                sleep(2 ** (attempt + 1))
    return None
|
a8b524318d7f9d4406193d610b2bb3ef8e56e147 | Add frameless drag region example. | r0x0r/pywebview,r0x0r/pywebview,r0x0r/pywebview,r0x0r/pywebview,r0x0r/pywebview | examples/frameless_drag_region.py | examples/frameless_drag_region.py | import webview
'''
This example demonstrates a user-provided "drag region" to move a frameless window
around, whilst maintaining normal mouse down/move events elsewhere. This roughly
replicates `-webkit-drag-region`.
'''
# Any element carrying the ``pywebview-drag-region`` class becomes a handle
# the user can drag the frameless window by (see module docstring).
html = '''
<head>
  <style type="text/css">
    .pywebview-drag-region {
      width: 50px;
      height: 50px;
      margin-top: 50px;
      margin-left: 50px;
      background: orange;
    }
  </style>
</head>
<body>
  <div class="pywebview-drag-region">Drag me!</div>
</body>
'''

if __name__ == '__main__':
    window = webview.create_window(
        'API example',
        html=html,
        frameless=True,  # no native frame, so no built-in drag area
        easy_drag=False,  # disable whole-window dragging; only the region drags
    )
    webview.start()
|
6fde041c3a92f0d0a0b92da55b12c8e60ecc7196 | Create handle_file.py | jadnohra/handle_file,jadnohra/handle_file | handle_file.py | handle_file.py | import os,sys,subprocess
g_dbg = '-dbg' in sys.argv or False
def handle_generic(fp,fn,fe):
    # Fallback handler for extensions with no registered handler.
    print 'Unknown extension for [{}]'.format(fp)
def handle_md(fp,fn,fe):
    """Extract a command from the first ``<!--- ... -->`` comment of a
    markdown file and execute it from the file's own directory.

    SECURITY NOTE(review): this executes an arbitrary command taken from
    file content -- only run on trusted files.
    """
    started = False; exec_cmd = [];
    with open(fp, "r") as ifile:
        lines = [x.rstrip().strip() for x in ifile.readlines()]
        for line in lines:
            # Collect lines from the comment opener until '-->' is seen.
            if started or line.startswith('<!---'):
                started = True
                exec_cmd.append(line.replace('<!---', ''))
                if '-->' in exec_cmd[-1]:
                    exec_cmd[-1] = exec_cmd[-1].split('-->')[0]
                    break
    if len(exec_cmd):
        exec_cmd = ''.join(exec_cmd)
        if g_dbg:
            print 'exec_cmd = [{}]'.format(exec_cmd)
            sys.stdout.flush()
        print 'running ...'; sys.stdout.flush()
        # Run relative to the file so the command's paths resolve.
        os.chdir(os.path.dirname(fp))
        pop = subprocess.Popen(exec_cmd.split())
        pop.wait()
        print 'done'
    else:
        print 'No command found for [{}]'.format(fp)
# Dispatch table: file extension -> handler.
k_ext_handlers = {'.md': handle_md}
fp,(fn,fe) = sys.argv[1], os.path.splitext(sys.argv[1])
if g_dbg:
    print 'fp,(fn,fe) = ', fp,(fn,fe)
# Unknown extensions fall back to handle_generic.
k_ext_handlers.get(fe, handle_generic)(fp,fn,fe)
| unlicense | Python |
|
829dbfe0c13284345e0fa305f71937738a6c8f50 | Create cms-checker.py | agusmakmun/Some-Examples-of-Simple-Python-Script,agusmakmun/Some-Examples-of-Simple-Python-Script | web/cms-checker.py | web/cms-checker.py | #!/usr/bin/env python
# Original source: http://www.blackcoder.info/c/cmschecker.txt
# Usage example: $ python cms-checker.py --addr 192.168.1.1
import urllib2
import argparse
import os
import sys
class neo:
    """Reverse-IP lookup via hackertarget.com, then naive CMS fingerprinting
    of every discovered vhost (Python 2 code)."""

    def cmsfinder(self):
        # One reverse-IP lookup for the --addr argument, then fetch each
        # returned hostname and grep the homepage body for CMS markers;
        # hits are appended to per-CMS text files.
        url = "http://api.hackertarget.com/reverseiplookup/?q="+args.addr
        rever = urllib2.Request(url)
        conn = urllib2.urlopen(rever).read()
        sp = conn.split("\n")
        for s in sp:
            CMS_URL = "http://"+s
            _Con = urllib2.Request(CMS_URL)
            _Data = urllib2.urlopen(_Con).read()
            if 'Joomla' in _Data:
                print "\t\t"+ s + '\t\033[1;34m --> Joomla\033[1;m'
                with open('Joomla.txt', 'a') as j:
                    j.write(s+"\n")
            elif 'wordpress' in _Data:
                print "\t\t"+ s + '\t\033[1;38m --> WordPress\033[1;m'
                with open('Wordpress.txt', 'a') as w:
                    w.write(s+"\n")
            elif 'Drupal' in _Data:
                print "\t\t"+ s + '\t\033[1;36m --> Drupal\033[1;m'
                with open('Drupal.txt', 'a') as D:
                    D.write(s+"\n")
            elif 'vBulletin' in _Data:
                print "\t\t"+ s + '\t\033[1;38m --> vBulletin \033[1;m'
                with open('vBulletin.txt', 'a') as vB:
                    vB.write(s+"\n")
            else:
                print "\t\t"+ s + '\t\033[1;37m --> No CMS \033[1;m'
                with open('normal_site.txt', 'a') as f:
                    f.write(s+"\n")

    def __init__(self):
        # Clear the terminal (Windows vs POSIX) and print the banner.
        if os.name == "nt":
            os.system('cls')
        else:
            os.system('clear')
        banner = """ \t\t\t\t
\t\t\t\t\033[1;31m
\t\t\t\t _____ _____ _____ _____ _ _
\t\t\t\t| | | __| | __|_|___ _| |___ ___
\t\t\t\t| --| | | |__ | | __| | | . | -_| _|
\t\t\t\t|_____|_|_|_|_____| |__| |_|_|_|___|___|_|
\t\t\t\t \033[1;m
\t\t\t\tCoded By Ne0-h4ck3r
"""
        print banner
        print ""
        print "\t\t" + "-" * 100
        print ""
if len(sys.argv) == 1:
print ""
print "\t\tHow To Use: python sitecheker.py --addr [ IP ]"
print ""
sys.exit(1)
black = argparse.ArgumentParser()
black.add_argument('--addr', help="Enter Your IP-ADDRESS: ")
args = black.parse_args()
if __name__ == "__main__":
neo().cmsfinder()
| agpl-3.0 | Python |
|
85a604a1991b5dc9a017514848645723921247a7 | add missing file | sassoftware/mcp,sassoftware/mcp | mcp/constants.py | mcp/constants.py | #
# Copyright (c) 2007 rPath, Inc.
#
# All rights reserved
#
version = '1.0.0'
| apache-2.0 | Python |
|
a857340a2d67a8055b9e3802327800dcdd652df4 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/037eb7657cb3f49c70c18f959421831e6cb9e4ad. | yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,paolodedios/tensorflow,Intel-Corporation/tenso
rflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,yongtang/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,yongtang/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "037eb7657cb3f49c70c18f959421831e6cb9e4ad"
TFRT_SHA256 = "80194df160fb8c91c7fcc84f34a60c16d69bd66f6b2f8e5404fae5e7d1221533"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "d6884515d2b821161b35c1375d6ea25fe6811d62"
TFRT_SHA256 = "0771a906d327a92bdc46b02c8ac3ee1593542ceadc07220e53790aa8c7ea5702"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| apache-2.0 | Python |
c563f12bcb8b10daca64e19ade3c373c112cb659 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/8f7619fa042357fa754002104f575a8a72ee69ed. | frreiss/tensorflow-fred,frreiss/tensorflow-fred,tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,Intel-Corporation/tensorflow,karllessard/tensorflow,frreiss/tensorflow-fred,Intel-tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow,yongtang/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/t
ensorflow,Intel-Corporation/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,karllessard/tensorflow,karllessard/tensorflow,karllessard/tensorflow,Intel-Corporation/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow,frreiss/tensorflow-fred,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,Intel-Corporation/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,Intel-Corporation/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,Intel-Corporation/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,frreiss/tensorflow-fred | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "8f7619fa042357fa754002104f575a8a72ee69ed"
TFRT_SHA256 = "2cb8410fb4655d71c099fb9f2d3721d0e485c8db518553347ce21ca09e0e1b43"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "584ba7eab84bd45941fabc28fbe8fa43c74673d8"
TFRT_SHA256 = "e2f45638580ba52116f099d52b73c3edcf2ad81736a434fb639def38ae4cb225"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
| apache-2.0 | Python |
29e3d6b706a33780b1cb4863200ec7525ff035ce | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/cdf6d36e9a5c07770160ebac25b153481c37a247. | tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,gautam1858/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,karllessard/tensorflow,Intel-Corporation/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,karllessard/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,frreiss/tensorflow-fred,Intel-tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_tf_optimiz
er,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,Intel-Corporation/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,gautam1858/tensorflow,frreiss/tensorflow-fred,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,yongtang/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,Intel-Corporation/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,Intel-Corporation/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,frreiss/tensorflow-fred,gautam1858/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,yongtang/tensorflow,Intel-tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "cdf6d36e9a5c07770160ebac25b153481c37a247"
TFRT_SHA256 = "c197f9b3584cae2d65fc765f999298ae8b70d9424ec0d4dd30dbdad506fb98bb"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "6ca8a6dff0e5d4f3a17b0c0879aa5de622683680"
TFRT_SHA256 = "09779efe84cc84e859e206dd49ae6b993577d7dae41f90491e5b862d7a70ba51"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
| apache-2.0 | Python |
6276cf142d233db377dc490a47c5ad56d2906c75 | Add version module | jhauberg/cards.py,jhauberg/cards.py,jhauberg/cards.py,jhauberg/cards.py | cards/version.py | cards/version.py | # coding=utf-8
__version__ = '0.4.9'
| mit | Python |
|
6c7df140c6dccb4b56500ba25f6b66ab7ea3b605 | solve 1 problem | Shuailong/Leetcode | solutions/reverse-bits.py | solutions/reverse-bits.py | #!/usr/bin/env python
# encoding: utf-8
"""
reverse-bits.py
Created by Shuailong on 2016-03-02.
https://leetcode.com/problems/reverse-bits/.
"""
class Solution(object):
def reverseBits(self, n):
"""
:type n: int
:rtype: int
"""
res = 0
count = 0
while n:
d = n & 1
n >>= 1
res <<= 1
res += d
count += 1
res <<= 32-count
return res
def main():
solution = Solution()
n = 43261596
print solution.reverseBits(n)
if __name__ == '__main__':
main()
| mit | Python |
|
793b273c3fdcef428ffb6aec5dbcbb768989f175 | Add 0004 file | Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Show-Me-the-Code/python,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python | Drake-Z/0004/0004.py | Drake-Z/0004/0004.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'第 0004 题:任一个英文的纯文本文件,统计其中的单词出现的个数。'
__author__ = 'Drake-Z'
import re
def tongji(file_path):
f = open(file_path, 'r').read()
f = re.split(r'[\s\,\;,\n]+', f)
print(len(f))
return 0
if __name__ == '__main__':
file_path = 'English.txt'
tongji(file_path) | mit | Python |
|
06f66859c305465c3f6f38617ecada4da94d41ff | set up skeleton | BradleyMoore/Algorithms | algorithms/sorting/quicksort.py | algorithms/sorting/quicksort.py | from random import randint
def quicksort(unsorted):
if len(unsorted) <= 1:
return unsorted
start = 0
end = start + 1
pivot = choose_pivot(start, end)
sort(unsorted, start, pivot, end)
def choose_pivot(start, end):
pivot = randint(start, end)
return pivot
def sort(unsorted, start, pivot, end):
pass
if __name__ == '__main__':
unsorted = [3,345,456,7,879,970,7,4,23,123,45,467,578,78,6,4,324,145,345,3456,567,5768,6589,69,69]
sort = quicksort(unsorted)
print '%r <-- unsorted' % unsorted
print '%r <-- sorted' % sort
| mit | Python |
|
0e31b15e4dae95b862fd4777659a9210e5e4ec86 | change of file path | openego/data_processing | preprocessing/python_scripts/renpass_gis/simple_feedin/renpassgis_feedin.py | preprocessing/python_scripts/renpass_gis/simple_feedin/renpassgis_feedin.py | """
ToDO:
* Greate one scaled time series
*
Database table:
* model_draft.ego_weather_measurement_point
* model_draft.ego_simple_feedin_full
Change db.py and add ego_simple_feedin_full
"""
__copyright__ = "ZNES"
__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
__url__ = "https://github.com/openego/data_processing/blob/master/LICENSE"
__author__ = "wolf_bunke"
from oemof.db import coastdat
import db
import pandas as pd
# get Classes and make settings
points = db.Points
conn = db.conn
scenario_name = 'eGo 100'
weather_year = 2011
filename = '2017-08-21_simple_feedin_ego-100-wj2011_all.csv'
config = 'config.ini'
import pandas as pd
import psycopg2
from sqlalchemy import create_engine
import numpy as np
from db import conn, readcfg, dbconnect
import os
# Settings
#filename = '2017-08-07_simple_feedin_All_subids_weatherids_ego_weatherYear2011.csv'
filename = 'simple_feedin_full.csv'
conn = conn
# read configuration file
path = os.path.join(os.path.expanduser("~"), '.open_eGo', 'config.ini')
config = readcfg(path=path)
# establish DB connection
section = 'oedb'
conn = dbconnect(section=section, cfg=config)
| agpl-3.0 | Python |
|
cbd64641f30c1a464528a2ec6d5323d29766830d | Add word embedding | sobhe/hazm,sobhe/hazm,sobhe/hazm | hazm/embedding.py | hazm/embedding.py | from . import word_tokenize
from gensim.models import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
import fasttext, os
supported_embeddings = ['fasttext', 'keyedvector', 'glove']
class WordEmbedding:
def __init__(self, model_type, model=None):
if model_type not in supported_embeddings:
raise KeyError(f'Model type "{model_type}" is not supported! Please choose from {supported_embeddings}')
if model:
self.model = model
self.model_type = model_type
def load_model(self, model_file):
if self.model_type == 'fasttext':
self.model = fasttext.load_model(model_file)
elif self.model_type == 'keyedvector':
if model_file.endswith('bin'):
self.model = KeyedVectors.load_word2vec_format(model_file, binary=True)
else:
self.model = KeyedVectors.load_word2vec_format(model_file)
elif self.model_type == 'glove':
word2vec_addr = str(model_file) + '_word2vec_format.vec'
if not os.path.exists(word2vec_addr):
_ = glove2word2vec(model_file, word2vec_addr)
self.model = KeyedVectors.load_word2vec_format(word2vec_addr)
self.model_type = 'keyedvector'
else:
raise KeyError(f'{self.model_type} not supported! Please choose from {supported_embeddings}')
def __getitem__(self, word):
if not self.model:
raise AttributeError('Model must not be None! Please load model first.')
return self.model[word]
def doesnt_match(self, txt):
if not self.model:
raise AttributeError('Model must not be None! Please load model first.')
return self.model.doesnt_match(word_tokenize(txt))
def similarity(self, word1, word2):
if not self.model:
raise AttributeError('Model must not be None! Please load model first.')
return self.model.similarity(word1, word2)
def get_vocab(self):
if not self.model:
raise AttributeError('Model must not be None! Please load model first.')
return self.model.get_words(include_freq=True)
def nearest_words(self, word, topn):
if not self.model:
raise AttributeError('Model must not be None! Please load model first.')
return self.model.get_nearest_neighbors(word, topn)
| mit | Python |
|
f34c91a6969567b23ad880dc43a0346cc5a5b513 | Add get_arxiv.py to download PDF from arXiv | liweitianux/atoolbox,liweitianux/atoolbox,liweitianux/atoolbox,liweitianux/atoolbox,liweitianux/atoolbox,liweitianux/atoolbox | cli/get_arxiv.py | cli/get_arxiv.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Get the arxiv abstract data and PDF for a given arxiv id.
#
# Weitian LI <[email protected]>
# 2015/01/23
#
import sys
import re
import urllib
import subprocess
import time
import mimetypes
from bs4 import BeautifulSoup
mirror = "http://jp.arxiv.org/"
def get_url(arxiv_id):
"""
Determine the full arxiv URL from the given ID/URL.
"""
if re.match(r'^[0-9]{7}$', arxiv_id):
print("ERROR: 7-digit ID not supported, please use the full URL")
sys.exit(2)
elif re.match(r'^[0-9]{4}\.[0-9]{4,5}$', arxiv_id):
arxiv_url = mirror + "abs/" + arxiv_id
elif re.match(r'^https{0,1}://.*arxiv.*/([0-9]{7}|[0-9]{4}\.[0-9]{4,5})$',
arxiv_id):
arxiv_url = arxiv_id
elif re.match(r'[a-zA-Z0-9.-]*arxiv.*/([0-9]{7}|[0-9]{4}\.[0-9]{4,5})$',
arxiv_id):
arxiv_url = "http://" + arxiv_id
else:
print("ERROR: unknown arxiv ID: %s" % arxiv_id)
exit(3)
return arxiv_url
def get_id(arxiv_url):
"""
Extract the ID from the URL.
"""
return arxiv_url.split('/')[-1]
def get_arxiv_abstract(arxiv_url):
"""
Get the arxiv abstract data and save to file '${id}.txt'.
"""
request = urllib.request.urlopen(arxiv_url)
arxiv_html = request.read()
soup = BeautifulSoup(arxiv_html)
title = soup.body.find('h1', attrs={'class': 'title'}).text\
.replace('\n', ' ')
authors = soup.body.find('div', attrs={'class': 'authors'}).text\
.replace('\n', ' ')
date = soup.body.find('div', attrs={'class': 'dateline'}).text\
.strip('()')
abstract = soup.body.find('blockquote', attrs={'class': 'abstract'})\
.text.replace('\n', ' ')[1:]
comments = soup.body.find('td', attrs={'class': 'comments'}).text
subjects = soup.body.find('td', attrs={'class': 'subjects'}).text
arxiv_id = get_id(arxiv_url)
filename = arxiv_id + '.txt'
f = open(filename, 'w')
f.write("URL: %s\n" % arxiv_url)
f.write("arXiv: %s\n" % arxiv_id)
f.write("%s\n\n" % date)
f.write("%s\n%s\n\n" % (title, authors))
f.write("%s\n\n" % abstract)
f.write("Comments: %s\n" % comments)
f.write("Subjects: %s\n" % subjects)
f.close()
def get_arxiv_pdf(arxiv_url):
"""
Get the arxiv PDF with cURL.
If the PDF is not generated yet, then retry after 10 seconds.
"""
p = re.compile(r'/abs/')
arxiv_pdf_url = p.sub('/pdf/', arxiv_url)
arxiv_id = get_id(arxiv_url)
filename = arxiv_id + '.pdf'
cmd = 'curl -o %(filename)s %(url)s' %\
{'filename': filename, 'url': arxiv_pdf_url}
print("CMD: %(cmd)s" % {'cmd': cmd})
subprocess.call(cmd, shell=True)
output = subprocess.check_output(['file', '-ib', filename])
filetype = output.decode(encoding='UTF-8').split(';')[0]
pdftype = 'application/pdf'
while filetype != pdftype:
time.sleep(10)
subprocess.call(cmd, shell=True)
output = subprocess.check_output(['file', '-ib', filename])
filetype = output.decode(encoding='UTF-8').split(';')[0]
def main():
if len(sys.argv) != 2:
print("Usage: %s <arxiv_id | arxiv_url>\n")
sys.exit(1)
arxiv_url = get_url(sys.argv[1])
arxiv_id = get_id(arxiv_url)
print("arxiv_url: %s" % arxiv_url)
print("arxiv_id: %s" % arxiv_id)
get_arxiv_abstract(arxiv_url)
print("downloading pdf ...")
get_arxiv_pdf(arxiv_url)
if __name__ == '__main__':
main()
| mit | Python |
|
13851dd6f2101ceea917504bd57540a4e54f0954 | Create __init__.py | de-crypto/Facebook-Chatbot | fb_nsitbot/migrations/__init__.py | fb_nsitbot/migrations/__init__.py | mit | Python |
||
14647b71fec7a81d92f044f6ac88304a4b11e5fd | create http server module | pasaunders/http-server | src/step1.py | src/step1.py | """A simple HTTP server."""
def response_ok():
"""Testing for 200 response code."""
pass
| mit | Python |
|
0a97f34b4ae4f7f19bfe00c26f495f399f827fab | Add file_regex | yukihirai0505/tutorial-program,yukihirai0505/tutorial-program,yukihirai0505/tutorial-program,yukihirai0505/tutorial-program,yukihirai0505/tutorial-program,yukihirai0505/tutorial-program,yukihirai0505/tutorial-program,yukihirai0505/tutorial-program,yukihirai0505/tutorial-program | python/file_regex/tmp.py | python/file_regex/tmp.py | # -*- coding: utf-8 -*-
import re
file_name = 'hogehoge'
org_file = open(file_name + '.txt')
lines = org_file.readlines()
org_file.close()
dist_file = open(file_name + '_after.txt', 'w')
pattern = r'title=\".+?\"'
all_title = re.findall(pattern, ''.join(lines))
if all_title:
for title in all_title:
dist_file.write(title.replace('\"', '').replace('title=', '') + '\n')
dist_file.close()
| mit | Python |
|
0297e8b1762d495ffd696106bc6498def0ddf600 | Add membership.utils.monthRange to calculate start and end dates of months easily | SYNHAK/spiff,SYNHAK/spiff,SYNHAK/spiff | spiff/membership/utils.py | spiff/membership/utils.py | import datetime
import calendar
from django.utils.timezone import utc
def monthRange(today=None):
if today is None:
today = datetime.datetime.utcnow().replace(tzinfo=utc)
lastDayOfMonth = calendar.monthrange(today.year, today.month)[1]
startOfMonth = datetime.datetime(today.year, today.month, 1, tzinfo=utc)
endOfMonth = datetime.datetime(today.year, today.month, lastDayOfMonth, tzinfo=utc)
return (startOfMonth, endOfMonth)
| agpl-3.0 | Python |
|
6441ce1bc3132220e2d86bb75eff9169b3675751 | add spiderNormal | Achilles-Z/python-learn | spiderNormal.py | spiderNormal.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#author zeck.tang 2016.03.03
"""
文件头两行注释是用于避免文件中包含中文导致如下错误
SyntaxError: Non-ASCII character XXX in file xxx.py on line xx, but no encoding declared
see http://python.org/dev/peps/pep-0263/ for details
如果遇到
IndentationError: unexpected indent
这样的错误,请仔细检查每个空格和tab
"""
import urllib
import urllib2
import re
# 这个是百度手机助手里面优酷的用户评论的信息获取url,咱是用Charles抓包拿到的
# ps:这个只有第一页数据,可以改写下动态传入pn参数值获取后续页面的信息
url = 'http://shouji.baidu.com/comment?action_type=getCommentList&groupid=3528441&pn=1'
# UserAgent设置 如果有需要UA的话
#user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
#headers = { 'User-Agent' : user_agent }
try:
# UA塞进头里
#request = urllib2.Request(url,headers = headers)
request = urllib2.Request(url)
response = urllib2.urlopen(request)
content = response.read().decode('utf-8') # utf-8格式读出来避免乱码
# 可以先打印看看content内容然后看怎么写正则
# print content
# content内容非常规则,每个<li></li>包含一条评论信息内容
# <em>(.*?)</em>是用户名称
# <p>(.*?)</p>是评论内容
# <div.*?time">(.*?)</div>是评论时间
pattern = re.compile('<em>(.*?)</em>.*?<p>(.*?)</p>.*?<div.*?time">(.*?)</div>',re.S)
items = re.findall(pattern,content)
for item in items:
#打印每一条评论
print item[0] #用户名称
print item[1] #用户评论
print item[2] #评论时间
print "----------"
except urllib2.URLError, e:
if hasattr(e,"code"):
print e.code
else :
print "code else"
if hasattr(e,"reason"):
print e.reason
else:
print "reason else"
| apache-2.0 | Python |
|
7303672fe9cf98c22afd83ae6c0dd7a136f4e5c8 | Create hyperventilate.py | wherrera10/asus-lighting | hyperventilate.py | hyperventilate.py | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 02 14:39:39 2015
@author: William Herrera
IMPORTANT: run as administrator
Color breathing package for G20aj series PC
"""
import light_acpi as la
from time import sleep
def make_rgb(cred, cgreen, cblue):
"""
make rgb for components
"""
redval = int(round(cred))
greenval = int(round(cgreen))
blueval = int(round(cblue))
ret = int(redval * 0x10000 + greenval * 0x100 + blueval)
if ret < 0:
ret = 0
if ret > 0x00ffffff:
ret = 0x00ffffff
return ret
def split_rgb(color):
"""
split rgb into red, green, blue
"""
red = (color & 0xff0000) / 0x10000
green = (color & 0xff00) / 0x100
blue = color & 0xff
return red, green, blue
def make_ratios(cred, cgreen, cblue):
"""
get ratios of colors
"""
maxcolor = max(cred, cgreen, cblue)
return float(cred)/float(maxcolor), \
float(cgreen)/float(maxcolor), \
float(cblue)/float(maxcolor)
def make_gamut(color):
"""
make a sequence of 256 colors
"""
sequence256 = []
cred, cgreen, cblue = split_rgb(color)
rred, rgreen, rblue = make_ratios(cred, cgreen, cblue)
for step in range(256):
tred = float(step) * rred
tgreen = float(step) * rgreen
tblue = float(step) * rblue
sequence256.append(make_rgb(tred, tgreen, tblue))
return sequence256, sequence256.index(color)
def dim_up_down_up_sequence(gamut, idex, frames):
"""
up color intensity to full
"""
# initial compiled list is size 512
cseq = gamut[idex:] + gamut[::-1] + gamut[0:idex]
# adjust size
reframed = []
ratio = 512.0 / frames
for frameidx in range(frames):
gamut_pos = int(round(frameidx * ratio))
reframed.append(cseq[gamut_pos])
return reframed
def run_color_sequence(lighting, colors, sleepinterval):
    """Play *colors* on *lighting* one frame at a time.

    sleepinterval is the per-frame delay in seconds (fractions allowed).
    """
    for frame in colors:
        lighting.set_color(frame)
        sleep(sleepinterval)
def continuous_cycle(lighti, startcolor, frames=32, sleepinterval=0.1):
    """Breathe *startcolor* on a single light forever (never returns)."""
    ramp, start_idx = make_gamut(startcolor)
    frame_colors = dim_up_down_up_sequence(ramp, start_idx, frames)
    while True:
        run_color_sequence(lighti, frame_colors, sleepinterval)
def run_triple_sequence(lightlist, colorlist, sleeptime):
    """Play one colour sequence on each of the three lights in lockstep.

    lightlist: [left, right, base] light objects.
    colorlist: matching list of equal-length colour sequences.
    sleeptime: per-frame delay in seconds.
    """
    left_light, right_light, base_light = lightlist
    left_seq, right_seq, base_seq = colorlist
    # zip iterates the three sequences in lockstep, replacing the manual
    # range(len(...)) indexing of the original.
    for left_c, right_c, base_c in zip(left_seq, right_seq, base_seq):
        left_light.set_color(left_c)
        right_light.set_color(right_c)
        base_light.set_color(base_c)
        sleep(sleeptime)
def all_cycle(scolors, frames=32, sleepinterval=0.1):
    """Breathe all three LED zones at once, forever (never returns).

    scolors: one packed start colour per zone, ordered left/right/base.
    """
    zones = [la.ASUSLighting(la.DPATH, la.LEFT_VERTICAL),
             la.ASUSLighting(la.DPATH, la.RIGHT_VERTICAL),
             la.ASUSLighting(la.DPATH, la.BASE_HORIZONTAL)]
    sequences = []
    for idx in range(len(zones)):
        ramp, start = make_gamut(scolors[idx])
        sequences.append(dim_up_down_up_sequence(ramp, start, frames))
    while True:
        run_triple_sequence(zones, sequences, sleepinterval)
if __name__ == '__main__':
    # Demo: breathe a light blue on the left vertical LED strip forever.
    SCOL = 0x1111ff
    LLIGHT = la.ASUSLighting(la.DPATH, la.LEFT_VERTICAL)
    continuous_cycle(LLIGHT, SCOL)
| apache-2.0 | Python |
|
0ec3fd40c85f2a61eee5960031318c7f5ab06bc5 | Allow whitelisted shell calls in transforms | ox-it/humfrey,ox-it/humfrey,ox-it/humfrey | humfrey/update/transform/shell.py | humfrey/update/transform/shell.py | import logging
import subprocess
import tempfile
from django.conf import settings
from .base import Transform, TransformException
SHELL_TRANSFORMS = getattr(settings, 'SHELL_TRANSFORMS', {})
logger = logging.getLogger(__name__)
class Shell(Transform):
    """Transform that shells out to a whitelisted external command.

    The command template is looked up by name in the SHELL_TRANSFORMS
    setting; ``None`` placeholders are replaced with the input filename
    and string arguments are formatted against the params mapping.
    """
    def __init__(self, name, extension, params):
        self.shell = SHELL_TRANSFORMS[name]
        self.extension = extension
        # Bug fix: params was never stored, so execute() raised
        # AttributeError on self.params.
        self.params = params

    def execute(self, transform_manager, input):
        params = self.params.copy()
        # Default the store slug from the transform manager when absent.
        if 'store' not in params:
            params['store'] = transform_manager.store.slug

        popen_args = [input if arg is None else arg.format(params) for arg in self.shell]

        with open(transform_manager(self.extension), 'w') as output:
            with tempfile.TemporaryFile() as stderr:
                transform_manager.start(self, [input])
                returncode = subprocess.call(popen_args, stdout=output, stderr=stderr)
                # Anything written to stderr is surfaced as a warning.
                if stderr.tell():
                    stderr.seek(0)
                    logger.warning("Shell warnings:\n\n%s\n", stderr.read())
                if returncode != 0:
                    logger.error("Shell transform failed with code %d", returncode)
                    raise TransformException
                transform_manager.end([output.name])
                return output.name
| bsd-3-clause | Python |
|
eae844f96417ce0ec32fada7737a2d4ae8b03497 | Add commandline tool | TronPaul/TwitchHQ | twitch.py | twitch.py | import re
import argparse
import urllib
import json
import os
from twitchapi import TwitchAPI
from twitchapi.twitch import TwitchToken
# On-disk cache locations for the OAuth token and app credentials.
TOKEN_FILE = '.access_token'
AUTH_SETTINGS_FILE = '.auth_settings'
# Parses "code=...&scope=..." pasted from the OAuth redirect URL.
# Bug fix: the scopes group was lazy (.*?) at the end of the pattern, so
# it always matched the empty string and the granted scopes were lost.
args_pattern = re.compile(r'code=(?P<code>.*?)&scope=(?P<scopes>.*)')
def get_auth_settings(auth_settings_file):
    """Load the OAuth client settings (JSON) from *auth_settings_file*.

    Uses a context manager so the file handle is closed promptly rather
    than leaking until garbage collection.
    """
    with open(auth_settings_file) as fp:
        return json.load(fp)
def auth_url(client_id, redirect_uri, scopes):
    """Build the Twitch OAuth2 authorization URL for the given app."""
    endpoint = (u'https://api.twitch.tv/kraken/oauth2/authorize'
                u'?response_type=code')
    query = urllib.urlencode({'client_id': client_id,
                              'redirect_uri': redirect_uri,
                              'scope': scopes}, True)
    return endpoint + u'&' + query
def make_token(token_file):
    """Return a TwitchToken loaded from *token_file*, or None if absent.

    Fix: the original leaked the file handle opened inline in
    json.load(open(...)).
    """
    if not os.path.isfile(token_file):
        return None
    with open(token_file) as fp:
        json_token = json.load(fp)
    return TwitchToken(json_token['access_token'],
                       json_token['scope'])
def make_client(token_file):
    # Build an API client using whatever token (possibly None) is cached.
    return TwitchAPI(make_token(token_file))
def clear_token(token_file):
    """Delete the cached OAuth token file; silently no-op if absent."""
    if not os.path.isfile(token_file):
        return
    os.remove(token_file)
def prompt(name, default=None):
    # Interactive console prompt (Python 2 raw_input). Appends "[default]"
    # when one is given, then loops until the user types something;
    # an empty answer falls back to the default when available.
    prompt = name + (default and ' [%s]' % default or '')
    prompt += name.endswith('?') and ' ' or ': '
    while True:
        rv = raw_input(prompt)
        if rv:
            return rv
        if default is not None:
            return default
def auth(twitch_client, token_file, auth_settings_file):
auth_settings = get_auth_settings(auth_settings_file)
print 'Navigate to: %s' % auth_url(auth_settings['client_id'],
auth_settings['redirect_uri'], auth_settings['scopes'])
args = {}
while not args:
args_text = urllib.unquote(
prompt('Args (copy the text after the ? in the url)'))
m = args_pattern.match(args_text)
if m:
args['code'] = m.group('code')
args['scopes'] = m.group('scopes').split()
args = {
'client_id':auth_settings['client_id'],
'client_secret':auth_settings['client_secret'],
'grant_type':'authorization_code',
'redirect_uri':auth_settings['redirect_uri'],
'code':args['code']
}
resp, con = twitch_client.post('oauth2/token', args=args)
token = json.loads(con)
clear_token(token_file)
json.dump(token, open(TOKEN_FILE, 'w'))
def check(twitch_client, token_file):
    # Validate the cached token against the API root; if the server says
    # it is invalid (or no token file exists), report and purge it.
    if os.path.isfile(token_file):
        resp, con = twitch_client.get('/')
        d = json.loads(con)
        if d['token']['valid']:
            print ('Authenticated! Scopes: %s' %
                d['token']['authorization']['scopes'])
            return
    print 'Not authenticated!'
    clear_token(token_file)
def update(twitch_client, channel, status, game):
    # Push a new status/game to the channel and report the HTTP outcome.
    resp, con = twitch_client.update_channel(channel, status, game)
    if resp.status != 200:
        print 'Error occurred!'
        print resp, con
    else:
        print 'Update successful.'
def channel_info(twitch_client):
    # Dump the authenticated user's channel record to stdout.
    print twitch_client.my_channel()
if __name__ == '__main__':
    # Command-line front end: one subcommand per operation
    # (auth / check / update / channel_info).
    parser = argparse.ArgumentParser()
    parser.add_argument('--token-file', default=TOKEN_FILE, dest='token_file')
    parser.add_argument('--auth-settings-file', default=AUTH_SETTINGS_FILE,
        dest='auth_settings_file')
    subparsers = parser.add_subparsers(dest='subparser_name')
    auth_parser = subparsers.add_parser('auth')
    check_parser = subparsers.add_parser('check')
    up_parser = subparsers.add_parser('update')
    up_parser.add_argument('channel', type=str)
    up_parser.add_argument('--status', type=str)
    up_parser.add_argument('--game', type=str)
    channel_info_parser = subparsers.add_parser('channel_info')
    args = parser.parse_args()
    twitch_client = make_client(args.token_file)
    if args.subparser_name == 'auth':
        auth(twitch_client, args.token_file, args.auth_settings_file)
    elif args.subparser_name == 'check':
        check(twitch_client, args.token_file)
    elif args.subparser_name == 'update':
        # Only hit the API when there is actually something to change.
        if args.game or args.status:
            update(twitch_client, args.channel, args.status, args.game)
    elif args.subparser_name == 'channel_info':
        channel_info(twitch_client)
|
2a1b46740c4cf14f7db4f344431aced9bf06d1e7 | Add a little program that calls sync until is is done | paolobolzoni/useful-conf,paolobolzoni/useful-conf,paolobolzoni/useful-conf | scripts/sync_for_real.py | scripts/sync_for_real.py | #!/usr/bin/env python3
import subprocess
import sys
from time import time
def eprint(*args, **kwargs):
    # print() that targets stderr, so progress survives stdout redirection.
    print(*args, file=sys.stderr, **kwargs)
def main():
    """Keep invoking sync(1) until three consecutive runs finish fast.

    A sync returning in under 100 ms suggests there is nothing left to
    flush; require three such runs in a row before declaring the disks
    settled. Returns 0 as the process exit status.
    """
    remaining_fast = 3
    while remaining_fast > 0:
        eprint('syncing... ', end='', flush=True)
        begin = time()
        subprocess.Popen('/usr/bin/sync', stdout=None, stderr=None).wait()
        elapsed = time() - begin
        eprint('{0:0.3f}'.format(elapsed))
        if elapsed < 0.10:
            remaining_fast -= 1
        else:
            # A slow sync resets the streak.
            remaining_fast = 3
    return 0
if __name__ == '__main__':
    # Propagate main()'s status code to the shell.
    sys.exit(main())
| unlicense | Python |
|
3e2c4f19d1eb5d66430ea46abe18a6a7022e13ef | Create svg_filter.py | brunommauricio/domusdome | svg_filter.py | svg_filter.py | f = open("domusdomezones.svg", "r")
svg = []
for line in f:
line = line.strip()
svg.append(line)
f.close()
vector_paths = []
for i in range(0, len(svg)):
if svg[i] == "<path":# spot the paths location
i = i+1
svg[i] = svg[i].replace(',', ' ')# remove the first 5 items in each path, replace spaces with commas
svg[i] = svg[i][5:-1].split(' ')# remove the first 5 and the last item of the line of each path, split each vector into a iterable list
vector_paths.append(svg[i])
paths = []
for n in range(0, len(vector_paths)):
paths.append(vector_paths[n])
for m in range(0, len(vector_paths[n])):
vector_paths[n][m] = float(vector_paths[n][m]) # float all strings of the vector_paths list
for p in range(0, len(paths)):
for o in range(2, len(paths[p])-1):# loop to sum vectors
paths[p][o] = paths[p][o-2] + paths[p][o]# sum the vectors of each cordinate
for o in range(0, len(paths[p])):#loop to round each cordinate
paths[p][o] = round(paths[p][o],2) #round the floating points to a two decimal float
print paths
| mit | Python |
|
353d717c425cca9941d650d715c3ed8caf0aae64 | Reset tooltip timer also when cell editor is closed | HelioGuilherme66/RIDE,caio2k/RIDE,HelioGuilherme66/RIDE,HelioGuilherme66/RIDE,robotframework/RIDE,caio2k/RIDE,fingeronthebutton/RIDE,fingeronthebutton/RIDE,robotframework/RIDE,HelioGuilherme66/RIDE,robotframework/RIDE,fingeronthebutton/RIDE,robotframework/RIDE,caio2k/RIDE | src/robotide/editor/tooltips.py | src/robotide/editor/tooltips.py | # Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
import wx.grid
from popupwindow import Tooltip
class GridToolTips(object):
    """Shows a small tooltip over grid cells after the mouse rests on
    them, plus a larger information popup; both hide on mouse motion."""

    def __init__(self, grid):
        self._tooltip = Tooltip(grid, (250, 80), False, True)
        self._information_popup = Tooltip(grid, (450, 300))
        self._grid = grid
        # One-shot timer implementing the hover delay.
        self._tooltip_timer = wx.Timer(grid.GetGridWindow())
        grid.GetGridWindow().Bind(wx.EVT_MOTION, self.OnMouseMotion)
        grid.GetGridWindow().Bind(wx.EVT_TIMER, self.OnShowToolTip)
        # Restart the tooltip delay when an in-place cell editor closes.
        grid.Bind(wx.grid.EVT_GRID_EDITOR_HIDDEN, self.OnGridEditorHidden)

    def OnMouseMotion(self, event):
        # Any movement hides the current tooltip and restarts the delay.
        self._hide_tooltip()
        self._start_tooltip_timer()
        event.Skip()

    def _start_tooltip_timer(self):
        # 500 ms one-shot delay before the tooltip may appear.
        self._tooltip_timer.Start(500, True)

    def OnShowToolTip(self, event):
        self._hide_tooltip()
        content = self._grid.get_tooltip_content()
        if content:
            self._show_tooltip_at(content, self._calculate_tooltip_position())
            self._grid.SetFocus()

    def OnGridEditorHidden(self, event):
        cell = event.Row, event.Col
        # Only restart the delay if the editor belonged to the hovered cell.
        if cell == self._grid.cell_under_cursor:
            self._start_tooltip_timer()

    def _show_tooltip_at(self, content, position):
        # The large information popup takes precedence over the tooltip.
        if not self._information_popup.IsShown():
            self._tooltip.set_content(content)
            self._tooltip.show_at(position)

    def _calculate_tooltip_position(self):
        # Offset slightly so the pointer does not cover the tooltip.
        x, y = wx.GetMousePosition()
        return x+5, y+5

    def _hide_tooltip(self):
        self._tooltip.hide()

    def hide_information(self):
        self._information_popup.hide()

    def hide(self):
        # Hide both popups.
        self._hide_tooltip()
        self.hide_information()

    def show_info_at(self, info, title, position):
        self._tooltip.hide()
        self._information_popup.set_content(info, title)
        self._information_popup.show_at(position)
| # Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
from popupwindow import Tooltip
class GridToolTips(object):
def __init__(self, grid):
self._tooltip = Tooltip(grid, (250, 80), False, True)
self._information_popup = Tooltip(grid, (450, 300))
self._grid = grid
self._tooltip_timer = wx.Timer(grid.GetGridWindow())
grid.GetGridWindow().Bind(wx.EVT_MOTION, self.OnMouseMotion)
grid.GetGridWindow().Bind(wx.EVT_TIMER, self.OnShowToolTip)
def OnMouseMotion(self, event):
self._hide_tooltip()
self._tooltip_timer.Start(500, True)
event.Skip()
def OnShowToolTip(self, event):
self._hide_tooltip()
content = self._grid.get_tooltip_content()
if content:
self._show_tooltip_at(content, self._calculate_tooltip_position())
self._grid.SetFocus()
def _show_tooltip_at(self, content, position):
if not self._information_popup.IsShown():
self._tooltip.set_content(content)
self._tooltip.show_at(position)
def _calculate_tooltip_position(self):
x, y = wx.GetMousePosition()
return x+5, y+5
def _hide_tooltip(self):
self._tooltip.hide()
def hide_information(self):
self._information_popup.hide()
def hide(self):
self._hide_tooltip()
self.hide_information()
def show_info_at(self, info, title, position):
self._tooltip.hide()
self._information_popup.set_content(info, title)
self._information_popup.show_at(position)
| apache-2.0 | Python |
86434fb902caeea7bb740c35607dc6f9f7766d88 | Fix searching for notes in the django admin | jaredjennings/snowy,GNOME/snowy,GNOME/snowy,widox/snowy,jaredjennings/snowy,leonhandreke/snowy,syskill/snowy,leonhandreke/snowy,nekohayo/snowy,sandyarmstrong/snowy,NoUsername/PrivateNotesExperimental,jaredjennings/snowy,nekohayo/snowy,NoUsername/PrivateNotesExperimental,syskill/snowy,sandyarmstrong/snowy,widox/snowy,jaredjennings/snowy | notes/admin.py | notes/admin.py | #
# Copyright (c) 2009 Brad Taylor <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from snowy.accounts.models import UserProfile
from snowy.notes.models import Note, NoteTag
from reversion.admin import VersionAdmin
from django.contrib import admin
class NoteAdmin(VersionAdmin):
    # Version-tracked admin for notes; full-text search covers the note
    # body ("content") and its title.
    list_display = ('created', 'author', 'title')
    search_fields = ['content', 'title']
    # Auto-fill the slug from the title while typing in the admin form.
    prepopulated_fields = {'slug': ('title',)}

admin.site.register(Note, NoteAdmin)
admin.site.register(NoteTag)
admin.site.register(UserProfile)
| #
# Copyright (c) 2009 Brad Taylor <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from snowy.accounts.models import UserProfile
from snowy.notes.models import Note, NoteTag
from reversion.admin import VersionAdmin
from django.contrib import admin
class NoteAdmin(VersionAdmin):
list_display = ('created', 'author', 'title')
search_fields = ['body', 'title']
prepopulated_fields = {'slug': ('title',)}
admin.site.register(Note, NoteAdmin)
admin.site.register(NoteTag)
admin.site.register(UserProfile)
| agpl-3.0 | Python |
75717747ffbc36f306e0f771a65ed101bd3ca9be | Create parser.py | anacoimbrag/information-retrieval,anacoimbrag/information-retrieval | parser.py | parser.py | from HTMLParser import HTMLParser
# create a subclass and override the handler methods
class Parser(HTMLParser):
tag = ""
doc = new Document()
def handle_starttag(self, tag, attrs):
tag = tag
print "Encountered a start tag:", tag
def handle_endtag(self, tag):
tag = ""
print "Encountered an end tag :", tag
def handle_data(self, data):
print "Encountered some data :", data
if tag == "DOCNO":
doc.setId(data)
if tag == "title":
doc.setTitle(data)
if tag == "h1":
doc.addH1(data)
if tag == "h2":
doc.addH2(data)
if tag == "h3":
doc.addH4(data)
elif tag != "":
doc.addContent(data)
class Document():
    """Accumulates the parsed fields of a single document."""
    def __init__(self):
        # Fixes: __init__ was missing ``self`` (so Document() raised a
        # TypeError) and assigned to local variables instead of instance
        # attributes, leaving instances uninitialised.
        self.id = ""
        self.title = ""
        self.h1 = []
        self.h2 = []
        self.h3 = []
        self.content = ""

    def setId(self, id):
        self.id = id

    def setTitle(self, title):
        self.title = title

    def addH1(self, h1):
        # Fix: the original appended to ``self`` (no such method) rather
        # than the heading list; same for addH2/addH3 below.
        self.h1.append(h1)

    def addH2(self, h2):
        self.h2.append(h2)

    def addH3(self, h3):
        self.h3.append(h3)

    def addContent(self, content):
        # Body text fragments are concatenated, separated by spaces.
        self.content += content + " "
| mit | Python |
|
ecde3b823724e612fd4e5cc575eb75f0d3652a4b | add script for running test | lotabout/pymustache | test/run-test.py | test/run-test.py | import imp
import json
import os
mustache = imp.load_source('mustache', '../src/mustache.py')
#test_files = ['comments.json',
#'delimiters.json',
#'interpolation.json',
#'inverted.json',
#'~lambdas.json',
#'partials.json',
#'sections.json']
test_files = ['interpolation.json',
'delimiters.json']
for filename in test_files:
with open(os.path.join('./spec/specs/', filename)) as fp:
data = json.load(fp)['tests']
for test in data:
context = test['data']
template = test['template']
expected = test['expected']
result = mustache.render(template, [context])
if result != expected:
print('>>>>>>>>> Error >>>>>>>>>>>>')
print('template:', template)
print('expected:', expected)
print('result :', result)
| mit | Python |
|
0feb8f3ae65fadaf600e7681349cfa537b41a8c3 | Add ParseBigCSV.py | awensaunders/BuSHAX0rZ,awensaunders/BuSHAX0rZ,awensaunders/BuSHAX0rZ | parseBigCSV.py | parseBigCSV.py | import csv
import json
with open("evidata.csv", "r") as bigCSV:
# Convert the CSV rows into a JSON array of row objects.
with open("evidata.csv", "r") as bigCSV:
    with open("file.json", "w") as outFile:
        rows = list(csv.DictReader(bigCSV))
        outFile.write(json.dumps(rows))
|
31434ff2f5b208ae1d93b4340e1d28cfe5cb2e42 | Add IMDB Mocked Unit Test (#1579) | pytorch/text,pytorch/text,pytorch/text,pytorch/text | test/datasets/test_imdb.py | test/datasets/test_imdb.py | import os
import random
import string
import tarfile
from collections import defaultdict
from unittest.mock import patch
from parameterized import parameterized
from torchtext.datasets.imdb import IMDB
from ..common.case_utils import TempDirMixin, zip_equal
from ..common.torchtext_test_case import TorchtextTestCase
def _get_mock_dataset(root_dir):
"""
root_dir: directory to the mocked dataset
"""
base_dir = os.path.join(root_dir, "IMDB")
temp_dataset_dir = os.path.join(base_dir, "temp_dataset_dir")
os.makedirs(temp_dataset_dir, exist_ok=True)
seed = 1
mocked_data = defaultdict(list)
for split in ("train", "test"):
neg_dir = os.path.join(temp_dataset_dir, split, "neg")
pos_dir = os.path.join(temp_dataset_dir, split, "pos")
os.makedirs(neg_dir, exist_ok=True)
os.makedirs(pos_dir, exist_ok=True)
for i in range(5):
# all negative labels are read first before positive labels in the
# IMDB dataset implementation
label = "neg" if i < 2 else "pos"
cur_dir = pos_dir if label == "pos" else neg_dir
txt_file = os.path.join(cur_dir, f"{i}{i}_{i}.txt")
with open(txt_file, "w") as f:
rand_string = " ".join(
random.choice(string.ascii_letters) for i in range(seed)
)
dataset_line = (label, rand_string)
# append line to correct dataset split
mocked_data[split].append(dataset_line)
f.write(rand_string)
seed += 1
compressed_dataset_path = os.path.join(base_dir, "aclImdb_v1.tar.gz")
# create tar file from dataset folder
with tarfile.open(compressed_dataset_path, "w:gz") as tar:
tar.add(temp_dataset_dir, arcname="aclImdb_v1")
return mocked_data
class TestIMDB(TempDirMixin, TorchtextTestCase):
    # Exercises the IMDB dataset pipeline against the mocked archive,
    # with the download hash check patched out.
    root_dir = None
    samples = []

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.root_dir = cls.get_base_temp_dir()
        cls.samples = _get_mock_dataset(cls.root_dir)
        # Skip hash verification for the locally generated archive.
        cls.patcher = patch(
            "torchdata.datapipes.iter.util.cacheholder._hash_check", return_value=True
        )
        cls.patcher.start()

    @classmethod
    def tearDownClass(cls):
        cls.patcher.stop()
        super().tearDownClass()

    @parameterized.expand(["train", "test"])
    def test_imdb(self, split):
        # The pipeline must yield exactly the mocked (label, text) pairs.
        dataset = IMDB(root=self.root_dir, split=split)
        samples = list(dataset)
        expected_samples = self.samples[split]
        for sample, expected_sample in zip_equal(samples, expected_samples):
            self.assertEqual(sample, expected_sample)

    @parameterized.expand(["train", "test"])
    def test_imdb_split_argument(self, split):
        # A single split string and a one-element tuple must be equivalent.
        dataset1 = IMDB(root=self.root_dir, split=split)
        (dataset2,) = IMDB(root=self.root_dir, split=(split,))

        for d1, d2 in zip_equal(dataset1, dataset2):
            self.assertEqual(d1, d2)
| bsd-3-clause | Python |
|
3536b98a3adf5087c78b92432585654bec40d64e | add problem 045 | smrmkt/project_euler | problem_045.py | problem_045.py | #!/usr/bin/env python
#-*-coding:utf-8-*-
'''
'''
import math
import timeit
def is_pentagonal(n):
    """Test whether n = k(3k-1)/2 for some positive integer k.

    Inverts the pentagonal formula: n is pentagonal exactly when
    (1 + sqrt(1 + 24n)) is a multiple of 6.
    """
    return (1 + math.sqrt(1 + 24 * n)) % 6 == 0
def calc():
    """Return the first hexagonal number H_i = i(2i-1), with i > 143,
    that is also pentagonal."""
    i = 144
    while True:
        hexagonal = i * (2 * i - 1)
        if is_pentagonal(hexagonal):
            return hexagonal
        i += 1
if __name__ == '__main__':
    # Print the answer, then time a single full run.
    print calc()
    print timeit.Timer('problem_045.calc()', 'import problem_045').timeit(1)
| mit | Python |
|
91bb7506bd20ed22b8787e7a8b9975cc07e97175 | Add owners client to depot_tools. | CoherentLabs/depot_tools,CoherentLabs/depot_tools | owners_client.py | owners_client.py | # Copyright (c) 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class OwnersClient(object):
  """Interact with OWNERS files in a repository.

  This class allows you to interact with OWNERS files in a repository both the
  Gerrit Code-Owners plugin REST API, and the owners database implemented by
  Depot Tools in owners.py:

   - List all the owners for a change.
   - Check if a change has been approved.
   - Check if the OWNERS configuration in a change is valid.

  All code should use this class to interact with OWNERS files instead of the
  owners database in owners.py
  """
  def __init__(self, host):
    self._host = host

  def ListOwnersForFile(self, project, branch, path):
    """List all owners for a file."""
    # NotImplementedError is the idiomatic marker for abstract methods;
    # it subclasses Exception, so existing callers keep working.
    raise NotImplementedError('Not implemented')

  def IsChangeApproved(self, change_number):
    """Check if the latest patch set for a change has been approved."""
    raise NotImplementedError('Not implemented')

  def IsOwnerConfigurationValid(self, change_number, patch):
    """Check if the owners configuration in a change is valid."""
    raise NotImplementedError('Not implemented')
| bsd-3-clause | Python |
|
beae2bdc47949f78e95e3444d248ce035766e719 | Add ascii table test | KSchopmeyer/smipyping,KSchopmeyer/smipyping,KSchopmeyer/smipyping,KSchopmeyer/smipyping,KSchopmeyer/smipyping | smipyping/_asciitable.py | smipyping/_asciitable.py | # (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Internal module with utilities to write to ascii outputs.
"""
from __future__ import print_function, absolute_import
from textwrap import wrap
import six
from terminaltables import SingleTable
def print_ascii_table(title, table_header, table_data, inner_border=False,
                      outer_border=False):
    """Print table data as an ascii table (terminaltables SingleTable).

    title:
        Row title string for the table.
    table_header:
        List of strings naming the columns.
    table_data:
        List of rows; each row is a list of cell strings.
    inner_border:
        When True, draw borders between columns.
    outer_border:
        When True, draw a border around the whole table.

    NOTE: Currently this outputs in the terminaltables SingleTable
    format; other formats differ only in the boundary characters.
    """
    rows = [table_header] + table_data
    table = SingleTable(rows, title)
    table.inner_column_border = inner_border
    table.outer_border = outer_border
    print(table.table)
    print()
def fold_cell(cell_string, max_cell_width):
    """Word-wrap *cell_string* into lines of at most max_cell_width chars.

    Non-string values and strings that already fit are returned
    unchanged; otherwise the wrapped lines are joined with newlines.
    """
    if not isinstance(cell_string, six.string_types):
        return cell_string
    if len(cell_string) <= max_cell_width:
        return cell_string
    return '\n'.join(wrap(cell_string, max_cell_width))
| mit | Python |
|
672e4378421d2014644e23195706ef011934ffdb | test for fixes on #55 | wdm0006/categorical_encoding,scikit-learn-contrib/categorical-encoding,wdm0006/categorical_encoding,scikit-learn-contrib/categorical-encoding | category_encoders/tests/test_basen.py | category_encoders/tests/test_basen.py | import category_encoders as ce
import unittest
import pandas as pd
__author__ = 'willmcginnis'
class TestBasen(unittest.TestCase):
    """Regression test for BaseNEncoder on unseen categories (issue #55)."""
    def test_basen(self):
        df = pd.DataFrame({'col1': ['a', 'b', 'c'], 'col2': ['d', 'e', 'f']})
        # 'd' in col1 was never seen during fit; transform must not crash.
        df_1 = pd.DataFrame({'col1': ['a', 'b', 'd'], 'col2': ['d', 'e', 'f']})
        enc = ce.BaseNEncoder(verbose=1)
        enc.fit(df)
        print(enc.transform(df_1))
| bsd-3-clause | Python |
|
a4c8818225941b84e6958dcf839fc78c2adc5cee | Create test_pxssh.py | shipcod3/commandsshbotnet | test_pxssh.py | test_pxssh.py | # commandsshbotnet.py
# author: @shipcod3
#
# >> used for testing the pxssh module
import pxssh
import getpass
# Open an interactive SSH session (Python 2 raw_input/getpass), run a few
# commands and echo their output; pxssh errors are reported, not raised.
try:
    s = pxssh.pxssh()
    hostname = raw_input('SET HOST: ')
    username = raw_input('SET USERNAME: ')
    password = getpass.getpass('SET PASSWORD: ')
    s.login (hostname, username, password)
    s.sendline ('uptime') # run a command
    s.prompt() # match the prompt
    print s.before # print everything before the prompt.
    s.sendline ('ls -l')
    s.prompt()
    print s.before
    s.sendline ('df')
    s.prompt()
    print s.before
    s.logout()
except pxssh.ExceptionPxssh, e:
    print "pxssh failed on login."
    print str(e)
| mit | Python |
|
1be4e6f97b3d062c4fa07f70b05305bf32593fd4 | Add test cases for smudge | oohlaf/dotsecrets | dotbriefs/tests/test_smudge.py | dotbriefs/tests/test_smudge.py | import unittest
from dotbriefs.smudge import SmudgeTemplate
class TestCleanSecret(unittest.TestCase):
    """Exercises SmudgeTemplate substitution of $DotBriefs: key$ markers."""
    def setUp(self):
        # Two known secrets the template can substitute by key.
        self.secrets = {}
        self.secrets['password'] = 's3cr3t'
        self.secrets['question'] = 'h1dd3n 4g3nd4'
        self.template = []
        self.template.append(SmudgeTemplate('name', self.secrets))

    def test_nosecret_sub(self):
        # Lines without markers pass through unchanged.
        self.assertEqual(self.template[0].sub('password = hi # comment'),
                'password = hi # comment')

    def test_nokey_sub(self):
        # An empty key is left untouched.
        self.assertEqual(self.template[0].sub('password = $DotBriefs: $ # comment'),
                'password = $DotBriefs: $ # comment')

    def test_nomatch_sub(self):
        # An unknown key is left untouched.
        self.assertEqual(self.template[0].sub('password = $DotBriefs: notfound$ # comment'),
                'password = $DotBriefs: notfound$ # comment')

    def test_single_sub(self):
        self.assertEqual(self.template[0].sub('password = $DotBriefs: password$ # comment'),
                'password = s3cr3t # comment')

    def test_double_sub(self):
        # Multiple markers on one line are all substituted.
        self.assertEqual(self.template[0].sub('password = $DotBriefs: password$; security question = $DotBriefs: question$ # comment'),
                'password = s3cr3t; security question = h1dd3n 4g3nd4 # comment')
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| bsd-3-clause | Python |
|
aa2d97dfe52628e1bb7ab123890a895f7f630cda | add problem 070 | smrmkt/project_euler | problem_070.py | problem_070.py | #!/usr/bin/env python
#-*-coding:utf-8-*-
'''
'''
from fractions import Fraction
import itertools
import math
import timeit
# Seed primes; multiple_prime() extends this list with primes it discovers.
primes = [2, 3, 5, 7]
def is_prime(n):
    """Primality test by trial division up to sqrt(n).

    Bug fix: the original started its trial-division range at
    max(primes); once multiple_prime() appended primes larger than
    sqrt(n), the range was empty and composites such as 1037 (17*61)
    were reported prime.
    """
    if n < 2:
        return False
    for i in range(2, int(math.sqrt(n)) + 1):
        if n % i == 0:
            return False
    return True
def factorize(n, factors):
    """Accumulate the distinct prime factors of *n* into the set *factors*.

    Recursive: divides out the smallest factor found and recurses on the
    quotient; when no factor <= sqrt(n) exists, n itself is prime.
    """
    for candidate in range(2, int(math.sqrt(n)) + 1):
        if n % candidate == 0:
            factors.add(candidate)
            return factorize(n // candidate, factors)
    factors.add(n)
    return factors
def totient(n):
    """Euler's phi as a Fraction: n * product of (1 - 1/p) over prime
    factors p of n."""
    result = Fraction(n, 1)
    for p in factorize(n, set()):
        result *= 1 - Fraction(1, p)
    return result
# slow
def loop(n):
num, min_ratio = None, None
for i in range(2, n+1):
phi = totient(i)
if sorted(str(i)) == sorted(str(phi)):
ratio = n/phi
if min_ratio is None or ratio < min_ratio:
num = i
min_ratio = ratio
return num, min_ratio
def multiple_prime(n):
    """Search products of two primes for the minimal-ratio permuted totient.

    Candidates m = p*q (p, q prime) have phi(m) close to m, so the
    search is narrowed to primes near sqrt(n). Returns (m, m/phi(m)).
    """
    # prepare primes
    # NOTE(review): 1000..5000 is a heuristic window around sqrt(10**7);
    # it assumes the answer's factors fall in this range.
    for i in range(1000, 5000): # narrow search space
        if is_prime(i):
            primes.append(i)
    # main loop
    num, min_ratio = None, None
    for i, j in itertools.combinations(primes, 2):
        m = i*j
        # Skip products beyond the requested bound.
        if m > n:
            continue
        phi = totient(m)
        # Keep m only if phi(m) is a digit permutation of m.
        if sorted(str(m)) == sorted(str(phi)):
            ratio = m/phi
            if min_ratio is None or ratio < min_ratio:
                num = m
                min_ratio = ratio
    return num, min_ratio
if __name__ == '__main__':
    # The brute-force loop() variant is kept for reference but is too slow.
    # print loop(10**7)
    print multiple_prime(10**7)
    print timeit.Timer('problem_070.multiple_prime(10**7)', 'import problem_070').timeit(1)
| mit | Python |
|
16ccb2a670461e8ceb9934fd4ba8823b866c9d8e | Create plot.py | ogasawaraShinnosuke/ds | src/plot.py | src/plot.py | import pandas as pd
from matplotlib import pyplot as plt
from abc import ABCMeta, abstractmethod
class Plot(metaclass=ABCMeta):
    # Abstract base for plot renderers; the default show() pops up the
    # current matplotlib figure.
    @abstractmethod
    def show(self):
        plt.show()
class CsvPlot(Plot):
    """Plots CSV files located via a parent path template.

    parent_path is a format string with one ``{}`` placeholder for the
    file name, e.g. ``"data/{}"``.
    """
    def __init__(self, parent_path):
        self.parent_path = parent_path

    def show(self, is_execute=False):
        # Only open the matplotlib window when explicitly requested
        # (was: a conditional expression used purely for its side effect).
        if is_execute:
            super().show()

    def plot(self, file_name, title, is_execute=False):
        # Plain statement instead of the original immediately-invoked
        # lambda, which only obscured this side-effecting call.
        pd.read_csv(self.parent_path.format(file_name)).plot(title=title)
        self.show(is_execute)

    def plots(self, file_names, titles, is_execute=False):
        # Plain loop: a list comprehension used only for side effects
        # allocates a throwaway list.
        for file_name, title in zip(file_names, titles):
            self.plot(file_name, title)
        self.show(is_execute)
| mit | Python |
|
254239102955bb8916aab98530251b5cdd79ce50 | Add script to write base signatures | jdkato/codetype,jdkato/codetype | cypher/siggen.py | cypher/siggen.py | #!/usr/bin/env python
import argparse
import subprocess
import os
import shutil
import sys
from util import write_signature
parser = argparse.ArgumentParser()
parser.add_argument(
"-l",
"--language",
help="Source code language.",
required=True
)
TEMP_DIR = os.path.join(os.getcwd(), "cypher", "temp")
if os.path.exists(TEMP_DIR):
shutil.rmtree(TEMP_DIR)
lang = vars(parser.parse_args())["language"]
if lang == "Python":
repo = "https://github.com/django/django.git"
ext = [".py"]
elif lang == "Ruby":
repo = "https://github.com/Homebrew/legacy-homebrew.git"
ext = [".rb"]
elif lang == "C":
repo = "https://github.com/git/git.git"
ext = [".c", ".h"]
elif lang == "C++":
repo = "https://github.com/apple/swift.git"
ext = [".cpp", ".cc", ".h"]
elif lang == "R":
repo = "https://github.com/rstudio/shiny.git"
ext = [".R", ".r"]
else:
print("{} not found.".format(lang))
sys.exit(0)
os.makedirs(TEMP_DIR)
pro = subprocess.Popen(
["git", "clone", repo],
cwd=TEMP_DIR,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(out, error) = pro.communicate()
src_dir = os.path.join(TEMP_DIR, repo.split("/")[-1].split(".")[0])
write_signature(src_dir, lang, ext)
shutil.rmtree(TEMP_DIR)
| mit | Python |
|
fe8d131a3cb9484cfe3f1b96102c0333077ffe89 | Add some basic tests | msanders/cider | tests/test_basic.py | tests/test_basic.py | from cider import Cider
from mock import MagicMock, call
import pytest
import random
@pytest.mark.randomize(formulas=[str], cask=bool, force=bool)
def test_install(tmpdir, formulas, cask, force):
    # install() must delegate to brew and record each formula in the
    # bootstrap file under "casks" or "formulas" depending on mode.
    cider = Cider(cider_dir=str(tmpdir), cask=cask)
    cider.brew = MagicMock()
    cider.install(*formulas, force=force)
    cider.brew.install.assert_called_once_with(*formulas, force=force)

    key = "casks" if cask else "formulas"
    for formula in formulas:
        assert formula in cider.read_bootstrap().get(key, [])
@pytest.mark.randomize(formulas=[str], cask=bool)
def test_rm(tmpdir, formulas, cask):
cider = Cider(cider_dir=str(tmpdir), cask=cask)
cider.brew = MagicMock()
cider.rm(*formulas)
cider.brew.rm.assert_called_once_with(*formulas)
key = "casks" if cask else "formulas"
for formula in formulas:
assert formula not in cider.read_bootstrap().get(key, [])
@pytest.mark.randomize(
domain=str, key=str, values=[str, int, float], force=bool
)
def test_set_default(tmpdir, domain, key, values, force):
def expected(value):
return {
"true": True,
"false": False
}.get(value, value)
cider = Cider(cider_dir=str(tmpdir))
cider.defaults = MagicMock()
for value in values:
cider.set_default(domain, key, value, force=force)
cider.defaults.write.assert_called_with(
domain, key, expected(value), force
)
assert cider.read_defaults()[domain][key] == value
# Verify str(value) => defaults.write(value)
cider.set_default(domain, key, str(value), force=force)
_assert_roughly_called_with(
cider.defaults.write, domain, key, value, force
)
@pytest.mark.randomize(domain=str, key=str)
def test_remove_default(tmpdir, domain, key):
cider = Cider(cider_dir=str(tmpdir))
cider.defaults = MagicMock()
cider.remove_default(domain, key)
cider.defaults.delete.assert_called_with(domain, key)
assert key not in cider.read_defaults().get(domain, [])
@pytest.mark.randomize(tap=str)
def test_tap(tmpdir, tap):
cider = Cider(cider_dir=str(tmpdir))
cider.brew = MagicMock()
cider.tap(tap)
cider.brew.tap.assert_called_with(tap)
assert tap in cider.read_bootstrap().get("taps", [])
@pytest.mark.randomize(tap=str)
def test_untap(tmpdir, tap):
cider = Cider(cider_dir=str(tmpdir))
cider.brew = MagicMock()
cider.untap(tap)
cider.brew.untap.assert_called_with(tap)
assert tap not in cider.read_bootstrap().get("taps", [])
def _assert_roughly_called_with(mock_self, *args, **kwargs):
def assert_roughly_equal(actual, expected):
if isinstance(actual, float) and isinstance(expected, float):
assert abs(actual - expected) <= threshold
else:
assert actual == expected
threshold = 0.01
_, actual_args, actual_kwargs = mock_self.mock_calls[-1]
for actual, expected in zip(actual_args, args):
assert_roughly_equal(actual, expected)
for key, expected in kwargs.iteritems():
assert_roughly_equal(actual_kwargs.get(key), expected)
| mit | Python |
|
19b13f0fb9b86ec99025bd1baf2c4d5fe757f809 | Add a test to make sure exception is raised | alphagov/notifications-admin,alphagov/notifications-admin,alphagov/notifications-admin,alphagov/notifications-admin | tests/test_tests.py | tests/test_tests.py | import pytest
def test_BeautifulSoup_methods_are_overridden(
    client_request,
    mock_get_service_and_organisation_counts,
):
    """The overridden BeautifulSoup lookup methods must raise and name the preferred API."""
    client_request.logout()
    page = client_request.get("main.index", _test_page_title=False)

    # Each banned method should raise AttributeError pointing at its replacement.
    for banned, replacement in (("find", "select_one"), ("find_all", "select")):
        with pytest.raises(AttributeError) as exception:
            getattr(page, banned)("h1")
        assert str(exception.value) == f"Don’t use BeautifulSoup.{banned} – try BeautifulSoup.{replacement} instead"
| mit | Python |
|
1f4190a6d4ef002e75a8ac5ef80d326c712c749c | add test to verify the trace assignment | uber/tchannel-python,uber/tchannel-python | tests/test_trace.py | tests/test_trace.py | from __future__ import absolute_import
import pytest
from tchannel import TChannel, schemes
from tchannel.errors import BadRequestError
from tchannel.event import EventHook
@pytest.mark.gen_test
def test_error_trace():
    """An error sent for an unknown endpoint must carry the inbound request's trace."""
    tchannel = TChannel('test')

    class ErrorEventHook(EventHook):
        # Captures the tracing objects observed on the inbound request and on
        # the outbound error so they can be compared after the call fails.
        def __init__(self):
            self.request_trace = None
            self.error_trace = None

        def before_receive_request(self, request):
            self.request_trace = request.tracing

        def after_send_error(self, error):
            self.error_trace = error.tracing

    hook = ErrorEventHook()
    tchannel.hooks.register(hook)
    tchannel.listen()

    # Calling an endpoint that was never registered must fail fast.
    with pytest.raises(BadRequestError):
        yield tchannel.call(
            scheme=schemes.RAW,
            service='test',
            arg1='endpoint',
            hostport=tchannel.hostport,
            timeout=0.02,
        )

    # Both traces were captured, and the error inherited the request's trace.
    assert hook.error_trace
    assert hook.request_trace
    assert hook.error_trace == hook.request_trace
| mit | Python |
|
7fa8417cb7635e238f1e95971fa0a86a95b64dca | Migrate deleted_at fields away | pudo/aleph,alephdata/aleph,alephdata/aleph,pudo/aleph,alephdata/aleph,alephdata/aleph,pudo/aleph,alephdata/aleph | aleph/migrate/versions/aa486b9e627e_hard_deletes.py | aleph/migrate/versions/aa486b9e627e_hard_deletes.py | """Hard delete various model types.
Revision ID: aa486b9e627e
Revises: 9dcef7592cea
Create Date: 2020-07-31 08:56:43.679019
"""
from alembic import op
import sqlalchemy as sa
revision = "aa486b9e627e"
down_revision = "9dcef7592cea"
def upgrade():
    """Purge soft-deleted rows, then drop the deleted_at columns (hard deletes)."""
    # Reflect the live schema so tables can be addressed without model classes.
    meta = sa.MetaData()
    meta.bind = op.get_bind()
    meta.reflect()
    # Remove rows that were only soft-deleted before the column disappears.
    for table_name in ("alert", "entity", "mapping", "permission"):
        table = meta.tables[table_name]
        q = sa.delete(table).where(table.c.deleted_at != None)  # noqa
        meta.bind.execute(q)
    # Permissions without read access carry no information; delete them too.
    table = meta.tables["permission"]
    q = sa.delete(table).where(table.c.read == False)  # noqa
    meta.bind.execute(q)
    op.drop_column("alert", "deleted_at")
    op.drop_column("entity", "deleted_at")
    op.drop_column("mapping", "deleted_at")
    op.drop_column("permission", "deleted_at")
    op.alter_column("entityset", "label", existing_type=sa.VARCHAR(), nullable=True)
    op.alter_column("role", "is_muted", existing_type=sa.BOOLEAN(), nullable=False)
def downgrade():
    # Irreversible: deleted rows and dropped columns cannot be restored.
    pass
| mit | Python |
|
c61d3d5b4b31912c48e86425fe7e4861fc2f8c28 | test for read_be_array that fails in Python 2.x (see GH-6) | mitni455/psd-tools,vgatto/psd-tools,makielab/psd-tools,vgatto/psd-tools,a-e-m/psd-tools,ssh-odoo/psd-tools,psd-tools/psd-tools,a-e-m/psd-tools,mitni455/psd-tools,EvgenKo423/psd-tools,kmike/psd-tools,codercarl/psd-tools,makielab/psd-tools,codercarl/psd-tools,ssh-odoo/psd-tools,a-e-m/psd-tools,EvgenKo423/psd-tools,kmike/psd-tools,vovkasm/psd-tools,makielab/psd-tools,codercarl/psd-tools,vovkasm/psd-tools | tests/test_utils.py | tests/test_utils.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from io import BytesIO
from psd_tools.utils import read_be_array
def test_read_be_array_from_file_like_objects():
    """read_be_array should decode big-endian values directly from a file-like stream."""
    stream = BytesIO(b"\x00\x01\x00\x05")
    decoded = read_be_array("H", 2, stream)
    assert list(decoded) == [1, 5]
| mit | Python |
|
eb82e816e4dece07aeebd7b9112156dacdb2d9bc | Add set_global_setting.py, not sure how this file dissapeared | rwols/CMakeBuilder | commands/set_global_setting.py | commands/set_global_setting.py | from .command import CmakeCommand
class CmakeSetGlobalSettingCommand(CmakeCommand):
    """Sublime Text command that opens the cmake server's global-settings flow."""

    def run(self):
        # Delegate to the cmake server instance owned by the CmakeCommand base.
        self.server.global_settings()
| mit | Python |
|
96bea6812919067c28e0c28883226434d81f6e8d | add locusattrs class | LinkageIO/LocusPocus | locuspocus/locusattrs.py | locuspocus/locusattrs.py |
class LocusAttrs():
    """A restricted, dict-like container for Locus attributes.

    Storage is lazily created: an instance starts "empty" (no backing
    dict) and only allocates one on the first key assignment.
    """

    def __init__(self, attrs=None):
        # attrs: optional dict of initial key/value pairs; None means empty.
        self._attrs = attrs

    def __len__(self):
        return 0 if self.empty else len(self._attrs)

    def __eq__(self, other):
        # Two empty instances are equal regardless of backing storage.
        if self.empty and other.empty:
            return True
        if len(self) != len(other):
            # Short circuit on length
            return False
        return sorted(self.items()) == sorted(other.items())

    @property
    def empty(self):
        """True until the first attribute has been stored."""
        return self._attrs is None

    def keys(self):
        return [] if self.empty else self._attrs.keys()

    def values(self):
        return [] if self.empty else self._attrs.values()

    def items(self):
        # {}.items() (not a bare {}) so callers always receive an iterable
        # of (key, value) pairs, matching the non-empty branch.
        return {}.items() if self.empty else self._attrs.items()

    def __contains__(self, key):
        return (not self.empty) and key in self._attrs

    def __getitem__(self, key):
        if self.empty:
            # Include the missing key in the exception for easier debugging.
            raise KeyError(key)
        return self._attrs[key]

    def __setitem__(self, key, val):
        if self.empty:
            self._attrs = {}
        self._attrs[key] = val

    def __repr__(self):
        return repr({} if self.empty else self._attrs)
| mit | Python |
|
cebb3a9cdbdee7c02b0c86e1879d0c20d36b4276 | add example | UWSEDS-aut17/uwseds-group-city-fynders | examples/example_cityfynder.py | examples/example_cityfynder.py | # Which city would like to live?
# Created by City Fynders - University of Washington
import pandas as pd
import numpy as np
import geopy as gy
from geopy.geocoders import Nominatim
import data_processing as dp
from plotly_usmap import usmap
# import data
(natural, human, economy, tertiary) = dp.read_data()
# Add ranks in the DataFrame
(natural, human, economy, tertiary) = dp.data_rank(natural, human, economy, tertiary)
# Get location information
(Lat, Lon) = dp.find_loc(human)
# Create a rank DataFrame and save as csv file
rank = dp.create_rank(natural, human, economy, tertiary, Lat, Lon)
# Plot US city general ranking usmap
usmap(rank)
| mit | Python |
|
9269afee9099ef172ac2ef55ea0af85b0c77587a | Add databases.py | ollien/Timpani,ollien/Timpani,ollien/Timpani | py/database.py | py/database.py | import sqlalchemy
import sqlalchemy.orm
import uuid
import configmanager
class ConnectionManager():
    """Registry mapping string names to DatabaseConnection instances.

    Fixes over the draft: ``else if`` (a SyntaxError in Python) is now
    ``elif``; the bogus ``self`` parameter on @staticmethods is gone; the
    class-level ``_connections`` dict is referenced through the class; and
    ValueError messages are actually %-formatted.
    """

    _connections = {}

    @staticmethod
    def addConnection(connection, connectionName=None):
        """Register a DatabaseConnection (or a connection string) and return its name.

        A random hex name is generated when connectionName is omitted.
        """
        if connectionName is None:
            # Generated per call: a default argument expression would be
            # evaluated only once, reusing the same name for every call.
            connectionName = uuid.uuid4().hex
        if not isinstance(connectionName, str):
            raise ValueError("connectionName must be of type str, not %s" % type(connectionName))
        if isinstance(connection, DatabaseConnection):
            ConnectionManager._connections[connectionName] = connection
        elif isinstance(connection, str):
            # A plain string is treated as a connection string.
            ConnectionManager._connections[connectionName] = DatabaseConnection(connection)
        else:
            raise ValueError("connection must be of type str, not %s" % type(connection))
        return connectionName

    @staticmethod
    def getConnection(connectionName):
        """Return the connection registered under connectionName, or None."""
        return ConnectionManager._connections.get(connectionName)

    @staticmethod
    def closeConnection(connectionName):
        """Close the named connection's session and remove it from the registry."""
        if not isinstance(connectionName, str):
            raise ValueError("connectionName must be of type str, not %s" % type(connectionName))
        ConnectionManager._connections[connectionName].session.close()
        del ConnectionManager._connections[connectionName]
class DatabaseConnection():
    """Owns a SQLAlchemy engine and session for one connection string."""

    def __init__(self, connectionString):
        # The draft read from an undefined `configs`; use the argument.
        self.connectionString = connectionString
        # create_engine takes the URL positionally; "bind" is not a kwarg here.
        self.engine = sqlalchemy.create_engine(self.connectionString)
        # sessionmaker is the supported session factory.
        self._Session = sqlalchemy.orm.sessionmaker(bind=self.engine)
        self.session = self._Session()

    def getSelectedDatabase(self):
        """Return the name of the currently selected database, or None."""
        result = self.session.execute("SELECT DATABASE()").fetchone()
        if result is not None:
            return result[0]
        return None
| mit | Python |
|
5dfb7ad67216b31544c5f4dc785930ef0d9ffd56 | add faceAssigned tester | sol-ansano-kim/medic,sol-ansano-kim/medic,sol-ansano-kim/medic | python/medic/plugins/Tester/faceAssigned.py | python/medic/plugins/Tester/faceAssigned.py | from medic.core import testerBase
from maya import OpenMaya
class FaceAssigned(testerBase.TesterBase):
    """Medic tester flagging DAG nodes whose shaders are assigned per face."""

    Name = "FaceAssigned"

    def __init__(self):
        super(FaceAssigned, self).__init__()

    def Match(self, node):
        # Only DAG nodes carry instObjGroups plugs.
        return node.object().hasFn(OpenMaya.MFn.kDagNode)

    def Test(self, node):
        """Return True when the objectGroups child plug has connected elements,
        i.e. shading is assigned at the component (face) level."""
        inst_grp = node.dg().findPlug("instObjGroups", True)
        if not inst_grp:
            return False
        obj_grp = None
        for i in range(inst_grp.numChildren()):
            child = inst_grp.child(i)
            # "iog[-1].og" is the objectGroups child of the instObjGroups array.
            if child.partialName() == "iog[-1].og":
                obj_grp = child
                break
        if not obj_grp:
            return False
        # Any connected element means a per-face shader assignment exists.
        if obj_grp.numConnectedElements() > 0:
            return True
        return False


Tester = FaceAssigned
| mit | Python |
|
971570b4288c9ac7131a1756e17574acbe6d1b9a | Add script for converting a solarized dark file to solarized dark high contrast | bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile | python/misc/solarized-dark-high-contrast.py | python/misc/solarized-dark-high-contrast.py | #!/usr/bin/env python
import sys
if sys.version_info < (3, 4):
sys.exit('ERROR: Requires Python 3.4')
from enum import Enum
def main():
    """Rewrite a Solarized Dark colour file as Solarized Dark High Contrast.

    Detects whether the input uses upper- or lower-case hex colour codes,
    substitutes each dark code with its high-contrast counterpart in that
    case, and writes the result to ``<name>-high-contrast.<ext>``.
    """
    Cases = Enum('Cases', 'lower upper')
    infile_case = None

    if len(sys.argv) < 2:
        sys.stderr.write('ERROR: Must provide a file to modify\n')
        sys.exit('Usage: {} FILE'.format(sys.argv[0]))

    # Parallel lists (not a dict) to preserve the dark -> high-contrast pairing order.
    color_codes_dark = [
        'eee8d5',
        '93a1a1',
        '839496',
        '657b83',
        '586e75',
    ]
    color_codes_dark_high_contrast = [
        'fdf6e3',
        'eee8d5',
        '93a1a1',
        '839496',
        '657b83',
    ]

    with open(sys.argv[1], 'r') as infile:
        outfile_data = infile.read()

    # Decide the case convention of the input; the first letter-bearing code
    # found in the file is decisive.
    for code in color_codes_dark:
        if code.lower() == code.upper():
            # Purely numeric code: carries no case information.
            continue
        if code.lower() in outfile_data:
            infile_case = Cases.lower
            break
        if code.upper() in outfile_data:
            infile_case = Cases.upper
            break

    for dark, high_contrast in zip(color_codes_dark, color_codes_dark_high_contrast):
        if infile_case == Cases.lower:
            replacement = high_contrast.lower()
        elif infile_case == Cases.upper:
            replacement = high_contrast.upper()
        else:
            # Case could not be determined; leave the contents untouched.
            continue
        # Replace both spellings of the dark code with the detected case.
        outfile_data = outfile_data.replace(dark.lower(), replacement)
        outfile_data = outfile_data.replace(dark.upper(), replacement)

    with open('{}-high-contrast.{}'.format(*sys.argv[1].rsplit('.', 1)), 'w') as outfile:
        outfile.write(outfile_data)
if __name__ == '__main__':
main() | mit | Python |
|
b72c421696b5714d256b7ac461833bc692ca5354 | Add an autonomous mode to strafe and shoot. Doesn't work | frc1418/2014 | robot/robot/src/autonomous/hot_aim_shoot.py | robot/robot/src/autonomous/hot_aim_shoot.py |
try:
import wpilib
except ImportError:
from pyfrc import wpilib
import timed_shoot
class HotShootAutonomous(timed_shoot.TimedShootAutonomous):
    '''
        Based on the TimedShootAutonomous mode. Modified to allow
        shooting based on whether the hot goal is enabled or not.
    '''

    DEFAULT = False
    MODE_NAME = "Hot Aim shoot"

    def __init__(self, components):
        super().__init__(components)
        # Publish tunables/flags so the dashboard (and vision code) can set them.
        wpilib.SmartDashboard.PutNumber('DriveStrafeSpeed', 0.5)
        wpilib.SmartDashboard.PutBoolean('IsHotLeft', False)
        wpilib.SmartDashboard.PutBoolean('IsHotRight', False)

    def on_enable(self):
        '''Called when autonomous starts; reads tunables and resets state.'''
        super().on_enable()
        self.drive_strafe_speed = wpilib.SmartDashboard.GetNumber('DriveStrafeSpeed')
        print("-> Drive strafe:", self.drive_strafe_speed)
        # decided: whether we have picked a side (or given up waiting).
        self.decided = False
        # start_time: time at which driving actually began (set in update()).
        self.start_time = None

    def on_disable(self):
        '''This function is called when autonomous mode is disabled'''
        pass

    def update(self, time_elapsed):
        '''The actual autonomous program'''
        # Decide if the goal is hot, and on which side.  Exactly one side hot
        # is decisive; both or neither means keep waiting (sensor ambiguity).
        if not self.decided:
            self.hotLeft = wpilib.SmartDashboard.GetBoolean("IsHotLeft")
            self.hotRight = wpilib.SmartDashboard.GetBoolean("IsHotRight")
            if (self.hotLeft or self.hotRight) and not (self.hotLeft and self.hotRight):
                self.decided = True
                if self.hotLeft:
                    # Negate to strafe toward the left goal.
                    # NOTE(review): assumes positive strafe means right — confirm.
                    self.drive_strafe_speed *= -1
            elif time_elapsed > 6:
                # at 6 seconds, give up and shoot anyways
                self.decided = True
        # always keep the arm down
        self.intake.armDown()
        # wait a split second for the arm to come down, then
        # keep bringing the catapult down so we're ready to go
        if time_elapsed > 0.3:
            self.catapult.pulldown()
        # wait some period before we start driving
        if time_elapsed < self.drive_wait:
            pass
        else:
            if self.decided:
                # only set this once, so we can calculate time from this
                # point on
                if self.start_time is None:
                    self.start_time = time_elapsed
                time_elapsed = time_elapsed - self.start_time
                if time_elapsed < self.drive_time:
                    # Drive slowly forward for N seconds
                    self.drive.move(self.drive_strafe_speed, self.drive_speed, 0)
                elif time_elapsed < self.drive_time + 1.0:
                    # Finally, fire and keep firing for 1 seconds
                    self.catapult.launchNoSensor()
| bsd-3-clause | Python |
|
3c046062af376603145545f37b917a5c927b3aba | Create mergesort_recursive.py | ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms | recursive_algorithms/mergesort_recursive.py | recursive_algorithms/mergesort_recursive.py | def merge_sort(array):
temp = []
if( len(array) == 1):
return array;
half = len(array) / 2
lower = merge_sort(array[:half])
upper = merge_sort(array[half:])
lower_len = len(lower)
upper_len = len(upper)
i = 0
j = 0
while i != lower_len or j != upper_len:
if( i != lower_len and (j == upper_len or lower[i] < upper[j])):
temp.append(lower[i])
i += 1
else:
temp.append(upper[j])
j += 1
return temp
# Demo: sort a sample list and print it space-separated.
# NOTE: Python 2 print statement; run under Python 2.
array = [11, 12, 3, 28, 41, 62,16, 10]
ar = merge_sort(array)
print " ".join(str(x) for x in ar)
| cc0-1.0 | Python |
|
06b0f93ecd5fac8eda02fce96c1e4ec0306a7989 | Increase coverage | proyectos-analizo-info/pybossa-analizo-info,stefanhahmann/pybossa,inteligencia-coletiva-lsd/pybossa,jean/pybossa,OpenNewsLabs/pybossa,PyBossa/pybossa,proyectos-analizo-info/pybossa-analizo-info,harihpr/tweetclickers,Scifabric/pybossa,PyBossa/pybossa,geotagx/pybossa,jean/pybossa,harihpr/tweetclickers,OpenNewsLabs/pybossa,CulturePlex/pybossa,proyectos-analizo-info/pybossa-analizo-info,geotagx/pybossa,stefanhahmann/pybossa,inteligencia-coletiva-lsd/pybossa,CulturePlex/pybossa,CulturePlex/pybossa,Scifabric/pybossa | test/test_google.py | test/test_google.py | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from base import web, model, Fixtures
import pybossa.view.google as google
class TestGoogle:
    """Tests for the Google OAuth user-management view helper."""

    def setUp(self):
        # Fresh app, clean database, and baseline fixtures for every test.
        self.app = web.app
        model.rebuild_db()
        Fixtures.create()

    def test_manage_user(self):
        """Test GOOGLE manage_user works."""
        with self.app.test_request_context('/'):
            # First with a new user: a local account should be created from
            # the Google profile data.
            user_data = dict(id='1', name='google',
                             email='[email protected]')
            token = 't'
            user = google.manage_user(token, user_data, None)
            assert user.email_addr == user_data['email'], user
            assert user.name == user_data['name'], user
            assert user.fullname == user_data['name'], user
            assert user.google_user_id == user_data['id'], user
            # Second with the same user: the existing account is returned.
            user = google.manage_user(token, user_data, None)
            assert user.email_addr == user_data['email'], user
            assert user.name == user_data['name'], user
            assert user.fullname == user_data['name'], user
            assert user.google_user_id == user_data['id'], user
            # Finally with a user that already is in the system (same name
            # and email, different Google id): no account is created.
            user_data = dict(id='10', name=Fixtures.name,
                             email=Fixtures.email_addr)
            token = 'tA'
            user = google.manage_user(token, user_data, None)
            assert user is None
| agpl-3.0 | Python |
|
17e2b9ecb67c8b1f3a6f71b752bc70b21584092e | Add initial tests for scriptserver. | cnelsonsic/SimpleMMO,cnelsonsic/SimpleMMO,cnelsonsic/SimpleMMO | tests/test_scriptserver.py | tests/test_scriptserver.py | import unittest
from mock import patch, Mock
import sys
sys.path.append(".")
from scriptserver import ZoneScriptRunner
class TestZoneScriptRunner(unittest.TestCase):
    """Tests for scriptserver.ZoneScriptRunner with mongoengine mocked out."""

    @classmethod
    def setUpClass(cls):
        # Patch mongoengine for the whole test class; no real DB connection.
        cls.mongoengine_patch = patch('scriptserver.me')
        cls.mongoengine_patch.start()

    @classmethod
    def tearDownClass(cls):
        cls.mongoengine_patch.stop()

    def test___init__(self):
        # The constructor must call load_scripts exactly once.
        zoneid = "zoneid"
        with patch('scriptserver.Object'):
            with patch.object(ZoneScriptRunner, 'load_scripts') as mock_load_scripts:
                zone_script_runner = ZoneScriptRunner(zoneid)
                self.assertTrue(zone_script_runner)
                self.assertEqual(1, mock_load_scripts.call_count)

    def test_load_scripts(self):
        expected = {}
        zoneid = "zoneid"
        # Build the runner with load_scripts stubbed so __init__ doesn't load.
        with patch.object(ZoneScriptRunner, 'load_scripts'):
            with patch('scriptserver.Object'):
                zone_script_runner = ZoneScriptRunner(zoneid)
        # Fake a scripted object whose script module lives in a mocked
        # 'thing.fake.chicken' package injected into sys.modules.
        with patch('scriptserver.ScriptedObject') as ScriptedObject:
            MockThing = Mock()
            with patch.dict('sys.modules', {'thing': MockThing, 'thing.fake': MockThing.fake,
                                            'thing.fake.chicken': MockThing.fake.chicken}):
                MockThing.fake.chicken.Chicken.tick = Mock()
                MockScriptedObject = Mock()
                MockScriptedObject.scripts = ['thing.fake.chicken']
                ScriptedObject.objects.return_value = [MockScriptedObject]
                result = zone_script_runner.load_scripts()
                # The loaded-script mapping must include our fake module.
                self.assertNotEqual(expected, result)
                self.assertIn('thing.fake.chicken', result)

    def test_start(self):
        # zone_script_runner = ZoneScriptRunner(zoneid)
        # self.assertEqual(expected, zone_script_runner.start())
        pass # TODO: implement your test here

    def test_tick(self):
        # zone_script_runner = ZoneScriptRunner(zoneid)
        # self.assertEqual(expected, zone_script_runner.tick())
        pass # TODO: implement your test here
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | Python |
|
e5243d0fb792e82825633f1afdd6e799238a90f3 | Add portable buildtools update script (#46) | flutter/buildroot,flutter/buildroot,flutter/buildroot,flutter/buildroot | tools/buildtools/update.py | tools/buildtools/update.py | #!/usr/bin/python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pulls down tools required to build flutter."""
import os
import subprocess
import sys
SRC_ROOT = (os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
BUILDTOOLS = os.path.join(SRC_ROOT, 'buildtools')
sys.path.insert(0, os.path.join(SRC_ROOT, 'tools'))
import find_depot_tools
DEPOT_PATH = find_depot_tools.add_depot_tools_to_path()
def Update():
    """Run buildtools/update.sh to fetch the toolchain and gn (non-Windows hosts)."""
    path = os.path.join(BUILDTOOLS, 'update.sh')
    return subprocess.call(['/bin/bash', path, '--toolchain', '--gn'], cwd=SRC_ROOT)
def UpdateOnWindows():
    """Download the pinned gn.exe from Google Storage via depot_tools."""
    # The .sha1 file pins the exact binary; the downloader fetches by hash.
    sha1_file = os.path.join(BUILDTOOLS, 'win', 'gn.exe.sha1')
    downloader_script = os.path.join(DEPOT_PATH, 'download_from_google_storage.py')
    download_cmd = [
        'python',
        downloader_script,
        '--no_auth',
        '--no_resume',
        '--quiet',
        '--platform=win*',
        '--bucket',
        'chromium-gn',
        '-s',
        sha1_file
    ]
    return subprocess.call(download_cmd)
def main(argv):
    """Dispatch to the platform-appropriate update routine; returns its exit code."""
    if sys.platform.startswith('win'):
        return UpdateOnWindows()
    return Update()
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause | Python |
|
d1f02226fe805fb80a17f1d22b84b748b65b4e7f | add sam2fq.py | likit/BioUtils,likit/BioUtils | sam2fq.py | sam2fq.py | import sys
from collections import namedtuple
Read = namedtuple('Read', ['name','qual','seq'])
read1 = None
left = open('pe_1.fq', 'w')
right = open('pe_2.fq', 'w')
unpaired = open('unpaired.fq', 'w')
for line in sys.stdin:
items = line.split('\t')
name, qual, seq = items[0], items[10], items[9]
if not read1:
read1 = Read(name, qual, seq)
continue
else:
read2 = Read(name, qual, seq)
if read1.name == read2.name:
print read1.name, '<-->', read2.name
#print >> left, '@%s\n%s\n+\n%s' % (read1.name, read1.seq, read1.qual)
#print >> right, '@%s\n%s\n+\n%s' % (read2.name, read2.seq, read2.qual)
read1 = None
else:
print read1.name
#print >> unpaired, '@%s\n%s\n+\n%s' % (read1.name, read1.seq, read1.qual)
read1 = read2
read2 = None
if read1:
print read1.name
#print >> unpaired, '@%s\n%s\n+\n%s' % (read1.name, read1.seq, read1.qual)
read1 = read2
read2 = None
| bsd-2-clause | Python |
|
dbdfb1b5a703e0392ca67a03113e607678015a66 | add kattis/settlers2 | mjenrungrot/competitive_programming,mjenrungrot/algorithm,mjenrungrot/competitive_programming,mjenrungrot/competitive_programming,mjenrungrot/competitive_programming | Kattis/settlers2.py | Kattis/settlers2.py | """
Problem: settlers2
Link: https://open.kattis.com/problems/settlers2
Source: NWERC 2009
"""
from collections import defaultdict
import math
# Precompute the tile numbers for the first MAXN positions of the spiral.
MAXN = 10000
currentPosition = (0,0)
currentNum = 1
counter = defaultdict()
# Spiral bookkeeping: how far to walk in each of the six hex directions
# before turning, growing the limits as the spiral widens.
layers = 1
direction = 0
directionCounter = 0
limitDirectionCounter = [layers, layers-1, layers, layers, layers, layers]
# Axial step vectors for the six hex-grid directions (doubled x coordinates).
transitionVectors = [(-1,1), (-2,0), (-1,-1), (1,-1), (2,0), (1,1)]
nMoves = 0
tilesMap = dict()
tilesMap[currentPosition] = currentNum
# tilesList[i] is the number chosen for the i-th tile (index 0 unused).
tilesList = [None, currentNum]
for num in [1,2,3,4,5]: counter[num] = 0
counter[currentNum] += 1
def add(position, vector):
    # Component-wise addition of a position and a step vector.
    return (position[0] + vector[0], position[1] + vector[1])
# Preprocess
while len(tilesList) - 1 < MAXN:
    # Take one step along the current spiral direction.
    currentPosition = add(currentPosition, transitionVectors[direction])
    directionCounter += 1
    while limitDirectionCounter[direction] == directionCounter:
        # Increase limit counter for next round
        limitDirectionCounter[direction] += 1
        # Change direction
        direction = (direction + 1) % len(transitionVectors)
        # Reset direction counter
        directionCounter = 0
    neighbors = [add(currentPosition, transitionVector) for transitionVector in transitionVectors]
    possibilities = set([1,2,3,4,5])
    # Eliminate similar tiles
    for neighbor in neighbors:
        if neighbor in tilesMap and tilesMap[neighbor] in possibilities:
            possibilities.remove(tilesMap[neighbor])
    # Keep only the least number of tiles
    minCounter = math.inf
    for possibility in possibilities:
        minCounter = min(minCounter, counter[possibility])
    possibilityToRemove = []
    for possibility in possibilities:
        if counter[possibility] != minCounter:
            possibilityToRemove.append(possibility)
    for possibility in possibilityToRemove:
        possibilities.remove(possibility)
    # Sort by number
    possibilities = sorted(possibilities)
    currentNum = possibilities[0]
    tilesMap[currentPosition] = currentNum
    tilesList.append(currentNum)
    counter[currentNum] += 1
# Post-process
# Answer each query with the precomputed tile number.
C = int(input())
for i in range(C):
    n = int(input())
    print(tilesList[n])
| mit | Python |
|
48443f8a8f5a15b3116ba7b4a842189f5e659f26 | test script for pymatbridge | srvanrell/libsvm-weka-python | test_pymatbridge.py | test_pymatbridge.py | #!/usr/bin/python
# Smoke test for pymatbridge: start MATLAB, run two commands, stop.
# Python 2 (print statements).
from pymatbridge import Matlab
mlab = Matlab()
mlab.start()
print "Matlab started?", mlab.started
print "Matlab is connected?", mlab.is_connected()
# Exercise the bridge with trivial MATLAB assignments.
mlab.run_code("conteo = 1:10")
mlab.run_code("magica = magic(5)")
mlab.stop()
| mit | Python |
|
4d08d50d73e8d3d3a954c9ef8ddffc23444d7d28 | Create script.py | caecilius/CocoToPy | script.py | script.py | #!/usr/bin/env python3
# First attempt at documenting the coco.fr API.
import random
import requests
pseudo = "caecilius" # must be lower case and longer than 4 characters
age = "22" # minimum "18"
sexe = "1" # "1" for male, "2" for female
codeville = "30929" # fetched from http://coco.fr/cocoland/foo.js — for example for Paris 15:
# http://coco.fr/cocoland/75015.js returns "var cityco='30929*PARIS*'; procecodo();"
# so the codeville is "30929"
referenz = "0" # purpose unknown; so far the value is always "0"
# Random salt appended to the fragment, presumably to defeat caching.
salt = str(random.randrange(100000000, 999999999))
url = str("http://coco.fr#" + pseudo + "_" + sexe + "_" + age + "_" + codeville + "_0_" + salt + "_" + referenz)
r = requests.get(url)
|
cc0d6a3b782c5646b9742ebe7308b42507ed2714 | Add python API draft interface | pf-aics-riken/kmr,pf-aics-riken/kmr,pf-aics-riken/kmr,pf-aics-riken/kmr,pf-aics-riken/kmr,pf-aics-riken/kmr | python/kmr4py.py | python/kmr4py.py | class MapReduce(object):
def reply_to_spawner(self):
pass
def get_spawner_communicator(self, index):
pass
def send_kvs_to_spawner(self, kvs):
pass
def concatenate_kvs(self, kvss):
pass
def map_once(self, mapfn, kvo_key_type=None,
rank_zero_only=False, nothreading=False,
inspect=False, keep_open=False, take_ckpt=False):
# HIGH PRIORITY
pass
def map_on_rank_zero(self, mapfn, kvo_key_type=None,
nothreading=False, inspect=False, keep_open=False,
take_ckpt=False):
# HIGH PRIORITY
pass
def read_files_reassemble(self, filename, color, offset, bytes):
pass
def read_file_by_segments(self, filename, color):
pass
class KVS(object):
    """Draft of the KMR key-value-stream API; every method is a stub.

    "# HIGH PRIORITY" marks the calls to implement first (several are
    needed by wordcount.py).  Only ``__str__`` has real behavior: it is
    fixed here to return a str (returning None raised TypeError).
    """

    def map(self, mapfn, kvo_key_type=None,
            nothreading=False, inspect=False, keep_open=False, take_ckpt=False):
        # HIGH PRIORITY
        pass
    def map_rank_by_rank(self, mapfn, opt):
        pass
    def map_ms(self, mapfn, opt):
        pass
    def map_ms_commands(self, mapfn, opt, sopt):
        pass
    def map_for_some(self, mapfn, opt):
        pass
    def map_via_spawn(self, mapfn, sopt):
        pass
    def map_processes(self, mapfn, nonmpi, sopt):
        pass
    def map_parallel_processes(self, mapfn, sopt):
        pass
    def map_serial_processes(self, mapfn, sopt):
        pass
    def reverse(self, kvo_key_type=None,
                nothreading=False, inspect=False, keep_open=False,
                take_ckpt=False):
        # HIGH PRIORITY (as used in wordcount.py)
        pass
    def reduce(self, redfn, kvo_key_type=None,
               nothreading=False, inspect=False, take_ckpt=False):
        # HIGH PRIORITY
        pass
    def reduce_as_one(self, redfn, opt):
        pass
    def reduce_for_some(self, redfn, opt):
        pass
    def shuffle(self, kvo_key_type=None,
                key_as_rank=False, take_ckpt=False):
        # HIGH PRIORITY
        pass
    def replicate(self, kvo_key_type=None,
                  inspect=False, rank_zero=False, take_ckpt=False):
        # HIGH PRIORITY
        pass
    def distribute(self, cyclic, opt):
        pass
    def sort_locally(self, shuffling, opt):
        pass
    def sort(self, inspect=False):
        # HIGH PRIORITY (as used in wordcount.py)
        pass
    def sort_by_one(self, opt):
        pass
    def free(self):
        # HIGH PRIORITY
        pass
    def add_kv(self, kv_tuple):
        # HIGH PRIORITY
        pass
    def add_kv_done(self):
        # HIGH PRIORITY
        pass
    def get_element_count(self):
        # HIGH PRIORITY
        pass
    def local_element_count(self):
        pass
    def to_list(self):
        # retrieve_kvs_entries
        pass
    # def from_list():
    #     pass
    def dump(self, flag):
        pass
    def __str__(self):
        # __str__ must return a str; keep the dump() call for its
        # (future) side effect, then return an empty string for now.
        self.dump(0)
        return ""
    def dump_stats(self, level):
        pass
| bsd-2-clause | Python |
|
dbacf8cd0c2bae394b6c67a810836668d510787d | test for index (re)generation | sciyoshi/CheesePrism,sciyoshi/CheesePrism,whitmo/CheesePrism,SMFOSS/CheesePrism,whitmo/CheesePrism,SMFOSS/CheesePrism,whitmo/CheesePrism | tests/test_index.py | tests/test_index.py | from cheeseprism.utils import resource_spec
from itertools import count
from path import path
from pprint import pprint
import unittest
class IndexTestCase(unittest.TestCase):
    """Tests for cheeseprism.index.IndexManager regeneration."""

    # Monotonic counter so each test gets a unique index directory.
    counter = count()
    base = "egg:CheesePrism#tests/test-indexes"

    def make_one(self, index_name='test-index'):
        """Create an IndexManager rooted in a fresh, uniquely named directory."""
        from cheeseprism import index
        index_path = path(resource_spec(self.base)) / "%s-%s" % (next(self.counter), index_name)
        return index.IndexManager(index_path)

    def setUp(self):
        self.im = self.make_one()
        # Seed the index with the dummy package distribution.
        dummy = path(__file__).parent / "dummypackage/dist/dummypackage-0.0dev.tar.gz"
        dummy.copy(self.im.path)
        self.dummypath = self.im.path / dummy.name

    def test_regenerate_index(self):
        # pformat (not pprint): pprint prints to stdout and returns None, so
        # the original assertion message always read "expected: None".
        from pprint import pformat
        self.im.regenerate(self.im.path)
        pth = self.im.path
        file_structure = [(x.parent.name, x.name) for x in pth.walk()]
        expected = [(u'0-test-index', u'dummypackage'),
                    (u'dummypackage', u'index.html'),
                    (u'0-test-index', u'dummypackage-0.0dev.tar.gz'),
                    (u'0-test-index', u'index.html')]
        assert file_structure == expected, \
            "File structure does not match:\nexpected: %s.\n actual: %s" % (
                pformat(expected), pformat(file_structure))
| bsd-2-clause | Python |
|
554c6490330760690fbbd1cd5ece3da563e342eb | update queen4.py | skywind3000/language,skywind3000/language,skywind3000/language | python/queen4.py | python/queen4.py | f = lambda A, x, y: y < 0 or (not (A[y] in (A[x], A[x] + (x - y), A[x] - (x - y))))
g = lambda A, x, y: (not x) or (f(A, x, y) and ((y < 0) or g(A, x, y - 1)))
h = lambda A, x: sum([ g(A, x, x - 1) and 1 or 0 for A[x] in range(len(A)) ])
q = lambda A, x: h(A, x) if (x == 7) else sum([ q(A, x + 1) for A[x] in range(8) if g(A, x, x - 1) ])
print(q([ 0 for i in range(8) ], 0))
| mit | Python |
|
7d2c728cb121a0aefef11fd3c8ab7b7f700516e8 | read grove pi sensors | vdbg/ST,vdbg/ST | readSensors.py | readSensors.py | import time
import decimal
import grovepi
import math
from grovepi import *
from grove_rgb_lcd import *
# GrovePi port assignments.
sound_sensor = 0 # port A0
light_sensor = 1 # port A1
temperature_sensor = 2 # port D2
led = 4 # NOTE(review): comment originally said port D3 but the pin is 4 -- confirm wiring

# Previous readings; the main loop only prints/updates the LCD on change.
lastTemp = 0.1 # initialize a floating point temp variable
lastHum = 0.1 # initialize a floating Point humidity variable
lastLight = 0.1
lastSound = 0.1

# Temperature thresholds (degrees C) for the background-colour mapping.
tooLow = 16.0 # Too low temp
justRight = 20.0 # OK temp
tooHigh = 23.0 # Too high temp

grovepi.pinMode(led,"OUTPUT")
grovepi.analogWrite(led,255) #turn led to max to show readiness
def calcColorAdj(variance):
    """Map a temperature variance (degrees C) to an 8-bit colour offset.

    Six degrees of variance span the 0-255 range, so the scale factor is
    255 / 6 = 42.5.  The result is truncated to an int and clamped to 255.
    """
    factor = 42.5
    adj = abs(int(factor * variance))
    return min(adj, 255)


def calcBG(ftemp, setpoint=20.0):
    """Return an [R, G, B] background colour for temperature *ftemp*.

    The colour is pure green at *setpoint* and slides towards blue when
    colder and towards red when hotter, with green fading as the variance
    grows.

    :param ftemp: measured temperature in degrees C.
    :param setpoint: ideal temperature; default 20.0 matches the module's
        ``justRight`` constant, so existing one-argument calls behave
        exactly as before.
    :return: list ``[red, green, blue]``, each component in 0..255.
    """
    variance = ftemp - setpoint
    adj = calcColorAdj(variance)
    if variance < 0:
        # Too cold: no red, blue grows with the variance.
        bgR, bgG, bgB = 0, 255 - adj, adj
    elif variance == 0:
        # Perfect: pure green.
        bgR, bgG, bgB = 0, 255, 0
    else:
        # Too hot: no blue, red grows with the variance.
        bgR, bgG, bgB = adj, 255 - adj, 0
    return [bgR, bgG, bgB]
# Main polling loop: sample sensors once a second, mirror the light level on
# the LED, and refresh console/LCD output only when a reading changes.
while True:
    # Error handling in case of problems communicating with the GrovePi
    try:
        time.sleep(1)
        # NOTE(review): on Python 3 this is float division; on Python 2 it
        # floors to an int -- confirm which interpreter is targeted.
        light = grovepi.analogRead(light_sensor) / 10
        sound = grovepi.analogRead(sound_sensor)
        [t,h]=[0,0]
        # dht(pin, 0) reads the blue DHT11 sensor: temperature and humidity.
        [t,h] = grovepi.dht(temperature_sensor,0)
        # LED brightness tracks the ambient light level.
        grovepi.analogWrite(led,light*2)
        if (h != lastHum) or (t != lastTemp) or (sound != lastSound) or (light != lastLight):
            out_str ="Temperature:%d C; Humidity:%d %%; Light:%d; Sound:%d" %(t,h,light,sound)
            print (out_str)
            bgList = calcBG(t) # Calculate background colors
            setRGB(bgList[0],bgList[1],bgList[2]) # parse our list into the color settings
            out_str ="Tmp:%d Hum:%d\nLght:%d Snd:%d" %(t,h,light,sound)
            setText(out_str)
            # Remember the readings so unchanged samples stay quiet.
            lastHum = h
            lastTemp = t
            lastSound = sound
            lastLight = light
    except IOError:
        print("IO Error")
    except KeyboardInterrupt:
        # Ctrl-C: blank the display and LED before exiting.
        print("EXITNG")
        setRGB(0,0,0)
        grovepi.analogWrite(led,0)
        exit()
    except Exception as e:
        print("Error: {}".format(e))
| mit | Python |
|
076fcbb4876bd76887f7d64b533fec66f8366b70 | Add tests for cancellation | openprocurement/openprocurement.tender.esco,Scandie/openprocurement.tender.esco | openprocurement/tender/esco/tests/cancellation.py | openprocurement/tender/esco/tests/cancellation.py | # -*- coding: utf-8 -*-
import unittest
from openprocurement.api.tests.base import snitch
from openprocurement.tender.belowthreshold.tests.cancellation import (
TenderCancellationResourceTestMixin,
TenderCancellationDocumentResourceTestMixin
)
from openprocurement.tender.belowthreshold.tests.cancellation_blanks import (
# TenderLotsCancellationResourceTest
create_tender_lots_cancellation,
patch_tender_lots_cancellation,
# TenderLotCancellationResourceTest
create_tender_lot_cancellation,
patch_tender_lot_cancellation,
)
from openprocurement.tender.openua.tests.cancellation_blanks import (
# TenderCancellationResourceTest
create_tender_cancellation,
patch_tender_cancellation,
)
from openprocurement.tender.esco.tests.base import (
BaseESCOEUContentWebTest,
test_bids,
test_lots
)
class TenderCancellationResourceTest(BaseESCOEUContentWebTest, TenderCancellationResourceTestMixin):
    """Cancellation create/patch flow for an ESCO EU tender without lots."""
    initial_auth = ('Basic', ('broker', ''))

    # snitch() turns the shared blank test functions into test methods here.
    test_create_tender_cancellation = snitch(create_tender_cancellation)
    test_patch_tender_cancellation = snitch(patch_tender_cancellation)
class TenderLotCancellationResourceTest(BaseESCOEUContentWebTest):
    """Cancellation tests for an ESCO EU tender with a single lot."""
    initial_lots = test_lots
    initial_auth = ('Basic', ('broker', ''))

    test_create_tender_cancellation = snitch(create_tender_lot_cancellation)
    test_patch_tender_cancellation = snitch(patch_tender_lot_cancellation)
class TenderLotsCancellationResourceTest(BaseESCOEUContentWebTest):
    """Cancellation tests for an ESCO EU tender with two lots."""
    initial_lots = 2 * test_lots
    initial_auth = ('Basic', ('broker', ''))

    test_create_tender_cancellation = snitch(create_tender_lots_cancellation)
    test_patch_tender_cancellation = snitch(patch_tender_lots_cancellation)
class TenderCancellationDocumentResourceTest(BaseESCOEUContentWebTest, TenderCancellationDocumentResourceTestMixin):
    """Document upload/patch tests run against a pre-created cancellation."""
    initial_auth = ('Basic', ('broker', ''))

    def setUp(self):
        super(TenderCancellationDocumentResourceTest, self).setUp()
        # Create cancellation
        # The mixin's tests need an existing cancellation to attach
        # documents to; remember its id for them.
        response = self.app.post_json('/tenders/{}/cancellations?acc_token={}'.format(
            self.tender_id, self.tender_token), {'data': {'reason': 'cancellation reason'}})
        cancellation = response.json['data']
        self.cancellation_id = cancellation['id']
def suite():
    """Collect this module's cancellation test cases into a single suite."""
    cases = (
        TenderCancellationDocumentResourceTest,
        TenderCancellationResourceTest,
    )
    result = unittest.TestSuite()
    for case in cases:
        result.addTest(unittest.makeSuite(case))
    return result
if __name__ == '__main__':
    # Allow running this module directly; 'suite' selects the tests above.
    unittest.main(defaultTest='suite')
| apache-2.0 | Python |
|
77c582939734866eee09b55e9db02437b42c5451 | Create stemming.py | Semen52/GIBDD | stemming.py | stemming.py | # -*- coding: utf-8 -*-
# Портирован с Java по мотивам http://www.algorithmist.ru/2010/12/porter-stemmer-russian.html
import re
class Porter:
    """Porter stemmer for Russian.

    Ported from the Java version described at
    http://www.algorithmist.ru/2010/12/porter-stemmer-russian.html
    All suffix patterns operate on the RV region: the part of the word
    after its first vowel.
    """
    PERFECTIVEGROUND = re.compile(u"((ив|ивши|ившись|ыв|ывши|ывшись)|((?<=[ая])(в|вши|вшись)))$")
    REFLEXIVE = re.compile(u"(с[яь])$")
    ADJECTIVE = re.compile(u"(ее|ие|ые|ое|ими|ыми|ей|ий|ый|ой|ем|им|ым|ом|его|ого|ему|ому|их|ых|ую|юю|ая|яя|ою|ею)$")
    PARTICIPLE = re.compile(u"((ивш|ывш|ующ)|((?<=[ая])(ем|нн|вш|ющ|щ)))$")
    VERB = re.compile(u"((ила|ыла|ена|ейте|уйте|ите|или|ыли|ей|уй|ил|ыл|им|ым|ен|ило|ыло|ено|ят|ует|уют|ит|ыт|ены|ить|ыть|ишь|ую|ю)|((?<=[ая])(ла|на|ете|йте|ли|й|л|ем|н|ло|но|ет|ют|ны|ть|ешь|нно)))$")
    NOUN = re.compile(u"(а|ев|ов|ие|ье|е|иями|ями|ами|еи|ии|и|ией|ей|ой|ий|й|иям|ям|ием|ем|ам|ом|о|у|ах|иях|ях|ы|ь|ию|ью|ю|ия|ья|я)$")
    RVRE = re.compile(u"^(.*?[аеиоуыэюя])(.*)$")
    DERIVATIONAL = re.compile(u".*[^аеиоуыэюя]+[аеиоуыэюя].*ость?$")
    DER = re.compile(u"ость?$")
    SUPERLATIVE = re.compile(u"(ейше|ейш)$")
    I = re.compile(u"и$")
    P = re.compile(u"ь$")
    NN = re.compile(u"нн$")

    @staticmethod
    def stem(word):
        """Return the stem of *word*, lower-cased with 'ё' folded to 'е'.

        Words without any vowel have no RV region and are returned
        unchanged.  (The original called ``m.groups()`` on the ``None``
        returned by a failed match and crashed with AttributeError.)
        """
        word = word.lower()
        word = word.replace(u'ё', u'е')
        m = re.match(Porter.RVRE, word)
        if m:
            pre = m.group(1)
            rv = m.group(2)
            # Step 1: strip a perfective-gerund ending; otherwise drop a
            # reflexive suffix, then try adjective (+participle), verb,
            # and finally noun endings.
            temp = Porter.PERFECTIVEGROUND.sub('', rv, 1)
            if temp == rv:
                rv = Porter.REFLEXIVE.sub('', rv, 1)
                temp = Porter.ADJECTIVE.sub('', rv, 1)
                if temp != rv:
                    rv = temp
                    rv = Porter.PARTICIPLE.sub('', rv, 1)
                else:
                    temp = Porter.VERB.sub('', rv, 1)
                    if temp == rv:
                        rv = Porter.NOUN.sub('', rv, 1)
                    else:
                        rv = temp
            else:
                rv = temp
            # Step 2: drop a trailing 'и'.
            rv = Porter.I.sub('', rv, 1)
            # Step 3: reduce the derivational suffix '-ость/-ост'.
            if re.match(Porter.DERIVATIONAL, rv):
                rv = Porter.DER.sub('', rv, 1)
            # Step 4: soft sign, superlative and double-н cleanup.
            temp = Porter.P.sub('', rv, 1)
            if temp == rv:
                rv = Porter.SUPERLATIVE.sub('', rv, 1)
                rv = Porter.NN.sub(u'н', rv, 1)
            else:
                rv = temp
            word = pre + rv
        return word
if __name__ == '__main__':
    # Smoke test; expected output is the stem u'устойчив'.  Single-argument
    # print() behaves identically under Python 2 and 3 (the original used a
    # Python-2-only print statement).
    print(Porter.stem(u'устойчивость'))
| apache-2.0 | Python |
|
abe586ac1275901fc9d9cf1bde05b225a9046ab7 | add admin tests | archlinux/arch-security-tracker,jelly/arch-security-tracker,jelly/arch-security-tracker,anthraxx/arch-security-tracker,archlinux/arch-security-tracker,anthraxx/arch-security-tracker,anthraxx/arch-security-tracker | test/test_admin.py | test/test_admin.py | from werkzeug.exceptions import Unauthorized
from flask import url_for
from flask_login import current_user
from .conftest import logged_in, assert_logged_in, assert_not_logged_in, create_user
from app.user import random_string
from app.form.login import ERROR_ACCOUNT_DISABLED
USERNAME = 'cyberwehr87654321'
PASSWORD = random_string()
EMAIL = '{}@cyber.cyber'.format(USERNAME)
@create_user(username=USERNAME, password=PASSWORD)
@logged_in
def test_delete_user(db, client):
    """Deleting a user must invalidate their credentials for future logins."""
    resp = client.post(url_for('delete_user', username=USERNAME), follow_redirects=True,
                       data=dict(confirm='confirm'))
    resp = client.post(url_for('logout'), follow_redirects=True)
    assert_not_logged_in(resp)
    # The deleted account must no longer authenticate.
    resp = client.post(url_for('login'), follow_redirects=True,
                       data=dict(username=USERNAME, password=PASSWORD))
    assert_not_logged_in(resp, status_code=Unauthorized.code)
@logged_in
def test_create_user(db, client):
    """A user created through the admin form can immediately log in."""
    resp = client.post(url_for('create_user'), follow_redirects=True,
                       data=dict(username=USERNAME, password=PASSWORD,
                                 email=EMAIL, active=True))
    assert resp.status_code == 200
    # Switch from the admin session to the fresh account.
    resp = client.post(url_for('logout'), follow_redirects=True)
    assert_not_logged_in(resp)
    resp = client.post(url_for('login'), follow_redirects=True,
                       data=dict(username=USERNAME, password=PASSWORD))
    assert_logged_in(resp)
    assert USERNAME == current_user.name
@create_user(username=USERNAME, password=PASSWORD)
@logged_in
def test_edit_user(db, client):
    """Editing a user without the 'active' flag disables the account."""
    # NOTE(review): the form data omits 'active'; the login rejection below
    # suggests this disables the account -- confirm against the edit form.
    resp = client.post(url_for('edit_user', username=USERNAME), follow_redirects=True,
                       data=dict(username=USERNAME, email=EMAIL, password=PASSWORD))
    assert resp.status_code == 200
    resp = client.post(url_for('logout'), follow_redirects=True)
    assert_not_logged_in(resp)
    resp = client.post(url_for('login'), data={'username': USERNAME, 'password': PASSWORD})
    assert_not_logged_in(resp, status_code=Unauthorized.code)
    assert ERROR_ACCOUNT_DISABLED in resp.data.decode()
| mit | Python |
|
b63e65b1a41f809caf1c2dcd689955df76add20f | Add a plot just of backscatter phase vs. diameter. | dopplershift/Scattering | test/test_delta.py | test/test_delta.py | import matplotlib.pyplot as plt
import numpy as np
import scattering
import scipy.constants as consts
def plot_csec(scatterer, d, var, name):
    """Plot *var* against diameter, labelling the curve by wavelength.

    :param scatterer: scattering.scatterer instance (wavelength in metres).
    :param d: diameters in metres; converted to cm for the x axis.
    :param var: y values to plot.
    :param name: y-axis label.
    """
    plt.plot(d / consts.centi, var,
             label='%.1f cm' % (scatterer.wavelength / consts.centi))
    plt.xlabel('Diameter (cm)')
    plt.ylabel(name)
for s in scatterers:
plt.subplot(1,1,1)
plot_csec(s, d, np.rad2deg(np.unwrap(-np.angle(-s.S_bkwd[0,0].conj() *
s.S_bkwd[1,1]).squeeze())), 'delta')
plt.gca().set_ylim(-4, 20)
# Drop diameters from 0.1 mm to 7 mm, stored in metres as a column vector.
d = np.linspace(0.01, 0.7, 200).reshape(200, 1) * consts.centi

# Radar wavelengths (metres) for S, C and X band.
sband = 3e8 / 2.8e9
cband = 3e8 / 5.4e9
xband = 3e8 / 9.4e9

# Water temperature in degrees C.
temp = 10.0

# Oblate water drops at each band, scattered with the T-matrix model.
x_fixed = scattering.scatterer(xband, temp, 'water', diameters=d, shape='oblate')
x_fixed.set_scattering_model('tmatrix')
c_fixed = scattering.scatterer(cband, temp, 'water', diameters=d, shape='oblate')
c_fixed.set_scattering_model('tmatrix')
s_fixed = scattering.scatterer(sband, temp, 'water', diameters=d, shape='oblate')
s_fixed.set_scattering_model('tmatrix')

plot_csecs(d, [x_fixed, c_fixed, s_fixed])
plt.legend(loc = 'upper left')
plt.show()
| bsd-2-clause | Python |
|
e1d8c17746497a46c864f352823cd86b2216781c | Add commit ID milestone helper script (#7100) | michaelschiff/druid,druid-io/druid,himanshug/druid,monetate/druid,gianm/druid,gianm/druid,monetate/druid,knoguchi/druid,implydata/druid,implydata/druid,mghosh4/druid,michaelschiff/druid,implydata/druid,deltaprojects/druid,pjain1/druid,leventov/druid,druid-io/druid,leventov/druid,druid-io/druid,jon-wei/druid,Fokko/druid,implydata/druid,himanshug/druid,implydata/druid,pjain1/druid,leventov/druid,leventov/druid,Fokko/druid,deltaprojects/druid,knoguchi/druid,monetate/druid,monetate/druid,nishantmonu51/druid,deltaprojects/druid,pjain1/druid,nishantmonu51/druid,nishantmonu51/druid,Fokko/druid,mghosh4/druid,michaelschiff/druid,nishantmonu51/druid,monetate/druid,pjain1/druid,gianm/druid,jon-wei/druid,deltaprojects/druid,himanshug/druid,gianm/druid,jon-wei/druid,knoguchi/druid,knoguchi/druid,jon-wei/druid,pjain1/druid,himanshug/druid,nishantmonu51/druid,Fokko/druid,deltaprojects/druid,jon-wei/druid,knoguchi/druid,michaelschiff/druid,nishantmonu51/druid,druid-io/druid,Fokko/druid,pjain1/druid,jon-wei/druid,nishantmonu51/druid,himanshug/druid,deltaprojects/druid,gianm/druid,michaelschiff/druid,gianm/druid,implydata/druid,mghosh4/druid,mghosh4/druid,leventov/druid,Fokko/druid,pjain1/druid,michaelschiff/druid,jon-wei/druid,monetate/druid,druid-io/druid,deltaprojects/druid,monetate/druid,Fokko/druid,mghosh4/druid,mghosh4/druid,mghosh4/druid,michaelschiff/druid,gianm/druid | docs/_bin/get-milestone-prs.py | docs/_bin/get-milestone-prs.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import requests
import subprocess
import sys
import time
# CLI: github username, name of the upstream git remote, and the previous
# and current release branch names.
if len(sys.argv) != 5:
    sys.stderr.write('usage: program <github-username> <upstream-remote> <previous-release-branch> <current-release-branch>\n')
    sys.stderr.write(" e.g., program myusername upstream 0.13.0-incubating 0.14.0-incubating")
    sys.stderr.write(" It is also necessary to set a GIT_TOKEN environment variable containing a personal access token.")
    sys.exit(1)

github_username = sys.argv[1]
upstream_remote = sys.argv[2]
previous_branch = sys.argv[3]
release_branch = sys.argv[4]

master_branch = "master"
upstream_master = "{}/{}".format(upstream_remote, master_branch)
upstream_previous = "{}/{}".format(upstream_remote, previous_branch)
upstream_release = "{}/{}".format(upstream_remote, release_branch)

command = "git log {}..{} --oneline | tail -1".format(upstream_master, upstream_previous)

# Find the commit where the previous release branch was cut from master
previous_branch_first_commit = subprocess.check_output(command, shell=True).decode('UTF-8')
# The --oneline output starts with the abbreviated hash; keep just that.
match_result = re.match("(\w+) .*", previous_branch_first_commit)
previous_branch_first_commit = match_result.group(1)

print("Previous branch: {}, first commit: {}".format(upstream_previous, previous_branch_first_commit))

# Find all commits between that commit and the current release branch
command = "git rev-list {}..{}".format(previous_branch_first_commit, upstream_release)
all_release_commits = subprocess.check_output(command, shell=True).decode('UTF-8')

# For every release commit, look up its merged PR(s) on GitHub and report
# the milestone, flagging commits whose PRs carry none.
for commit_id in all_release_commits.splitlines():
    try:
        # wait 3 seconds between calls to avoid hitting the rate limit
        time.sleep(3)
        search_url = "https://api.github.com/search/issues?q=type:pr+is:merged+is:closed+repo:apache/incubator-druid+SHA:{}"
        resp = requests.get(search_url.format(commit_id), auth=(github_username, os.environ["GIT_TOKEN"]))
        resp_json = resp.json()
        milestone_found = False
        closed_pr_nums = []
        if (resp_json.get("items") is None):
            print("Could not get PRs for commit ID {}, resp: {}".format(commit_id, resp_json))
            continue
        # A commit can belong to several PRs; check each one's milestone.
        for pr in resp_json["items"]:
            closed_pr_nums.append(pr["number"])
            milestone = pr["milestone"]
            if milestone is not None:
                milestone_found = True
                print("COMMIT: {}, PR#: {}, MILESTONE: {}".format(commit_id, pr["number"], pr["milestone"]["url"]))
        if not milestone_found:
            print("NO MILESTONE FOUND FOR COMMIT: {}, CLOSED PRs: {}".format(commit_id, closed_pr_nums))
    except Exception as e:
        # Keep scanning the remaining commits on any per-commit failure.
        print("Got exception for commitID: {} ex: {}".format(commit_id, e))
        continue
| apache-2.0 | Python |
|
55768b5133d8155b16e798a335cc0f46930aab12 | create my own .py for question 5 | pythonzhichan/DailyQuestion,pythonzhichan/DailyQuestion | totaljfb/Q5.py | totaljfb/Q5.py | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Jason Zhang
#
# Created: 15/11/2017
# Copyright: (c) Jason Zhang 2017
# Licence: <your licence>
#-------------------------------------------------------------------------------
def main():
pass
if __name__ == '__main__':
main()
import re
#create a URL class
class URL:
def __init__(self, url_scheme, url_netloc, url_path, url_query_params, url_fragment):
self.scheme = url_scheme
self.netloc = url_netloc
self.path = url_path
self.query_params = url_query_params
self.fragment = url_fragment
def display_result(self):
print 'scheme: ' + self.scheme
print 'netloc: ' + self.netloc
print 'path: ' + self.path
print 'query_params: '+ self.query_params
print 'fragment: '+ self.fragment
#the parsing function to parse the url address
def url_parse(url):
regex = re.compile(r'''(
\w*) #scheme
:\/\/ #://, separator
(.*) #netloc
(\/.*) #path
\? #?, separator
(.*) #query_params
\# # #, separator
(.* #fragment
)''',re.VERBOSE)
result = regex.search(url)
#TODO: parse the query_params to get a dictionary
return URL(result.group(1),result.group(2),result.group(3),result.group(4),result.group(5))
url = raw_input("Enter an url address to parse: ")
test = url_parse(url)
test.display_result()
| mit | Python |
|
3cb42b54fa8ed2cac6e05aa521a3a61a037a35ee | add rest cliant on python | temichus/cop | rest/client.py | rest/client.py | # pip install requests
import requests
resp = requests.post("http://127.0.0.1:8008/api/v1/addrecord/3", json='{"id":"name"}')
print resp.status_code
print resp.text
resp = requests.get("http://127.0.0.1:8008/api/v1/getrecord/3")
print resp.status_code
print resp.json()
resp = requests.get("http://127.0.0.1:8008/api/v1/getrecord/4")
print resp.status_code
print resp.json() | mit | Python |
|
d0f6167cb7e95c17997bc42af6cd1766b1ac7864 | add related_name migration | hacklabr/paralapraca,hacklabr/paralapraca,hacklabr/paralapraca | paralapraca/migrations/0005_auto_20171204_1006.py | paralapraca/migrations/0005_auto_20171204_1006.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('paralapraca', '0004_contract_classes'),
]
operations = [
migrations.AlterField(
model_name='contract',
name='classes',
field=models.ManyToManyField(related_name='contract', to='core.Class'),
),
migrations.AlterField(
model_name='contract',
name='groups',
field=models.ManyToManyField(help_text='Groups created to enforce this contract restrictions in several other models', related_name='contract', verbose_name='groups', to='auth.Group', blank=True),
),
]
| agpl-3.0 | Python |
|
e84d19bdc580f4d392f5b7abdc4eb8eb30919cf5 | add example: negative binomial maximum likelihood via newton's method | kcarnold/autograd,hips/autograd,HIPS/autograd,barak/autograd,HIPS/autograd,hips/autograd | examples/negative_binomial_maxlike.py | examples/negative_binomial_maxlike.py | from __future__ import division, print_function
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.scipy.special import gammaln
from autograd import grad
import scipy.optimize
# The code in this example implements a method for finding a stationary point of
# the negative binomial likelihood via Newton's method, described here:
# https://en.wikipedia.org/wiki/Negative_binomial_distribution#Maximum_likelihood_estimation
def newton(f, x0):
    """Find a root of scalar function *f* starting from *x0*.

    Wraps scipy.optimize.newton, supplying first and second derivatives
    computed automatically with autograd.grad.
    """
    # wrap scipy.optimize.newton with our automatic derivatives
    return scipy.optimize.newton(f, x0, fprime=grad(f), fprime2=grad(grad(f)))
def negbin_loglike(r, p, x):
    """Negative binomial log likelihood (the quantity we maximize)."""
    # log of the generalized binomial coefficient C(r + x - 1, x)
    log_coef = gammaln(r + x) - gammaln(r) - gammaln(x + 1)
    return log_coef + x * np.log(p) + r * np.log(1 - p)
def fit_maxlike(x, r_guess):
    """Maximum-likelihood fit of negative binomial parameters to data *x*.

    p has a closed-form optimum for fixed r, so it is profiled out and the
    stationarity condition in r alone is solved with Newton's method.
    The original bound lambdas to names (PEP 8 E731); inner defs behave
    identically.

    :param x: array of non-negative integer observations.
    :param r_guess: starting point for the Newton solve.
    :return: tuple ``(r, p)`` at the stationary point.
    """
    # A maximizer only exists for overdispersed data.
    assert np.var(x) > np.mean(x), "Likelihood-maximizing parameters don't exist!"

    def loglike(r, p):
        return np.sum(negbin_loglike(r, p, x))

    def p_given_r(r):
        # closed-form optimum of p for a fixed r
        return np.sum(x) / np.sum(r + x)

    def rprime(r):
        # derivative of the profiled log likelihood with respect to r
        return grad(loglike)(r, p_given_r(r))

    r = newton(rprime, r_guess)
    return r, p_given_r(r)
def negbin_sample(r, p, size):
    """Draw *size* negative binomial samples.

    Uses the gamma-compound-Poisson representation: rate ~ Gamma(r, p/(1-p)),
    then k ~ Poisson(rate).  The RNG call order matches the original exactly.
    """
    rates = npr.gamma(r, p / (1 - p), size=size)
    return npr.poisson(rates)
if __name__ == "__main__":
# generate data
npr.seed(0)
data = negbin_sample(r=5, p=0.5, size=1000)
# fit likelihood-extremizing parameters
r, p = fit_maxlike(data, r_guess=1)
print('Check that we are at a local stationary point:')
print(grad(lambda rp: np.sum(negbin_loglike(rp[0], rp[1], data)))((r, p)))
print('Fit parameters:')
print('r={r}, p={p}'.format(r=r, p=p))
# plot data and fit
import matplotlib.pyplot as plt
xm = data.max()
plt.figure()
plt.hist(data, bins=np.arange(xm+1)-0.5, normed=True, label='normed data counts')
plt.xlim(0,xm)
plt.plot(np.arange(xm), np.exp(negbin_loglike(r, p, np.arange(xm))), label='maxlike fit')
plt.xlabel('k')
plt.ylabel('p(k)')
plt.legend(loc='best')
plt.show()
| mit | Python |
|
6d96e9d67e50d7806be175577968ec8fed8393d7 | Create libBase.py | Soncrates/stock-study,Soncrates/stock-study | test/libBase.py | test/libBase.py | # ./test/testCommon.py
''' There are some assumptions made by this unittest
the directory structure
+ ./
| files -> lib*.py
+----./local/*
| | files -> *.ini
| | files -> *.json
| | files ->*.csv
+----./log/*
| | files -> *.log
+----./test/*
| files -> test*.py
+----./test_input/*
| see ../local
+----./test_output/*
'''
import sys
sys.path.append('../')
import logging as log
import unittest
import libCommon as TEST
class TestFILL_IN_THE_BLANK(unittest.TestCase) :
    """Exercises libCommon helpers (environ, file discovery, arg/path/command
    builders, json/config loaders), mostly by logging their output."""
    def setUp(self) : pass

    def testEnviron(self) :
        log.debug(TEST.load_environ())

    # NOTE(review): not named test_*, so unittest discovery skips this --
    # confirm whether it is intentionally disabled.
    def FindFiles(self) :
        log.debug(TEST.find_files('test*.py'))
        log.debug(TEST.find_files('test_input/*'))
        log.debug(TEST.find_files('test_input/'))

    def testBuildArgs(self) :
        # build_arg concatenates its arguments into one token.
        expected = 'test102020'
        results = TEST.build_arg('test',10,2020)
        log.debug(results)
        self.assertTrue( results == expected)
        expected = "test102020{'something': 10}"
        results = TEST.build_arg('test',10,2020,{'something' : 10})
        log.debug(results)
        self.assertTrue( results == expected)

    def testBuidlPath(self) :
        # build_path joins arguments with '/' separators.
        expected = 'test/10/2020'
        results = TEST.build_path('test',10,2020)
        log.debug(results)
        self.assertTrue( results == expected)
        expected = "test/10/2020/{'something': 10}"
        results = TEST.build_path('test',10,2020,{'something' : 10})
        log.debug(results)
        self.assertTrue( results == expected)

    def testBuildCommand(self) :
        # build_command joins arguments with spaces.
        expected = 'test 10 2020'
        results = TEST.build_command('test',10,2020)
        log.debug(results)
        self.assertTrue( results == expected)
        expected = "test 10 2020 {'something': 10}"
        results = TEST.build_command('test',10,2020,{'something' : 10})
        log.debug(results)
        self.assertTrue( results == expected)

    def testJson(self) :
        log.debug(TEST.pretty_print(TEST.load_json('test_input/json_test.json')))

    def testConfig(self) :
        log.debug(TEST.pretty_print(TEST.load_config('test_input/conf_test.ini')))
if __name__ == '__main__' :
    # Log to ../log/<joined-argv>.log, truncating any previous run's file.
    log_file = TEST.build_arg(*sys.argv).replace('.py','') + '.log'
    log_file = TEST.build_path('../log',log_file)
    TEST.remove_file(log_file)
    log.basicConfig(filename=log_file, format=TEST.LOG_FORMAT_TEST, level=log.DEBUG)
    unittest.main()
| lgpl-2.1 | Python |
|
b209c45fe32ee7b73bddff5419c1931a16da0bbd | Test file request.py | bacchilu/pyweb | test/request.py | test/request.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2
import threading
def worker():
    # Fetch the page and discard the body; only the request load matters.
    urllib2.urlopen('http://localhost:8080').read()
if __name__ == '__main__':
    # Crude load test: fire 1024 concurrent requests at the local server.
    # (Python 2 script: xrange and the print statement below.)
    for i in xrange(1024):
        threading.Thread(target=worker).start()
    print 'Partiti...'
| mit | Python |
|
be544817908ba3f9377d24a61047496c3dbf4f7a | Add test | SciTechStrategies/rlev-model-py3 | test_rlev_model.py | test_rlev_model.py | import os
import unittest
from click.testing import CliRunner
from rlev_model import cli
class TestCli(unittest.TestCase):
    """End-to-end check of the rlev_model CLI against recorded sample output."""
    def test_cli(self):
        runner = CliRunner()
        sample_filename = os.path.join('data', 'sample-data.txt')
        result = runner.invoke(cli, [sample_filename])
        assert result.exit_code == 0
        # The CLI's stdout must match the golden file byte for byte.
        output_filename = os.path.join('data', 'sample-output.txt')
        with open(output_filename) as fp:
            expected_output = fp.read()
        assert result.output == expected_output
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
f211fc4b0467c2d12d0ee60caed4c76910684f65 | Create game.py | elailai94/Bingo-3X3 | Source-Code/game.py | Source-Code/game.py | # ******************************************************************************
# Bingo
#
# @author: Elisha Lai
# @desciption: Program that allows a player to play Bingo
# @version: 1.3 12/03/2014
# ******************************************************************************
# negate_mini_bingo_card: (listof Int) (listof Int) -> (listof Int)
# Conditions:
# PRE: lst1 and lst2 must be non-empty lists.
# len(lst1) = 9
# The first three values in lst1 are between 1 and 15 inclusively.
# The next three values in lst1 are between 16 and 30 inclusively.
# The last three values in lst1 are between 31 and 45 inclusively.
# len(lst2) = 5
# The values in numbers_called are between 1 and 45 inclusively.
# POST: The length of the produced list is the same length as lst1
# Purpose: Consumes two lists of integers, lst1 and lst2. Produces a list of
# integers.
# Effects: Mutates lst1 so that all values in lst1, which are also in lst2, are
# negated.
def negate_mini_bingo_card(lst1, lst2, ind):
    """Negate, in place, each value of lst1 that appears in lst2[ind:].

    Equivalent to the original tail recursion, written as a loop.  Only the
    first occurrence in lst1 is negated, and (as in the original) a number
    already negated is not found again because the card holds positives.
    """
    for called in lst2[ind:]:
        if called in lst1:
            lst1[lst1.index(called)] = -called
# is_row_negative: (listof Int) -> Bool
# Conditions:
# PRE: card must be a non-empty list.
# len(card) = 9
# No values in card are zero.
# Purpose: Consumes a list of integers, card. Produces True if any row of card
# contains three negated numbers. Otherwise, False is produced.
def is_row_negative (card):
    """True if any row of the 3x3 card (stored column-major) is all negative."""
    rows = ((0, 3, 6), (1, 4, 7), (2, 5, 8))
    return any(all(card[i] < 0 for i in row) for row in rows)
# is_col_negative: (listof Int) -> Bool
# Conditions:
# PRE: card must be a non-empty list.
# len(card) = 9
# No values in card are zero.
# Purpose: Consumes a list of integers, card. Produces True if any column of
# card contains three negated numbers. Otherwise, False is produced.
def is_col_negative (card):
    """True if any column of the 3x3 card (stored column-major) is all negative."""
    cols = ((0, 1, 2), (3, 4, 5), (6, 7, 8))
    return any(all(card[i] < 0 for i in col) for col in cols)
# is_diag_negative: (listof Int) -> Bool
# Conditions:
# PRE: card must be a non-empty list.
# len(card) = 9
# No values in card are zero.
# Purpose: Consumes a list of integers, card. Produces True if diagonal of
# card contains three negated numbers. Otherwise, False is produced.
def is_diag_negative (card):
    """True if either diagonal of the 3x3 card is all negative."""
    diagonals = ((0, 4, 8), (2, 4, 6))
    return any(all(card[i] < 0 for i in diag) for diag in diagonals)
# mini_bingo: (listof Int) (listof Int) -> Bool
# Conditions:
# PRE: mini_bingo_card and numbers_called must be non-empty lists.
# len(mini_bingo_card) = 9
# The first three values in lst1 are between 1 and 15 inclusively.
# The next three values in lst1 are between 16 and 30 inclusively.
# The last three values in lst1 are between 31 and 45 inclusively.
# len(numbers_called) = 5
# The values in numbers_called are between 1 and 45 inclusively.
# Purpose: Consumes two lists of integers, mini_bingo_card and numbers_called.
# Produces True if one or more rows, columns, or diagonals have all
# negative numbers. Otherwise, False is produced.
# Effects: Mutates mini_bingo_card so that all values in mini_bingo_card, which
# are also in numbers_called, are negated. Prints out the mutated
# mini_bingo_card only if one or more rows, columns, or diagonals have
# all negative numbers.
# Examples:
# mini_bingo([5,2,9,17,23,26,33,38,44],[5,10,23,31,44]) will print the
# following screen output:
# -5 17 33
# 2 -23 38
# 9 26 -44
# and True is produced.
# mini_bingo([5,2,9,17,23,26,33,38,44],[1,2,3,4,5]) will have no screen output
# and False is produced.
def mini_bingo(mini_bingo_card, numbers_called):
negate_mini_bingo_card(mini_bingo_card, numbers_called, 0)
if is_row_negative(mini_bingo_card) == True or\
is_col_negative(mini_bingo_card) == True or\
is_diag_negative(mini_bingo_card) == True:
print mini_bingo_card[0],mini_bingo_card[3],mini_bingo_card[6]
print mini_bingo_card[1],mini_bingo_card[4],mini_bingo_card[7]
print mini_bingo_card[2],mini_bingo_card[5],mini_bingo_card[8]
return True
else:
return False
| mit | Python |
|
da2a4fa9e618b212ddbb2fcbc079fa37970ae596 | Add handler for concurrently logging to a file | todddeluca/tfd | tfd/loggingutil.py | tfd/loggingutil.py |
'''
Utilities to assist with logging in python
'''
import logging
import os
class ConcurrentFileHandler(logging.Handler):
    """
    A handler class which writes logging records to a file.  Every time it
    writes a record it opens the file, writes to it, flushes the buffer,
    and closes the file.  The short open/write/close critical section keeps
    records from multiple processes (e.g. jobs distributed across a
    cluster) from being interleaved/garbled the way they can be with the
    standard FileHandler.  Note that reopening per record has a cost, which
    may matter in a very tight logging loop.
    """

    def __init__(self, filename, mode="a"):
        """
        Remember the file used as the stream for logging.

        :param filename: log file path; stored as an absolute path so
            derived classes keep working if the current directory changes.
        :param mode: open mode, defaults to 'a' (append).
        """
        logging.Handler.__init__(self)
        self.filename = os.path.abspath(filename)
        self.mode = mode

    def _openWriteClose(self, msg):
        # 'with' guarantees the file is closed even if write() raises
        # (the original leaked the handle on a write error).
        with open(self.filename, self.mode) as f:
            f.write(msg)
            f.flush()  # improves consistency of writes in a concurrent environment

    def emit(self, record):
        """
        Emit a record: format it (using the attached formatter, if any) and
        append it to the file with a trailing newline.
        """
        try:
            msg = self.format(record)
            self._openWriteClose("%s\n" % msg)
        except Exception:
            # Narrower than the original bare 'except:', which also
            # swallowed KeyboardInterrupt/SystemExit.
            self.handleError(record)
| mit | Python |
|
ba599deb23c75a6dbcbc0de897afedc287c2ea94 | Create 02str_format.py | MurphyWan/Python-first-Practice | 02str_format.py | 02str_format.py | age = 38
name = 'Murphy Wan'
print('{0} is {1} yeaers old'.format(name, age))
print('why is {0} playing with that python?'.format(name))
| mit | Python |
|
700db5c742be8a893b1c362ae0955a934b88c39b | Add test_learning_journal.py with test_app() for configuring the app for testing | sazlin/learning_journal | test_journal.py | test_journal.py | # -*- coding: utf-8 -*-
from contextlib import closing
import pytest
from journal import app
from journal import connect_db
from journal import get_database_connection
from journal import init_db
TEST_DSN = 'dbname=test_learning_journal'
def clear_db():
    """Drop the entries table so the next test run starts from a clean schema."""
    # closing() ensures the connection is released even if the DDL fails.
    with closing(connect_db()) as db:
        db.cursor().execute("DROP TABLE entries")
        db.commit()
@pytest.fixture(scope='session')
def test_app():
    """configure our app for use in testing"""
    # Point the app at the dedicated test database and enable TESTING so
    # errors surface in tests instead of being rendered by Flask.
    app.config['DATABASE'] = TEST_DSN
    app.config['TESTING'] = True
| mit | Python |
|
59d51e90203a20f9e0b01eda43afc268311009e7 | Comment about JSON | HtmlUnit/selenium,JosephCastro/selenium,kalyanjvn1/selenium,gregerrag/selenium,carlosroh/selenium,lrowe/selenium,Appdynamics/selenium,telefonicaid/selenium,gregerrag/selenium,sebady/selenium,yukaReal/selenium,stupidnetizen/selenium,oddui/selenium,carlosroh/selenium,BlackSmith/selenium,rrussell39/selenium,dandv/selenium,lmtierney/selenium,rrussell39/selenium,SouWilliams/selenium,dimacus/selenium,lummyare/lummyare-lummy,zenefits/selenium,isaksky/selenium,alb-i986/selenium,GorK-ChO/selenium,dibagga/selenium,rrussell39/selenium,Dude-X/selenium,xmhubj/selenium,mestihudson/selenium,isaksky/selenium,joshbruning/selenium,lukeis/selenium,Jarob22/selenium,tkurnosova/selenium,markodolancic/selenium,skurochkin/selenium,Sravyaksr/selenium,dbo/selenium,jsarenik/jajomojo-selenium,JosephCastro/selenium,sankha93/selenium,AutomatedTester/selenium,davehunt/selenium,arunsingh/selenium,titusfortner/selenium,houchj/selenium,jsakamoto/selenium,dimacus/selenium,thanhpete/selenium,isaksky/selenium,sebady/selenium,o-schneider/selenium,jknguyen/josephknguyen-selenium,jsarenik/jajomojo-selenium,TheBlackTuxCorp/selenium,oddui/selenium,i17c/selenium,blueyed/selenium,juangj/selenium,tbeadle/selenium,xsyntrex/selenium,vinay-qa/vinayit-android-server-apk,SeleniumHQ/selenium,sag-enorman/selenium,houchj/selenium,lummyare/lummyare-lummy,jknguyen/josephknguyen-selenium,juangj/selenium,slongwang/selenium,bmannix/selenium,dkentw/selenium,knorrium/selenium,alb-i986/selenium,aluedeke/chromedriver,mach6/selenium,mach6/selenium,HtmlUnit/selenium,joshmgrant/selenium,uchida/selenium,rplevka/selenium,minhthuanit/selenium,kalyanjvn1/selenium,compstak/selenium,dimacus/selenium,kalyanjvn1/selenium,SevInf/IEDriver,skurochkin/selenium,pulkitsinghal/selenium,MCGallaspy/selenium,krosenvold/selenium,blackboarddd/selenium,denis-vilyuzhanin/selenium-fastview,onedox/selenium,bmannix/selenium,alb-i986/selenium,livioc/selenium,blueyed/selenium,compstak/selenium
,lummyare/lummyare-test,GorK-ChO/selenium,SevInf/IEDriver,uchida/selenium,xsyntrex/selenium,lukeis/selenium,SeleniumHQ/selenium,sag-enorman/selenium,mojwang/selenium,asashour/selenium,sevaseva/selenium,misttechnologies/selenium,tarlabs/selenium,5hawnknight/selenium,gregerrag/selenium,asashour/selenium,DrMarcII/selenium,mach6/selenium,gabrielsimas/selenium,HtmlUnit/selenium,BlackSmith/selenium,krosenvold/selenium,Ardesco/selenium,dbo/selenium,rovner/selenium,sankha93/selenium,carsonmcdonald/selenium,gurayinan/selenium,blueyed/selenium,jsakamoto/selenium,chrisblock/selenium,jsarenik/jajomojo-selenium,xsyntrex/selenium,customcommander/selenium,s2oBCN/selenium,TheBlackTuxCorp/selenium,zenefits/selenium,dkentw/selenium,customcommander/selenium,GorK-ChO/selenium,meksh/selenium,titusfortner/selenium,knorrium/selenium,anshumanchatterji/selenium,gotcha/selenium,tarlabs/selenium,lummyare/lummyare-test,pulkitsinghal/selenium,SouWilliams/selenium,MCGallaspy/selenium,dkentw/selenium,petruc/selenium,lummyare/lummyare-lummy,tkurnosova/selenium,knorrium/selenium,HtmlUnit/selenium,Appdynamics/selenium,joshmgrant/selenium,sevaseva/selenium,slongwang/selenium,dimacus/selenium,oddui/selenium,zenefits/selenium,telefonicaid/selenium,jsarenik/jajomojo-selenium,joshbruning/selenium,SeleniumHQ/selenium,denis-vilyuzhanin/selenium-fastview,carlosroh/selenium,mach6/selenium,JosephCastro/selenium,vinay-qa/vinayit-android-server-apk,gotcha/selenium,minhthuanit/selenium,soundcloud/selenium,gorlemik/selenium,isaksky/selenium,orange-tv-blagnac/selenium,valfirst/selenium,gabrielsimas/selenium,pulkitsinghal/selenium,quoideneuf/selenium,juangj/selenium,Tom-Trumper/selenium,slongwang/selenium,doungni/selenium,stupidnetizen/selenium,thanhpete/selenium,rrussell39/selenium,dibagga/selenium,SeleniumHQ/selenium,oddui/selenium,dibagga/selenium,joshmgrant/selenium,lilredindy/selenium,dibagga/selenium,sankha93/selenium,joshuaduffy/selenium,jknguyen/josephknguyen-selenium,juangj/selenium,sevaseva/selenium,jabbr
wcky/selenium,dcjohnson1989/selenium,i17c/selenium,markodolancic/selenium,SouWilliams/selenium,blackboarddd/selenium,dibagga/selenium,bmannix/selenium,rrussell39/selenium,vveliev/selenium,manuelpirez/selenium,BlackSmith/selenium,jsakamoto/selenium,i17c/selenium,Tom-Trumper/selenium,jerome-jacob/selenium,houchj/selenium,TikhomirovSergey/selenium,RamaraoDonta/ramarao-clone,dcjohnson1989/selenium,RamaraoDonta/ramarao-clone,meksh/selenium,anshumanchatterji/selenium,joshuaduffy/selenium,rplevka/selenium,lmtierney/selenium,s2oBCN/selenium,jerome-jacob/selenium,zenefits/selenium,meksh/selenium,petruc/selenium,lummyare/lummyare-lummy,carsonmcdonald/selenium,sankha93/selenium,denis-vilyuzhanin/selenium-fastview,sebady/selenium,blueyed/selenium,jsarenik/jajomojo-selenium,manuelpirez/selenium,carsonmcdonald/selenium,xsyntrex/selenium,MeetMe/selenium,lummyare/lummyare-lummy,thanhpete/selenium,alb-i986/selenium,tbeadle/selenium,krosenvold/selenium,petruc/selenium,kalyanjvn1/selenium,anshumanchatterji/selenium,blueyed/selenium,lmtierney/selenium,rovner/selenium,gregerrag/selenium,clavery/selenium,MCGallaspy/selenium,mestihudson/selenium,krosenvold/selenium-git-release-candidate,Herst/selenium,alexec/selenium,aluedeke/chromedriver,meksh/selenium,wambat/selenium,vveliev/selenium,wambat/selenium,jknguyen/josephknguyen-selenium,jsakamoto/selenium,quoideneuf/selenium,asashour/selenium,jsakamoto/selenium,jsarenik/jajomojo-selenium,tkurnosova/selenium,o-schneider/selenium,twalpole/selenium,onedox/selenium,MCGallaspy/selenium,SouWilliams/selenium,Herst/selenium,davehunt/selenium,amikey/selenium,valfirst/selenium,DrMarcII/selenium,tarlabs/selenium,dibagga/selenium,Dude-X/selenium,arunsingh/selenium,TikhomirovSergey/selenium,doungni/selenium,dandv/selenium,bartolkaruza/selenium,5hawnknight/selenium,jknguyen/josephknguyen-selenium,actmd/selenium,uchida/selenium,houchj/selenium,gemini-testing/selenium,Ardesco/selenium,alexec/selenium,gorlemik/selenium,skurochkin/selenium,manuelpirez/selenium
,chrsmithdemos/selenium,Dude-X/selenium,juangj/selenium,DrMarcII/selenium,knorrium/selenium,slongwang/selenium,dcjohnson1989/selenium,dibagga/selenium,blackboarddd/selenium,gotcha/selenium,gurayinan/selenium,gorlemik/selenium,slongwang/selenium,lummyare/lummyare-lummy,freynaud/selenium,jabbrwcky/selenium,arunsingh/selenium,lmtierney/selenium,clavery/selenium,carsonmcdonald/selenium,chrsmithdemos/selenium,denis-vilyuzhanin/selenium-fastview,quoideneuf/selenium,gurayinan/selenium,joshbruning/selenium,asashour/selenium,jerome-jacob/selenium,freynaud/selenium,kalyanjvn1/selenium,pulkitsinghal/selenium,xsyntrex/selenium,clavery/selenium,orange-tv-blagnac/selenium,dandv/selenium,vinay-qa/vinayit-android-server-apk,lrowe/selenium,joshmgrant/selenium,blackboarddd/selenium,lukeis/selenium,blueyed/selenium,gotcha/selenium,Tom-Trumper/selenium,SouWilliams/selenium,denis-vilyuzhanin/selenium-fastview,Jarob22/selenium,sankha93/selenium,titusfortner/selenium,Ardesco/selenium,lrowe/selenium,p0deje/selenium,RamaraoDonta/ramarao-clone,minhthuanit/selenium,carlosroh/selenium,jsakamoto/selenium,jabbrwcky/selenium,amar-sharma/selenium,xmhubj/selenium,gemini-testing/selenium,blackboarddd/selenium,uchida/selenium,asashour/selenium,GorK-ChO/selenium,dcjohnson1989/selenium,o-schneider/selenium,quoideneuf/selenium,markodolancic/selenium,bartolkaruza/selenium,livioc/selenium,uchida/selenium,mestihudson/selenium,davehunt/selenium,twalpole/selenium,lmtierney/selenium,freynaud/selenium,mojwang/selenium,Herst/selenium,aluedeke/chromedriver,temyers/selenium,chrisblock/selenium,aluedeke/chromedriver,onedox/selenium,Ardesco/selenium,denis-vilyuzhanin/selenium-fastview,clavery/selenium,Herst/selenium,temyers/selenium,lrowe/selenium,petruc/selenium,uchida/selenium,petruc/selenium,lummyare/lummyare-test,xmhubj/selenium,Sravyaksr/selenium,carsonmcdonald/selenium,bmannix/selenium,TikhomirovSergey/selenium,dkentw/selenium,s2oBCN/selenium,Tom-Trumper/selenium,Tom-Trumper/selenium,knorrium/selenium,sevasev
a/selenium,SevInf/IEDriver,TheBlackTuxCorp/selenium,carlosroh/selenium,p0deje/selenium,lrowe/selenium,krosenvold/selenium-git-release-candidate,carlosroh/selenium,RamaraoDonta/ramarao-clone,dbo/selenium,Sravyaksr/selenium,rplevka/selenium,jknguyen/josephknguyen-selenium,titusfortner/selenium,valfirst/selenium,krmahadevan/selenium,s2oBCN/selenium,anshumanchatterji/selenium,orange-tv-blagnac/selenium,MeetMe/selenium,eric-stanley/selenium,dcjohnson1989/selenium,carsonmcdonald/selenium,GorK-ChO/selenium,actmd/selenium,gemini-testing/selenium,krosenvold/selenium-git-release-candidate,manuelpirez/selenium,compstak/selenium,gotcha/selenium,actmd/selenium,jsarenik/jajomojo-selenium,xsyntrex/selenium,orange-tv-blagnac/selenium,krosenvold/selenium,p0deje/selenium,freynaud/selenium,jabbrwcky/selenium,chrsmithdemos/selenium,alb-i986/selenium,pulkitsinghal/selenium,telefonicaid/selenium,alexec/selenium,joshbruning/selenium,onedox/selenium,orange-tv-blagnac/selenium,meksh/selenium,alexec/selenium,titusfortner/selenium,isaksky/selenium,i17c/selenium,stupidnetizen/selenium,lilredindy/selenium,i17c/selenium,gotcha/selenium,vveliev/selenium,lmtierney/selenium,5hawnknight/selenium,gemini-testing/selenium,slongwang/selenium,5hawnknight/selenium,krmahadevan/selenium,AutomatedTester/selenium,chrisblock/selenium,onedox/selenium,meksh/selenium,orange-tv-blagnac/selenium,amikey/selenium,misttechnologies/selenium,DrMarcII/selenium,Dude-X/selenium,blackboarddd/selenium,livioc/selenium,lilredindy/selenium,Sravyaksr/selenium,krosenvold/selenium,lilredindy/selenium,doungni/selenium,rovner/selenium,titusfortner/selenium,JosephCastro/selenium,dcjohnson1989/selenium,onedox/selenium,alexec/selenium,valfirst/selenium,markodolancic/selenium,amikey/selenium,dbo/selenium,krosenvold/selenium-git-release-candidate,gabrielsimas/selenium,Ardesco/selenium,rplevka/selenium,krmahadevan/selenium,doungni/selenium,rplevka/selenium,compstak/selenium,alb-i986/selenium,xmhubj/selenium,GorK-ChO/selenium,rplevka/selen
ium,denis-vilyuzhanin/selenium-fastview,lilredindy/selenium,orange-tv-blagnac/selenium,alexec/selenium,gurayinan/selenium,sri85/selenium,soundcloud/selenium,gemini-testing/selenium,rplevka/selenium,DrMarcII/selenium,doungni/selenium,mojwang/selenium,wambat/selenium,tkurnosova/selenium,sri85/selenium,sri85/selenium,sankha93/selenium,temyers/selenium,Jarob22/selenium,isaksky/selenium,joshmgrant/selenium,slongwang/selenium,carlosroh/selenium,gurayinan/selenium,vveliev/selenium,amar-sharma/selenium,carsonmcdonald/selenium,lukeis/selenium,HtmlUnit/selenium,JosephCastro/selenium,JosephCastro/selenium,pulkitsinghal/selenium,slongwang/selenium,SevInf/IEDriver,quoideneuf/selenium,joshmgrant/selenium,Ardesco/selenium,sebady/selenium,xsyntrex/selenium,pulkitsinghal/selenium,petruc/selenium,lummyare/lummyare-test,MeetMe/selenium,customcommander/selenium,temyers/selenium,dkentw/selenium,RamaraoDonta/ramarao-clone,vveliev/selenium,mestihudson/selenium,lummyare/lummyare-test,s2oBCN/selenium,mach6/selenium,actmd/selenium,blackboarddd/selenium,amar-sharma/selenium,AutomatedTester/selenium,SeleniumHQ/selenium,jabbrwcky/selenium,yukaReal/selenium,actmd/selenium,compstak/selenium,lrowe/selenium,blueyed/selenium,twalpole/selenium,arunsingh/selenium,bayandin/selenium,DrMarcII/selenium,asolntsev/selenium,petruc/selenium,sag-enorman/selenium,rovner/selenium,freynaud/selenium,joshmgrant/selenium,sag-enorman/selenium,krmahadevan/selenium,titusfortner/selenium,sankha93/selenium,tkurnosova/selenium,knorrium/selenium,5hawnknight/selenium,onedox/selenium,minhthuanit/selenium,minhthuanit/selenium,MeetMe/selenium,stupidnetizen/selenium,AutomatedTester/selenium,yukaReal/selenium,Sravyaksr/selenium,amikey/selenium,vinay-qa/vinayit-android-server-apk,manuelpirez/selenium,TheBlackTuxCorp/selenium,Appdynamics/selenium,rovner/selenium,dibagga/selenium,o-schneider/selenium,temyers/selenium,aluedeke/chromedriver,Sravyaksr/selenium,DrMarcII/selenium,gabrielsimas/selenium,misttechnologies/selenium,o-schneid
er/selenium,mestihudson/selenium,juangj/selenium,Herst/selenium,davehunt/selenium,gabrielsimas/selenium,bayandin/selenium,p0deje/selenium,sag-enorman/selenium,soundcloud/selenium,wambat/selenium,gotcha/selenium,freynaud/selenium,aluedeke/chromedriver,bartolkaruza/selenium,blueyed/selenium,dkentw/selenium,twalpole/selenium,xmhubj/selenium,TheBlackTuxCorp/selenium,Appdynamics/selenium,dandv/selenium,TikhomirovSergey/selenium,5hawnknight/selenium,MCGallaspy/selenium,yukaReal/selenium,gemini-testing/selenium,twalpole/selenium,krosenvold/selenium,actmd/selenium,rovner/selenium,telefonicaid/selenium,titusfortner/selenium,meksh/selenium,davehunt/selenium,gorlemik/selenium,onedox/selenium,lilredindy/selenium,SeleniumHQ/selenium,sevaseva/selenium,titusfortner/selenium,stupidnetizen/selenium,freynaud/selenium,clavery/selenium,asashour/selenium,bayandin/selenium,rrussell39/selenium,misttechnologies/selenium,jsakamoto/selenium,p0deje/selenium,asashour/selenium,doungni/selenium,kalyanjvn1/selenium,SeleniumHQ/selenium,tbeadle/selenium,clavery/selenium,TikhomirovSergey/selenium,tarlabs/selenium,mojwang/selenium,bmannix/selenium,mestihudson/selenium,compstak/selenium,asolntsev/selenium,Dude-X/selenium,gorlemik/selenium,tarlabs/selenium,AutomatedTester/selenium,bayandin/selenium,BlackSmith/selenium,Jarob22/selenium,soundcloud/selenium,oddui/selenium,dandv/selenium,Dude-X/selenium,gorlemik/selenium,Appdynamics/selenium,dbo/selenium,asolntsev/selenium,MeetMe/selenium,arunsingh/selenium,krmahadevan/selenium,yukaReal/selenium,xmhubj/selenium,chrisblock/selenium,SeleniumHQ/selenium,temyers/selenium,tkurnosova/selenium,gregerrag/selenium,sebady/selenium,p0deje/selenium,eric-stanley/selenium,markodolancic/selenium,tarlabs/selenium,jabbrwcky/selenium,jerome-jacob/selenium,lummyare/lummyare-lummy,kalyanjvn1/selenium,asolntsev/selenium,JosephCastro/selenium,gemini-testing/selenium,blackboarddd/selenium,xsyntrex/selenium,jabbrwcky/selenium,rrussell39/selenium,amar-sharma/selenium,JosephCastro/
selenium,sri85/selenium,anshumanchatterji/selenium,eric-stanley/selenium,lmtierney/selenium,Jarob22/selenium,krosenvold/selenium,juangj/selenium,chrisblock/selenium,bayandin/selenium,p0deje/selenium,livioc/selenium,doungni/selenium,oddui/selenium,krosenvold/selenium-git-release-candidate,gabrielsimas/selenium,uchida/selenium,thanhpete/selenium,anshumanchatterji/selenium,jerome-jacob/selenium,juangj/selenium,s2oBCN/selenium,Tom-Trumper/selenium,bmannix/selenium,Ardesco/selenium,twalpole/selenium,yukaReal/selenium,telefonicaid/selenium,eric-stanley/selenium,orange-tv-blagnac/selenium,rplevka/selenium,denis-vilyuzhanin/selenium-fastview,BlackSmith/selenium,lukeis/selenium,zenefits/selenium,MeetMe/selenium,chrsmithdemos/selenium,misttechnologies/selenium,thanhpete/selenium,valfirst/selenium,customcommander/selenium,tarlabs/selenium,tkurnosova/selenium,chrsmithdemos/selenium,gurayinan/selenium,stupidnetizen/selenium,petruc/selenium,livioc/selenium,chrisblock/selenium,TheBlackTuxCorp/selenium,mach6/selenium,blueyed/selenium,quoideneuf/selenium,asashour/selenium,livioc/selenium,misttechnologies/selenium,lrowe/selenium,chrisblock/selenium,amikey/selenium,aluedeke/chromedriver,joshmgrant/selenium,dibagga/selenium,carlosroh/selenium,onedox/selenium,alb-i986/selenium,lilredindy/selenium,o-schneider/selenium,davehunt/selenium,alexec/selenium,meksh/selenium,AutomatedTester/selenium,lrowe/selenium,joshbruning/selenium,s2oBCN/selenium,houchj/selenium,dandv/selenium,sri85/selenium,petruc/selenium,freynaud/selenium,titusfortner/selenium,joshmgrant/selenium,soundcloud/selenium,bmannix/selenium,kalyanjvn1/selenium,mojwang/selenium,Herst/selenium,SevInf/IEDriver,jsakamoto/selenium,sag-enorman/selenium,juangj/selenium,krmahadevan/selenium,gorlemik/selenium,sag-enorman/selenium,mestihudson/selenium,bmannix/selenium,sebady/selenium,alb-i986/selenium,lummyare/lummyare-lummy,sevaseva/selenium,carlosroh/selenium,Tom-Trumper/selenium,vinay-qa/vinayit-android-server-apk,titusfortner/selenium,c
ompstak/selenium,dcjohnson1989/selenium,lmtierney/selenium,dimacus/selenium,bayandin/selenium,meksh/selenium,gurayinan/selenium,jknguyen/josephknguyen-selenium,temyers/selenium,Tom-Trumper/selenium,bmannix/selenium,5hawnknight/selenium,amikey/selenium,freynaud/selenium,tkurnosova/selenium,skurochkin/selenium,skurochkin/selenium,amar-sharma/selenium,valfirst/selenium,vinay-qa/vinayit-android-server-apk,sag-enorman/selenium,arunsingh/selenium,AutomatedTester/selenium,sankha93/selenium,jknguyen/josephknguyen-selenium,telefonicaid/selenium,dandv/selenium,compstak/selenium,TheBlackTuxCorp/selenium,doungni/selenium,amikey/selenium,knorrium/selenium,customcommander/selenium,sri85/selenium,valfirst/selenium,slongwang/selenium,tbeadle/selenium,minhthuanit/selenium,i17c/selenium,gregerrag/selenium,knorrium/selenium,vinay-qa/vinayit-android-server-apk,SouWilliams/selenium,SeleniumHQ/selenium,denis-vilyuzhanin/selenium-fastview,soundcloud/selenium,knorrium/selenium,SouWilliams/selenium,bartolkaruza/selenium,JosephCastro/selenium,tbeadle/selenium,MCGallaspy/selenium,Herst/selenium,clavery/selenium,MeetMe/selenium,actmd/selenium,vinay-qa/vinayit-android-server-apk,gorlemik/selenium,dbo/selenium,customcommander/selenium,dimacus/selenium,valfirst/selenium,sebady/selenium,oddui/selenium,temyers/selenium,telefonicaid/selenium,Jarob22/selenium,eric-stanley/selenium,gabrielsimas/selenium,MCGallaspy/selenium,lummyare/lummyare-lummy,rovner/selenium,lummyare/lummyare-test,TheBlackTuxCorp/selenium,dimacus/selenium,mach6/selenium,Herst/selenium,gotcha/selenium,gorlemik/selenium,jsarenik/jajomojo-selenium,i17c/selenium,gregerrag/selenium,houchj/selenium,sri85/selenium,AutomatedTester/selenium,bartolkaruza/selenium,krosenvold/selenium-git-release-candidate,xmhubj/selenium,sebady/selenium,manuelpirez/selenium,sevaseva/selenium,houchj/selenium,joshbruning/selenium,eric-stanley/selenium,mach6/selenium,doungni/selenium,krmahadevan/selenium,oddui/selenium,isaksky/selenium,chrisblock/selenium,skuro
chkin/selenium,livioc/selenium,chrsmithdemos/selenium,asolntsev/selenium,bartolkaruza/selenium,mojwang/selenium,dandv/selenium,GorK-ChO/selenium,vinay-qa/vinayit-android-server-apk,BlackSmith/selenium,wambat/selenium,zenefits/selenium,lrowe/selenium,dcjohnson1989/selenium,davehunt/selenium,soundcloud/selenium,davehunt/selenium,dkentw/selenium,dkentw/selenium,misttechnologies/selenium,alexec/selenium,Ardesco/selenium,jerome-jacob/selenium,dcjohnson1989/selenium,jerome-jacob/selenium,gregerrag/selenium,krmahadevan/selenium,anshumanchatterji/selenium,gotcha/selenium,alb-i986/selenium,dbo/selenium,skurochkin/selenium,thanhpete/selenium,wambat/selenium,anshumanchatterji/selenium,xsyntrex/selenium,mojwang/selenium,gregerrag/selenium,temyers/selenium,pulkitsinghal/selenium,wambat/selenium,lummyare/lummyare-test,lilredindy/selenium,chrsmithdemos/selenium,BlackSmith/selenium,blackboarddd/selenium,s2oBCN/selenium,krosenvold/selenium-git-release-candidate,Jarob22/selenium,5hawnknight/selenium,i17c/selenium,MCGallaspy/selenium,s2oBCN/selenium,o-schneider/selenium,tbeadle/selenium,tarlabs/selenium,sebady/selenium,amar-sharma/selenium,eric-stanley/selenium,stupidnetizen/selenium,SeleniumHQ/selenium,RamaraoDonta/ramarao-clone,tbeadle/selenium,anshumanchatterji/selenium,tbeadle/selenium,asolntsev/selenium,yukaReal/selenium,joshuaduffy/selenium,arunsingh/selenium,bayandin/selenium,misttechnologies/selenium,telefonicaid/selenium,markodolancic/selenium,SevInf/IEDriver,markodolancic/selenium,markodolancic/selenium,thanhpete/selenium,eric-stanley/selenium,mestihudson/selenium,lukeis/selenium,dbo/selenium,dimacus/selenium,SevInf/IEDriver,sevaseva/selenium,quoideneuf/selenium,lukeis/selenium,jabbrwcky/selenium,HtmlUnit/selenium,mach6/selenium,actmd/selenium,stupidnetizen/selenium,valfirst/selenium,bartolkaruza/selenium,mojwang/selenium,customcommander/selenium,valfirst/selenium,thanhpete/selenium,rrussell39/selenium,rplevka/selenium,xmhubj/selenium,sri85/selenium,MCGallaspy/selenium,sag-e
norman/selenium,mestihudson/selenium,Appdynamics/selenium,amikey/selenium,quoideneuf/selenium,p0deje/selenium,p0deje/selenium,joshbruning/selenium,jerome-jacob/selenium,Sravyaksr/selenium,minhthuanit/selenium,bartolkaruza/selenium,krosenvold/selenium-git-release-candidate,joshuaduffy/selenium,joshbruning/selenium,rovner/selenium,lummyare/lummyare-test,livioc/selenium,markodolancic/selenium,krosenvold/selenium,twalpole/selenium,oddui/selenium,manuelpirez/selenium,HtmlUnit/selenium,Appdynamics/selenium,stupidnetizen/selenium,houchj/selenium,soundcloud/selenium,sevaseva/selenium,joshuaduffy/selenium,dkentw/selenium,zenefits/selenium,compstak/selenium,asolntsev/selenium,vveliev/selenium,yukaReal/selenium,gurayinan/selenium,customcommander/selenium,SevInf/IEDriver,TikhomirovSergey/selenium,RamaraoDonta/ramarao-clone,mojwang/selenium,isaksky/selenium,MeetMe/selenium,TikhomirovSergey/selenium,aluedeke/chromedriver,alexec/selenium,bayandin/selenium,skurochkin/selenium,tarlabs/selenium,rrussell39/selenium,Sravyaksr/selenium,chrisblock/selenium,gabrielsimas/selenium,HtmlUnit/selenium,MeetMe/selenium,tbeadle/selenium,RamaraoDonta/ramarao-clone,joshmgrant/selenium,o-schneider/selenium,uchida/selenium,lukeis/selenium,dbo/selenium,bartolkaruza/selenium,lummyare/lummyare-test,eric-stanley/selenium,customcommander/selenium,jknguyen/josephknguyen-selenium,rovner/selenium,xmhubj/selenium,TheBlackTuxCorp/selenium,wambat/selenium,joshuaduffy/selenium,uchida/selenium,jsarenik/jajomojo-selenium,gemini-testing/selenium,manuelpirez/selenium,actmd/selenium,BlackSmith/selenium,vveliev/selenium,AutomatedTester/selenium,amar-sharma/selenium,amar-sharma/selenium,Ardesco/selenium,HtmlUnit/selenium,zenefits/selenium,clavery/selenium,asolntsev/selenium,gabrielsimas/selenium,vveliev/selenium,DrMarcII/selenium,valfirst/selenium,joshmgrant/selenium,GorK-ChO/selenium,sri85/selenium,SeleniumHQ/selenium,Sravyaksr/selenium,Jarob22/selenium,chrsmithdemos/selenium,Appdynamics/selenium,o-schneider/selenium,
Dude-X/selenium,davehunt/selenium,clavery/selenium,jabbrwcky/selenium,SevInf/IEDriver,Dude-X/selenium,misttechnologies/selenium,thanhpete/selenium,lilredindy/selenium,lmtierney/selenium,zenefits/selenium,joshuaduffy/selenium,dandv/selenium,GorK-ChO/selenium,Tom-Trumper/selenium,Herst/selenium,asashour/selenium,telefonicaid/selenium,krosenvold/selenium,dimacus/selenium,BlackSmith/selenium,i17c/selenium,joshbruning/selenium,joshuaduffy/selenium,twalpole/selenium,houchj/selenium,chrsmithdemos/selenium,pulkitsinghal/selenium,gurayinan/selenium,asolntsev/selenium,gemini-testing/selenium,manuelpirez/selenium,livioc/selenium,twalpole/selenium,jerome-jacob/selenium,Appdynamics/selenium,kalyanjvn1/selenium,amar-sharma/selenium,HtmlUnit/selenium,arunsingh/selenium,minhthuanit/selenium,5hawnknight/selenium,SouWilliams/selenium,SouWilliams/selenium,yukaReal/selenium,vveliev/selenium,skurochkin/selenium,krmahadevan/selenium,RamaraoDonta/ramarao-clone,DrMarcII/selenium,TikhomirovSergey/selenium,bayandin/selenium,orange-tv-blagnac/selenium,soundcloud/selenium,joshuaduffy/selenium,jsakamoto/selenium,aluedeke/chromedriver,carsonmcdonald/selenium,Dude-X/selenium,arunsingh/selenium,lukeis/selenium,carsonmcdonald/selenium,sankha93/selenium,tkurnosova/selenium,TikhomirovSergey/selenium,minhthuanit/selenium,isaksky/selenium,quoideneuf/selenium,wambat/selenium,amikey/selenium,Jarob22/selenium | firefox/src/py/extensionconnection.py | firefox/src/py/extensionconnection.py | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Communication with the firefox extension."""
import logging
import socket
import time
try:
import json
except ImportError: # Python < 2.6
import simplejson as json
# Some old JSON libraries don't have "dumps", make sure we have a good one
if not hasattr(json, 'dumps'):
import simplejson as json
from selenium.remote.command import Command
from selenium.remote.remote_connection import RemoteConnection
_DEFAULT_TIMEOUT = 20
_DEFAULT_PORT = 7055
LOGGER = logging.getLogger("webdriver.ExtensionConnection")
class ExtensionConnection(RemoteConnection):
    """This class maintains a connection to the firefox extension.
    """

    def __init__(self, timeout=_DEFAULT_TIMEOUT):
        """Open a remote connection to the extension's HTTP endpoint.

        :param timeout: seconds to wait for the extension to respond.
        """
        RemoteConnection.__init__(
            self, "http://localhost:%d/hub" % _DEFAULT_PORT)
        LOGGER.debug("extension connection initiated")
        self.timeout = timeout

    def quit(self, sessionId=None):
        """End the session, then block until the extension stops listening."""
        self.execute(Command.QUIT, {'sessionId': sessionId})
        while self.is_connectable():
            # Use the module LOGGER for consistency with __init__ (the
            # original called logging.info on the root logger).
            LOGGER.info("waiting to quit")
            time.sleep(1)

    def connect(self):
        """Connects to the extension and retrieves the session id."""
        return self.execute(Command.NEW_SESSION, {'desiredCapabilities': {
            'browserName': 'firefox',
            'platform': 'ANY',
            'version': '',
            'javascriptEnabled': True}})

    def connect_and_quit(self):
        """Connects to a running browser and quits immediately."""
        self._request('%s/extensions/firefox/quit' % self._url)

    def is_connectable(self):
        """Tries to connect to the extension but does not retrieve context.

        Returns True if the extension port accepts a TCP connection.
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(1)
        try:
            sock.connect(("localhost", _DEFAULT_PORT))
            return True
        except socket.error:
            return False
        finally:
            # Always release the probe socket; the original leaked it on
            # the error path until garbage collection.
            sock.close()
class ExtensionConnectionError(Exception):
    """An internal error occurred in the extension.

    Might be caused by bad input or bugs in webdriver.
    """
| # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Communication with the firefox extension."""
import logging
import socket
import time
try:
import json
except ImportError: # Python < 2.6
import simplejson as json
# Some old JSON libraries lack "dumps"; if so, fall back to simplejson so we
# have a full-featured JSON implementation.
if not hasattr(json, 'dumps'):
import simplejson as json
from selenium.remote.command import Command
from selenium.remote.remote_connection import RemoteConnection
_DEFAULT_TIMEOUT = 20
_DEFAULT_PORT = 7055
LOGGER = logging.getLogger("webdriver.ExtensionConnection")
class ExtensionConnection(RemoteConnection):
    """Maintains a connection to the firefox extension."""

    def __init__(self, timeout=_DEFAULT_TIMEOUT):
        # The extension serves the wire protocol on a fixed local port.
        RemoteConnection.__init__(
            self, "http://localhost:%d/hub" % _DEFAULT_PORT)
        LOGGER.debug("extension connection initiated")
        self.timeout = timeout

    def quit(self, sessionId=None):
        # Ask the browser to quit, then poll until the port stops answering.
        self.execute(Command.QUIT, {'sessionId': sessionId})
        while self.is_connectable():
            logging.info("waiting to quit")
            time.sleep(1)

    def connect(self):
        """Connects to the extension and retrieves the session id."""
        capabilities = {'browserName': 'firefox',
                        'platform': 'ANY',
                        'version': '',
                        'javascriptEnabled': True}
        return self.execute(Command.NEW_SESSION,
                            {'desiredCapabilities': capabilities})

    def connect_and_quit(self):
        """Connects to a running browser and quits immediately."""
        self._request('%s/extensions/firefox/quit' % self._url)

    def is_connectable(self):
        """Tries to connect to the extension without retrieving context."""
        try:
            probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            probe.settimeout(1)
            probe.connect(("localhost", _DEFAULT_PORT))
            probe.close()
            return True
        except socket.error:
            return False
class ExtensionConnectionError(Exception):
    """An internal error occurred in the extension.

    Might be caused by bad input or bugs in webdriver.
    """
| apache-2.0 | Python |
1f3c1af308be68393ac8f7caab17d04cdd632d2b | Add the get_arguments function in include | softwaresaved/international-survey | survey_creation/include/get_arguments.py | survey_creation/include/get_arguments.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import getopt
"""
Short script to parse
the argments from the command line
"""
def get_arguments(argv):
    """Parse the country/year command-line options.

    :param argv: argument list excluding the program name,
        e.g. ``['-c', 'uk', '-y', '2017']``.
    :return: tuple ``(year, country)`` — note the order is (year, country).

    Prints a usage message and exits with status 2 when the options are
    malformed or either value is missing; ``-h`` prints usage and exits 0.
    """
    country = None
    year = None
    try:
        opts, args = getopt.getopt(argv, 'hc:y:', ['country=', 'year='])
    except getopt.GetoptError:
        print('run.py -c <country> -y <year>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('run.py -c <country> -y <year>')
            sys.exit()
        elif opt in ('-c', '--country'):
            country = arg
        elif opt in ('-y', '--year'):
            year = arg
    if country and year:
        return year, country
    print('Need a country and a year. Please use the following command:\n' +
          '\trun.py -c <country> -y <year>')
    sys.exit(2)
| bsd-3-clause | Python |
|
fc103544a7fcd8506e4d1612f70ff4b5d3eb6dfe | add command to set prices and commissions of teacher levels | malaonline/iOS,malaonline/Server,malaonline/Server,malaonline/Android,malaonline/Android,malaonline/iOS,malaonline/Android,malaonline/Server,malaonline/iOS,malaonline/Server | server/app/management/commands/set_level_price.py | server/app/management/commands/set_level_price.py | from django.core.management.base import BaseCommand
from app.models import Region, Grade, Subject, Ability, Level, Price
class Command(BaseCommand):
    """Set the price and commission percentage of every teacher level for
    one region, optionally marking the region as opened.

    Prices/percentages are given as comma-separated lists, one value per
    teacher level, in level-id order.
    """

    help = "设置教师级别的价格和佣金比例\n" \
           "例如: \n" \
           "    python manage.py set_level_price 郑州市 --percentages '20,20,20,20,20,20,20,20,20,20' --prices '2000,3000,4000,5000,6000,7000,8000,9000,10000,11000'"

    def create_parser(self, prog_name, subcommand):
        # Use RawTextHelpFormatter so the multi-line ``help`` text above
        # keeps its line breaks in ``--help`` output.
        from argparse import RawTextHelpFormatter
        parser = super(Command, self).create_parser(prog_name, subcommand)
        parser.formatter_class = RawTextHelpFormatter
        return parser

    def add_arguments(self, parser):
        parser.add_argument(
            'region_name',
            help='地区名称',
        )
        parser.add_argument(
            '--open',
            type=int,
            default=1,
            # Fixed help text: the default is 1 (open), not 0 as the
            # original help claimed; also 开发 -> 开放.
            help='是否设置开放此地区. 0 or 1[默认]',
        )
        parser.add_argument(
            '--prices',
            required=True,
            help='价格数字串, 英文逗号分隔\n单位是分',
        )
        parser.add_argument(
            '--percentages',
            required=True,
            # Fixed typo: 引文 -> 英文 (values are separated by ASCII commas).
            help='佣金比例数字串, 英文逗号分隔\n每个数在0-100之间',
        )

    def handle(self, *args, **options):
        """Validate the option lists and write one Price row per
        (level, ability) pair for the region.

        Returns 0 on success, a negative code on validation failure.
        """
        region_name = options.get('region_name')
        is_open = bool(options.get('open'))
        prices = options.get('prices')
        percentages = options.get('percentages')
        price_cfg = [int(p) for p in prices.split(',')]
        commission_percentages = [int(p) for p in percentages.split(',')]
        # Both lists must line up one-to-one with the existing levels.
        if len(price_cfg) != len(commission_percentages):
            print("价格和佣金比例个数不同")
            return -1
        levels = list(Level.objects.all())
        if len(levels) != len(price_cfg):
            print("价格和佣金比例个数和现有级别数不同")
            return -2
        region = Region.objects.get(name=region_name)
        if is_open != region.opened:
            region.opened = is_open
            region.save()
        abilities = Ability.objects.all()
        for level in levels:
            # NOTE(review): assumes Level ids are contiguous starting at 1,
            # so ``level.id - 1`` indexes the option lists -- confirm.
            i = level.id - 1
            for ability in abilities:
                c = price_cfg[i]
                price, _ = Price.objects.get_or_create(
                    region=region, level=level, ability=ability,
                    defaults={'price': c})
                # Overwrite even pre-existing rows so reruns update prices.
                price.price = c
                price.commission_percentage = commission_percentages[i]
                price.save()
        print('设置完毕')
        return 0
| mit | Python |
|
d7945d85dcce968d6430e079662b1ef9fc464c97 | update ukvi org branding spelling | alphagov/notifications-api,alphagov/notifications-api | migrations/versions/0047_ukvi_spelling.py | migrations/versions/0047_ukvi_spelling.py | """empty message
Revision ID: 0047_ukvi_spelling
Revises: 0046_organisations_and_branding
Create Date: 2016-08-22 16:06:32.981723
"""
# revision identifiers, used by Alembic.
revision = '0047_ukvi_spelling'
down_revision = '0046_organisations_and_branding'

# Alembic operations helper used by upgrade()/downgrade() below.
from alembic import op
def upgrade():
    """Correct the UKVI organisation name to the ampersand spelling."""
    op.execute("""
        UPDATE organisation
        SET name = 'UK Visas & Immigration'
        WHERE id = '9d25d02d-2915-4e98-874b-974e123e8536'
    """)
def downgrade():
    """Revert the organisation name to the previous 'and' spelling."""
    op.execute("""
        UPDATE organisation
        SET name = 'UK Visas and Immigration'
        WHERE id = '9d25d02d-2915-4e98-874b-974e123e8536'
    """)
| mit | Python |
|
a2463270e6850b0e7df210c03946bfba449f29d7 | Add a test for simultaneous device access | matthewelse/pyOCD,0xc0170/pyOCD,matthewelse/pyOCD,0xc0170/pyOCD,matthewelse/pyOCD,wjzhang/pyOCD,pyocd/pyOCD,mbedmicro/pyOCD,wjzhang/pyOCD,flit/pyOCD,wjzhang/pyOCD,mesheven/pyOCD,0xc0170/pyOCD,flit/pyOCD,mbedmicro/pyOCD,pyocd/pyOCD,mesheven/pyOCD,mesheven/pyOCD,mbedmicro/pyOCD | test/parallel_test.py | test/parallel_test.py | """
mbed CMSIS-DAP debugger
Copyright (c) 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from pyOCD.board import MbedBoard
from pyOCD.pyDAPAccess import DAPAccess
import threading
import multiprocessing
def run_in_parallel(function, args_list):
    """Run ``function`` once per argument tuple, each call on its own thread.

    Blocks until every thread has finished. Raises Exception if any call
    failed to run to completion.
    """
    success = [False] * len(args_list)

    def _worker(slot, args):
        # Only mark success after the call returns without raising.
        function(*args)
        success[slot] = True

    workers = [threading.Thread(target=_worker, args=(slot, args))
               for slot, args in enumerate(args_list)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    if not all(success):
        raise Exception("Running in thread failed")
def run_in_processes(function, args_list):
    """Run ``function`` once per argument tuple, each call in its own process.

    Blocks until all child processes exit. Raises Exception if any child
    exited with a non-zero status.
    """
    workers = [multiprocessing.Process(target=function, args=args)
               for args in args_list]
    for proc in workers:
        proc.start()
    failed = False
    for proc in workers:
        proc.join()
        if proc.exitcode != 0:
            failed = True
    if failed:
        raise Exception("Running in process failed")
def list_boards(id_list):
    """Enumerate connected DAPLink boards 20 times over.

    Asserts on every pass that the sorted set of connected unique IDs
    equals ``id_list`` (which must already be sorted).
    """
    for _ in range(20):
        found_id_list = sorted(device.get_unique_id()
                               for device in DAPAccess.get_connected_devices())
        assert id_list == found_id_list, "Expected %s, got %s" % \
            (id_list, found_id_list)
def search_and_lock(board_id):
    """Repeatedly look up, open and close the board with the given ID."""
    for _ in range(20):
        # Raw device open/close ...
        dap = DAPAccess.get_device(board_id)
        dap.open()
        dap.close()
        # ... followed by the higher-level board locking path.
        with MbedBoard.chooseBoard(board_id=board_id):
            pass
def open_already_opened(board_id):
    """Verify that opening an already-open device raises DeviceError.

    The caller must already hold the device open elsewhere. Raises
    Exception if the second open unexpectedly succeeds.
    """
    device = DAPAccess.get_device(board_id)
    try:
        device.open()
    except DAPAccess.DeviceError:
        return
    # Fixed: the original used `assert False` here, which is stripped under
    # `python -O` and leaked the open handle on unexpected success.
    device.close()
    raise Exception("Opening already-open device %s did not fail" % board_id)
def parallel_test():
    """Test that devices can be found and opened in parallel.

    Requires at least two connected boards. Exits with -1 otherwise.
    """
    device_list = DAPAccess.get_connected_devices()
    id_list = [device.get_unique_id() for device in device_list]
    id_list.sort()
    if len(id_list) < 2:
        print("Need at least 2 boards to run the parallel test")
        exit(-1)

    # Goal of this file is to test that:
    # -The process of listing available boards does not interfere
    #  with other processes enumerating, opening, or using boards
    # -Opening and using a board does not interfere with another process
    #  processes which is enumerating, opening, or using boards as
    #  long as that is not the current board

    # Concurrent enumeration, first from threads, then from processes.
    print("Listing board from multiple threads at the same time")
    args_list = [(id_list,) for _ in range(5)]
    run_in_parallel(list_boards, args_list)

    print("Listing board from multiple processes at the same time")
    run_in_processes(list_boards, args_list)

    # Hold the first board open, then confirm every concurrent second
    # open attempt fails (threads first, then processes).
    print("Opening same board from multiple threads at the same time")
    device = DAPAccess.get_device(id_list[0])
    device.open()
    open_already_opened(id_list[0])
    args_list = [(id_list[0],) for _ in range(5)]
    run_in_parallel(open_already_opened, args_list)
    device.close()

    print("Opening same board from multiple processes at the same time")
    device = DAPAccess.get_device(id_list[0])
    device.open()
    open_already_opened(id_list[0])
    args_list = [(id_list[0],) for _ in range(5)]
    run_in_processes(open_already_opened, args_list)
    device.close()

    # Distinct boards must be usable concurrently without interference.
    print("Opening different boards from different threads")
    args_list = [(board_id,) for board_id in id_list]
    run_in_parallel(search_and_lock, args_list)

    print("Opening different boards from different processes")
    run_in_processes(search_and_lock, args_list)

    print("Test passed")
if __name__ == "__main__":
    # Run the multi-board concurrency test when invoked as a script.
    parallel_test()
| apache-2.0 | Python |
|
be3acc4a869c9e45e4d1fdd563571da0d12ae85f | Add modify code Hello World code | KuChanTung/Python | HelloWorld.py | HelloWorld.py | print("HelloWorld")
text="HelloWorld_Text"
print(text) | epl-1.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.