commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang |
---|---|---|---|---|---|---|---|---|
0db094aba5095b63a8f9bfb066afb0048617f87e | add update_GeneAtlas_images.py | SuLab/scheduled-bots,SuLab/scheduled-bots,SuLab/scheduled-bots | scheduled_bots/scripts/update_GeneAtlas_images.py | scheduled_bots/scripts/update_GeneAtlas_images.py | """
One-off script to change GeneAtlas images to point to full-sized versions
https://github.com/SuLab/GeneWikiCentral/issues/1
As described at https://www.wikidata.org/wiki/Property_talk:P692#How_about_using_full_size_image_instead_of_small_thumbnail.3F
update all uses of the Gene Atlas Image property to use the full-sized version of the Gene Atlas image
(e.g., https://www.wikidata.org/wiki/File:PBB_GE_ACTN3_206891_at_fs.png) instead of the thumbnail
(e.g., https://www.wikidata.org/wiki/File:PBB_GE_ACTN3_206891_at_tn.png)
SELECT ?item ?image
WHERE
{
?item wdt:P351 ?entrez .
?item wdt:P703 wd:Q15978631 .
?item wdt:P692 ?image
} limit 1000
"""
from collections import defaultdict
from scheduled_bots.local import WDPASS, WDUSER
from tqdm import tqdm
from wikidataintegrator import wdi_core, wdi_login, wdi_helpers
import urllib.request
login = wdi_login.WDLogin(WDUSER, WDPASS)
image_qid = wdi_helpers.id_mapper("P692", [("P703", "Q15978631")])
qid_images = defaultdict(list)
for image, qid in image_qid.items():
qid_images[qid].append(image)
qid_images = dict(qid_images)
for qid, images in tqdm(qid_images.items()):
images = [urllib.request.unquote(image.replace("http://commons.wikimedia.org/wiki/Special:FilePath/", "")) for image in images]
images_proc = [image for image in images if image.startswith("PBB GE") and image.endswith("at tn.png")]
if not images_proc:
continue
images_keep = [image for image in images if image.startswith("PBB GE") and image.endswith("at fs.png")]
item = wdi_core.WDItemEngine(wd_item_id=qid)
s = []
for image in images_proc:
s.append(wdi_core.WDCommonsMedia(image.replace(" at tn.png", " at fs.png"), "P692"))
for image in images_keep:
s.append(wdi_core.WDCommonsMedia(image, "P692"))
item.update(data=s)
wdi_helpers.try_write(item, '', '', login, edit_summary="replace thumbnail gene atlas image with fs")
| mit | Python |
|
427a95f0c56facc138448cde7e7b9da1bcdc8ea4 | Add super basic Hypothesis example | dkua/pyconca16-talk | add_example.py | add_example.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Unit Tests
def test_add_zero():
assert 0 + 1 == 1 + 0
def test_add_single_digits():
assert 1 + 2 == 2 + 1
def test_add_double_digits():
assert 10 + 12 == 12 + 10
# Property-based Test
from hypothesis import given
import hypothesis.strategies as st
@given(st.integers(), st.integers())
def test_add(x, y):
assert x + y == y + x
| mit | Python |
|
3a4cb29e91008225c057feb3811e93b59f99d941 | use flask-mail | voltaire/minecraft-site,voltaire/minecraft-site,voltaire/minecraft-site | application.py | application.py | from flask import Flask
from flask.ext.mail import Mail, Message
mail = Mail()
app = Flask(__name__)
app.config.update(
MAIL_SERVER='smtp.gmail.com',
MAIL_PORT='465',
MAIL_USE_SSL=True,
MAIL_USERNAME='[email protected]',
MAIL_PASSWORD='H3rpD3rpL0l')
mail.init_app(app)
@app.route("/")
def index():
msg = Message("Hello", sender=("flask", "[email protected]"), recipients=["[email protected]"])
msg.body = "testing"
msg.html = "<b>testing</b>"
mail.send(msg)
return msg.html
if __name__ == '__main__':
app.run()
| bsd-3-clause | Python |
|
21a504dce25a1b22bda27cd74a443af98b24ad14 | Add pseudo filter combining pypandoc and panflute | sergiocorreia/panflute-filters | filters/extract_urls.py | filters/extract_urls.py | import io
import pypandoc
import panflute
def prepare(doc):
doc.images = []
doc.links = []
def action(elem, doc):
if isinstance(elem, panflute.Image):
doc.images.append(elem)
elif isinstance(elem, panflute.Link):
doc.links.append(elem)
if __name__ == '__main__':
data = pypandoc.convert_file('example.md', 'json')
f = io.StringIO(data)
doc = panflute.load(f)
doc = panflute.run_filter(action, prepare=prepare, doc=doc)
print("\nImages:")
for image in doc.images:
print(image.url)
print("\nLinks:")
for link in doc.links:
print(link.url)
| bsd-3-clause | Python |
|
b811bb9e9469a23921f841d4bfe3b52928a83e14 | Create b.py | xsthunder/a,xsthunder/a,xsthunder/a,xsthunder/acm,xsthunder/acm,xsthunder/acm,xsthunder/a,xsthunder/acm,xsthunder/a | at/abc126/b.py | at/abc126/b.py | read = input
s = read()
a, b = map(int , [s[:2], s[2:]])
YYMM = False
MMYY = False
if 1 <= b and b <= 12:
YYMM = True
if 1 <= a and a <= 12:
MMYY = True
if YYMM and MMYY :
print('AMBIGUOUS')
elif YYMM and not MMYY:
print('YYMM')
elif not YYMM and MMYY:
print('MMYY')
else :
print('NA')
| mit | Python |
|
32c025a217f7771be94976fda6ede2d80855b4b6 | Move things to new units module | olemke/pyatmlab,gerritholl/pyatmlab | pyatmlab/units.py | pyatmlab/units.py | """Various units-related things
"""
from pint import (UnitRegistry, Context)
ureg = UnitRegistry()
ureg.define("micro- = 1e-6 = µ-")
# aid conversion between different radiance units
sp2 = Context("radiance")
sp2.add_transformation(
"[length] * [mass] / [time] ** 3",
"[mass] / [time] ** 2",
lambda ureg, x: x / ureg.speed_of_light)
sp2.add_transformation(
"[mass] / [time] ** 2",
"[length] * [mass] / [time] ** 3",
lambda ureg, x: x * ureg.speed_of_light)
ureg.add_context(sp2)
radiance_units = {
"si": ureg.W/(ureg.m**2*ureg.sr*ureg.Hz),
"ir": ureg.mW/(ureg.m**2*ureg.sr*(1/ureg.cm))}
| bsd-3-clause | Python |
|
50494947bdf7fc8fce50cb5f589c84fd48db4b05 | test perm using py.test #1150 | pkimber/login,pkimber/login,pkimber/login | login/tests/fixture.py | login/tests/fixture.py | # -*- encoding: utf-8 -*-
import pytest
from login.tests.factories import (
TEST_PASSWORD,
UserFactory,
)
class PermTest:
def __init__(self, client):
setup_users()
self.client = client
def anon(self, url):
self.client.logout()
response = self.client.get(url)
assert 200 == response.status_code
def staff(self, url):
# check anon user cannot login
self.client.logout()
response = self.client.get(url)
assert 302 == response.status_code
assert 'accounts/login' in response['Location']
# check web user cannot login
assert self.client.login(username='web', password=TEST_PASSWORD)
assert 302 == response.status_code
assert 'accounts/login' in response['Location']
# check staff user can login
assert self.client.login(username='staff', password=TEST_PASSWORD)
response = self.client.get(url)
assert 200 == response.status_code
@pytest.fixture
def perm_check(client):
"""Check permissions on a URL.
We use a clever trick to pass parameters to the fixture. For details:
py.test: Pass a parameter to a fixture function
http://stackoverflow.com/questions/18011902/py-test-pass-a-parameter-to-a-fixture-function
"""
return PermTest(client)
def setup_users():
"""Using factories - set-up users for permissions test cases."""
UserFactory(
username='admin',
email='[email protected]',
is_staff=True,
is_superuser=True
)
UserFactory(username='staff', email='[email protected]', is_staff=True)
UserFactory(
username='web', email='[email protected]',
first_name='William', last_name='Webber'
)
| apache-2.0 | Python |
|
1a98ccfbff406509d9290e76bbdf8edbb862fc1d | Solve orderred dict | rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank | python/py-collections-ordereddict.py | python/py-collections-ordereddict.py | from collections import OrderedDict
d = OrderedDict()
number_of_items = int(input().strip())
for i in range(number_of_items):
item, delimeter, price = input().strip().rpartition(" ")
price = int(price)
if (item in d):
previous_total_purchased = d.get(item)
next_total_purchased = previous_total_purchased + price
d[item] = next_total_purchased
else:
d[item] = price
for item, price in d.items():
print (f'{item} {price}')
| mit | Python |
|
fa4155114304d1ebc9e3bb04f546ce7d4708c381 | Add simple pipeline | flypy/pykit,ContinuumIO/pykit,ContinuumIO/pykit,Inaimathi/pykit,flypy/pykit,Inaimathi/pykit | pykit/pipeline.py | pykit/pipeline.py | # -*- coding: utf-8 -*-
"""
Pipeline that determines phase ordering and execution.
"""
from __future__ import print_function, division, absolute_import
import types
cpy = {
'lower_convert': lower_convert,
}
lower = {
}
# ______________________________________________________________________
# Execute pipeline
def apply_transform(transform, func, env):
if isinstance(transform, types.ModuleType):
return transform.run(func, env)
else:
return transform(func, env)
def run(transforms, order, func, env):
for transform in order:
if transform in transforms:
func, env = apply_transform(transforms[transform], func, env) | bsd-3-clause | Python |
|
141005c72b1686d73cdc581e9ee8313529e11e4c | Add health check script. | serac/powermon,serac/powermon | tools/health-check.py | tools/health-check.py | #!/usr/bin/python
# Health check script that examines the /status/ URI and sends mail on any
# condition other than 200/OK.
# Configuration is via environment variables:
# * POWERMON_STATUS - absolute URL to /status/ URI
# * POWERMON_SMTPHOST - SMTP host name used to send mail
# * POWERMON_MAILTO - email address where problem reports are sent
from email.mime.text import MIMEText
from httplib import HTTPConnection, HTTPSConnection
from os import environ
from os.path import basename
from smtplib import SMTP
from urlparse import urlparse
import sys
def getenvars(*vars):
"""Returns the values of one or more environment variables."""
values = []
for var in vars:
if not var in environ:
die('%s environment variable not defined' % var)
values.append(environ[var])
return tuple(values)
def die_err(e, message):
"""Displays exception details and a message then exits program."""
print message
print e
sys.exit(1)
def die(message):
"""Displays a message then exits program."""
print message
sys.exit(1)
def http_get(url):
"""Returns the tuple (status, response body) for a GET request to the given URL."""
conn = None
headers = {
'Accept': 'text/plain, text/html, text/xml',
'Content-Length': 0,
'User-Agent': 'Python/%s.%s.%s' % sys.version_info[0:3]
}
result = urlparse(url)
try :
if result.scheme == 'https':
conn = HTTPSConnection(result.netloc)
else:
conn = HTTPConnection(result.netloc)
conn.request('GET', url, "", headers)
response = conn.getresponse()
return (response.status, str(response.read()))
except Exception, e:
die_err(e, 'HTTP GET failed:')
finally:
if conn: conn.close()
def send_mail(mfrom, mto, body, smtp_host):
"""Sends a health check failure notice to the designated recipient."""
msg = MIMEText(body)
msg['Subject'] = 'Powermon Health Check Failure'
msg['From'] = mfrom
msg['To'] = mto
s = SMTP(smtp_host)
try:
s.sendmail(mfrom, [mto], msg.as_string())
finally:
s.quit
(status_url, mailto, smtp_host) = getenvars(
'POWERMON_STATUS', 'POWERMON_MAILTO', 'POWERMON_SMTPHOST')
hostname = 'localhost'
if 'HOSTNAME' in environ:
hostname = environ['HOSTNAME']
mailfrom = '%s@%s' % (environ['USER'], hostname)
print 'Checking', status_url
(status, body) = http_get(status_url)
print body
if status > 200:
print 'Sending failure notice to', mailto
send_mail(mailfrom, mailto, body, smtp_host)
| apache-2.0 | Python |
|
210eba35fc4473e626fc58a8e4ea3cdbb6abdc28 | add undocumented function to display new messages. | yskmt/rtv,shaggytwodope/rtv,TheoPib/rtv,michael-lazar/rtv,shaggytwodope/rtv,yskmt/rtv,michael-lazar/rtv,TheoPib/rtv,bigplus/rtv,5225225/rtv,5225225/rtv,michael-lazar/rtv | rtv/docs.py | rtv/docs.py | from .__version__ import __version__
__all__ = ['AGENT', 'SUMMARY', 'AUTH', 'CONTROLS', 'HELP', 'COMMENT_FILE',
'SUBMISSION_FILE', 'COMMENT_EDIT_FILE']
AGENT = """\
desktop:https://github.com/michael-lazar/rtv:{} (by /u/civilization_phaze_3)\
""".format(__version__)
SUMMARY = """
Reddit Terminal Viewer is a lightweight browser for www.reddit.com built into a
terminal window.
"""
AUTH = """\
Authenticating is required to vote and leave comments. If only a username is
given, the program will display a secure prompt to enter a password.
"""
CONTROLS = """
Controls
--------
RTV currently supports browsing both subreddits and individual submissions.
In each mode the controls are slightly different. In subreddit mode you can
browse through the top submissions on either the front page or a specific
subreddit. In submission mode you can view the self text for a submission and
browse comments.
"""
HELP = """
Basic Commands
`j/k` or `UP/DOWN` : Move the cursor up/down
`m/n` or `PgUp/PgDn`: Jump to the previous/next page
`o` or `ENTER` : Open the selected item as a webpage
`r` or `F5` : Refresh page content
`u` : Log in or switch accounts
`i` : Display new messages prompt
`?` : Show the help screen
`q` : Quit
Authenticated Commands
`a/z` : Upvote/downvote
`c` : Compose a new post or comment
`e` : Edit an existing post or comment
`d` : Delete an existing post or comment
`s` : Open/close subscribed subreddits list
Subreddit Mode
`l` or `RIGHT` : Enter the selected submission
`/` : Open a prompt to switch subreddits
`f` : Open a prompt to search the current subreddit
Submission Mode
`h` or `LEFT` : Return to subreddit mode
`SPACE` : Fold the selected comment, or load additional comments
"""
COMMENT_FILE = u"""
# Please enter a comment. Lines starting with '#' will be ignored,
# and an empty message aborts the comment.
#
# Replying to {author}'s {type}
{content}
"""
COMMENT_EDIT_FILE = u"""{content}
# Please enter a comment. Lines starting with '#' will be ignored,
# and an empty message aborts the comment.
#
# Editing your comment
"""
SUBMISSION_FILE = u"""{content}
# Please enter your submission. Lines starting with '#' will be ignored,
# and an empty field aborts the submission.
#
# The first line will be interpreted as the title
# The following lines will be interpreted as the content
#
# Posting to {name}
"""
| from .__version__ import __version__
__all__ = ['AGENT', 'SUMMARY', 'AUTH', 'CONTROLS', 'HELP', 'COMMENT_FILE',
'SUBMISSION_FILE', 'COMMENT_EDIT_FILE']
AGENT = """\
desktop:https://github.com/michael-lazar/rtv:{} (by /u/civilization_phaze_3)\
""".format(__version__)
SUMMARY = """
Reddit Terminal Viewer is a lightweight browser for www.reddit.com built into a
terminal window.
"""
AUTH = """\
Authenticating is required to vote and leave comments. If only a username is
given, the program will display a secure prompt to enter a password.
"""
CONTROLS = """
Controls
--------
RTV currently supports browsing both subreddits and individual submissions.
In each mode the controls are slightly different. In subreddit mode you can
browse through the top submissions on either the front page or a specific
subreddit. In submission mode you can view the self text for a submission and
browse comments.
"""
HELP = """
Basic Commands
`j/k` or `UP/DOWN` : Move the cursor up/down
`m/n` or `PgUp/PgDn`: Jump to the previous/next page
`o` or `ENTER` : Open the selected item as a webpage
`r` or `F5` : Refresh page content
`u` : Log in or switch accounts
`?` : Show the help screen
`q` : Quit
Authenticated Commands
`a/z` : Upvote/downvote
`c` : Compose a new post or comment
`e` : Edit an existing post or comment
`d` : Delete an existing post or comment
`s` : Open/close subscribed subreddits list
Subreddit Mode
`l` or `RIGHT` : Enter the selected submission
`/` : Open a prompt to switch subreddits
`f` : Open a prompt to search the current subreddit
Submission Mode
`h` or `LEFT` : Return to subreddit mode
`SPACE` : Fold the selected comment, or load additional comments
"""
COMMENT_FILE = u"""
# Please enter a comment. Lines starting with '#' will be ignored,
# and an empty message aborts the comment.
#
# Replying to {author}'s {type}
{content}
"""
COMMENT_EDIT_FILE = u"""{content}
# Please enter a comment. Lines starting with '#' will be ignored,
# and an empty message aborts the comment.
#
# Editing your comment
"""
SUBMISSION_FILE = u"""{content}
# Please enter your submission. Lines starting with '#' will be ignored,
# and an empty field aborts the submission.
#
# The first line will be interpreted as the title
# The following lines will be interpreted as the content
#
# Posting to {name}
"""
| mit | Python |
04287120372a6fdb906ed9f27ead4c5f91d5690e | Add a modified version of simple bot | fisadev/tota | tota/heroes/lenovo.py | tota/heroes/lenovo.py | from tota.utils import closest, distance, sort_by_distance, possible_moves
from tota import settings
__author__ = "angvp"
def create():
def lenovo_hero_logic(self, things, t):
# some useful data about the enemies I can see in the map
enemy_team = settings.ENEMY_TEAMS[self.team]
enemies = [thing for thing in things.values()
if thing.team == enemy_team]
closest_enemy = closest(self, enemies)
closest_enemy_distance = distance(self, closest_enemy)
real_life = (self.life / self.max_life) * 100
# now lets decide what to do
if int(real_life) < 85 and self.can('heal', t):
# if I'm hurt and can heal, heal
if closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('fireball', t):
return 'fireball', closest_enemy.position
elif closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('stun', t):
return 'stun', closest_enemy.position
elif closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('attack', t):
return 'attack', closest_enemy.position
else:
return 'heal', self.position
else:
# else, try to attack
if closest_enemy:
# there is an enemy
if closest_enemy_distance <= settings.STUN_DISTANCE and self.can('stun', t):
# try to stun him
return 'stun', closest_enemy.position
if closest_enemy_distance <= settings.FIREBALL_DISTANCE and self.can('fireball', t) and closest_enemy_distance > settings.FIREBALL_RADIUS:
# else try to fireball him, but only if I'm not in range
return 'fireball', closest_enemy.position
elif closest_enemy_distance <= settings.HERO_ATTACK_DISTANCE:
# else try to attack him
return 'attack', closest_enemy.position
else:
# of finally just move to him (if I have life > 50)
moves = sort_by_distance(closest_enemy,
possible_moves(self, things))
if len(moves) > 0:
back_moves = moves[len(moves)-1]
else:
back_moves = self.position
if moves and int(real_life) > 50:
return 'move', moves[0]
else:
return 'move', back_moves
# can't do the things I want. Do nothing.
return None
return lenovo_hero_logic
| mit | Python |
|
2f7d5f30fd6b6cb430c55b21d7cab75800bcfe97 | Add a little hacky highlighter | chourobin/weave-demos,errordeveloper/weave-demos,errordeveloper/weave-demos,chourobin/weave-demos,errordeveloper/weave-demos,chourobin/weave-demos,chourobin/weave-demos,errordeveloper/weave-demos | screencasts/hello-weave/highlight.py | screencasts/hello-weave/highlight.py | import json
prompt = 'ilya@weave-01:~$ '
highlight = [
('weave-01', 'red'),
('weave-02', 'red'),
('docker', 'red'),
('run', 'red'),
('--name', 'red'),
('hello', 'red'),
('netcat', 'red'),
('-lk', 'red'),
('1234', 'red'),
('sudo curl -s -L git.io/weave -o /usr/local/bin/weave', 'red'),
('b4e40e4b4665a1ffa23f90eb3ab57c83ef243e64151bedc1501235df6e532e09\r\n', 'red'),
('Hello, Weave!\r\n', 'red'),
]
highlight_tokens = [t[0] for t in highlight]
tokens = []
colours = {
'red': ('\033[91m', '\033[00m'),
}
for f in ['rec-weave-01.json', 'rec-weave-02.json']:
with open(f) as json_data:
d = json.load(json_data)
json_data.close()
commands = d['stdout']
word = ''
word_start = 0
for i,x in enumerate(commands):
curr = x[1]
if curr == prompt: continue
elif curr != '\r\n' and curr != ' ' and len(curr) == 1:
if word_start == 0:
word_start = i
word = curr
else:
word += curr
elif (curr == '\r\n' or curr == ' ') and word_start != 0:
tokens.append((word, word_start, True))
word_start = 0
elif curr != '\r\n' and len(curr) > 1:
tokens.append((curr, i, False))
offset = 0
for x in tokens:
if x[0] in highlight_tokens:
commands.insert(x[1] + offset, [0, colours['red'][0]])
offset += 1
l = len(x[0]) if x[2] else 1
commands.insert(x[1] + l + offset, [0, colours['red'][1]])
offset += 1
d['commands'] = commands
print(json.dumps(d))
| apache-2.0 | Python |
|
6b4733c213046c7a16bf255cfbc92408e2f01423 | Add test for registry model hash | RickyCook/DockCI,sprucedev/DockCI-Agent,RickyCook/DockCI,sprucedev/DockCI,sprucedev/DockCI,sprucedev/DockCI,sprucedev/DockCI,sprucedev/DockCI-Agent,RickyCook/DockCI,RickyCook/DockCI | tests/models/test_authenticated_registry_model.py | tests/models/test_authenticated_registry_model.py | import pytest
from dockci.models.auth import AuthenticatedRegistry
BASE_AUTHENTICATED_REGISTRY = dict(
id=1,
display_name='Display name',
base_name='Base name',
username='Username',
password='Password',
email='Email',
insecure=False,
)
class TestHash(object):
""" Test ``AuthenticatedRegistry.__hash__`` """
def test_hash_eq(self):
""" Test when hash should be equal """
left = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
right = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
assert hash(left) == hash(right)
@pytest.mark.parametrize('attr_name,attr_value', [
('id', 7),
('display_name', 'different'),
('base_name', 'different'),
('username', 'different'),
('password', 'different'),
('email', 'different'),
('insecure', True),
])
def test_hash_ne(self, attr_name, attr_value):
""" Test when hash should be not equal """
left = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
right = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
setattr(right, attr_name, attr_value)
assert hash(left) != hash(right)
| isc | Python |
|
984b8ecd043986877349c6de789842155b8a9fa1 | Add own version of compare script | jorisvanzundert/sfsf | scr_compare_chunks_MK.py | scr_compare_chunks_MK.py | import csv
import string
from nltk import word_tokenize
from sfsf import training_data_factory
#from sfsf import sfsf_config
from collections import defaultdict, Counter
def read_chunk_scores( score_file ):
top_chunk_scores = defaultdict(list)
bottom_chunk_scores = defaultdict(list)
with open(score_file, 'rt') as fh:
csv_reader = csv.reader(fh, delimiter=",")
headers = next(csv_reader)
for row in csv_reader:
if row[0][:8] == "training":
continue
isbn = row[2]
score = float(row[6])
if row[0] == "testing_bottom":
bottom_chunk_scores[isbn].append(score)
else:
top_chunk_scores[isbn].append(score)
return top_chunk_scores, bottom_chunk_scores
def compute_doc_freq( top_text_chunks ):
doc_freq = defaultdict( list )
for isbn, text_chunks in top_text_chunks:
terms = set([term for text_chunk in text_chunks for term in tokenize_chunk( text_chunk )])
for term in terms:
doc_freq[term] += [isbn]
return doc_freq
def get_isbn_title(isbn, isbn_data):
for isbn_row in isbn_data:
if isbn_row[1] == isbn:
return isbn_row[2]
def do_sample( top_chunk_scores, bottom_chunk_scores, wpg_data_file ):
training_factory = training_data_factory.TrainingDataFactory()
isbn_data = training_factory.get_isbn_data( wpg_data_file ) # returns data sorted by sales
top_isbn_data = [isbn_row for isbn_row in isbn_data if isbn_row[1] in top_chunk_scores]
bottom_isbn_data = [isbn_row for isbn_row in isbn_data if isbn_row[1] in bottom_chunk_scores]
return top_isbn_data, bottom_isbn_data
def get_text_chunks( sample_data ):
training_factory = training_data_factory.TrainingDataFactory()
return training_factory.sample_txts( sample_data, sample_size=5000 )
def filter_chunks( chunk_tuples, chunk_scores, threshold, bigger_than ):
if bigger_than:
print("filtering bigger than")
return [ chunk for isbn, chunks in chunk_tuples for chunk, score in zip(chunks, chunk_scores[isbn]) if score >= threshold ]
else:
print("filtering smaller than")
return [ chunk for isbn, chunks in chunk_tuples for chunk, score in zip(chunks, chunk_scores[isbn]) if score < threshold ]
def tokenize_chunk( chunk_as_string ):
more_punctuation = string.punctuation + '“”‘’«»'
return word_tokenize( chunk_as_string.lower().translate( str.maketrans( "", "", more_punctuation ) ) )
def make_dist( chunks, doc_freq ):
return Counter([term for chunk in chunks for term in tokenize_chunk( chunk ) if len(doc_freq[term]) > 1])
def get_most_frequent_terms( isbn_data, chunk_scores, threshold, bigger_than=True ):
text_chunks = get_text_chunks( isbn_data )
doc_freq = compute_doc_freq( text_chunks )
chunks = filter_chunks( text_chunks, chunk_scores, threshold, bigger_than )
fdist = make_dist( chunks, doc_freq )
top_terms = [term for term, freq in fdist.most_common(10000)]
return top_terms, fdist, doc_freq
if __name__ == "__main__":
wpg_data_file = "wpg_data.csv"
score_file = "../docker_volume/report-deeplearning-total-120-train-20-test-20-iteration-3-date-20171030_1030.csv"
#score_file = "./data/non_disclosed/remote_volume_20170406/report_20170404_0951.csv"
total_size = 120
top_chunk_scores, bottom_chunk_scores = read_chunk_scores( score_file )
top_isbn_data, bottom_isbn_data = do_sample( top_chunk_scores, bottom_chunk_scores, wpg_data_file )
bottom_terms, bottom_fdist, bottom_doc_freq = get_most_frequent_terms( bottom_isbn_data, bottom_chunk_scores, 0.5, bigger_than=False )
top_terms, top_fdist, top_doc_freq = get_most_frequent_terms( top_isbn_data, top_chunk_scores, 0.8, bigger_than=True )
print(bottom_fdist.most_common(100))
print(top_fdist.most_common(100))
top_only = [term for term in top_terms[:200] if term not in bottom_terms]
bottom_only = [term for term in bottom_terms[:200] if term not in top_terms]
for index, term in enumerate(top_terms[:1000]):
top_rank = index + 1
bottom_rank = "NA"
if term in bottom_terms:
bottom_rank = bottom_terms.index(term) + 1
if bottom_rank == "NA" or bottom_rank / top_rank > 3:
print(term, top_rank, bottom_rank)
for term in top_only:
titles = [get_isbn_title(isbn, top_isbn_data) for isbn in top_doc_freq[term]]
print("top only:", term, top_fdist[term], titles)
for term in bottom_only:
titles = [get_isbn_title(isbn, bottom_isbn_data) for isbn in bottom_doc_freq[term]]
print("bottom only:", term, bottom_fdist[term], titles)
| mit | Python |
|
57dc7e58dcfd101c29026c8c07763cba2eb7dd14 | add helper script to inspect comments on released content | alexanderkyte/mitls-f7,alexanderkyte/mitls-f7,alexanderkyte/mitls-f7,alexanderkyte/mitls-f7,alexanderkyte/mitls-f7 | scripts/show_comments.py | scripts/show_comments.py | #!/usr/bin/env python
from __future__ import print_function
import sys
def main():
fs = open(sys.argv[1]).read().splitlines()
fs = map(lambda f: {'name':f, 'contents':open(f).readlines()},fs)
for f in fs:
buffer = ''
multiline = 0
is_first = True
for i,line in enumerate(f['contents'],start=1):
multiline += line.count('(*')
if (line.count('//') > 0 or multiline > 0) and not is_first:
buffer += '{}: {}'.format(i,line)
closed = line.count('*)')
if closed > 0 and is_first:
is_first = False
multiline -= closed
if buffer:
print ('*** {}:'.format(f['name']))
print (buffer)
print ()
if __name__ == '__main__':
main()
| apache-2.0 | Python |
|
4b83b7a3d286f60454c96ae609ce18c731339877 | add a stub fuse-based fs component | jeffpc/nx01 | src/fs/nomadfs.py | src/fs/nomadfs.py | #!/usr/bin/env python
#
# Copyright (c) 2015 Josef 'Jeff' Sipek <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import errno
import fuse
fuse.fuse_python_api = (0, 2)
class Nomad(fuse.Fuse):
def __init__(self, *args, **kw):
fuse.Fuse.__init__(self, *args, **kw)
def getattr(self, path):
return -errno.ENOSYS
def getdir(self, path):
return -errno.ENOSYS
if __name__ == "__main__":
fs = Nomad()
fs.flags = 0
fs.multithreaded = 0
fs.parse(errex=1)
fs.main()
| mit | Python |
|
4efc50f91d2b141270739ea9f8bef9685cc86e7f | add houdini/shelf/fitcam | cineuse/CNCGToolKit,cineuse/CNCGToolKit | houdini/shelf/fitcam.py | houdini/shelf/fitcam.py | # -*- coding: utf-8 -*-
import hou
import toolutils
def setfit(oldCam, resx, resy):
oldCam.setDisplayFlag(False)
oldCam.parm(oldCam.path() + "/resx").set(resx)
oldCam.parm(oldCam.path() + "/resy").set(resy)
camups = oldCam.inputAncestors()
if camups == ():
camup = oldCam
else:
camup = camups = oldCam.inputAncestors()[-1]
null = hou.node('obj').createNode('null', 'ScaleWorld')
blend = hou.node('obj').createNode('blend', 'Blend_position')
fetch = hou.node('obj').createNode('fetch', 'Fetch_NewCam')
newCam = hou.node('obj').createNode('cam', 'Render_Camera')
null.move(camup.position() + hou.Vector2(0, 1))
blend.move(oldCam.position() + hou.Vector2(0, -1))
fetch.move(oldCam.position() + hou.Vector2(0, -2))
newCam.move(oldCam.position() + hou.Vector2(0, -3))
camup.setNextInput(null)
blend.setNextInput(oldCam)
fetch.setNextInput(blend)
newCam.setNextInput(fetch)
null.setDisplayFlag(False)
blend.setDisplayFlag(False)
fetch.setDisplayFlag(False)
blend.parm(blend.path() + "/blendm1").set(63)
fetch.parm(fetch.path() + "/useinputoffetched").set(1)
oldCamPath = oldCam.path()
relativePath = newCam.relativePathTo(oldCam)
resx = " ch(\"" + relativePath + "/resx\")"
resy = " ch(\"" + relativePath + "/resy\")"
focal = " ch(\"" + relativePath + "/focal\")"
aperture = " ch(\"" + relativePath + "/aperture\")"
vm_background = " ch(\"" + relativePath + "/vm_background\")"
newCam.setParmExpressions(dict(resx=resx, resy=resy, focal=focal,
aperture=aperture, vm_background=vm_background))
newCam.parm("vm_bgenable").set(0)
newCam.parm("vm_bgenable").set(0)
newCam.parm("vm_bgenable").lock(True)
def main():
view = toolutils.sceneViewer()
sel = view.selectObjects('请选择一个相机')
if len(sel) > 0:
if sel[0].type().name()=='cam':
resolution = hou.ui.readInput('set Resolution',buttons = ('Set','close'),title = 'set Resolution',initial_contents = '1920-1080',close_choice = 1,default_choice = 0)
resx = resolution[1].split('-')[0]
resy = resolution[1].split('-')[1]
oldCam = sel[0]
if resolution[0] == 0:
setfit(oldCam, resx, resy) | mit | Python |
|
1886af3e8c96108a8f7bdb320969373e66299bf4 | Create __init__.py | garyelephant/snippets,garyelephant/snippets,garyelephant/snippets,garyelephant/snippets | python/django_standalone_orm/__init__.py | python/django_standalone_orm/__init__.py | mit | Python |
||
f7046ba07a3ec41d26df0b0bce67c6ab8013bfd8 | Fix for the activity whose transcripts are stunted | gnowledge/gstudio,gnowledge/gstudio,gnowledge/gstudio,gnowledge/gstudio,gnowledge/gstudio | doc/release-scripts/Fix_Transcript_Stunted.py | doc/release-scripts/Fix_Transcript_Stunted.py | '''
Issue : Transcript for model conversations stunted
Fix : The CSS used for the transcript part is not the same as that of the others (which used the toggler CSS). Have made the required changes for the transcripts
related to the audio and model conversations which come up on clicking "answer this" in Unit 0: English Beginner Lesson 8 : Let's Talk
'''
import re
from gnowsys_ndf.ndf.models import node_collection
from bson import ObjectId
from bs4 import BeautifulSoup
'''Extracting the let's talk activity having the issue'''
actnd = node_collection.find_one({'_type':'GSystem','_id':ObjectId('59425d1c4975ac013cccbba3')})
soup = BeautifulSoup(actnd.content)
mrkup2 = '<form class="trans-form"><input align="right" id="toggler09" type="checkbox" /> <label class="toggle-me" for="toggler09">Transcript</label><div class="transcript"><object data="/media/b/0/b/3537c6b9800766bde84555191d5b510c5d760afc72a8fea888b765258369f.txt" style="width:99%!important; height:auto!important;word-wrap: break-word;" type="text/html"></object></div></form>'
mrkup3 = '<form class="trans-form"><input align="right" id="toggler08" type="checkbox" /> <label class="toggle-me" for="toggler08">Transcript</label><div class="transcript"><object data="/media/d/0/c/94657554e663a44dc3dfa309454108a4ba5bbc620131bb7a1a1e1d089cb88.txt" style="width:99%!important; height:auto!important;word-wrap: break-word;" type="text/html"></object></div></form>'
mrkup4 = '<form class="trans-form"><input align="right" id="toggler07" type="checkbox" /> <label class="toggle-me" for="toggler07">Transcript</label><div class="transcript"><object data="/media/2/a/3/7868f3d837d326586fe59f6b1f1abdde16b3bfcbcb1e239511877d6963583.txt" style="width:99%!important; height:auto!important;word-wrap: break-word;" type="text/html"></object></div></form>'
'''Replace the transcript-related tags with the required markup'''
for each in soup.find_all('input',{"class":"small radius transcript-toggler"}):
#print each['class'],each.attrs,each.attrs.keys()
stylflg = each.has_attr('style')
if stylflg:
#for child in each.parent.children:
# print each.parent.children
prnt_div = each.parent
inner_divtag = prnt_div.find('div',{"class":"transcript-data hide"})
print inner_divtag
trnscrpt_file = inner_divtag.find('object')['data']
print trnscrpt_file
if trnscrpt_file.split('/')[-1] == '3537c6b9800766bde84555191d5b510c5d760afc72a8fea888b765258369f.txt':
inner_divtag.decompose()
each.replaceWith(BeautifulSoup(mrkup2,'html.parser'))
if trnscrpt_file.split('/')[-1] == '94657554e663a44dc3dfa309454108a4ba5bbc620131bb7a1a1e1d089cb88.txt':
inner_divtag.decompose()
each.replaceWith(BeautifulSoup(mrkup3,'html.parser'))
if trnscrpt_file.split('/')[-1] == '7868f3d837d326586fe59f6b1f1abdde16b3bfcbcb1e239511877d6963583.txt':
inner_divtag.decompose()
each.replaceWith(BeautifulSoup(mrkup4,'html.parser'))
#print prnt_div
#print "*"*30
actnd.content = soup
actnd.content = actnd.content.decode("utf-8")
actnd.save()
| agpl-3.0 | Python |
|
88548319d8a7c44d039ce269621f0a9ff4ee8af6 | refactor leslie matrix; add leslie_exe.py | puruckertom/poptox | poptox/leslie/leslie_exe.py | poptox/leslie/leslie_exe.py | import numpy as np
import os.path
import pandas as pd
import sys
#find parent directory and import base (travis)
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
# print(sys.path)
# print(os.path)
class LeslieInputs(ModelSharedInputs):
"""
Input class for Leslie.
"""
def __init__(self):
"""Class representing the inputs for Leslie"""
super(LeslieInputs, self).__init__()
self.init_pop_size = pd.Series([], dtype="float")
self.stages = pd.Series([], dtype="float")
self.l_m = pd.Series([], dtype="float")
self.time_steps = pd.Series([], dtype="float")
class LeslieOutputs(object):
"""
Output class for Leslie.
"""
def __init__(self):
"""Class representing the outputs for Leslie"""
super(LeslieOutputs, self).__init__()
self.out_pop_matrix = pd.Series(name="out_pop_matrix")
class Leslie(UberModel, LeslieInputs, LeslieOutputs):
"""
Leslie model for population growth.
"""
def __init__(self, pd_obj, pd_obj_exp):
"""Class representing the Leslie model and containing all its methods"""
super(Leslie, self).__init__()
self.pd_obj = pd_obj
self.pd_obj_exp = pd_obj_exp
self.pd_obj_out = None
def execute_model(self):
"""
Callable to execute the running of the model:
1) Populate input parameters
2) Create output DataFrame to hold the model outputs
3) Run the model's methods to generate outputs
4) Fill the output DataFrame with the generated model outputs
"""
self.populate_inputs(self.pd_obj, self)
self.pd_obj_out = self.populate_outputs(self)
self.run_methods()
self.fill_output_dataframe(self)
# Begin model methods
def run_methods(self):
""" Execute all algorithm methods for model logic """
try:
self.leslie_grow()
except Exception as e:
print(str(e))
def leslie_grow(self):
self.out_pop_matrix = np.zeros(shape=(self.stages, self.time_steps))
self.out_pop_matrix[:, 0] = self.init_pop_size
for i in range(1, self.time_steps):
n = np.dot(self.l_m, self.out_pop_matrix[:, i-1])
self.out_pop_matrix[:, i] = n.squeeze()
return self.out_pop_matrix.tolist()
| unlicense | Python |
|
d3d6a6018d55581bf081c93386f6676c8bb105ce | Add module for running the main simulation | JoshuaBrockschmidt/ideal_ANN | simulate.py | simulate.py | import genetic
import sys
output = sys.stdout
def setOutput(out):
output = out
genetic.setOutput(output)
# Test data for a XOR gate
testData = (
(0.1, 0.1, 0.9),
(0.1, 0.9, 0.9),
(0.9, 0.1, 0.9),
(0.9, 0.9, 0.1)
)
def simulate():
sim = genetic.Simulation(2, 1, testData, 100)
sim.simulate(100)
| mit | Python |
|
2cd1e7fcdf53c312c3db8e6f1d257084a87cccbb | Add migration to update action implementation hashes. | mozilla/normandy,mozilla/normandy,mozilla/normandy,mozilla/normandy | recipe-server/normandy/recipes/migrations/0045_update_action_hashes.py | recipe-server/normandy/recipes/migrations/0045_update_action_hashes.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import hashlib
from base64 import b64encode, urlsafe_b64encode
from django.db import migrations
def make_hashes_urlsafe_sri(apps, schema_editor):
Action = apps.get_model('recipes', 'Action')
for action in Action.objects.all():
data = action.implementation.encode()
digest = hashlib.sha384(data).digest()
data_hash = urlsafe_b64encode(digest)
action.implementation_hash = 'sha384-' + data_hash.decode()
action.save()
def make_hashes_sha1(apps, schema_editor):
Action = apps.get_model('recipes', 'Action')
for action in Action.objects.all():
data = action.implementation.encode()
data_hash = hashlib.sha1(data).hexdigest()
action.implementation_hash = data_hash
action.save()
class Migration(migrations.Migration):
dependencies = [
('recipes', '0044_auto_20170801_0010'),
]
operations = [
migrations.RunPython(make_hashes_urlsafe_sri, make_hashes_sha1),
]
| mpl-2.0 | Python |
|
37a181a987e4974d21c3e043d66e0d65468785aa | Check in io module | bkg/greenwich | contones/io.py | contones/io.py | import multiprocessing
import os
import uuid
from osgeo import gdal
import contones.raster
def _run_encoder(path, encoder_cls, geom=None):
encoder = encoder_cls()
with contones.raster.Raster(path) as r:
if geom:
with r.crop(geom) as cropped:
cropped.save(encoder)
else:
r.save(encoder)
buff = encoder.read()
# Remove the dataset from memory
encoder.unlink()
return buff
# TODO: Generalize and replace _run_encoder()
def convert(inpath, outpath=None):
if outpath is None:
outpath = get_imageio_for(outpath)()
with contones.raster.Raster(path) as r:
r.save(outpath)
return outpath
def run_encoderpool(encoder_cls, pathnames, geom=None, multicore=True):
"""Run an encoder job using a pool of workers.
Arguments:
path -- path to a GDAL dataset
encoder_cls -- encoder class to use, not an instance
Keyword args:
geom -- geometry used to crop raster as a geos.Polygon or None
multicore -- true/false, process in parallel by default
"""
encoder = encoder_cls()
if not multicore:
return [_run_encoder(path, encoder_cls, geom) for path in pathnames]
num_workers = multiprocessing.cpu_count()
num_workers = num_workers / 2 if num_workers > 4 else num_workers
pool = multiprocessing.Pool(num_workers)
results = [pool.apply(_run_encoder, (path, encoder_cls, geom,))
for path in pathnames]
pool.close()
return results
def get_imageio_for(path):
"""Returns the io class from a file path or gdal.Driver ShortName."""
extsep = os.path.extsep
ext = path.rsplit(extsep, 1)[-1] if extsep in path else path
#ext = os.path.splitext(path)[-1] if extsep in path else path
for cls in BaseImageIO.__subclasses__():
if ext in [cls.ext, cls.driver_name]:
return cls
raise Exception('No IO class for {}'.format(path))
# TODO: These not strictly encoders as they have filepaths, etc. Rename to
# Transformer, Converter, Driver? Or, FileStore, ImageFile, ImageFileStore?
#class BaseEncoder(object):
#class BaseImageStore(object):
class BaseImageIO(object):
"""Base encoder for GDAL Datasets derived from GDAL.Driver, used mainly
for raster image encoding. New raster formats should subclass this.
"""
_vsimem = '/vsimem'
# Specify this in subclass
driver_name = None
driver_opts = []
ext = None
def __init__(self, path=None):
self.driver = gdal.GetDriverByName(self.driver_name)
self.path = path or self.get_tmpname()
def __getattr__(self, attr):
return getattr(self.driver, attr)
def create(self, nx, ny, bandcount, datatype):
#self._check_exists()
ds = self.Create(self.path, nx, ny, bandcount,
datatype, self.driver_opts)
return contones.raster.Raster(ds)
#def vsipath(self):
def get_tmpname(self):
basename = '{}.{}'.format(str(uuid.uuid4()), self.ext)
return os.path.join(self._vsimem, basename)
def _check_exists(self):
if os.path.exists(self.path):
raise IOError('{} already exists'.format(self.path))
def copy_from(self, dataset):
#self._check_exists()
ds = self.CreateCopy(self.path, dataset.ds,
options=self.driver_opts)
return contones.raster.Raster(ds)
def read(self, size=0):
"""Returns the raster data buffer as str."""
f = gdal.VSIFOpenL(self.path, 'rb')
if f is None:
raise IOError('Could not read from {}'.format(self.path))
fstat = gdal.VSIStatL(self.path)
data = gdal.VSIFReadL(1, fstat.size, f)
gdal.VSIFCloseL(f)
return data
def unlink(self):
gdal.Unlink(self.path)
class GeoTIFFEncoder(BaseImageIO):
"""GeoTIFF raster encoder."""
driver_name = 'GTiff'
driver_opts = ['COMPRESS=PACKBITS']
ext = 'tif'
class HFAEncoder(BaseImageIO):
"""Erdas Imagine raster encoder."""
driver_name = 'HFA'
driver_opts = ['COMPRESSED=YES']
ext = 'img'
| bsd-3-clause | Python |
|
1253cf2773b510f88b4391e22f0e98b4ef3cdf52 | Create serializers.py | dfurtado/generator-djangospa,dfurtado/generator-djangospa,dfurtado/generator-djangospa | templates/root/main/serializers.py | templates/root/main/serializers.py | from django.contrib.auth.models import User
from rest_framework import serializers
from <%= appName %>.models import Sample
class SampleSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Sample
fields = ('id', 'created', 'name', 'img_name', 'url', 'owner', 'info')
class UserSerializer(serializers.HyperlinkedModelSerializer):
    sample = serializers.HyperlinkedRelatedField(many=True, view_name='sample-detail', read_only=True)
class Meta:
model = User
fields = ('url', 'username', 'sample')
| mit | Python |
|
c6015e049ab1ce059298af9147851f9a6a1c1e46 | Replace NotImplemented singleton with NotImplementedError exceptin | selahssea/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,VinnieJohns/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,VinnieJohns/ggrc-core | src/ggrc_workflows/services/workflow_cycle_calculator/one_time_cycle_calculator.py | src/ggrc_workflows/services/workflow_cycle_calculator/one_time_cycle_calculator.py | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
import datetime
from ggrc_workflows.services.workflow_cycle_calculator import cycle_calculator
class OneTimeCycleCalculator(cycle_calculator.CycleCalculator):
"""CycleCalculator implementation for one-time workflows
Because one-time workflows have concrete start and end dates already
specified for tasks, we don't have to implement relative_day_to_date function
and we can return all values in their raw format (we don't need to adjust for
holidays).
"""
def __init__(self, workflow, base_date=None):
super(OneTimeCycleCalculator, self).__init__(workflow)
def relative_day_to_date(self, relative_day, relative_month=None,
base_date=None):
raise NotImplementedError("Relative days are not applicable "
"for one-time workflows.")
def sort_tasks(self):
self.tasks.sort(key=lambda t: self._date_normalizer(t.start_date))
@staticmethod
def get_relative_start(task):
raise NotImplementedError("Relative days are not applicable "
"for one-time workflows.")
@staticmethod
def get_relative_end(task):
raise NotImplementedError("Relative days are not applicable "
"for one-time workflows.")
@staticmethod
def task_date_range(task, base_date=None):
return task.start_date, task.end_date
@staticmethod
def _date_normalizer(d):
if type(d) is datetime.datetime:
return d.date()
return d
def workflow_date_range(self):
tasks_start_dates = [
self._date_normalizer(task.start_date) for task in self.tasks]
tasks_end_dates = [
self._date_normalizer(task.end_date) for task in self.tasks]
return min(tasks_start_dates), max(tasks_end_dates)
def next_cycle_start_date(self, base_date=None):
return None
| # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
import datetime
from ggrc_workflows.services.workflow_cycle_calculator import cycle_calculator
class OneTimeCycleCalculator(cycle_calculator.CycleCalculator):
"""CycleCalculator implementation for one-time workflows
Because one-time workflows have concrete start and end dates already
specified for tasks, we don't have to implement relative_day_to_date function
and we can return all values in their raw format (we don't need to adjust for
holidays).
"""
def __init__(self, workflow, base_date=None):
super(OneTimeCycleCalculator, self).__init__(workflow)
def relative_day_to_date(self, relative_day, relative_month=None,
base_date=None):
raise NotImplemented("Relative days are not applicable "
"for one-time workflows.")
def sort_tasks(self):
self.tasks.sort(key=lambda t: self._date_normalizer(t.start_date))
@staticmethod
def get_relative_start(task):
raise NotImplemented("Relative days are not applicable "
"for one-time workflows.")
@staticmethod
def get_relative_end(task):
raise NotImplemented("Relative days are not applicable "
"for one-time workflows.")
@staticmethod
def task_date_range(task, base_date=None):
return task.start_date, task.end_date
@staticmethod
def _date_normalizer(d):
if type(d) is datetime.datetime:
return d.date()
return d
def workflow_date_range(self):
tasks_start_dates = [
self._date_normalizer(task.start_date) for task in self.tasks]
tasks_end_dates = [
self._date_normalizer(task.end_date) for task in self.tasks]
return min(tasks_start_dates), max(tasks_end_dates)
def next_cycle_start_date(self, base_date=None):
return None
| apache-2.0 | Python |
5dc2f523473f4921c3b7f1915966c0ac22b09474 | Create package and metadatas | Fantomas42/mots-vides,Fantomas42/mots-vides | mots_vides/__init__.py | mots_vides/__init__.py | """
Mots-vides
"""
__version__ = '2015.1.21.dev0'
__author__ = 'Fantomas42'
__email__ = '[email protected]'
__url__ = 'https://github.com/Fantomas42/mots-vides'
| bsd-3-clause | Python |
|
02e9602a5723aa3cbe9395290e4c18e439065007 | Remove redundant code | gfyoung/numpy,seberg/numpy,anntzer/numpy,abalkin/numpy,endolith/numpy,jakirkham/numpy,pdebuyl/numpy,WarrenWeckesser/numpy,pbrod/numpy,mattip/numpy,jorisvandenbossche/numpy,charris/numpy,endolith/numpy,jorisvandenbossche/numpy,jorisvandenbossche/numpy,mattip/numpy,ahaldane/numpy,jorisvandenbossche/numpy,simongibbons/numpy,madphysicist/numpy,pbrod/numpy,mattip/numpy,pbrod/numpy,mhvk/numpy,numpy/numpy,rgommers/numpy,numpy/numpy,simongibbons/numpy,jakirkham/numpy,shoyer/numpy,pizzathief/numpy,endolith/numpy,madphysicist/numpy,pdebuyl/numpy,mattip/numpy,rgommers/numpy,grlee77/numpy,gfyoung/numpy,grlee77/numpy,MSeifert04/numpy,charris/numpy,MSeifert04/numpy,anntzer/numpy,pdebuyl/numpy,WarrenWeckesser/numpy,grlee77/numpy,numpy/numpy,madphysicist/numpy,anntzer/numpy,endolith/numpy,ahaldane/numpy,abalkin/numpy,pizzathief/numpy,seberg/numpy,simongibbons/numpy,charris/numpy,simongibbons/numpy,pbrod/numpy,mhvk/numpy,jakirkham/numpy,jakirkham/numpy,grlee77/numpy,WarrenWeckesser/numpy,rgommers/numpy,MSeifert04/numpy,mhvk/numpy,shoyer/numpy,madphysicist/numpy,shoyer/numpy,ahaldane/numpy,abalkin/numpy,shoyer/numpy,WarrenWeckesser/numpy,pizzathief/numpy,pizzathief/numpy,mhvk/numpy,MSeifert04/numpy,shoyer/numpy,grlee77/numpy,pizzathief/numpy,simongibbons/numpy,seberg/numpy,ahaldane/numpy,seberg/numpy,jorisvandenbossche/numpy,madphysicist/numpy,pbrod/numpy,mhvk/numpy,anntzer/numpy,MSeifert04/numpy,gfyoung/numpy,numpy/numpy,ahaldane/numpy,rgommers/numpy,charris/numpy,WarrenWeckesser/numpy,jakirkham/numpy,pdebuyl/numpy | numpy/distutils/tests/test_fcompiler.py | numpy/distutils/tests/test_fcompiler.py | from __future__ import division, absolute_import, print_function
from numpy.testing import assert_
import numpy.distutils.fcompiler
customizable_flags = [
('f77', 'F77FLAGS'),
('f90', 'F90FLAGS'),
('free', 'FREEFLAGS'),
('arch', 'FARCH'),
('debug', 'FDEBUG'),
('flags', 'FFLAGS'),
('linker_so', 'LDFLAGS'),
]
def test_fcompiler_flags(monkeypatch):
monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0')
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none')
flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None)
for opt, envvar in customizable_flags:
new_flag = '-dummy-{}-flag'.format(opt)
prev_flags = getattr(flag_vars, opt)
monkeypatch.setenv(envvar, new_flag)
new_flags = getattr(flag_vars, opt)
monkeypatch.delenv(envvar)
assert_(new_flags == [new_flag])
monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1')
for opt, envvar in customizable_flags:
new_flag = '-dummy-{}-flag'.format(opt)
prev_flags = getattr(flag_vars, opt)
monkeypatch.setenv(envvar, new_flag)
new_flags = getattr(flag_vars, opt)
monkeypatch.delenv(envvar)
if prev_flags is None:
assert_(new_flags == [new_flag])
else:
assert_(new_flags == prev_flags + [new_flag])
| from __future__ import division, absolute_import, print_function
from numpy.testing import assert_
import numpy.distutils.fcompiler
customizable_flags = [
('f77', 'F77FLAGS'),
('f90', 'F90FLAGS'),
('free', 'FREEFLAGS'),
('arch', 'FARCH'),
('debug', 'FDEBUG'),
('flags', 'FFLAGS'),
('linker_so', 'LDFLAGS'),
]
def test_fcompiler_flags(monkeypatch):
monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0')
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none')
flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None)
for opt, envvar in customizable_flags:
new_flag = '-dummy-{}-flag'.format(opt)
prev_flags = getattr(flag_vars, opt)
monkeypatch.setenv(envvar, new_flag)
new_flags = getattr(flag_vars, opt)
monkeypatch.delenv(envvar)
assert_(new_flags == [new_flag])
monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1')
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none')
flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None)
for opt, envvar in customizable_flags:
new_flag = '-dummy-{}-flag'.format(opt)
prev_flags = getattr(flag_vars, opt)
monkeypatch.setenv(envvar, new_flag)
new_flags = getattr(flag_vars, opt)
monkeypatch.delenv(envvar)
if prev_flags is None:
assert_(new_flags == [new_flag])
else:
assert_(new_flags == prev_flags + [new_flag])
| bsd-3-clause | Python |
b9bb7e36977b757a63015ac3af8b538f0c67f16c | add manage.py | vyacheslav-bezborodov/dvhb | manage.py | manage.py | from argparse import ArgumentParser
def apply_migrates(args):
print('migrate')
def make_parser():
parser = ArgumentParser()
subparsers = parser.add_subparsers()
migrate = subparsers.add_parser('migrate')
migrate.set_defaults(func=apply_migrates)
return parser
if __name__ == '__main__':
parser = make_parser()
args = parser.parse_args()
if vars(args):
args.func(args)
else:
parser.print_help()
| mit | Python |
|
67a3a0050c90c500c0c08a638436799df441c326 | Add markov implementation | greenify/zodiacy,greenify/zodiacy | markov.py | markov.py | from nltk import word_tokenize, pos_tag
import numpy
import random
from copy import deepcopy
def compute_transitions(tokens, precondition=lambda token, last_token: True, order=1):
last_tokens = [tokens[0]]
transitions = dict()
# count the occurences of "present | past"
for token in tokens[1:]:
past = tuple(last_tokens)
if precondition(token, past[-1]):
suffixes = [past[i:] for i in range(len(past))]
for suffix in suffixes:
if suffix not in transitions:
transitions[suffix] = {token : 1}
else:
if token not in transitions[suffix]:
transitions[suffix][token] = 1
else:
transitions[suffix][token] += 1
last_tokens = last_tokens[1 if len(last_tokens) == order else 0:]
last_tokens.append(token)
# compute probabilities
for transition_counts in transitions.values():
summed_occurences = sum(transition_counts.values())
for token in transition_counts.keys():
transition_counts[token] /= summed_occurences
# ensure there is a probability
for token in tokens:
if (token,) not in transitions:
transitions[(token,)] = {token: 1}
return transitions
def compute_token_probabilities(pos_tagged_tokens):
token_probabilities = dict()
for item in pos_tagged_tokens:
if item[1] not in token_probabilities:
token_probabilities[item[1]] = {item[0]: 1}
else:
if item[0] not in token_probabilities[item[1]]:
token_probabilities[item[1]][item[0]] = 1
else:
token_probabilities[item[1]][item[0]] += 1
for probabilities in token_probabilities.values():
summed_occurences = sum(probabilities.values())
for token in probabilities.keys():
probabilities[token] /= summed_occurences
return token_probabilities
def _weighted_choice(item_probabilities, value_to_probability=lambda x:x, probability_sum=1):
""" Expects a list of (item, probability)-tuples and the sum of all probabilities and returns one entry weighted at random """
random_value = random.random()*probability_sum
summed_probability = 0
for item, value in item_probabilities:
summed_probability += value_to_probability(value)
if summed_probability > random_value:
return item
def generate_text(transitions, start_symbol, count, symbol_to_token=lambda x:x, precondition=lambda x: True, order=1):
last_symbols = [start_symbol]
generated_tokens = []
for i in range(1, count):
new_symbol = generate_next_token(transitions, tuple(last_symbols[-i if i < order else -order:]), precondition)
last_symbols = last_symbols[1 if len(last_symbols) == order else 0:]
last_symbols.append(new_symbol)
generated_tokens.append(symbol_to_token(new_symbol))
return generated_tokens
def generate_next_token(transitions, past, precondition=lambda x: True):
for key in [past[i:] for i in range(len(past))]:
if key in transitions:
possible_transitions = deepcopy(transitions[key])
for key in transitions[key].keys():
if not precondition(key):
del possible_transitions[key]
return _weighted_choice(possible_transitions.items(), probability_sum=sum(possible_transitions.values()))
def lexicographic_markov(input, count, order=1):
tokens = word_tokenize(input)
pos_tagged_tokens = pos_tag(tokens)
symbol_transitions = compute_transitions([x[1] for x in pos_tagged_tokens])
token_probabilities = compute_token_probabilities(pos_tagged_tokens)
return generate_text(symbol_transitions, random.choice([x[1] for x in pos_tagged_tokens]), count, lambda symbol: _weighted_choice(token_probabilities[symbol].items()), order)
| mit | Python |
|
58cb5bde9c658e7b5fc7a7c946951e8abaade5e4 | Check against sixtrack in different file | SixTrack/SixTrackLib,SixTrack/SixTrackLib,SixTrack/SixTrackLib,SixTrack/SixTrackLib | examples/python/test_workflow_footprint/001_checks_against_sixtrack.py | examples/python/test_workflow_footprint/001_checks_against_sixtrack.py | import pickle
import numpy as np
import pysixtrack
import sixtracktools
# Load machine
with open('line.pkl', 'rb') as fid:
pbline = pickle.load(fid)
line = pysixtrack.Line.fromline(pbline)
# Load particle on CO
with open('particle_on_CO.pkl', 'rb') as fid:
part_on_CO = pysixtrack.Particles.from_dict(
pickle._load(fid))
# Load iconv
with open('iconv.pkl', 'rb') as fid:
iconv = pickle.load(fid)
# Load sixtrack tracking data
sixdump_all = sixtracktools.SixDump101('res/dump3.dat')
# Assume first particle to be on the closed orbit
Nele_st = len(iconv)
sixdump_CO = sixdump_all[::2][:Nele_st]
# Compute closed orbit using tracking
closed_orbit = line.track_elem_by_elem(part_on_CO)
# Check that closed orbit is closed
pstart = closed_orbit[0].copy()
pstart_st = pysixtrack.Particles(**sixdump_CO[0].get_minimal_beam())
print('STsigma, Sigma, Stdelta, delta, Stpx, px')
for iturn in range(10):
line.track(pstart)
line.track(pstart_st)
print('%e, %e, %e, %e, %e, %e' % (pstart_st.sigma, pstart.sigma,
pstart_st.delta, pstart.delta, pstart_st.px, pstart.px))
# Compare closed orbit against sixtrack
for att in 'x px y py delta sigma'.split():
att_CO = np.array([getattr(pp, att) for pp in closed_orbit])
att_CO_at_st_ele = att_CO[iconv]
print('Max C.O. discrepancy in %s %.2e' %
(att, np.max(np.abs(att_CO_at_st_ele-getattr(sixdump_CO, att)))))
# Compare tracking results
sixdump = sixdump_all[1::2] # Particle with deviation from CO
# sixdump = sixdump_all[::2] # Particle on CO
p_in_st = pysixtrack.Particles(**sixdump[0].get_minimal_beam())
p_out_st = pysixtrack.Particles(**sixdump[1].get_minimal_beam())
p_in_pyst = p_in_st.copy()
p_out_pyst = p_in_pyst.copy()
for att in 'x px y py delta sigma'.split():
attin = getattr(p_in_st, att)
attout = getattr(p_out_st, att)
print('SxTr: Change in '+att+': %e' % (attout-attin))
attin_pyst = getattr(p_in_pyst, att)
attout_pyst = getattr(p_out_pyst, att)
print('PyST: Change in '+att+': %e' % (attout_pyst-attin_pyst))
def compare(prun, pbench, pbench_prev):
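    # Compare pysixtrack (prun) against the SixTrack dump (pbench) coordinate by coordinate;
    # flag an error when the relative change disagrees by more than 1e-8 and the absolute
    # difference exceeds 1e-11.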
out = []
out_rel = []
error = False
for att in 'x px y py delta sigma'.split():
vrun = getattr(prun, att)
vbench = getattr(pbench, att)
vbench_prev = getattr(pbench_prev, att)
diff = vrun-vbench
diffrel = abs(1.-abs(vrun-vbench_prev)/abs(vbench-vbench_prev))
out.append(abs(diff))
out_rel.append(diffrel)
print(f"{att:<5} {vrun:22.13e} {vbench:22.13e} {diff:22.13g} {diffrel:22.13g}")
if diffrel > 1e-8 or np.isnan(diffrel):
if diff > 1e-11:
print('Too large discrepancy!')
error = True
print(f"\nmax {max(out):21.12e} maxrel {max(out_rel):22.12e}")
return error
print("")
for ii in range(1, len(iconv)):
jja = iconv[ii-1]
jjb = iconv[ii]
prun = pysixtrack.Particles(**sixdump[ii-1].get_minimal_beam())
pbench_prev = prun.copy()
print(f"\n-----sixtrack={ii} sixtracklib={jja} --------------")
#print(f"pysixtr {jja}, x={prun.x}, px={prun.px}")
for jj in range(jja+1, jjb+1):
label = line.element_names[jj]
elem = line.elements[jj]
pin = prun.copy()
elem.track(prun)
print(f"{jj} {label},{str(elem)[:50]}")
pbench = pysixtrack.Particles(**sixdump[ii].get_minimal_beam())
#print(f"sixdump {ii}, x={pbench.x}, px={pbench.px}")
print("-----------------------")
error = compare(prun, pbench, pbench_prev)
print("-----------------------\n\n")
if error:
print('Error detected')
break
| lgpl-2.1 | Python |
|
8fa7120606e206d08acbad198e253ea428eef584 | Add tests for inline list compilation | ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang | tests/compiler/test_inline_list_compilation.py | tests/compiler/test_inline_list_compilation.py | import pytest
from tests.compiler import compile_snippet, internal_call, STATIC_START, LOCAL_START
from thinglang.compiler.errors import NoMatchingOverload, InvalidReference
from thinglang.compiler.opcodes import OpcodePopLocal, OpcodePushStatic
def test_inline_list_compilation():
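    # An inline list literal should compile to: push the element statics, construct the
    # list, append each element, then store the result into the local slot.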
assert compile_snippet('list<number> numbers = [1, 2, 3]') == [
OpcodePushStatic(STATIC_START), # Push the values
OpcodePushStatic(STATIC_START + 1),
OpcodePushStatic(STATIC_START + 2),
internal_call('list.__constructor__'), # Create the list
internal_call('list.append'), # Compile 3 append calls
internal_call('list.append'),
internal_call('list.append'),
OpcodePopLocal(LOCAL_START)
]
def test_inline_list_type_homogeneity():
with pytest.raises(NoMatchingOverload):
assert compile_snippet('list<number> numbers = [1, Container(), 3]')
def test_inline_list_declaration_type_match():
with pytest.raises(InvalidReference):
assert compile_snippet('list<number> numbers = [Container(), Container(), Container()]')
| mit | Python |
|
142cb17be1c024839cd972071b2f9665c87ed5f1 | Update downloadable clang to r338452 | alshedivat/tensorflow,tensorflow/tensorflow,jbedorf/tensorflow,Intel-tensorflow/tensorflow,theflofly/tensorflow,Bismarrck/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,ghchinoy/tensorflow,seanli9jan/tensorflow,kobejean/tensorflow,tensorflow/tensorflow-pywrap_saved_model,renyi533/tensorflow,ghchinoy/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,kobejean/tensorflow,Intel-tensorflow/tensorflow,Bismarrck/tensorflow,Intel-tensorflow/tensorflow,seanli9jan/tensorflow,paolodedios/tensorflow,davidzchen/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,adit-chandra/tensorflow,cxxgtxy/tensorflow,adit-chandra/tensorflow,dongjoon-hyun/tensorflow,snnn/tensorflow,gautam1858/tensorflow,jhseu/tensorflow,tensorflow/tensorflow,asimshankar/tensorflow,seanli9jan/tensorflow,snnn/tensorflow,Bismarrck/tensorflow,hehongliang/tensorflow,dancingdan/tensorflow,annarev/tensorflow,jendap/tensorflow,gunan/tensorflow,cxxgtxy/tensorflow,snnn/tensorflow,hfp/tensorflow-xsmm,tensorflow/tensorflow-pywrap_saved_model,ZhangXinNan/tensorflow,apark263/tensorflow,gunan/tensorflow,renyi533/tensorflow,freedomtan/tensorflow,arborh/tensorflow,frreiss/tensorflow-fred,kobejean/tensorflow,tensorflow/tensorflow,dongjoon-hyun/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,ageron/tensorflow,frreiss/tensorflow-fred,kevin-coder/tensorflow-fork,manipopopo/tensorflow,aam-at/tensorflow,brchiu/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,alshedivat/tensorflow,arborh/tensorflow,xzturn/tensorflow,DavidNorman/tensorflow,annarev/tensorflow,DavidNorman/tensorflow,AnishShah/tensorflow,petewarden/tensorflow,Intel-Corporation/tensorflow,aam-at/tensorflow,karllessard/tensorflow,DavidNorman/tensorflow,manipopopo/tensorflow,frreiss/tensorflow-fred,xodus7/tensorflow,Bismarrck/tensorflow,dongjoon-hyun/tensorflow,manipopopo/tensorflow,karllessard/tensorflow,theflofly/tensorflow,adit-chandra/tensorflow,adit-chandra/tensorflow,chemelnucfin/tensorflow,girving/tensorflow,petewarden/tensorflow,yongtang/tensorflow,gunan/tensorflow,manipopopo/tensorflow,asimshankar/tensorflow,tensorflow/tensorflow-pywrap_saved_model,renyi533/tensorflow,Bismarrck/tensorflow,AnishShah/tensorflow,sarvex/tensorflow,jhseu/tensorflow,ageron/tensorflow,DavidNorman/tensorflow,ageron/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,petewarden/tensorflow,DavidNorman/tensorflow,karllessard/tensorflow,aldian/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,aldian/tensorflow,girving/tensorflow,jendap/tensorflow,seanli9jan/tensorflow,freedomtan/tensorflow,hehongliang/tensorflow,adit-chandra/tensorflow,ageron/tensorflow,jendap/tensorflow,theflofly/tensorflow,ghchinoy/tensorflow,aselle/tensorflow,apark263/tensorflow,gunan/tensorflow,asimshankar/tensorflow,Bismarrck/tensorflow,manipopopo/tensorflow,davidzchen/tensorflow,yongtang/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,brchiu/tensorflow,hfp/tensorflow-xsmm,apark263/tensorflow,manipopopo/tensorflow,karllessard/tensorflow,xodus7/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,jbedorf/tensorflow,ppwwyyxx/tensorflow,girving/tensorflow,jbedorf/tensorflow,ppwwyyxx/tensorflow,asimshankar/tensorflow,manipopopo/tensorflow,ZhangXinNan/tensorflow,Intel-tensorflow/tensorflow,alshedivat/tensorflow
,ageron/tensorflow,xzturn/tensorflow,xzturn/tensorflow,aam-at/tensorflow,karllessard/tensorflow,yongtang/tensorflow,ZhangXinNan/tensorflow,jbedorf/tensorflow,ghchinoy/tensorflow,alsrgv/tensorflow,aam-at/tensorflow,DavidNorman/tensorflow,jendap/tensorflow,tensorflow/tensorflow,hfp/tensorflow-xsmm,Intel-Corporation/tensorflow,xzturn/tensorflow,freedomtan/tensorflow,yongtang/tensorflow,dancingdan/tensorflow,seanli9jan/tensorflow,ageron/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,sarvex/tensorflow,dancingdan/tensorflow,jhseu/tensorflow,alshedivat/tensorflow,gunan/tensorflow,jendap/tensorflow,jbedorf/tensorflow,paolodedios/tensorflow,frreiss/tensorflow-fred,Bismarrck/tensorflow,arborh/tensorflow,tensorflow/tensorflow,petewarden/tensorflow,jbedorf/tensorflow,arborh/tensorflow,xodus7/tensorflow,hfp/tensorflow-xsmm,freedomtan/tensorflow,ppwwyyxx/tensorflow,dancingdan/tensorflow,alsrgv/tensorflow,Intel-Corporation/tensorflow,alsrgv/tensorflow,manipopopo/tensorflow,girving/tensorflow,aselle/tensorflow,asimshankar/tensorflow,DavidNorman/tensorflow,aselle/tensorflow,xodus7/tensorflow,alsrgv/tensorflow,kevin-coder/tensorflow-fork,gautam1858/tensorflow,jhseu/tensorflow,xzturn/tensorflow,ppwwyyxx/tensorflow,jbedorf/tensorflow,tensorflow/tensorflow,AnishShah/tensorflow,brchiu/tensorflow,hfp/tensorflow-xsmm,gunan/tensorflow,kevin-coder/tensorflow-fork,kevin-coder/tensorflow-fork,hehongliang/tensorflow,kobejean/tensorflow,ZhangXinNan/tensorflow,dancingdan/tensorflow,alshedivat/tensorflow,hfp/tensorflow-xsmm,alsrgv/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,ageron/tensorflow,asimshankar/tensorflow,aselle/tensorflow,jendap/tensorflow,snnn/tensorflow,asimshankar/tensorflow,ppwwyyxx/tensorflow,aldian/tensorflow,dancingdan/tensorflow,ppwwyyxx/tensorflow,hfp/tensorflow-xsmm,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,ZhangXinNan/tensorflow,theflofly/tensorflow,manipopopo/tensorflow,AnishShah/tensorflow,brchiu/tensorflow,cxxgtxy/tensorflow,jbedorf/tensorflow,jhseu/tensorflow,gautam1858/tensorflow,ppwwyyxx/tensorflow,kobejean/tensorflow,adit-chandra/tensorflow,renyi533/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,asimshankar/tensorflow,kobejean/tensorflow,DavidNorman/tensorflow,Intel-Corporation/tensorflow,aldian/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow,apark263/tensorflow,renyi533/tensorflow,xzturn/tensorflow,freedomtan/tensorflow,apark263/tensorflow,seanli9jan/tensorflow,karllessard/tensorflow,ppwwyyxx/tensorflow,alshedivat/tensorflow,renyi533/tensorflow,Intel-tensorflow/tensorflow,ppwwyyxx/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,theflofly/tensorflow,ppwwyyxx/tensorflow,alsrgv/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,adit-chandra/tensorflow,ageron/tensorflow,karllessard/tensorflow,chemelnucfin/tensorflow,petewarden/tensorflow,aam-at/tensorflow,petewarden/tensorflow,xodus7/tensorflow,frreiss/tensorflow-fred,davidzchen/tensorflow,xzturn/tensorflow,jhseu/tensorflow,jendap/tensorflow,frreiss/tensorflow-fred,aam-at/tensorflow,tensorflow/tensorflow,annarev/tensorflow,hfp/tensorflow-xsmm,AnishShah/tensorflow,alsrgv/tensorflow,aldian/tensorflow,aam-at/tensorflow,apark263/tensorflow,sarvex/tensorflow,alsrgv/tensorflow,xzturn/tensorflow,snnn/tensorflow,cxxgtxy/tensorflow,jbedorf/tensorflow,annarev/tensorflow,ghchinoy/tensorflow,ppwwyyxx/tensorflow,AnishShah/tensorflow,alshedivat/tensorflow,frreiss/tensorflow-fred,renyi533/tensorflow,ZhangXinNan/tensorflow,aam-at
/tensorflow,frreiss/tensorflow-fred,gunan/tensorflow,Bismarrck/tensorflow,asimshankar/tensorflow,seanli9jan/tensorflow,AnishShah/tensorflow,xzturn/tensorflow,girving/tensorflow,arborh/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,cxxgtxy/tensorflow,freedomtan/tensorflow,gautam1858/tensorflow,jendap/tensorflow,davidzchen/tensorflow,ageron/tensorflow,chemelnucfin/tensorflow,chemelnucfin/tensorflow,paolodedios/tensorflow,renyi533/tensorflow,davidzchen/tensorflow,gunan/tensorflow,paolodedios/tensorflow,ageron/tensorflow,sarvex/tensorflow,xodus7/tensorflow,jhseu/tensorflow,DavidNorman/tensorflow,gunan/tensorflow,kevin-coder/tensorflow-fork,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,girving/tensorflow,ppwwyyxx/tensorflow,aam-at/tensorflow,ageron/tensorflow,kobejean/tensorflow,manipopopo/tensorflow,chemelnucfin/tensorflow,jhseu/tensorflow,sarvex/tensorflow,apark263/tensorflow,frreiss/tensorflow-fred,xodus7/tensorflow,davidzchen/tensorflow,apark263/tensorflow,chemelnucfin/tensorflow,alsrgv/tensorflow,yongtang/tensorflow,davidzchen/tensorflow,hfp/tensorflow-xsmm,snnn/tensorflow,xodus7/tensorflow,theflofly/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,aselle/tensorflow,chemelnucfin/tensorflow,aselle/tensorflow,alsrgv/tensorflow,hfp/tensorflow-xsmm,jbedorf/tensorflow,gunan/tensorflow,Intel-Corporation/tensorflow,dancingdan/tensorflow,dancingdan/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,ghchinoy/tensorflow,petewarden/tensorflow,gunan/tensorflow,kobejean/tensorflow,gautam1858/tensorflow,kevin-coder/tensorflow-fork,alshedivat/tensorflow,apark263/tensorflow,paolodedios/tensorflow,ageron/tensorflow,alshedivat/tensorflow,manipopopo/tensorflow,freedomtan/tensorflow,hfp/tensorflow-xsmm,yongtang/tensorflow,AnishShah/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,freedomtan/tensorflow,sarvex/tensorflow,aam-at/tensorflow,dancingdan/tensorflow,DavidNorman/tensorflow,annarev/tensorflow,ghchinoy/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,AnishShah/tensorflow,karllessard/tensorflow,chemelnucfin/tensorflow,renyi533/tensorflow,kobejean/tensorflow,theflofly/tensorflow,kobejean/tensorflow,ghchinoy/tensorflow,aam-at/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,kevin-coder/tensorflow-fork,annarev/tensorflow,davidzchen/tensorflow,davidzchen/tensorflow,Intel-tensorflow/tensorflow,hehongliang/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,ZhangXinNan/tensorflow,kevin-coder/tensorflow-fork,Bismarrck/tensorflow,ghchinoy/tensorflow,aam-at/tensorflow,petewarden/tensorflow,xodus7/tensorflow,davidzchen/tensorflow,dongjoon-hyun/tensorflow,asimshankar/tensorflow,xodus7/tensorflow,davidzchen/tensorflow,theflofly/tensorflow,snnn/tensorflow,theflofly/tensorflow,gautam1858/tensorflow,petewarden/tensorflow,yongtang/tensorflow,renyi533/tensorflow,brchiu/tensorflow,snnn/tensorflow,cxxgtxy/tensorflow,aselle/tensorflow,snnn/tensorflow,xzturn/tensorflow,aldian/tensorflow,jendap/tensorflow,dongjoon-hyun/tensorflow,chemelnucfin/tensorflow,xzturn/tensorflow,hehongliang/tensorflow,adit-chandra/tensorflow,karllessard/tensorflow,annarev/tensorflow,jendap/tensorflow,adit-chandra/tensorflow,paolodedios/tensorflow,snnn/tensorflow,apark263/tensorflow,girving/tensorflow,dongjoon-hyun/tensorflow,DavidNorman/tensorflow,freedomtan/tensorflow,cxxgtxy/tensorflow,hehongliang/tensorflow,jbe
dorf/tensorflow,girving/tensorflow,theflofly/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,dongjoon-hyun/tensorflow,yongtang/tensorflow,renyi533/tensorflow,seanli9jan/tensorflow,petewarden/tensorflow,arborh/tensorflow,Bismarrck/tensorflow,renyi533/tensorflow,seanli9jan/tensorflow,kevin-coder/tensorflow-fork,aselle/tensorflow,ghchinoy/tensorflow,annarev/tensorflow,ZhangXinNan/tensorflow,brchiu/tensorflow,dongjoon-hyun/tensorflow,jbedorf/tensorflow,jhseu/tensorflow,girving/tensorflow,alsrgv/tensorflow,brchiu/tensorflow,karllessard/tensorflow,dancingdan/tensorflow,alsrgv/tensorflow,jhseu/tensorflow,freedomtan/tensorflow,ZhangXinNan/tensorflow,seanli9jan/tensorflow,brchiu/tensorflow,aldian/tensorflow,alshedivat/tensorflow,jendap/tensorflow,petewarden/tensorflow,sarvex/tensorflow,chemelnucfin/tensorflow,snnn/tensorflow,annarev/tensorflow,paolodedios/tensorflow,theflofly/tensorflow,theflofly/tensorflow,tensorflow/tensorflow,gunan/tensorflow,aldian/tensorflow,hehongliang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,arborh/tensorflow,aselle/tensorflow,seanli9jan/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,dancingdan/tensorflow,dongjoon-hyun/tensorflow,girving/tensorflow,aselle/tensorflow,brchiu/tensorflow,asimshankar/tensorflow,xzturn/tensorflow,gautam1858/tensorflow,petewarden/tensorflow,jhseu/tensorflow,Bismarrck/tensorflow,paolodedios/tensorflow,apark263/tensorflow,frreiss/tensorflow-fred,arborh/tensorflow,Intel-tensorflow/tensorflow,ghchinoy/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,ghchinoy/tensorflow,AnishShah/tensorflow,arborh/tensorflow,adit-chandra/tensorflow,ZhangXinNan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,arborh/tensorflow,dongjoon-hyun/tensorflow,Intel-tensorflow/tensorflow,adit-chandra/tensorflow,gautam1858/tensorflow,annarev/tensorflow,Intel-Corporation/tensorflow,DavidNorman/tensorflow,brchiu/tensorflow,ZhangXinNan/tensorflow,jhseu/tensorflow,kobejean/tensorflow,aselle/tensorflow,adit-chandra/tensorflow,arborh/tensorflow,davidzchen/tensorflow,karllessard/tensorflow,arborh/tensorflow,kevin-coder/tensorflow-fork,tensorflow/tensorflow-pywrap_saved_model,annarev/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,kevin-coder/tensorflow-fork,chemelnucfin/tensorflow,chemelnucfin/tensorflow,xodus7/tensorflow,brchiu/tensorflow,alshedivat/tensorflow,girving/tensorflow,AnishShah/tensorflow,dongjoon-hyun/tensorflow | third_party/clang_toolchain/download_clang.bzl | third_party/clang_toolchain/download_clang.bzl | """ Helpers to download a recent clang release."""
def _get_platform_folder(os_name):
os_name = os_name.lower()
if os_name.startswith("windows"):
return "Win"
if os_name.startswith("mac os"):
return "Mac"
if not os_name.startswith("linux"):
fail("Unknown platform")
return "Linux_x64"
def _download_chromium_clang(
repo_ctx,
platform_folder,
package_version,
sha256,
out_folder):
cds_url = "https://commondatastorage.googleapis.com/chromium-browser-clang"
cds_file = "clang-%s.tgz" % package_version
cds_full_url = "{0}/{1}/{2}".format(cds_url, platform_folder, cds_file)
repo_ctx.download_and_extract(cds_full_url, output = out_folder, sha256 = sha256)
def download_clang(repo_ctx, out_folder):
""" Download a fresh clang release and put it into out_folder.
Clang itself will be located in 'out_folder/bin/clang'.
We currently download one of the latest releases of clang by the
Chromium project (see
https://chromium.googlesource.com/chromium/src/+/master/docs/clang.md).
Args:
repo_ctx: An instance of repository_context object.
out_folder: A folder to extract the compiler into.
"""
# TODO(ibiryukov): we currently download and extract some extra tools in the
# clang release (e.g., sanitizers). We should probably remove the ones
# we don't need and document the ones we want provide in addition to clang.
# Latest CLANG_REVISION and CLANG_SUB_REVISION of the Chromiums's release
# can be found in https://chromium.googlesource.com/chromium/src/tools/clang/+/master/scripts/update.py
CLANG_REVISION = "338452"
CLANG_SUB_REVISION = 1
package_version = "%s-%s" % (CLANG_REVISION, CLANG_SUB_REVISION)
checksums = {
"Linux_x64": "213ba23a0a9855ede5041f66661caa9c5c59a573ec60b82a31839f9a97f397bf",
"Mac": "4267774201f8cb50c25e081375e87038d58db80064a20a0d9d7fe57ea4357ece",
"Win": "a8a5d5b25443c099e2c20d1a0cdce2f1d17e2dba84de66a6dc6a239ce3e78c34",
}
platform_folder = _get_platform_folder(repo_ctx.os.name)
_download_chromium_clang(
repo_ctx,
platform_folder,
package_version,
checksums[platform_folder],
out_folder,
)
| """ Helpers to download a recent clang release."""
def _get_platform_folder(os_name):
os_name = os_name.lower()
if os_name.startswith("windows"):
return "Win"
if os_name.startswith("mac os"):
return "Mac"
if not os_name.startswith("linux"):
fail("Unknown platform")
return "Linux_x64"
def _download_chromium_clang(
repo_ctx,
platform_folder,
package_version,
sha256,
out_folder):
cds_url = "https://commondatastorage.googleapis.com/chromium-browser-clang"
cds_file = "clang-%s.tgz" % package_version
cds_full_url = "{0}/{1}/{2}".format(cds_url, platform_folder, cds_file)
repo_ctx.download_and_extract(cds_full_url, output = out_folder, sha256 = sha256)
def download_clang(repo_ctx, out_folder):
""" Download a fresh clang release and put it into out_folder.
Clang itself will be located in 'out_folder/bin/clang'.
We currently download one of the latest releases of clang by the
Chromium project (see
https://chromium.googlesource.com/chromium/src/+/master/docs/clang.md).
Args:
repo_ctx: An instance of repository_context object.
out_folder: A folder to extract the compiler into.
"""
# TODO(ibiryukov): we currently download and extract some extra tools in the
# clang release (e.g., sanitizers). We should probably remove the ones
# we don't need and document the ones we want provide in addition to clang.
# Latest CLANG_REVISION and CLANG_SUB_REVISION of the Chromiums's release
# can be found in https://chromium.googlesource.com/chromium/src/tools/clang/+/master/scripts/update.py
CLANG_REVISION = "336424"
CLANG_SUB_REVISION = 1
package_version = "%s-%s" % (CLANG_REVISION, CLANG_SUB_REVISION)
checksums = {
"Linux_x64": "2ea97e047470da648f5d078af008bce6891287592382cee3d53a1187d996da94",
"Mac": "c6e28909cce63ee35e0d51284d9f0f6e8838f7fb8b7a0dc9536c2ea900552df0",
"Win": "1299fda7c4378bfb81337f7e5f351c8a1f953f51e0744e2170454b8d722f3db7",
}
platform_folder = _get_platform_folder(repo_ctx.os.name)
_download_chromium_clang(
repo_ctx,
platform_folder,
package_version,
checksums[platform_folder],
out_folder,
)
| apache-2.0 | Python |
3ddf0f0fead6018b5c313253a0df2165452cfb6e | Add shared babel init code | SUNET/eduid-common | src/eduid_common/api/translation.py | src/eduid_common/api/translation.py | # -*- coding: utf-8 -*-
from flask import request
from flask_babel import Babel
__author__ = 'lundberg'
def init_babel(app):
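    # Attach a Babel instance to the Flask app and register the locale selector below.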
babel = Babel(app)
app.babel = babel
@babel.localeselector
def get_locale():
# if a user is logged in, use the locale from the user settings
# XXX: TODO
# otherwise try to guess the language from the user accept
# header the browser transmits. The best match wins.
return request.accept_languages.best_match(app.config.get('SUPPORTED_LANGUAGES'))
return app
| bsd-3-clause | Python |
|
30bca45e1ac9fc6953728950695135b491403215 | Add test for logical constant folding. | mhoffma/micropython,blazewicz/micropython,tobbad/micropython,oopy/micropython,pozetroninc/micropython,swegener/micropython,tralamazza/micropython,tobbad/micropython,mhoffma/micropython,kerneltask/micropython,MrSurly/micropython,Peetz0r/micropython-esp32,tobbad/micropython,dmazzella/micropython,HenrikSolver/micropython,pramasoul/micropython,micropython/micropython-esp32,bvernoux/micropython,TDAbboud/micropython,MrSurly/micropython-esp32,tuc-osg/micropython,trezor/micropython,trezor/micropython,micropython/micropython-esp32,pozetroninc/micropython,henriknelson/micropython,SHA2017-badge/micropython-esp32,adafruit/circuitpython,MrSurly/micropython-esp32,MrSurly/micropython-esp32,toolmacher/micropython,chrisdearman/micropython,selste/micropython,pozetroninc/micropython,chrisdearman/micropython,deshipu/micropython,HenrikSolver/micropython,lowRISC/micropython,micropython/micropython-esp32,deshipu/micropython,HenrikSolver/micropython,henriknelson/micropython,Peetz0r/micropython-esp32,hiway/micropython,swegener/micropython,adafruit/circuitpython,trezor/micropython,pramasoul/micropython,trezor/micropython,cwyark/micropython,lowRISC/micropython,adafruit/micropython,adafruit/micropython,SHA2017-badge/micropython-esp32,MrSurly/micropython,henriknelson/micropython,SHA2017-badge/micropython-esp32,AriZuu/micropython,MrSurly/micropython-esp32,torwag/micropython,tuc-osg/micropython,hiway/micropython,infinnovation/micropython,toolmacher/micropython,adafruit/circuitpython,ryannathans/micropython,deshipu/micropython,selste/micropython,selste/micropython,micropython/micropython-esp32,alex-robbins/micropython,blazewicz/micropython,Peetz0r/micropython-esp32,PappaPeppar/micropython,HenrikSolver/micropython,mhoffma/micropython,infinnovation/micropython,toolmacher/micropython,dmazzella/micropython,cwyark/micropython,adafruit/micropython,ryannathans/micropython,SHA2017-badge/micropython-esp32,pozetroninc/micropython,pramasoul/micropython,adafruit/circuitpython,blazewicz/micropython,torwag/micropython,PappaPeppar/micropython,cwyark/micropython,henriknelson/micropython,swegener/micropython,bvernoux/micropython,TDAbboud/micropython,deshipu/micropython,tralamazza/micropython,adafruit/micropython,AriZuu/micropython,pramasoul/micropython,AriZuu/micropython,MrSurly/micropython,oopy/micropython,torwag/micropython,chrisdearman/micropython,kerneltask/micropython,tralamazza/micropython,dmazzella/micropython,PappaPeppar/micropython,TDAbboud/micropython,PappaPeppar/micropython,cwyark/micropython,micropython/micropython-esp32,lowRISC/micropython,ryannathans/micropython,swegener/micropython,dmazzella/micropython,blazewicz/micropython,henriknelson/micropython,tobbad/micropython,MrSurly/micropython,Timmenem/micropython,tralamazza/micropython,toolmacher/micropython,tuc-osg/micropython,tuc-osg/micropython,adafruit/micropython,alex-robbins/micropython,puuu/micropython,Peetz0r/micropython-esp32,mhoffma/micropython,pfalcon/micropython,pfalcon/micropython,ryannathans/micropython,oopy/micropython,hiway/micropython,oopy/micropython,mhoffma/micropython,pramasoul/micropython,selste/micropython,torwag/micropython,Timmenem/micropython,alex-robbins/micropython,Peetz0r/micropython-esp32,HenrikSolver/micropython,PappaPeppar/micropython,infinnovation/micropython,infinnovation/micropython,MrSurly/micropython,lowRISC/micropython,ryannathans/micropython,kerneltask/micropython,puuu/micropython,pozetroninc/micropython,lowRISC/micropython,kerneltask/micropython,chrisdearman
/micropython,alex-robbins/micropython,MrSurly/micropython-esp32,hiway/micropython,Timmenem/micropython,bvernoux/micropython,oopy/micropython,alex-robbins/micropython,swegener/micropython,blazewicz/micropython,AriZuu/micropython,TDAbboud/micropython,puuu/micropython,bvernoux/micropython,puuu/micropython,hiway/micropython,infinnovation/micropython,adafruit/circuitpython,bvernoux/micropython,Timmenem/micropython,puuu/micropython,chrisdearman/micropython,tuc-osg/micropython,AriZuu/micropython,tobbad/micropython,SHA2017-badge/micropython-esp32,deshipu/micropython,toolmacher/micropython,trezor/micropython,adafruit/circuitpython,pfalcon/micropython,TDAbboud/micropython,Timmenem/micropython,torwag/micropython,cwyark/micropython,pfalcon/micropython,selste/micropython,kerneltask/micropython,pfalcon/micropython | tests/basics/logic_constfolding.py | tests/basics/logic_constfolding.py | # tests logical constant folding in parser
def f_true():
print('f_true')
return True
def f_false():
print('f_false')
return False
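# 'foo' is intentionally undefined: when constant folding removes that operand, the name is never evaluated.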
print(0 or False)
print(1 or foo)
print(f_false() or 1 or foo)
print(f_false() or 1 or f_true())
print(0 and foo)
print(1 and True)
print(f_true() and 0 and foo)
print(f_true() and 1 and f_false())
print(not 0)
print(not False)
print(not 1)
print(not True)
print(not not 0)
print(not not 1)
| mit | Python |
|
5b3863c90d4bc07bbc170fc213b4a4c46b3ddc01 | Test setting selinux context on lost+found (#1038146) | rhinstaller/blivet,AdamWill/blivet,rvykydal/blivet,vpodzime/blivet,rhinstaller/blivet,jkonecny12/blivet,dwlehman/blivet,dwlehman/blivet,vojtechtrefny/blivet,AdamWill/blivet,jkonecny12/blivet,rvykydal/blivet,vpodzime/blivet,vojtechtrefny/blivet | tests/formats_test/selinux_test.py | tests/formats_test/selinux_test.py | #!/usr/bin/python
import os
import selinux
import tempfile
import unittest
from devicelibs_test import baseclass
from blivet.formats import device_formats
import blivet.formats.fs as fs
class SELinuxContextTestCase(baseclass.DevicelibsTestCase):
"""Testing SELinux contexts.
"""
@unittest.skipUnless(os.geteuid() == 0, "requires root privileges")
def testMountingExt2FS(self):
_LOOP_DEV0 = self._loopMap[self._LOOP_DEVICES[0]]
an_fs = fs.Ext2FS(device=_LOOP_DEV0, label="test")
self.assertIsNone(an_fs.create())
mountpoint = tempfile.mkdtemp("test.selinux")
an_fs.mount(mountpoint=mountpoint)
root_selinux_context = selinux.getfilecon(mountpoint)
lost_and_found = os.path.join(mountpoint, "lost+found")
self.assertTrue(os.path.exists(lost_and_found))
lost_and_found_selinux_context = selinux.getfilecon(lost_and_found)
an_fs.unmount()
os.rmdir(mountpoint)
self.assertEqual(root_selinux_context[1], 'system_u:object_r:file_t:s0')
self.assertEqual(lost_and_found_selinux_context[1],
'system_u:object_r:lost_found_t:s0')
@unittest.skipUnless(os.geteuid() == 0, "requires root privileges")
def testMountingXFS(self):
_LOOP_DEV0 = self._loopMap[self._LOOP_DEVICES[0]]
an_fs = fs.XFS(device=_LOOP_DEV0, label="test")
self.assertIsNone(an_fs.create())
mountpoint = tempfile.mkdtemp("test.selinux")
an_fs.mount(mountpoint=mountpoint)
root_selinux_context = selinux.getfilecon(mountpoint)
lost_and_found = os.path.join(mountpoint, "lost+found")
self.assertFalse(os.path.exists(lost_and_found))
an_fs.unmount()
os.rmdir(mountpoint)
self.assertEqual(root_selinux_context[1], 'system_u:object_r:file_t:s0')
def suite():
suite1 = unittest.TestLoader().loadTestsFromTestCase(SELinuxContextTestCase)
return unittest.TestSuite([suite1])
if __name__ == "__main__":
unittest.main()
| lgpl-2.1 | Python |
|
68d620d56625c4c1bd30a30f31840d9bd440b29e | Add find_objects test module | danforthcenter/plantcv,danforthcenter/plantcv,danforthcenter/plantcv | tests/plantcv/test_find_objects.py | tests/plantcv/test_find_objects.py | import cv2
import numpy as np
from plantcv.plantcv import find_objects
def test_find_objects(test_data):
# Read in test data
img = cv2.imread(test_data.small_rgb_img)
mask = cv2.imread(test_data.small_bin_img, -1)
cnt, _ = test_data.load_contours(test_data.small_contours_file)
contours, _ = find_objects(img=img, mask=mask)
# Assert contours match test data
assert np.all(cnt) == np.all(contours)
def test_find_objects_grayscale_input(test_data):
# Read in test data
img = cv2.imread(test_data.small_gray_img, -1)
mask = cv2.imread(test_data.small_bin_img, -1)
cnt, _ = test_data.load_contours(test_data.small_contours_file)
contours, _ = find_objects(img=img, mask=mask)
# Assert contours match test data
assert np.all(cnt) == np.all(contours)
| mit | Python |
|
36033be962fcc3e97d14dd06b42bcd3be52a97c5 | Add floting_point.py | daineseh/python_code | parser/sample/floting_point.py | parser/sample/floting_point.py | import logging
from lex_tokens import LexToken
from ply.yacc import yacc
class FloatingPointParser(object):
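    # PLY (lex/yacc) parser that recognizes optionally negative floating point literals and returns them as strings.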
class FloatingPointSyntaxError(Exception): pass
def __init__(self, debug=False):
if debug:
self._log = logging.getLogger('PhysicalDivideCharParser')
else:
self._log = yacc.NullLogger()
self._lex = LexToken(debug)
self.tokens = self._lex.tokens
self._parser = yacc.yacc(module=self, debug=debug, debuglog=self._log)
def p_floating_point(self, p):
'expression : floating'
p[0] = p[1]
def p_floating_1(self, p):
'floating : single_num DOT single_num'
p[0] = p[1] + p[2] + p[3]
def p_floating_2(self, p):
'floating : single_num dot_char single_num'
p[0] = p[1] + p[2] + p[3]
def p_floating_3(self, p):
'floating : single_num'
p[0] = p[1]
def p_divid_dot(self, p):
'dot_char : DOT'
p[0] = p[1]
def p_sign1(self, p):
'single_num : NUMBER'
p[0] = str(p[1])
def p_sign2(self, p):
'single_num : MINUS NUMBER'
p[0] = p[1] + str(p[2])
def p_error(self, p):
if p is None: # End-of-file
raise self.FloatingPointSyntaxError('Parsing error (%s)' % self.__expr_text)
err_msg = 'token type: {}, value: {}'.format(p.type, p.value)
raise self.FloatingPointSyntaxError(err_msg)
def parse(self, s):
self.__expr_text = s
try:
return self._parser.parse(s, lexer=self._lex.lexer())
except self.FloatingPointSyntaxError:
print "NOT Matched"
return None
if __name__ == '__main__':
header_parser = FloatingPointParser()
data = '5.6'
data = '- 5.6'
data = 'VERSION 5.6 ;'
data = '5'
data = '-5'
print header_parser.parse(data)
| mit | Python |
|
be17cf90b06a118d579c0211dd3bc2d45433fb2d | Write unit tests for _handle_long_response | venmo/slouch | tests/test_handle_long_response.py | tests/test_handle_long_response.py | import context
class TestHandleLongResponse(context.slouch.testing.CommandTestCase):
bot_class = context.TimerBot
config = {'start_fmt': '{:%Y}', 'stop_fmt': '{.days}'}
normal_text = "@genericmention: this is generic mention message contains a URL <http://foo.com/>\n@genericmention: this generic mention message contains a :fast_parrot: and :nyancat_big:\n"
over_limit_text = normal_text * 50 # 8550 chars
def test_handle_long_message_api(self):
_res = {
'type': 'message',
'text': self.normal_text,
'channel': None,
}
responses = self.bot._handle_long_response(_res)
self.assertEqual(len(responses), 1)
self.assertEqual(responses, [{
'type': 'message',
'text': self.normal_text,
'channel': None
}])
def test_handle_long_message_over_limit_api(self):
_res = {
'type': 'message',
'text': self.over_limit_text,
'channel': None,
}
responses = self.bot._handle_long_response(_res)
self.assertEqual([len(r['text']) for r in responses], [3932, 3933, 685])
self.assertEqual(len(responses), 3)
def test_handle_long_message_rtm(self):
responses = self.bot._handle_long_response(self.normal_text)
self.assertEqual(responses, [self.normal_text])
self.assertEqual(len(responses), 1)
def test_handle_long_message_over_limit_rtm(self):
responses = self.bot._handle_long_response(self.over_limit_text)
self.assertEqual([len(r) for r in responses], [3932, 3933, 685])
self.assertEqual(len(responses), 3)
| mit | Python |
|
feafe480d651ee6b58a1631f4eb4533f63ea6ad4 | Add user tests | rhgrant10/Groupy | tests/api/test_user.py | tests/api/test_user.py | from unittest import mock
from groupy.api import user
from .base import get_fake_response
from .base import TestCase
class UserTests(TestCase):
def setUp(self):
self.m_session = mock.Mock()
self.m_session.get.return_value = get_fake_response(data={'id': 'foo'})
self.user = user.User(self.m_session)
def test_id_is_foo(self):
self.assertEqual(self.user.me['id'], 'foo')
@mock.patch('groupy.api.user.blocks')
def test_blocks_uses_id(self, m_blocks):
self.user.blocks
(__, id_), __ = m_blocks.Blocks.call_args
self.assertEqual(id_, 'foo')
def test_update(self):
data = {'bar': 'foo'}
self.m_session.post.return_value = get_fake_response(data=data)
result = self.user.update(foo='bar')
self.assertEqual(result, data)
class SmsModeTests(TestCase):
def setUp(self):
self.m_session = mock.Mock()
self.sms_mode = user.SmsMode(self.m_session)
self.m_session.post.return_value = mock.Mock(ok=True)
class EnableSmsModeTests(SmsModeTests):
def setUp(self):
super().setUp()
self.result = self.sms_mode.enable(duration=42)
def test_result_is_True(self):
self.assertTrue(self.result)
def test_payload_is_correct(self):
self.assert_kwargs(self.m_session.post, json={'duration': 42})
class EnableSmsModeWithRegistrationTests(SmsModeTests):
def setUp(self):
super().setUp()
self.result = self.sms_mode.enable(duration=42, registration_id=420)
def test_result_is_True(self):
self.assertTrue(self.result)
def test_payload_is_correct(self):
payload = {'duration': 42, 'registration_id': 420}
self.assert_kwargs(self.m_session.post, json=payload)
class DisableSmsModeTests(SmsModeTests):
def setUp(self):
super().setUp()
self.result = self.sms_mode.disable()
def test_result_is_True(self):
self.assertTrue(self.result)
| apache-2.0 | Python |
|
063899021158fe872745b335595b3094db9834d8 | Add a test for 'version. | samth/pycket,krono/pycket,vishesh/pycket,magnusmorton/pycket,vishesh/pycket,vishesh/pycket,magnusmorton/pycket,cderici/pycket,pycket/pycket,pycket/pycket,cderici/pycket,krono/pycket,magnusmorton/pycket,krono/pycket,pycket/pycket,cderici/pycket,samth/pycket,samth/pycket | pycket/test/test_version.py | pycket/test/test_version.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Test the version here.
#
import pytest
from pycket.test.testhelper import check_equal
EXPECTED_VERSION='6.1.1.8'
def test_version():
check_equal('(version)', '"%s"' % EXPECTED_VERSION)
# EOF
| mit | Python |
|
e940963a6372a4de1a4a28eff1854716f47471e5 | Add deploy script | dbcollection/dbcollection,farrajota/dbcollection | conda-recipe/deploy.py | conda-recipe/deploy.py | #!/usr/bin/env python
"""
Deploy dbcollection to pypi and conda.
"""
import os
import shutil
import subprocess
# PyPi
print('PyPi: Upload sdist...')
msg1 = subprocess.run(["python", 'setup.py', 'sdist', 'upload'], stdout=subprocess.PIPE)
print('PyPi: Upload bdist_wheel...')
msg2 = subprocess.run(["python", 'setup.py', 'bdist_wheel', 'upload'], stdout=subprocess.PIPE)
# Conda
python_versions = ["2.7", "3.5", "3.6"]
for i, pyver in enumerate(python_versions):
print('\nAnaconda: Start build {}/{}'.format(i+1, len(python_versions)))
print(' > Python version: {}'.format(pyver))
temp_output_dir = 'output_build'
print(' > Saving artifacts to dir: {}'.format(temp_output_dir))
if os.path.exists(temp_output_dir):
shutil.rmtree(temp_output_dir, ignore_errors=True)
# build conda
print(' > Build conda recipe...')
cmd = ["conda", 'build', '--python={}'.format(pyver), '--no-anaconda-upload', 'conda-recipe']
msg = subprocess.run(cmd, stdout=subprocess.PIPE)
# parse string message
print(' > Parse conda artifact file name + path...')
msg_s = str(msg)
str_ini = "If you want to upload package(s) to anaconda.org later, type:\\n\\nanaconda upload "
str_end = "\\n\\n# To have conda build upload to anaconda.org automatically"
ini_id = msg_s.find(str_ini) + len(str_ini)
end_id = msg_s.find(str_end)
artifact_fname = msg_s[ini_id:end_id]
print(' > Artifact name: {}'.format(artifact_fname))
# convert to all platforms
print(' > Convert artifact to all platforms...')
msg = subprocess.run(["conda", 'convert', "-p", "all", artifact_fname, "-o", temp_output_dir], stdout=subprocess.PIPE)
# upload to anaconda
print(' > Upload all artifact to all platforms...')
print(' -- Uploading artifact: {}'.format(artifact_fname))
msg_upload = subprocess.run(["anaconda", "upload", artifact_fname], stdout=subprocess.PIPE)
for root, dirs, files in os.walk(temp_output_dir):
if any(files):
for fname in files:
if fname.endswith('.tar.bz2'):
print(' -- Uploading artifact: {} '.format(root + '/' + fname))
msg = subprocess.run(["anaconda", 'upload', root + '/' + fname], stdout=subprocess.PIPE)
print('\nRemoving temp dir: {}'.format(temp_output_dir))
if os.path.exists(temp_output_dir):
shutil.rmtree(temp_output_dir, ignore_errors=True) | mit | Python |
|
b52ba28a8315a0cdeda7593d087607f582f77f18 | Create __init__.py | lyelindustries/IPM | model/__init__.py | model/__init__.py | __version__='0.0.0'
| mit | Python |
|
721720b1f4d63f1368714f764794c8d406e4982d | Add to_data test | iwi/linkatos,iwi/linkatos | tests/test_firebase.py | tests/test_firebase.py | import pytest
import linkatos.firebase as fb
def test_to_data():
url = 'https://foo.com'
data = {'url': 'https://foo.com'}
assert fb.to_data(url) == data
| mit | Python |
|
fe37335645993ad10c9902aaaaf0ca2c53912d49 | Create Average Movies rating etl | searchs/bigdatabox,searchs/bigdatabox | movies_avg_etl.py | movies_avg_etl.py | import pyspark
spark = (
pyspark.sql.SparkSession.builder.appName("FromDatabase")
.config("spark.driver.extraClassPath", "<driver_location>/postgresql-42.2.18.jar")
.getOrCreate()
)
# Read table from db using Spark JDBC
def extract_movies_to_df():
movies_df = (
spark.read.format("jdbc")
.option("url", "jdbc:postgresql://localhost:5432/etl_pipeline")
.option("dbtable", "movies")
.option("user", "<username")
.option("password", "<password>")
.option("driver", "org.postgresql.Driver")
.load()
)
return movies_df
# Read users table from db using Spark JDBC
def extract_users_to_df():
users_df = (
spark.read.format("jdbc")
.option("url", "jdbc:postgresql://localhost:5432/etl_pipeline")
.option("dbtable", "users")
.option("user", "<username")
.option("password", "<password>")
.option("driver", "org.postgresql.Driver")
.load()
)
return users_df
# transforming tables
def transform_avg_ratings(movies_df, users_df):
avg_rating = users_df.groupby("movie_id").mean("rating")
# join movies_df and avg_rating table on id
    df = movies_df.join(avg_rating, movies_df.id == avg_rating.movie_id)
df = df.drop("movie_id")
return df
# Write the result into avg_ratings table in db
def load_df_to_db(df):
mode = "overwrite"
url = "jdbc:postgresql://localhost:5432/etl_pipeline"
properties = {
"user": "<username>",
"password": "<password>",
"driver": "org.postgresql.Driver",
}
df.write.jdbc(url=url, table="avg_ratings", mode=mode, properties=properties)
if __name__ == "__main__":
movies_df = extract_movies_to_df()
users_df = extract_users_to_df()
ratings_df = transform_avg_ratings(movies_df, users_df)
load_df_to_db(ratings_df)
| mit | Python |
|
c95bfb10f87bd0a637d0ad790d484b7957441371 | Add WSGI support. | pydotorg/pypi,pydotorg/pypi,pydotorg/pypi,pydotorg/pypi | pypi.wsgi | pypi.wsgi | #!/usr/bin/python
import sys,os
prefix = os.path.dirname(__file__)
sys.path.insert(0, prefix)
import cStringIO, webui, store, config
store.keep_conn = True
class Request:
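    # Minimal adapter exposing the handler-style interface webui expects (rfile/wfile,
    # send_response, send_header, end_headers) on top of a WSGI environ/start_response pair.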
def __init__(self, environ, start_response):
self.start_response = start_response
self.rfile = cStringIO.StringIO(environ['wsgi.input'].read())
self.wfile = cStringIO.StringIO()
self.config = config.Config(prefix+'/config.ini', 'webui')
def send_response(self, code, message=''):
self.status = '%s %s' % (code, message)
self.headers = []
def send_header(self, keyword, value):
self.headers.append((keyword, value))
def set_content_type(self, content_type):
self.send_header('Content-Type', content_type)
def end_headers(self):
self.start_response(self.status, self.headers)
def debug(environ, start_response):
if environ['PATH_INFO'].startswith("/auth") and \
"HTTP_AUTHORIZATION" not in environ:
start_response("401 login",
[('WWW-Authenticate', 'Basic realm="foo"')])
return
start_response("200 ok", [('Content-type', 'text/plain')])
environ = environ.items()
environ.sort()
for k,v in environ:
yield "%s=%s\n" % (k, v)
return
def application(environ, start_response):
if "HTTP_AUTHORIZATION" in environ:
environ["HTTP_CGI_AUTHORIZATION"] = environ["HTTP_AUTHORIZATION"]
r = Request(environ, start_response)
webui.WebUI(r, environ).run()
return [r.wfile.getvalue()]
| bsd-3-clause | Python |
|
39313cd933e0038b9a9bfa8b6b4cb50e3707d455 | add k_min.py | pepincho/HackBulgaria,pepincho/HackBulgaria,pepincho/Python101-and-Algo1-Courses,pepincho/Python101-and-Algo1-Courses | Algo-1/week2/7-K-Min/k_min.py | Algo-1/week2/7-K-Min/k_min.py | class KMin:
# Quick sort
@staticmethod
def swap(numbers, i, j):
temp = numbers[i]
numbers[i] = numbers[j]
numbers[j] = temp
# The last element is a pivot, all smaller elements are to left of it
# and greater elements to right
@staticmethod
def partition(numbers, l, r):
x = numbers[r]
i = l
for j in range(l, r):
if numbers[j] <= x:
KMin.swap(numbers, i, j)
i += 1
KMin.swap(numbers, i, r)
return i
@staticmethod
def kthSmallest(numbers, l, r, k):
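        # Quickselect: partition around the last element and recurse only into the side
        # that still contains the k-th smallest value.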
if k > 0 and k <= r - l + 1:
pos = KMin.partition(numbers, l, r)
if pos - l == k - 1:
return numbers[pos]
if pos - l > k - 1:
return KMin.kthSmallest(numbers, l, pos - 1, k)
return KMin.kthSmallest(numbers, pos + 1, r, k - pos + l - 1)
# Finds the k-th minimum element in an unsorted collection.
# numbers - [int]
# k - int
@staticmethod
def kthMinimum(numbers, k):
return KMin.kthSmallest(numbers, 0, len(numbers) - 1, k)
def main():
numbers = [33, 8, 5, 2, 3, 6, 1, 4, 9, 99]
for i in range(1, len(numbers) + 1):
print(KMin.kthMinimum(numbers, i))
if __name__ == '__main__':
main()
| mit | Python |
|
f44fd9df7ac7fa5e553e99d98c1376439a33ffc8 | Change device pull to handle root,and renamed local file as well history.db from results.db | bathepawan/workload-automation,ep1cman/workload-automation,Sticklyman1936/workload-automation,ARM-software/workload-automation,Sticklyman1936/workload-automation,bathepawan/workload-automation,bathepawan/workload-automation,bathepawan/workload-automation,bjackman/workload-automation,Sticklyman1936/workload-automation,setrofim/workload-automation,lisatn/workload-automation,muendelezaji/workload-automation,chase-qi/workload-automation,lisatn/workload-automation,Sticklyman1936/workload-automation,bjackman/workload-automation,setrofim/workload-automation,ARM-software/workload-automation,ep1cman/workload-automation,setrofim/workload-automation,bjackman/workload-automation,chase-qi/workload-automation,bathepawan/workload-automation,chase-qi/workload-automation,jimboatarm/workload-automation,jimboatarm/workload-automation,jimboatarm/workload-automation,muendelezaji/workload-automation,bjackman/workload-automation,setrofim/workload-automation,muendelezaji/workload-automation,jimboatarm/workload-automation,muendelezaji/workload-automation,lisatn/workload-automation,bathepawan/workload-automation,lisatn/workload-automation,jimboatarm/workload-automation,chase-qi/workload-automation,bjackman/workload-automation,chase-qi/workload-automation,ep1cman/workload-automation,Sticklyman1936/workload-automation,muendelezaji/workload-automation,bjackman/workload-automation,ARM-software/workload-automation,ep1cman/workload-automation,Sticklyman1936/workload-automation,ep1cman/workload-automation,ep1cman/workload-automation,muendelezaji/workload-automation,ARM-software/workload-automation,jimboatarm/workload-automation,chase-qi/workload-automation | wlauto/workloads/androbench/__init__.py | wlauto/workloads/androbench/__init__.py | # Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sqlite3
from wlauto import AndroidUiAutoBenchmark
class Androbench(AndroidUiAutoBenchmark):
name = 'androbench'
description = """Androbench measures the storage performance of device"""
package = 'com.andromeda.androbench2'
activity = '.main'
run_timeout = 10 * 60
def update_result(self, context):
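        # Pull Androbench's history database from the device (as root) and read the latest benchmark row.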
super(Androbench, self).update_result(context)
dbn = 'databases/history.db'
db = self.device.path.join(self.device.package_data_directory, self.package, dbn)
host_results = os.path.join(context.output_directory, 'history.db')
self.device.pull_file(db, host_results, as_root=True)
qs = 'select * from history'
conn = sqlite3.connect(host_results)
c = conn.cursor()
c.execute(qs)
results = c.fetchone()
context.result.add_metric('Sequential Read ', results[8], 'MB/s')
context.result.add_metric('Sequential Write ', results[9], 'MB/s')
context.result.add_metric('Random Read ', results[10], 'MB/s')
context.result.add_metric('Random Write ', results[12], 'MB/s')
| # Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sqlite3
from wlauto import AndroidUiAutoBenchmark
class Androbench(AndroidUiAutoBenchmark):
name = 'androbench'
description = """Androbench measures the storage performance of device"""
package = 'com.andromeda.androbench2'
activity = '.main'
run_timeout = 10 * 60
def update_result(self, context):
super(Androbench, self).update_result(context)
dbn = 'databases/history.db'
db = self.device.path.join(self.device.package_data_directory, self.package, dbn)
host_results = os.path.join(context.output_directory, 'results.db')
self.device.pull_file(db, host_results)
qs = 'select * from history'
conn = sqlite3.connect(host_results)
c = conn.cursor()
c.execute(qs)
results = c.fetchone()
context.result.add_metric('Sequential Read ', results[8], 'MB/s')
context.result.add_metric('Sequential Write ', results[9], 'MB/s')
context.result.add_metric('Random Read ', results[10], 'MB/s')
context.result.add_metric('Random Write ', results[12], 'MB/s')
| apache-2.0 | Python |
f700ca39535c5eb14015dd84f4bc0dad2b086d23 | Add ex_fzf.py | frostidaho/dynmen | examples/ex_fzf.py | examples/ex_fzf.py | #!/usr/bin/env python
import string
import textwrap
import pprint
from dynmen import Menu
fzf = Menu(command=('fzf',))
exampl_inp_dict = vars(string)
exampl_inp_dict = {k:v for k,v in exampl_inp_dict.items() if not k.startswith('_')}
def print_obj(obj, prefix=' '):
txt = pprint.pformat(obj)
lines = []
for line in txt.splitlines():
line = textwrap.indent(line, prefix)
lines.append(line)
print('\n'.join(lines))
def run_n_print(entries, fn_str):
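    # Resolve a dotted name such as 'fzf.sort' against module globals, print the input,
    # call the resolved object with it, and print what it returned.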
fn = globals()[fn_str.split('.')[0]]
for attr in fn_str.split('.')[1:]:
fn = getattr(fn, attr)
print("\nLAUNCHING '{}' with -".format(fn_str))
print_obj(entries)
output = fn(entries)
print('OUTPUT IS -')
print_obj(output)
return output
run_n_print(exampl_inp_dict, 'fzf')
run_n_print(exampl_inp_dict, 'fzf.sort')
run_n_print(list(exampl_inp_dict), 'fzf')
| mit | Python |
|
45edceb65a9cac9f61215ad77e9c048d092c0b57 | add examples/roster.py | max-posedon/telepathy-python,freedesktop-unofficial-mirror/telepathy__telepathy-python,detrout/telepathy-python,freedesktop-unofficial-mirror/telepathy__telepathy-python,PabloCastellano/telepathy-python,PabloCastellano/telepathy-python,epage/telepathy-python,max-posedon/telepathy-python,detrout/telepathy-python,epage/telepathy-python | examples/roster.py | examples/roster.py |
import dbus
import dbus.glib
import gobject
import sys
from account import read_account, connect
from telepathy.client.channel import Channel
from telepathy.constants import (
CONNECTION_HANDLE_TYPE_CONTACT, CONNECTION_HANDLE_TYPE_LIST,
CONNECTION_STATUS_CONNECTED, CONNECTION_STATUS_DISCONNECTED)
from telepathy.errors import NotAvailable
from telepathy.interfaces import (
CHANNEL_INTERFACE_GROUP, CHANNEL_TYPE_CONTACT_LIST, CONN_INTERFACE)
def print_members(conn, chan):
current, local_pending, remote_pending = (
chan[CHANNEL_INTERFACE_GROUP].GetAllMembers())
for member in current:
print ' - %s' % (
conn[CONN_INTERFACE].InspectHandles(
CONNECTION_HANDLE_TYPE_CONTACT, [member])[0])
if not current:
print ' (none)'
class RosterClient:
def __init__(self, conn):
self.conn = conn
conn[CONN_INTERFACE].connect_to_signal(
'StatusChanged', self.status_changed_cb)
def _request_list_channel(self, name):
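        # Ask the connection for the handle of the named contact list and open the matching ContactList channel.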
handle = self.conn[CONN_INTERFACE].RequestHandles(
CONNECTION_HANDLE_TYPE_LIST, [name])[0]
chan_path = self.conn[CONN_INTERFACE].RequestChannel(
CHANNEL_TYPE_CONTACT_LIST, CONNECTION_HANDLE_TYPE_LIST,
handle, True)
return Channel(self.conn._dbus_object._named_service, chan_path)
def status_changed_cb(self, state, reason):
if state == CONNECTION_STATUS_DISCONNECTED:
print 'disconnected: %s' % reason
self.quit()
return
if state != CONNECTION_STATUS_CONNECTED:
return
print 'connected'
for name in ('subscribe', 'publish', 'hide', 'allow', 'deny', 'known'):
try:
chan = self._request_list_channel(name)
except dbus.DBusException:
print "'%s' channel is not available" % name
continue
# hack
chan._valid_interfaces.add(CHANNEL_INTERFACE_GROUP)
print '%s: members' % name
print_members(self.conn, chan)
chan[CHANNEL_INTERFACE_GROUP].connect_to_signal('MembersChanged',
lambda *args: self.members_changed_cb(name, *args))
print 'waiting for changes'
def members_changed_cb(self, name, message, added, removed, local_pending,
remote_pending, actor, reason):
if added:
for handle in added:
                print '%s: added: %d' % (name, handle)
if removed:
for handle in removed:
                print '%s: removed: %d' % (name, handle)
def run(self):
self.loop = gobject.MainLoop()
try:
self.loop.run()
except KeyboardInterrupt:
print 'interrupted'
def quit(self):
self.loop.quit()
if __name__ == '__main__':
assert len(sys.argv) == 2
account_file = sys.argv[1]
manager, protocol, account = read_account(account_file)
conn = connect(manager, protocol, account)
client = RosterClient(conn)
print "connecting"
conn[CONN_INTERFACE].Connect()
client.run()
print "disconnecting"
try:
conn[CONN_INTERFACE].Disconnect()
except dbus.dbus_bindings.DBusException:
pass
| lgpl-2.1 | Python |
|
7d198f3eaca6a91b731b3e25c0285cd46e72935a | Remove duplicates in authorized origins table | SoftwareHeritage/swh-web-ui,SoftwareHeritage/swh-web-ui,SoftwareHeritage/swh-web-ui | swh/web/common/migrations/0005_remove_duplicated_authorized_origins.py | swh/web/common/migrations/0005_remove_duplicated_authorized_origins.py | # Copyright (C) 2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from __future__ import unicode_literals
from django.db import migrations
from swh.web.common.models import SaveAuthorizedOrigin
def _remove_duplicated_urls_in_authorized_list(apps, schema_editor):
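    # For every distinct URL keep the first row and delete the remaining duplicates.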
sao = SaveAuthorizedOrigin.objects
for url in sao.values_list('url', flat=True).distinct():
sao.filter(pk__in=sao.filter(
url=url).values_list('id', flat=True)[1:]).delete()
class Migration(migrations.Migration):
dependencies = [
('swh.web.common', '0004_auto_20190204_1324'),
]
operations = [
migrations.RunPython(_remove_duplicated_urls_in_authorized_list)
]
| agpl-3.0 | Python |
|
91541cf82f435cb261d9debc85a2a8ae6dd74ab1 | Add a function to initialize the logging. | xgfone/xutils,xgfone/pycom | xutils/init_logging.py | xutils/init_logging.py | # encoding: utf-8
from __future__ import print_function, absolute_import, unicode_literals, division
import logging
import logging.config
def init_logging(logger=None, level="DEBUG", log_file="", file_config=None, dict_config=None):
# Initialize the argument logger with the arguments, level and log_file.
if logger:
fmt = "%(asctime)s - %(pathname)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s"
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
level = getattr(logging, level.upper())
logger.setLevel(level)
if log_file:
from logging.handlers import TimedRotatingFileHandler
handler = TimedRotatingFileHandler(log_file, when="midnight", interval=1, backupCount=30)
else:
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(formatter)
logger.addHandler(handler)
# Initialize logging by the configuration file, file_config.
if file_config:
logging.config.fileConfig(file_config, disable_existing_loggers=False)
# Initialize logging by the dict configuration, dict_config.
if dict_config and hasattr(logging.config, "dictConfig"):
logging.config.dictConfig(dict_config)
| mit | Python |
|
507e3bad4e877330eea29675dafb8210ab6bada5 | Add tests for file agent | cwahbong/onirim-py | tests/test_agent.py | tests/test_agent.py | """
Tests for an agent.
"""
import io
import os
import pytest
from onirim import action
from onirim import agent
from onirim import component
def file_agent(in_str):
return agent.File(io.StringIO(in_str), open(os.devnull, "w"))
def content():
return component.Content([])
@pytest.mark.parametrize(
"in_str, expected",
[
("play\n0\n", (action.Phase1.play, 0)),
("discard\n4\n", (action.Phase1.discard, 4)),
]
)
def test_file_phase_1_action(in_str, expected):
"""
Test input parsing of phase_1_action.
"""
assert file_agent(in_str).phase_1_action(content()) == expected
@pytest.mark.parametrize(
"in_str, expected",
[
("key\n2\n", (action.Nightmare.by_key, {"idx": 2})),
("door\n3\n", (action.Nightmare.by_door, {"idx": 3})),
("hand\n", (action.Nightmare.by_hand, {})),
("deck\n", (action.Nightmare.by_deck, {})),
]
)
def test_file_nightmare_action(in_str, expected):
"""
Test input parsing of nightmare action.
"""
assert file_agent(in_str).nightmare_action(content()) == expected
@pytest.mark.parametrize(
"in_str, expected",
[
("yes\n", True),
("no\n", False),
]
)
def test_file_open_door(in_str, expected):
"""
Test input parsing of open door.
"""
assert file_agent(in_str).open_door(content(), None) == expected
#def test_file_key_discard_react(in_str, expected):
#TODO
| mit | Python |
|
c67e1af4f765f143cb1b8420e053c1a9f00edd05 | Add migrations for new statuses. | edx/course-discovery,edx/course-discovery,edx/course-discovery,edx/course-discovery | course_discovery/apps/course_metadata/migrations/0168_auto_20190404_1733.py | course_discovery/apps/course_metadata/migrations/0168_auto_20190404_1733.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-04-04 17:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.manager
import djchoices.choices
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0167_auto_20190403_1606'),
]
operations = [
migrations.AlterModelManagers(
name='course',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='courseentitlement',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='courserun',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='seat',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterField(
model_name='courserun',
name='status',
field=models.CharField(choices=[('published', 'Published'), ('unpublished', 'Unpublished'), ('reviewed', 'Reviewed'), ('review_by_legal', 'Awaiting Review from Legal'), ('review_by_internal', 'Awaiting Internal Review')], db_index=True, default='unpublished', max_length=255, validators=[djchoices.choices.ChoicesValidator({'published': 'Published', 'review_by_internal': 'Awaiting Internal Review', 'review_by_legal': 'Awaiting Review from Legal', 'reviewed': 'Reviewed', 'unpublished': 'Unpublished'})]),
),
]
| agpl-3.0 | Python |
|
d308874989667f36da1638f22d6b2d7e823b5ebd | Add script to extract reads or alignments matching a barcode. | roryk/junkdrawer,roryk/junkdrawer | extract-barcode.py | extract-barcode.py | """
code to extract a single cell from a set of alignments or reads marked via Valentine's umis
repository:
https://github.com/vals/umis
"""
import regex as re
import sys
from argparse import ArgumentParser
from pysam import AlignmentFile
def extract_barcode(sam, barcode):
parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)')
sam_file = AlignmentFile(sam, mode='r')
filter_file = AlignmentFile("-", mode='wh', template=sam_file)
track = sam_file.fetch(until_eof=True)
for i, aln in enumerate(track):
if aln.is_unmapped:
continue
match = parser_re.match(aln.qname)
CB = match.group('CB')
if CB == barcode:
filter_file.write(aln)
def stream_fastq(file_handler):
''' Generator which gives all four lines of a fastq read as one string
'''
next_element = ''
for i, line in enumerate(file_handler):
next_element += line
if i % 4 == 3:
yield next_element
next_element = ''
def extract_barcode_fastq(fastq, barcode):
parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)')
fastq_file = stream_fastq(open(fastq))
for read in fastq_file:
match = parser_re.match(read)
CB = match.group('CB')
if CB == barcode:
sys.stdout.write(read)
if __name__ == "__main__":
parser = ArgumentParser("extract reads/alignments from a single cell")
parser.add_argument("file", help="A SAM or FASTQ file")
parser.add_argument("barcode", help="barcode of the cell to extract")
args = parser.parse_args()
extract_fn = extract_barcode if args.file.endswith(".sam") else extract_barcode_fastq
extract_fn(args.file, args.barcode)
| mit | Python |
|
048d0d7ce30b66af8bf48bcb0cb7f8bfb90fff0c | Add tests for Part, Pin, Bus and Net iterators. | xesscorp/skidl,xesscorp/skidl | tests/test_iters.py | tests/test_iters.py | import pytest
from skidl import *
from .setup_teardown import *
def test_iters_1():
"""Test bus iterator."""
b_size = 4
b = Bus('chplx', b_size)
for hi in b:
for lo in b:
if hi != lo:
led = Part('device','LED')
hi += led['A']
lo += led['K']
for l in b:
assert(len(l) == 2 * (b_size-1))
def test_iters_2():
"""Test pin iterator."""
q = Part('device','Q_NPN_CEB')
s = 0
for p1 in q:
for p2 in q:
if p1 != p2:
s += 1
assert(s == len(q) * (len(q)-1))
def test_iters_3():
"""Test net iterator."""
b = Net()
for hi in b:
for lo in b:
if hi != lo:
led = Part('device','LED')
hi += led['A']
lo += led['K']
for l in b:
assert(len(l) == 0)
| mit | Python |
|
60fbfa0b440a762fd25f19148313f5ba27d619aa | add a testing file | StephAlbArt/DS_Algos | DataStructures/Trees/main.py | DataStructures/Trees/main.py | import BST
# Environment for testing BST
def main():
print 'Testing'
main()
| mit | Python |
|
00aad4a302518400dbb936c7e2ce1d7560c5762f | Add files via upload | SeanBeseler/data-structures | src/que_.py | src/que_.py | class our_queue(object):
def __init__(self):
"""initializes queue"""
self.head = self
self.tail = self
self.next_node = None
self.data = None
self.size = 0
def enqueue(self, val):
"""creates new node, pushes it to bottom of the queue and makes it the tail"""
self.size += 1
new_qu = our_queue()
if self.head.data is None:
self.head = new_qu
self.head.next_node = None
else:
self.tail.next_node = new_qu
new_qu.data = val
self.tail = new_qu
return self.head
def dequeue(self):
"""
Removes the head of the queue and returns the value.
New head is established.
"""
current = self.head
temp_data = None
try:
temp_data = current.data
if temp_data is None:
raise IndexError('que is empty')
self.head = current.next_node
self.size -= 1
return temp_data
except AttributeError:
raise IndexError('que is empty')
def peek(self):
"""
peeks at the data of the head
"""
current = self.head
temp_data = None
try:
temp_data = current.data
if temp_data is None:
raise IndexError('que is empty')
return temp_data
except AttributeError:
raise IndexError('que is empty')
def __len__(self):
"""returns the length of the double linked list"""
length = self.size
return length
temp = our_queue()
temp.enqueue(4)
temp.enqueue(3)
print(len(temp))
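# Further illustrative usage: peek() returns the current head value (4)
# without removing it, and dequeue() then removes and returns it.
# print(temp.peek())
# print(temp.dequeue())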
| mit | Python |
|
ccc663b3a96268dcdf2256d461a11d845a1044a1 | Add the original test case of bug #1469629, formatted according to local conventions. | sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator | Lib/test/leakers/test_dictself.py | Lib/test/leakers/test_dictself.py | '''Test case for "self.__dict__ = self" circular reference bug (#1469629)'''
import gc
class LeakyDict(dict):
pass
def leak():
ld = LeakyDict()
ld.__dict__ = ld
del ld
gc.collect(); gc.collect(); gc.collect()
| mit | Python |
|
994a956486ff94ea777aa300270ae065d2ea62c6 | Add a script to send the contents of newly created files to STDOUT or a TCP/IP socket | telefonicaid/fiware-cosmos-platform,telefonicaid/fiware-cosmos-platform,telefonicaid/fiware-cosmos-platform,telefonicaid/fiware-cosmos-platform,telefonicaid/fiware-cosmos-platform | samson/scripts/file_monitor.py | samson/scripts/file_monitor.py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
# Telefónica Digital - Product Development and Innovation
#
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
#
# Copyright (c) 2012 Telefónica Investigación y Desarrollo S.A.U.
# All rights reserved.
"""
A script to detect newly created files in a given directory and send the
contents to STDOUT or a TCP/IP socket.
"""
import os
import pyinotify
import socket
import sys
from optparse import OptionParser
class DataFileReader():
"""Read a given file"""
def __init__(self,filename):
self.filename = ""
def read_file(self):
sys.stderr.write(".")
try:
data_file = open (self.filename, "r")
data_file_contents = data_file.read()
except IOError:
sys.stderr.write("Error: Unable to find file (%s) or read its data" % data_file_str)
finally:
data_file.close()
if data_file_contents:
return data_file_contents
else:
return None
class SocketModHandler(pyinotify.ProcessEvent, DataFileReader):
"""Handle inotify events to be sent to a TCP/IP socket"""
sock = None
def __init__(self, host, port):
self.host = host
self.port = port
# socket does DNS checks and will fail if the specified socket is not open
# TODO: Add some logic handling failures
if self.sock is None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, int(port)))
def __del__(self):
"""Clean up"""
self.sock.shutdown(socket.SHUT_WR)
self.sock.close()
def process_IN_CLOSE_WRITE(self, evt):
"""Act on files that were opened for writing and have been closed"""
self.filename = os.path.join(evt.path, evt.name)
sys.stderr.write(".")
data_file_contents = self.read_file()
total_sent=0
MSGLEN=len(data_file_contents)
while total_sent < MSGLEN:
sent = self.sock.send(data_file_contents[total_sent:])
if sent == 0:
raise RuntimeError("socket died")
total_sent = total_sent + sent
class StdOutModHandler(pyinotify.ProcessEvent, DataFileReader):
"""Handle inotify events to be sent to STDOUT"""
def process_IN_CLOSE_WRITE(self, evt):
"""Act on files that were opened for writing and have been closed"""
self.filename = os.path.join(evt.path, evt.name)
data = DataFileReader.read_file(self)
# Write to STDOUT
sys.stdout.write(data)
def main():
# Executing with -h shows all the available args
# optparse is deprecated in Python 2.7 however it's likely this script will
# be deployed with earlier releases.
parser = OptionParser()
parser.add_option ("-d", "--dir", dest="data_dir",
help="Look for files in DIR")
parser.add_option ("-t", "--tcp", dest="socket",
help="Send the data to host:port")
parser.add_option ("-s", action="store_true", default=False,
dest="stdout", help="Send the data to stdout")
(options, args) = parser.parse_args()
if options.stdout and options.socket:
# We're not that chatty and only write to one of STDOUT or a TCP socket
parser.error("Only one of -t/--tcp or -s can be specified")
if not options.data_dir:
options.data_dir = os.getcwd()
if os.path.exists(options.data_dir):
sys.stderr.write("Monitoring %s\n" % options.data_dir)
if options.socket:
(host, port) = options.socket.split(":")
sys.stderr.write("Sending data to %s on %s\n" % (host, port))
handler = SocketModHandler(host, port)
else:
# Send the output to STDOUT
sys.stderr.write("Writing to STDOUT\n")
handler = StdOutModHandler()
wm = pyinotify.WatchManager()
notifier = pyinotify.Notifier(wm, handler)
wdd = wm.add_watch(options.data_dir, pyinotify.IN_CLOSE_WRITE)
notifier.loop()
else:
sys.stderr.write("No such directory, %s\n" % options.data_dir)
sys.stderr.write("Exiting\n")
if __name__ == "__main__":
main()
| apache-2.0 | Python |
|
ceaabf80649a8a83c6ddfc548a3fa369c973e5c6 | Complete alg fizzbuzz | bowen0701/algorithms_data_structures | alg_fizzbuzz.py | alg_fizzbuzz.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def fizzbuzz(n):
ls = []
for i in range(1, n + 1):
if i % 15 == 0:
ls.append('fizzbuzz')
elif i % 3 == 0:
ls.append('fizz')
elif i % 5 == 0:
ls.append('buzz')
else:
ls.append(i)
return ls
def main():
n = 100
fizzbuzz_ls = fizzbuzz(n)
print(fizzbuzz_ls)
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
|
e670901ebaf7422f7a71f78a3dc94730eba5605b | Add a module full of hinting helpers. | jeremycline/fmn,jeremycline/fmn,jeremycline/fmn | fmn/lib/hinting.py | fmn/lib/hinting.py | """ Helpers for "datanommer hints" for rules.
Rules can optionally define a "hint" for a datanommer query. For
instance, if a rule has to do with filtering for bodhi messages, then a
provided hint could be {'category': 'bodhi'}. This simply speeds up the
process of looking for potential message matches in the history by
letting the database server do some of the work for us. Without this, we
have to comb through literally every message ever and then try to see
what matches and what doesn't in python-land: Slow!
Rules define their hints with the @hint decorator defined here.
When querying datanommer, the ``gather_hinting`` helper here can be used to
construct the hint dict for ``datanommer.grep(..., **hints)``.
"""
import collections
import functools
import fedmsg.config
def hint(invertible=True, **hints):
""" A decorator that can optionally hang datanommer hints on a rule. """
def wrapper(fn):
@functools.wraps(fn)
def replacement(*args, **kwargs):
return fn(*args, **kwargs)
# Hang hints on the function.
replacement.hints = hints
replacement.hinting_invertible = invertible
return replacement
return wrapper
def prefixed(topic, prefix='org.fedoraproject'):
config = fedmsg.config.load_config() # This is memoized for us.
return '.'.join([prefix, config['environment'], topic])
def gather_hinting(filter, valid_paths):
""" Construct hint arguments for datanommer from a filter. """
hinting = collections.defaultdict(list)
for rule in filter.rules:
root, name = rule.code_path.split(':', 1)
info = valid_paths[root][name]
for key, value in info['datanommer-hints'].items():
# If the rule is inverted, but the hint is not invertible, then
# there is no hinting we can provide. Carry on.
if rule.negated and not info['hints-invertible']:
continue
# Otherwise, construct the inverse hint if necessary
if rule.negated:
key = 'not_' + key
# And tack it on.
hinting[key] += value
return hinting
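# Illustrative sketch of a hinted rule (the keyword, value and rule body are
# assumptions, not part of this module):
#   @hint(categories=['bodhi'])
#   def bodhi_update_comment(config, message):
#       return message['topic'].endswith('bodhi.update.comment')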
| lgpl-2.1 | Python |
|
1d31feb4fadadc377fbb3cf0f18c38f5a8d39aca | disable tray icon when fail | xyuanmu/XX-Net,jt6562/XX-Net,wangyou/XX-Net,zlsun/XX-Net,zlsun/XX-Net,wangyou/XX-Net,qqzwc/XX-Net,xyuanmu/XX-Net,xyuanmu/XX-Net,wangyou/XX-Net,xyuanmu/XX-Net,zlsun/XX-Net,jt6562/XX-Net,Suwmlee/XX-Net,mikedchavez1010/XX-Net,jt6562/XX-Net,wangyou/XX-Net,xyuanmu/XX-Net,mikedchavez1010/XX-Net,mikedchavez1010/XX-Net,Suwmlee/XX-Net,Suwmlee/XX-Net,wangyou/XX-Net,Suwmlee/XX-Net,zlsun/XX-Net,jt6562/XX-Net,wangyou/XX-Net,qqzwc/XX-Net,qqzwc/XX-Net,mikedchavez1010/XX-Net,qqzwc/XX-Net,qqzwc/XX-Net,Suwmlee/XX-Net,Suwmlee/XX-Net,wangyou/XX-Net,jt6562/XX-Net,zlsun/XX-Net,mikedchavez1010/XX-Net | launcher/1.2.0/start.py | launcher/1.2.0/start.py | #!/usr/bin/env python
# coding:utf-8
import os, sys
current_path = os.path.dirname(os.path.abspath(__file__))
python_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir, 'python27', '1.0'))
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
if sys.platform == "linux" or sys.platform == "linux2":
from gtk_tray import sys_tray
elif sys.platform == "win32":
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_path)
from win_tray import sys_tray
elif sys.platform == "darwin":
darwin_lib = os.path.abspath( os.path.join(python_path, 'lib', 'darwin'))
sys.path.append(darwin_lib)
extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python"
sys.path.append(extra_lib)
osx_lib = os.path.join(python_path, 'lib', 'osx')
sys.path.append(osx_lib)
try:
from mac_tray import sys_tray
except:
from non_tray import sys_tray
else:
from non_tray import sys_tray
import atexit
import logging
import webbrowser
import web_control
import module_init
import update
import config
import setup_win_python
def exit_handler():
print 'Stopping all modules before exit!'
module_init.stop_all()
web_control.stop()
atexit.register(exit_handler)
def main():
# change path to launcher
global __file__
__file__ = os.path.abspath(__file__)
if os.path.islink(__file__):
__file__ = getattr(os, 'readlink', lambda x: x)(__file__)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
web_control.confirm_xxnet_exit()
setup_win_python.check_setup()
module_init.start_all_auto()
web_control.start()
#config.load()
if config.get(["modules", "launcher", "popup_webui"], 1) == 1:
webbrowser.open("http://127.0.0.1:8085/")
update.start()
sys_tray.serve_forever()
module_init.stop_all()
sys.exit()
if __name__ == '__main__':
current_path = os.path.dirname(os.path.abspath(__file__))
version = current_path.split(os.path.sep)[-1]
logging.info("launcher version: %s", version)
try:
main()
except KeyboardInterrupt: # Ctrl + C on console
sys.exit
| #!/usr/bin/env python
# coding:utf-8
import os, sys
current_path = os.path.dirname(os.path.abspath(__file__))
python_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir, 'python27', '1.0'))
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
if sys.platform == "linux" or sys.platform == "linux2":
from gtk_tray import sys_tray
elif sys.platform == "win32":
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_path)
from win_tray import sys_tray
elif sys.platform == "darwin":
darwin_lib = os.path.abspath( os.path.join(python_path, 'lib', 'darwin'))
sys.path.append(darwin_lib)
extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python"
sys.path.append(extra_lib)
osx_lib = os.path.join(python_path, 'lib', 'osx')
sys.path.append(osx_lib)
from mac_tray import sys_tray
else:
from non_tray import sys_tray
import atexit
import logging
import webbrowser
import web_control
import module_init
import update
import config
import setup_win_python
def exit_handler():
print 'Stopping all modules before exit!'
module_init.stop_all()
web_control.stop()
atexit.register(exit_handler)
def main():
# change path to launcher
global __file__
__file__ = os.path.abspath(__file__)
if os.path.islink(__file__):
__file__ = getattr(os, 'readlink', lambda x: x)(__file__)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
web_control.confirm_xxnet_exit()
setup_win_python.check_setup()
module_init.start_all_auto()
web_control.start()
#config.load()
if config.get(["modules", "launcher", "popup_webui"], 1) == 1:
webbrowser.open("http://127.0.0.1:8085/")
update.start()
sys_tray.serve_forever()
module_init.stop_all()
sys.exit()
if __name__ == '__main__':
current_path = os.path.dirname(os.path.abspath(__file__))
version = current_path.split(os.path.sep)[-1]
logging.info("launcher version: %s", version)
try:
main()
except KeyboardInterrupt: # Ctrl + C on console
sys.exit
| bsd-2-clause | Python |
719dd9064904d2e94cacd5c9ab349b0658344294 | Create weather_proc.py | clavicule/keras-exp | tmp/weather_proc.py | tmp/weather_proc.py | import argparse
from datetime import datetime
import numpy as np
# timeslot indexing function
def get_time_index(timestamp):
day = int(timestamp.date().day) - 1
slot = int((timestamp.time().hour * 3600 + timestamp.time().minute * 60 + timestamp.time().second) / 600)
return day * 144 + slot
ap = argparse.ArgumentParser()
ap.add_argument("-w", "--weather", required=True, help="Path to the weather data file")
ap.add_argument("-o", "--output", required=True, help="Path to the output file")
args = vars(ap.parse_args())
total_timeslots = 19 * 144
weather_dataset = np.zeros((total_timeslots, 11), dtype="float")
print('reading weather')
weather_file = open(args['weather'], 'r')
for line in weather_file:
weather_data = line.split('\t')
time_key = get_time_index(datetime.strptime(weather_data[0].strip(), '%Y-%m-%d %H:%M:%S'))
if time_key > total_timeslots:
continue
climate = int(weather_data[1].strip())
temperature = float(weather_data[2].strip())
pollution = float(weather_data[3].strip())
weather_dataset[time_key][climate - 1] += 1.
weather_dataset[time_key][9] += temperature
weather_dataset[time_key][10] += pollution
weather_file.close()
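# Average the temperature and pollution sums per timeslot; slots with no
# reports have their count forced to 1 so the division leaves their zeros unchanged.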
count = np.sum(weather_dataset[:, 0:9], axis=1)
count[ count == 0 ] = 1.;
weather_dataset[:, 9] = weather_dataset[:, 9] / count
weather_dataset[:, 10] = weather_dataset[:, 10] / count
np.savetxt(args["output"], weather_dataset, delimiter=',', fmt='%f')
| mit | Python |
|
9f2e4aad6d3a4004e80378f44aa178b37dd6da57 | add ShellExecError | faycheng/tpl,faycheng/tpl | tpl/errors.py | tpl/errors.py | # -*- coding:utf-8 -*-
from gettext import gettext as _
class BaseError(BaseException):
ERR_MSG = _('')
class ShellExecError(BaseError):
ERR_MSG = _('Command exit code not zero. \nExit Code:\n{}.\nOut:\n{}\nErr:\n{}')
def __init__(self, exit_code, out, err):
self.message = self.ERR_MSG.format(exit_code, out, err)
super(ShellExecError, self).__init__(self.message)
| mit | Python |
|
3d027df005725cbc5dfbba0262b0c52c5392d7f0 | Add whoami resource which decodes token and returns user info from token | brayoh/bucket-list-api | app/resources/check_token.py | app/resources/check_token.py | from flask import make_response, jsonify
from flask_restful import Resource, reqparse, marshal, fields
from app.models import User
from app.common.auth.token import JWT
user_fields = {
"id": fields.Integer,
"username": fields.String,
"created_at": fields.DateTime
}
class WhoAmIResource(Resource):
""" This class takes a token from the Authorization header
and then returns the user info for the token if it's valid
"""
def __init__(self):
self.parser = reqparse.RequestParser()
self.parser.add_argument("Authorization",
location="headers",
required=True)
def get(self):
""" get method """
args = self.parser.parse_args()
token = args["Authorization"] # get token from header
try:
user_id = int(JWT.decode_token(token))
user = User.query.get(user_id)
return marshal(user, user_fields), 200
except ValueError:
return make_response(jsonify({
"status": "failed",
"message": "Invalid token, please login again"
}), 401)
| mit | Python |
|
62484ca423d6adfa19a581d7b74472e8475cf817 | Create findbro.py | jshlbrd/python-drawer | findbro/findbro.py | findbro/findbro.py | # findbro.py v0.1
# Matches Bro logs against a specified list of UIDs
# Can run on N number of Bro logs
# Performs no error checking
# Should only be run on directories that contain only gzip Bro logs
# Best way to collect UIDs is via bro-cut and grep
#
# Josh Liburdi 2016
from os import listdir
import sys
import gzip
import argparse
def write_file(fout_name,file_contents):
fout = gzip.open(fout_name, 'w')
fout.write(file_contents)
fout.close()
def proc_bro(fout_name,input,uid_list):
file_cache = ''
with gzip.open(input) as fin:
lines = fin.readlines()
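# lines[6] and lines[7] carry the Bro header rows (#fields and #types),
# which are copied into every output file.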
file_cache += lines[6]
file_cache += lines[7]
for line in lines[8:-1]:
if any(uid in line for uid in uid_list):
file_cache += line
if len(file_cache.split('\n')) == 3:
print 'No matches in %s' % input
else:
print '%d matches in %s' % ( (len(file_cache.split('\n')) - 3), input )
write_file(fout_name,file_cache)
def main():
parser = argparse.ArgumentParser(description='Merge Bro logs from a single day')
parser.add_argument('--bro-dir', '-bd', dest='directory', action='store')
parser.add_argument('--label', '-l', dest='label', action='store', default=None)
parser.add_argument('--uid', '-u', dest='uid_file', action='store')
argsout = parser.parse_args()
dir_list = listdir(argsout.directory)
log_dict = {}
uid_list = [line.strip() for line in open(argsout.uid_file, 'r')]
for log_file in dir_list:
log_type = log_file.split('.')[0]
log_dict.setdefault(log_type,[]).append(log_file)
for key,list_val in log_dict.iteritems():
if argsout.label is None:
fout_name = key + '.log.gz'
else:
fout_name = key + '.' + argsout.label + '.log.gz'
for f in list_val:
fpath = argsout.directory + f
proc_bro(fout_name,fpath,uid_list)
if __name__ == "__main__":
main()
| apache-2.0 | Python |
|
f34dabd23faa7d50e507b829e576c1968bdc2d52 | Print The Message Happy New Year | let42/python-course | src/iterations/exercise3.py | src/iterations/exercise3.py | # Print The Message "Happy new Year" followed by the name of a person
# taken from a list for all people mentioned in the list.
def print_Happy_New_Year_to( listOfPeople ):
for user in listOfPeople:
print 'Happy New Year, ', user
print 'Done!'
def main( ):
listOfPeople=['John', 'Mary', 'Luke']
print_Happy_New_Year_to( listOfPeople )
quit(0)
main( ) | mit | Python |
|
67cb63bcb776b1a89d8e96a7b90c02724ef5b0b6 | update migrations | GNOME/extensions-web,GNOME/extensions-web,GNOME/extensions-web,GNOME/extensions-web | sweettooth/extensions/migrations/0005_auto_20190112_1733.py | sweettooth/extensions/migrations/0005_auto_20190112_1733.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-12 17:33
from __future__ import unicode_literals
import autoslug.fields
from django.db import migrations, models
import sweettooth.extensions.models
class Migration(migrations.Migration):
dependencies = [
('extensions', '0004_auto_20181216_2102'),
]
operations = [
migrations.AlterField(
model_name='extension',
name='icon',
field=models.ImageField(blank=True, default='', upload_to=sweettooth.extensions.models.make_icon_filename),
),
migrations.AlterField(
model_name='extension',
name='slug',
field=autoslug.fields.AutoSlugField(editable=False, populate_from='name'),
),
]
| agpl-3.0 | Python |
|
3aa6ba18655a92753f33622ac80be66eb3b69ff6 | Add useful python functions | gordonbrander/device_resolutions | device_resolutions.py | device_resolutions.py | from math import sqrt
import csv
def as_orientation(x, y, is_portrait=False):
if is_portrait:
return (y, x) if x > y else (x, y)
else:
return (x, y) if x > y else (y, x)
def as_portrait(x, y):
"""Given a dimensions, return that pair in portrait orientation"""
return as_orientation(x, y, is_portrait=True)
def as_landscape(x, y):
"""Given a dimensions, return that pair in landscape orientation"""
return as_orientation(x, y, is_portrait=False)
def calc_hypotenuse(a, b):
return sqrt(a**2 + b**2)
def calc_ppi(width_px, height_px, hypotenuse_in):
"""
Given the diagonal measurement of the screen in inches (`hypotenuse_in`),
calculate the pixels-per-inch (ppi) offered by the screen.
"""
hypotenuse_px = calc_hypotenuse(width_px, height_px)
return hypotenuse_px / hypotenuse_in
# @TODO port to CSV
COMMON_ASPECT_RATIOS = (
(3, 4, "3:4"),
(1, 1, "1:1"),
(5, 4, "5:4"),
(4, 3, "4:3"),
(1.43, 1, "IMAX 1.43:1"),
(3, 2, "3:2"),
(5, 3, "5:3")
(14, 9, "14:9"),
(16, 10, "16:10"),
(16, 9, "16:9"),
(17, 9, "17:9"),
(21, 9, "21:9"),
(1.375, 1, "Academy Ratio 1.375:1"),
(2.35, 1, "CinemaScope 2.35:1"),
(2.59, 1, "Cinemara 2.59:1"),
(2.75, 1, "Ultra Panavision 70 2.75:1"),
(2.76, 1, "MGM 65 2.76:1")
)
def find_aspect_ratio(x, y):
"""
Given an aspect ratio, find an aspect ratio description using a list
of common aspect ratios.
"""
ratio = x / y
for cx, cy, name in COMMON_ASPECT_RATIOS:
if ratio == (cx/cy):
return (ratio, cx, cy, name)
return (ratio, ratio, 1, "") | mit | Python |
|
dad5f0a06dd057eccde5a086c84d5c639bb74ae9 | Add back peaks for backwards compatibility with a deprecation warning. | nilgoyyou/dipy,nilgoyyou/dipy,matthieudumont/dipy,villalonreina/dipy,StongeEtienne/dipy,matthieudumont/dipy,villalonreina/dipy,FrancoisRheaultUS/dipy,FrancoisRheaultUS/dipy,StongeEtienne/dipy | dipy/reconst/peaks.py | dipy/reconst/peaks.py | import warnings
w_s = "The module 'dipy.reconst.peaks' is deprecated."
w_s += " Please use the module 'dipy.direction.peaks' instead"
warnings.warn(w_s, DeprecationWarning)
from dipy.direction.peaks import *
| bsd-3-clause | Python |
|
52a8a0c0def2930667155660c8844bb6836f9ff5 | add script for table of orders/country | PythonSanSebastian/ep-tools,EuroPython/ep-tools,PythonSanSebastian/ep-tools,EuroPython/ep-tools,PythonSanSebastian/ep-tools,EuroPython/ep-tools,PythonSanSebastian/ep-tools,EuroPython/ep-tools | scripts/country_order_stats.py | scripts/country_order_stats.py | import sqlite3
import pandas as pd
TICKET_SALE_START_DATE = '2016-01-01'
conn = sqlite3.connect('data/site/p3.db')
c = conn.cursor()
query = c.execute("""
SELECT ORDER_ID, COUNTRY_ID
FROM assopy_orderitem, assopy_order
WHERE assopy_orderitem.order_id == assopy_order.id AND
assopy_order.created >= date(?)""", (TICKET_SALE_START_DATE,))
countries = query.fetchall()
df = pd.DataFrame(countries, columns=['order_id', 'country'])
counts = df.groupby('country').count().sort_values(by='order_id', ascending=False)
print(counts)
| mit | Python |
|
696b9d1177d24ca6c455052f15e529f4952196a0 | add test | Jasily/jasily-python,Cologler/py.jasily.cologler | @test/test_lang_with.py | @test/test_lang_with.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2018~2999 - Cologler <[email protected]>
# ----------
#
# ----------
from jasily.lang import with_it, with_objattr, with_objattrs
class SomeLock:
def __init__(self):
self.locked = False
def __enter__(self):
self.locked = True
def __exit__(self, *args):
self.locked = False
def test_with_it():
lock = SomeLock()
@with_it(lock)
def func():
assert lock.locked
return 1
assert not lock.locked
assert func() == 1
assert not lock.locked
def test_with_objattr():
class X:
def __init__(self):
self.some_lock = SomeLock()
@with_objattr('some_lock')
def func(self):
assert self.some_lock.locked
return 1
x = X()
assert not x.some_lock.locked
assert x.func() == 1
assert not x.some_lock.locked
def test_with_objattrs():
class X:
def __init__(self):
self.some_lock_1 = SomeLock()
self.some_lock_2 = SomeLock()
@with_objattrs('some_lock_1', 'some_lock_2')
def func(self):
assert self.some_lock_1.locked
assert self.some_lock_2.locked
return 1
x = X()
assert not x.some_lock_1.locked
assert not x.some_lock_2.locked
assert x.func() == 1
assert not x.some_lock_1.locked
assert not x.some_lock_2.locked
| mit | Python |
|
791ce2275933f16cf483dad1b16948441292e61c | add hook for google-api-python-client (#3965) | dmpetrov/dataversioncontrol,efiop/dvc,dmpetrov/dataversioncontrol,efiop/dvc | scripts/hooks/hook-pydrive2.py | scripts/hooks/hook-pydrive2.py | from PyInstaller.utils.hooks import copy_metadata
datas = copy_metadata("pydrive2")
datas += copy_metadata("google-api-python-client")
| apache-2.0 | Python |
|
534db68d8f773c459788650590b6585fc0369e19 | create a default permission handler for ObjectOwner | michaelhenry/Localizr,michaelhenry/Localizr,michaelhenry/Localizr,michaelhenry/Localizr | apps/Localizr/permissions.py | apps/Localizr/permissions.py | from rest_framework.permissions import IsAuthenticated, SAFE_METHODS
class IsObjectOwner(IsAuthenticated):
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
if hasattr(obj, 'created_by'):
return obj.created_by == request.user
return False | mit | Python |
|
f7d3ca5d537140e07ff95d082f2a78e86bc06604 | Add flip | jkoelker/zl.indicators | zl/indicators/flip.py | zl/indicators/flip.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Jason Koelker
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import numbers
from zipline.transforms import utils as transforms
BULL = 'Bull'
BEAR = 'Bear'
class Flip(object):
__metaclass__ = transforms.TransformMeta
def __init__(self, period=4, setup_price='close_price'):
self.period = period
self.setup_price = setup_price
self.sid_windows = collections.defaultdict(self.create_window)
def create_window(self):
return FlipWindow(self.period, self.setup_price)
def update(self, event):
window = self.sid_windows[event.sid]
window.update(event)
return window()
class FlipWindow(transforms.EventWindow):
def __init__(self, period, setup_price):
transforms.EventWindow.__init__(self, window_length=period + 2)
self.period = period
self.setup_price = setup_price
def handle_add(self, event):
assert hasattr(event, self.setup_price)
value = getattr(event, self.setup_price, None)
assert isinstance(value, numbers.Number)
def handle_remove(self, event):
pass
def __call__(self):
if len(self.ticks) < self.window_length:
return
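# Compare the two newest closes (Yp, Xp) with the closes `period` bars
# earlier (Y, X); a reversal in the direction of that comparison is a flip.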
Yp = getattr(self.ticks[-1], self.setup_price)
Xp = getattr(self.ticks[-2], self.setup_price)
X = getattr(self.ticks[0], self.setup_price)
Y = getattr(self.ticks[1], self.setup_price)
if (Xp > X) and (Yp < Y):
return BEAR
if (Xp < X) and (Yp > Y):
return BULL
| apache-2.0 | Python |
|
e07cc0ea6e56339d117fd5d81c0939b0c658727e | Create cnn.py | AdityaSoni19031997/Machine-Learning,AdityaSoni19031997/Machine-Learning | Classifying_datasets/Convolutional_Neural_Networks/Convolutional_Neural_Networks/cnn.py | Classifying_datasets/Convolutional_Neural_Networks/Convolutional_Neural_Networks/cnn.py | # Convolutional Neural Network
# Part 1 - Building the CNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
# Initialising the CNN
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Conv2D(64, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Adding a second convolutional layer
classifier.add(Conv2D(64, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
classifier.add(Dense(units = 512, activation = 'relu'))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
# Compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Part 2 - Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('dataset/training_set',
target_size = (64, 64),
batch_size = 32,
class_mode = 'binary')
test_set = test_datagen.flow_from_directory('dataset/test_set',
target_size = (64, 64),
batch_size = 32,
class_mode = 'binary')
classifier.fit_generator(training_set,
steps_per_epoch = 8000,
epochs = 25,
validation_data = test_set,
validation_steps = 2000)
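# Part 3 - Making a single prediction (illustrative sketch, not part of the
# original script; the image path below is an assumption)
# import numpy as np
# from keras.preprocessing import image
# test_image = image.load_img('dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64))
# test_image = np.expand_dims(image.img_to_array(test_image), axis = 0)
# result = classifier.predict(test_image)
# print(training_set.class_indices, result)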
| mit | Python |
|
5199ee1a544b2aa59895a1b22359d6a9adb765a3 | Add .prepare-commit-msg.py | PyconUK/ConferenceScheduler | .prepare-commit-msg.py | .prepare-commit-msg.py | #!/usr/bin/env python
# This script is an optional git hook and will prepend the issue
# number to a commit message in the correct format for Github to parse.
#
# If you wish to use it, create a shortcut to this file in .git/hooks called
# 'prepare-commit-msg' e.g. from top folder of your project:
# ln -s ../../.prepare-commit-msg.py .git/hooks/prepare-commit-msg
#
# or, for Windows users:
# mklink .git\hooks\prepare-commit-msg .prepare-commit-msg.py
import sys
import re
from subprocess import check_output
# By default, the hook will check to see if the branch name starts with
# 'issue-' and will then prepend whatever follows in the commit message.
# e.g. for a branch named 'issue-123', the commit message will start with
# '[#123]'
# If you wish to use a diferent prefix on branch names, change it here.
issue_prefix = 'issue-'
commit_msg_filepath = sys.argv[1]
branch = check_output(
['git', 'symbolic-ref', '--short', 'HEAD']
).strip().decode(encoding='UTF-8')
if branch.startswith(issue_prefix):
issue_number = re.match('%s(.*)' % issue_prefix, branch).group(1)
print(
f'prepare-commit-msg: Prepending [#{issue_number}] to commit message')
with open(commit_msg_filepath, 'r+') as f:
content = f.read()
f.seek(0, 0)
f.write(f'[#{issue_number}] {content}')
else:
print("prepare-commit-msg: No changes made to commit message")
| mit | Python |
|
ce28c5642c3ab543fc48e2f4f1f0b2f2a62890a2 | Add script to extract information for playbook files | ASaiM/framework,ASaiM/framework | src/misc/parse_tool_playbook_yaml.py | src/misc/parse_tool_playbook_yaml.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import argparse
import re
import yaml
def get_revision_number(yaml_content, tool_name):
for tool in yaml_content['tools']:
if tool["name"] == tool_name:
if tool.has_key("revision"):
print tool["revision"][0]
def get_owner(yaml_content, tool_name):
for tool in yaml_content['tools']:
if tool["name"] == tool_name:
print tool['owner']
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--file', required=True)
parser.add_argument('--tool_name', required=True)
parser.add_argument('--tool_function', required=True)
args = parser.parse_args()
with open(args.file,'r') as yaml_file:
yaml_content = yaml.load(yaml_file)
functions = {
'get_revision_number': get_revision_number,
'get_owner': get_owner
}
functions[args.tool_function](yaml_content, args.tool_name) | apache-2.0 | Python |
|
f8ee6bcd2742e1afb2645c5195d84bd9d2db06bb | Create utils.py | Esri/raster-functions | functions/utils.py | functions/utils.py | __all__ = ['isProductVersionOK',
'computePixelBlockExtents',
'computeCellSize',
'Projection',
'Trace']
## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ##
def isProductVersionOK(productInfo, major, minor, build):
v = productInfo['major']*1.e+10 + int(0.5+productInfo['minor']*10)*1.e+6 + productInfo['build']
return v >= major*1e+10 + minor*1e+7 + build
def computePixelBlockExtents(tlc, shape, props):
nRows, nCols = shape if len(shape) == 2 else shape[1:] # dimensions of request pixel block
e, w, h = props['extent'], props['width'], props['height'] # dimensions of parent raster
dX, dY = (e[2]-e[0])/w, (e[3]-e[1])/h # cell size of parent raster
xMin, yMax = e[0]+tlc[0]*dX, e[3]-tlc[1]*dY # top-left corner of request on map
return (xMin, yMax-nRows*dY, xMin+nCols*dX, yMax) # extents of request on map
def computeCellSize(props, sr=None, proj=None):
e, w, h = props['extent'], props['width'], props['height'] # dimensions of parent raster
if sr is None:
return (e[2]-e[0])/w, (e[3]-e[1])/h # cell size of parent raster
if proj is None: proj = Projection() # reproject extents
(xMin, xMax) = proj.transform(props['spatialReference'], sr, e[0], e[2])
(yMin, yMax) = proj.transform(props['spatialReference'], sr, e[1], e[3])
return (xMax-xMin)/w, (yMax-yMin)/h # cell size of parent raster
## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ##
class Projection():
def __init__(self):
pyprojModule = __import__('pyproj')
self._inProj, self._outProj = None, None
self._inEPSG, self._outEPSG = -1, -1
self._projClass = getattr(pyprojModule, 'Proj')
self._transformFunc = getattr(pyprojModule, 'transform')
def transform(self, inEPSG, outEPSG, x, y):
if inEPSG != self._inEPSG:
self._inProj = self._projClass("+init=EPSG:{0}".format(inEPSG))
self._inEPSG = inEPSG
if outEPSG != self._outEPSG:
self._outProj = self._projClass("+init=EPSG:{0}".format(outEPSG))
self._outEPSG = outEPSG
return self._transformFunc(self._inProj, self._outProj, x, y)
## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ##
class Trace():
def __init__(self):
ctypes = __import__('ctypes')
self.trace = ctypes.windll.kernel32.OutputDebugStringA
self.trace.argtypes = [ctypes.c_char_p]
self.c_char_p = ctypes.c_char_p
def log(self, s):
self.trace(self.c_char_p(s.encode('utf-8')))
return s
## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ##
| apache-2.0 | Python |
|
24c763ead7af8a669ff1055b3f352f513274a47f | Insert a note at a specific position in a linked list | arvinsim/hackerrank-solutions | all-domains/data-structures/linked-lists/insert-a-node-at-a-specific-positin-in-a-linked-list/solution.py | all-domains/data-structures/linked-lists/insert-a-node-at-a-specific-positin-in-a-linked-list/solution.py | # https://www.hackerrank.com/challenges/insert-a-node-at-a-specific-position-in-a-linked-list
# Python 2
"""
Insert Node at a specific position in a linked list
head input could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
return back the head of the linked list in the below method.
"""
# This is a "method-only" submission.
# You only need to complete this method.
def InsertNth(head, data, position):
if head is None:
return Node(data=data)
else:
current = head
if position == 0:
node_to_insert = Node(data=data, next_node=current)
return node_to_insert
else:
prev = None
for i in xrange(position):
prev = current
current = current.next
new_node = Node(data=data)
prev.next = new_node
new_node.next = current
return head
# def display_linked_list(head):
# s = ''
# while True:
# s += '{}->'.format(head.data)
# if head.next == None:
# break
# else:
# head = head.next
# s += 'NULL'
# print(s)
#
#
# # LL = Node(1)
# c = Node(3)
# b = Node(2, c)
# head = Node(1, b)
#
# head = InsertNth(head, 'x', 1)
#
# display_linked_list(head)
| mit | Python |
|
db914944615f16c4b170e7dfd428901d5fc29271 | Add test for image.fromstring - refs #1805 | manz/python-mapnik,mapnik/mapnik,rouault/mapnik,kapouer/mapnik,garnertb/python-mapnik,Uli1/mapnik,Mappy/mapnik,stefanklug/mapnik,qianwenming/mapnik,yohanboniface/python-mapnik,sebastic/python-mapnik,qianwenming/mapnik,naturalatlas/mapnik,pnorman/mapnik,manz/python-mapnik,whuaegeanse/mapnik,tomhughes/python-mapnik,lightmare/mapnik,yiqingj/work,davenquinn/python-mapnik,tomhughes/mapnik,tomhughes/python-mapnik,tomhughes/mapnik,mapycz/mapnik,mapnik/mapnik,naturalatlas/mapnik,jwomeara/mapnik,mapycz/mapnik,rouault/mapnik,kapouer/mapnik,qianwenming/mapnik,kapouer/mapnik,Mappy/mapnik,pramsey/mapnik,mbrukman/mapnik,cjmayo/mapnik,pnorman/mapnik,Airphrame/mapnik,rouault/mapnik,mapnik/mapnik,mapycz/mapnik,tomhughes/mapnik,whuaegeanse/mapnik,pramsey/mapnik,tomhughes/python-mapnik,davenquinn/python-mapnik,mapnik/python-mapnik,naturalatlas/mapnik,naturalatlas/mapnik,zerebubuth/mapnik,mapycz/python-mapnik,mbrukman/mapnik,stefanklug/mapnik,Airphrame/mapnik,qianwenming/mapnik,Uli1/mapnik,mapnik/python-mapnik,mapnik/mapnik,qianwenming/mapnik,pramsey/mapnik,mbrukman/mapnik,yiqingj/work,stefanklug/mapnik,garnertb/python-mapnik,pnorman/mapnik,mapycz/python-mapnik,sebastic/python-mapnik,zerebubuth/mapnik,jwomeara/mapnik,CartoDB/mapnik,tomhughes/mapnik,sebastic/python-mapnik,Uli1/mapnik,lightmare/mapnik,Mappy/mapnik,stefanklug/mapnik,Uli1/mapnik,lightmare/mapnik,garnertb/python-mapnik,yiqingj/work,davenquinn/python-mapnik,jwomeara/mapnik,whuaegeanse/mapnik,yohanboniface/python-mapnik,mbrukman/mapnik,whuaegeanse/mapnik,Airphrame/mapnik,cjmayo/mapnik,mapnik/python-mapnik,pramsey/mapnik,CartoDB/mapnik,CartoDB/mapnik,Airphrame/mapnik,Mappy/mapnik,jwomeara/mapnik,yohanboniface/python-mapnik,manz/python-mapnik,yiqingj/work,lightmare/mapnik,rouault/mapnik,cjmayo/mapnik,kapouer/mapnik,cjmayo/mapnik,zerebubuth/mapnik,pnorman/mapnik | tests/python_tests/image_test.py | tests/python_tests/image_test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os, mapnik
from timeit import Timer, time
from nose.tools import *
from utilities import execution_path
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
def test_image_open_from_string():
filepath = '../data/images/dummy.png'
im1 = mapnik.Image.open(filepath)
im2 = mapnik.Image.fromstring(open(filepath,'rb').read())
eq_(im1.width(),im2.width())
length = len(im1.tostring())
eq_(length,len(im2.tostring()))
eq_(len(mapnik.Image.fromstring(im1.tostring('png')).tostring()),length)
eq_(len(mapnik.Image.fromstring(im1.tostring('jpeg')).tostring()),length)
eq_(len(mapnik.Image.fromstring(im1.tostring('tiff')).tostring()),length)
if __name__ == "__main__":
setup()
[eval(run)() for run in dir() if 'test_' in run]
| lgpl-2.1 | Python |
|
dedcdaf1a55b08c275af29c535a7ae574b8ee5d2 | Add 20150517 question. | fantuanmianshi/Daily,fantuanmianshi/Daily | LeetCode/number_of_islands.py | LeetCode/number_of_islands.py | """
Given a 2d grid map of '1's (land) and '0's (water), count the number of
islands. An island is surrounded by water and is formed by connecting adjacent
lands horizontally or vertically. You may assume all four edges of the grid are
all surrounded by water.
Example 1:
11110
11010
11000
00000
Answer: 1
Example 2:
11000
11000
00100
00011
Answer: 3
Tags: DFS, BFS
Difficulty: Medium
"""
class Solution:
# @param {character[][]} grid
# @return {integer}
def numIslands(self, grid):
if not grid:
return 0
if not grid[0]:
return 0
width = len(grid[0])
height = len(grid)
visited = [[False] * width for _ in xrange(height)]
count = 0
i = 0
while i < height:
j = 0
while j < width:
if grid[i][j] == '1' and not visited[i][j]:
self.bfs(grid, visited, [(i, j)])
count += 1
j += 1
i += 1
return count
def bfs(self, grid, visited, to_be_visited):
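# Flood-fill one island: to_be_visited acts as a stack of coordinates; each
# popped land cell is marked visited and its four neighbours are pushed.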
if not to_be_visited:
return
x, y = to_be_visited.pop()
if visited[x][y] or grid[x][y] == '0':
return
visited[x][y] = True
if x > 0:
to_be_visited.append((x - 1, y))
if x < len(visited) - 1:
to_be_visited.append((x + 1, y))
if y > 0:
to_be_visited.append((x, y - 1))
if y < len(visited[0]) - 1:
to_be_visited.append((x, y + 1))
while to_be_visited:
self.bfs(grid, visited, to_be_visited)
| mit | Python |
|
681c67381eef9384845e0041214011797be6ea03 | Create text2hex.py | jamokou/text2hex | text2hex.py | text2hex.py | # Program Name : text2hex
# Programmer : The Alpha
# Credits : Iranpython.blog.ir
# Version : 0.91(Beta Version)
# Linted By : Pyflakes
# Info : text2hex is a simple tool that uses to convert strings to hex.
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import sys
import binascii
class TextToHex(QDialog):
def __init__(self):
QDialog.__init__(self)
self.setWindowTitle("Text2Hex")
layout = QGridLayout()
self.label_cp = QLabel("<b><code><h3>pystudent copyright</h3></code></b>")
label_text = QLabel("<b><code><h3>Text :</h3></code></b>")
self.line_edit_text = QLineEdit()
label_hex = QLabel("<b><code><h3>Hex :</h3></code></b>")
self.line_edit_hex = QLineEdit()
self.line_edit_hex.setReadOnly(True)
self.convert_button = QPushButton("Convert")
self.exit_button = QPushButton("Exit")
layout.addWidget(label_text, 0, 0)
layout.addWidget(self.line_edit_text, 0, 1)
layout.addWidget(label_hex, 1, 0)
layout.addWidget(self.line_edit_hex, 1, 1)
layout.addWidget(self.convert_button, 2, 0)
layout.addWidget(self.label_cp, 2, 1)
layout.addWidget(self.exit_button, 2, 2)
self.convert_button.clicked.connect(self.convertor)
self.exit_button.clicked.connect(self.close)
self.setLayout(layout)
def convertor(self):
data = self.line_edit_text.text()
hex_text = binascii.hexlify(bytes(data, 'utf-8'))
hex_text = str(hex_text)
hex_text = hex_text.replace("b'", "")
hex_text = hex_text.replace("'", "")
hex_text = "0x"+hex_text
self.line_edit_hex.setText(hex_text)
if hex_text == "0x":
self.line_edit_hex.setText("")
app = QApplication(sys.argv)
dialog = TextToHex()
dialog.show()
app.exec_()
| mit | Python |
|
dce13f074187cb95644b0ac3cfd84d1e0649f93c | Fix bytes/str handling in disqus SSO. | dsanders11/mezzanine,jjz/mezzanine,readevalprint/mezzanine,promil23/mezzanine,ZeroXn/mezzanine,geodesign/mezzanine,damnfine/mezzanine,mush42/mezzanine,sjdines/mezzanine,saintbird/mezzanine,Skytorn86/mezzanine,eino-makitalo/mezzanine,stephenmcd/mezzanine,frankchin/mezzanine,gradel/mezzanine,stephenmcd/mezzanine,adrian-the-git/mezzanine,SoLoHiC/mezzanine,christianwgd/mezzanine,wbtuomela/mezzanine,ryneeverett/mezzanine,industrydive/mezzanine,biomassives/mezzanine,wyzex/mezzanine,nikolas/mezzanine,vladir/mezzanine,nikolas/mezzanine,dustinrb/mezzanine,viaregio/mezzanine,Cicero-Zhao/mezzanine,frankier/mezzanine,jerivas/mezzanine,jjz/mezzanine,adrian-the-git/mezzanine,dustinrb/mezzanine,wyzex/mezzanine,frankier/mezzanine,viaregio/mezzanine,geodesign/mezzanine,SoLoHiC/mezzanine,christianwgd/mezzanine,spookylukey/mezzanine,sjuxax/mezzanine,AlexHill/mezzanine,vladir/mezzanine,spookylukey/mezzanine,nikolas/mezzanine,ryneeverett/mezzanine,vladir/mezzanine,promil23/mezzanine,sjuxax/mezzanine,dustinrb/mezzanine,wyzex/mezzanine,mush42/mezzanine,batpad/mezzanine,gradel/mezzanine,stephenmcd/mezzanine,Kniyl/mezzanine,damnfine/mezzanine,Cicero-Zhao/mezzanine,theclanks/mezzanine,molokov/mezzanine,saintbird/mezzanine,wbtuomela/mezzanine,ZeroXn/mezzanine,viaregio/mezzanine,PegasusWang/mezzanine,gradel/mezzanine,dovydas/mezzanine,damnfine/mezzanine,mush42/mezzanine,eino-makitalo/mezzanine,fusionbox/mezzanine,dsanders11/mezzanine,molokov/mezzanine,jerivas/mezzanine,tuxinhang1989/mezzanine,SoLoHiC/mezzanine,sjdines/mezzanine,cccs-web/mezzanine,emile2016/mezzanine,dovydas/mezzanine,biomassives/mezzanine,joshcartme/mezzanine,wbtuomela/mezzanine,ZeroXn/mezzanine,industrydive/mezzanine,industrydive/mezzanine,dovydas/mezzanine,AlexHill/mezzanine,dekomote/mezzanine-modeltranslation-backport,adrian-the-git/mezzanine,eino-makitalo/mezzanine,tuxinhang1989/mezzanine,ryneeverett/mezzanine,Kniyl/mezzanine,readevalprint/mezzanine,sjdines/mezzanine,agepoly/mezzanine,webounty/mezzanine,Skytorn86/mezzanine,Cajoline/mezzanine,douglaskastle/mezzanine,cccs-web/mezzanine,jerivas/mezzanine,geodesign/mezzanine,joshcartme/mezzanine,dekomote/mezzanine-modeltranslation-backport,saintbird/mezzanine,emile2016/mezzanine,jjz/mezzanine,webounty/mezzanine,promil23/mezzanine,theclanks/mezzanine,douglaskastle/mezzanine,dsanders11/mezzanine,batpad/mezzanine,emile2016/mezzanine,biomassives/mezzanine,joshcartme/mezzanine,PegasusWang/mezzanine,christianwgd/mezzanine,sjuxax/mezzanine,Cajoline/mezzanine,molokov/mezzanine,douglaskastle/mezzanine,frankier/mezzanine,Kniyl/mezzanine,Skytorn86/mezzanine,spookylukey/mezzanine,PegasusWang/mezzanine,agepoly/mezzanine,tuxinhang1989/mezzanine,fusionbox/mezzanine,agepoly/mezzanine,readevalprint/mezzanine,frankchin/mezzanine,Cajoline/mezzanine,frankchin/mezzanine,theclanks/mezzanine,webounty/mezzanine,dekomote/mezzanine-modeltranslation-backport | mezzanine/generic/templatetags/disqus_tags.py | mezzanine/generic/templatetags/disqus_tags.py | from __future__ import unicode_literals
from future.builtins import bytes, int
import base64
import hashlib
import hmac
import json
import time
from mezzanine import template
register = template.Library()
@register.simple_tag
def disqus_id_for(obj):
"""
Returns a unique identifier for the object to be used in
DISQUS JavaScript.
"""
return "%s-%s" % (obj._meta.object_name, obj.id)
@register.inclusion_tag("generic/includes/disqus_sso.html", takes_context=True)
def disqus_sso_script(context):
"""
Provides a generic context variable which adds single-sign-on
support to DISQUS if ``COMMENTS_DISQUS_API_PUBLIC_KEY`` and
``COMMENTS_DISQUS_API_SECRET_KEY`` are specified.
"""
settings = context["settings"]
public_key = getattr(settings, "COMMENTS_DISQUS_API_PUBLIC_KEY", "")
secret_key = getattr(settings, "COMMENTS_DISQUS_API_SECRET_KEY", "")
user = context["request"].user
if public_key and secret_key and user.is_authenticated():
context["public_key"] = public_key
context["sso_data"] = _get_disqus_sso(user, public_key, secret_key)
return context
def _get_disqus_sso(user, public_key, secret_key):
# Based on snippet provided on http://docs.disqus.com/developers/sso/
# create a JSON packet of our data attributes
data = json.dumps({
'id': '%s' % user.id,
'username': user.username,
'email': user.email,
})
# encode the data to base64
message = base64.b64encode(bytes(data, encoding="utf8"))
# generate a timestamp for signing the message
timestamp = int(time.time())
# generate our hmac signature
sig = hmac.HMAC(bytes(secret_key, encoding="utf8"),
bytes('%s %s' % (message, timestamp), encoding="utf8"),
hashlib.sha1).hexdigest()
# Messages are of the form <message> <signature> <timestamp>
return '%s %s %s' % (message, sig, timestamp)
| from __future__ import unicode_literals
from future.builtins import int, str
import base64
import hashlib
import hmac
import json
import time
from mezzanine import template
register = template.Library()
@register.simple_tag
def disqus_id_for(obj):
"""
Returns a unique identifier for the object to be used in
DISQUS JavaScript.
"""
return "%s-%s" % (obj._meta.object_name, obj.id)
@register.inclusion_tag("generic/includes/disqus_sso.html", takes_context=True)
def disqus_sso_script(context):
"""
Provides a generic context variable which adds single-sign-on
support to DISQUS if ``COMMENTS_DISQUS_API_PUBLIC_KEY`` and
``COMMENTS_DISQUS_API_SECRET_KEY`` are specified.
"""
settings = context["settings"]
public_key = getattr(settings, "COMMENTS_DISQUS_API_PUBLIC_KEY", "")
secret_key = getattr(settings, "COMMENTS_DISQUS_API_SECRET_KEY", "")
user = context["request"].user
if public_key and secret_key and user.is_authenticated():
context["public_key"] = public_key
context["sso_data"] = _get_disqus_sso(user, public_key, secret_key)
return context
def _get_disqus_sso(user, public_key, secret_key):
# Based on snippet provided on http://docs.disqus.com/developers/sso/
# create a JSON packet of our data attributes
data = json.dumps({
'id': '%s' % user.id,
'username': user.username,
'email': user.email,
})
# encode the data to base64
message = base64.b64encode(data)
# generate a timestamp for signing the message
timestamp = int(time.time())
# generate our hmac signature
sig = hmac.HMAC(str(secret_key), '%s %s' % (message, timestamp),
hashlib.sha1).hexdigest()
# Messages are of the form <message> <signature> <timestamp>
return '%s %s %s' % (message, sig, timestamp)
| bsd-2-clause | Python |
913a77592a9f399820cddbc7753c24182ad21639 | Add options for plots | jvivian/rnaseq-lib,jvivian/rnaseq-lib | src/rnaseq_lib/plot/opts.py | src/rnaseq_lib/plot/opts.py | gene_curves = {
'Curve': {'plot': dict(height=120, width=600, tools=['hover'], invert_xaxis=True, yrotation=45, yaxis='left'),
'style': dict(line_width=1.5)},
'Curve.Percentage_of_Normal_Samples': {'plot': dict(xaxis=None, invert_yaxis=True),
'style': dict(color='Blue')},
'Curve.Gene_Expression': {'plot': dict(xaxis=None),
'style': dict(color='Green')},
'Curve.Log2_Fold_Change': {'plot': dict(height=150),
'style': dict(color='Purple')},
'Scatter': {'style': dict(color='red', size=3)}}
gene_kde = {}
| mit | Python |
|
fa1e30635f57aaffdc74eaa307b8c74f89bf50ae | add base gender choices object | masschallenge/django-accelerator,masschallenge/django-accelerator | accelerator_abstract/models/base_gender_choices.py | accelerator_abstract/models/base_gender_choices.py | # MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
from django.db import models
from accelerator_abstract.models.accelerator_model import AcceleratorModel
GENDER_MALE_CHOICE = "Male"
GENDER_FEMALE_CHOICE = "Female"
GENDER_CISGENDER_CHOICE = "Cisgender"
GENDER_TRANSGENDER_CHOICE = "Transgender"
GENDER_NON_BINARY_CHOICE = "Non-Binary"
GENDER_PREFER_TO_SELF_DESCRIBE_CHOICE = "I Prefer To Self-describe"
GENDER_PREFER_NOT_TO_SAY_CHOICE = "I Prefer Not To Say"
GENDER_CHOICES = (
GENDER_MALE_CHOICE,
GENDER_FEMALE_CHOICE,
GENDER_CISGENDER_CHOICE,
GENDER_TRANSGENDER_CHOICE,
GENDER_NON_BINARY_CHOICE,
GENDER_PREFER_TO_SELF_DESCRIBE_CHOICE,
GENDER_PREFER_NOT_TO_SAY_CHOICE
)
class BaseGenderChoices(AcceleratorModel):
name = models.CharField(max_length=255, unique=True)
class Meta(AcceleratorModel.Meta):
db_table = 'accelerator_genderchoices'
abstract = True
ordering = ['name', ]
verbose_name = "Gender Choice"
verbose_name_plural = "Gender Choices"
| mit | Python |
|
b55ef35a68305269e8a49a8afcdf46d94d06361f | add drf module | jeromecc/doctoctocbot | src/common/drf.py | src/common/drf.py | from rest_framework.exceptions import APIException
class ServiceUnavailable(APIException):
status_code = 503
default_detail = 'Service temporarily unavailable, try again later.'
default_code = 'service_unavailable' | mpl-2.0 | Python |
|
cdfee7e893564157e2143f20dea0b10c8bd33cfb | Create pythonLock.py | noekleby/TTK4145,noekleby/TTK4145,noekleby/TTK4145 | ving2/pythonLock.py | ving2/pythonLock.py |
from threading import Thread
from threading import Lock
i = 0
def someThreadFunction1(lock):
# Potentially useful thing:
# In Python you "import" a global variable, instead of "export"ing it when you declare it
# (This is probably an effort to make you feel bad about typing the word "global")
global i
for j in range (0,1000000):
lock.acquire()
i += 1
lock.release()
def someThreadFunction2(lock):
global i
for j in range (0,1000000):
lock.acquire()
i -= 1
lock.release()
def main():
lock = Lock()
someThread1 = Thread(target = someThreadFunction1, args = ([lock]))
someThread1.start()
someThread2 = Thread(target = someThreadFunction2, args = ([lock]))
someThread2.start()
someThread1.join()
someThread2.join()
print(i)
main()
| mit | Python |
|
c894e509f14cd671eaa49a5d6608bf773a8838c2 | Create updaterepo.py | chickenmatt5/python-cydia-repo-updater | updaterepo.py | updaterepo.py | from os import system as s # s will serve as an easy way to send a command to the system
from os import path, remove, listdir
import hashlib, shutil, ftplib, gnupg
news = listdir('/REPODIRECTORY/new') # Taking inventory of all new packages, placed in a "/new" directory
for entry in news:
enpath = '/REPODIRECTORY/new/%s' % entry
if path.isdir(enpath): # Checking to see if any packages (in directory form, with the DEBIAN directory) have yet to be packaged
makedeb = 'dpkg -b %s' % enpath
s(makedeb) # Packaging any not-yet-packaged packages
shutil.rmtree(enpath) # Deleting the now-packaged package's folder
news = listdir('/REPODIRECTORY/new') # Taking inventory of all new packages
for file in news:
newf = path.join('/REPODIRECTORY/new', file)
newfm = path.join('/REPODIRECTORY', file)
shutil.move(newf, newfm) # Moving all new packages into the repo root, so they can be accounted for when creating the Packages index
remove('Packages') # Removing the old Packages index files
remove('Packages.gz')
remove('Packages.bz2')
s('sudo dpkg-scanpackages -m . /dev/null >Packages') # Creating the Packages file
s('bzip2 -fks Packages') # Creating the Packages.bz2 file
s('gzip -f Packages') # Turning the Packages file into the Packages.gz file
s('sudo dpkg-scanpackages -m . /dev/null >Packages') # Creating another Packages file
m1 = hashlib.md5(open('Packages').read()).hexdigest() # Calculating checksums for each Packages index file
m2 = hashlib.md5(open('Packages.gz').read()).hexdigest()
m3 = hashlib.md5(open('Packages.bz2').read()).hexdigest()
s1 = path.getsize('Packages') # Getting file size of each Packages index files
s2 = path.getsize('Packages.gz')
s3 = path.getsize('Packages.bz2')
sums = '%s %s Packages\n%s %s Packages.gz\n%s %s Packages.bz2\n' % (m1, s1, m2, s2, m3, s3)
with open("Release", "r+") as f: # Writing the sums & file sizes of the Packages index files to the Release file
old = f.read()
old = old[:XXX] ### This XXX varies on how long the Release file is, as this line skips to the end of the Release file to tag on the sums
f.seek(0)
f.write(old + sums)
gpg = gnupg.GPG()
nosign = open('Release', "rb") # Signing the Release file
signed = gpg.sign_file(nosign, keyid='GPGSIGNATUREID', passphrase='GPGSIGNATUREPASSWORD')
remove('Release.gpg') # Removing the old Release.gpg signed file
open("Release.gpg", "w").write(str(signed)[XXX:]) # Create and write signature data to Release.gpg
# On the line above, the XXX offset depends on how long the Release file is, since gpg.sign_file (five lines up) outputs more than Cydia wants
session = ftplib.FTP('FTPADDRESS','FTPUSERNAME','FTPPASSWORD') # Setting up an FTP connection
ftplib.FTP.cwd(session,'/FTPUPLOADDIRECTORY/')
news.append('Packages') # Preparing files for upload (only new packages, and the index files)
news.append('Packages.gz')
news.append('Packages.bz2')
news.append('Release')
news.append('Release.gpg')
for file in news: # Upload each file, and print as each file is uploaded
upl = open(file, 'rb')
upcmd = 'STOR %s' % file
session.storbinary(upcmd, upl)
print '%s uploaded.' % file
upl.close()
print 'Finished uploads.'
| mit | Python |
|
9266e24e616174cc37b5e6f7926dfda81471abb5 | Initialize PracticeQuestions | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/CrackingCodesWithPython/Chapter13/PracticeQuestions.py | books/CrackingCodesWithPython/Chapter13/PracticeQuestions.py | # Chapter 13 Practice Questions
# 1. What do the following expressions evaluate to?
print(17 % 1000)
print(5 % 5)
# 2. What is the GCD of 10 and 15?
# Don't do this - imports should be at the top of the file
from books.CrackingCodesWithPython.Chapter13.cryptomath import gcd
print(gcd(10, 15))
# 3. What does spam contain after executing spam, eggs = 'hello', 'world'?
spam, eggs = 'hello', 'world'
print(spam)
# 4. The GCD of 17 and 31 is 1. Are 17 and 31 relatively prime?
if not gcd(17, 31) == 1:
print("No")
else:
print("Yes")
# 5. Why aren't 6 and 8 relatively prime?
print(gcd(6, 8))
# 6. What is the formula for the modular inverse of A mod C?
# Hint: check page 183
| mit | Python |
|
c9e90ef5413bd560422e915d213df73ad88dffd7 | Add apigateway integration test for PutIntegration | boto/botocore,pplu/botocore | tests/integration/test_apigateway.py | tests/integration/test_apigateway.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.session
class TestApigateway(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('apigateway', 'us-east-1')
        # Create a resource to use with this client.
self.api_name = 'mytestapi'
self.api_id = self.client.create_rest_api(name=self.api_name)['id']
def tearDown(self):
self.client.delete_rest_api(restApiId=self.api_id)
def test_put_integration(self):
# The only resource on a brand new api is the path. So use that ID.
path_resource_id = self.client.get_resources(
restApiId=self.api_id)['items'][0]['id']
# Create a method for the resource.
self.client.put_method(
restApiId=self.api_id,
resourceId=path_resource_id,
httpMethod='GET',
authorizationType='None'
)
# Put an integration on the method.
response = self.client.put_integration(
restApiId=self.api_id,
resourceId=path_resource_id,
httpMethod='GET',
type='HTTP',
integrationHttpMethod='GET',
uri='https://api.endpoint.com'
)
# Assert the response was successful by checking the integration type
self.assertEqual(response['type'], 'HTTP')
| apache-2.0 | Python |
|
4ce7a1932d9cde635263a4fe5a80af57589e1cfa | add NASM 2.13.02 Conan package recipe | ConnectedVision/connectedvision,ConnectedVision/connectedvision,ConnectedVision/connectedvision,ConnectedVision/connectedvision,ConnectedVision/connectedvision,ConnectedVision/connectedvision | build_env/Conan/packages/NASM/2.13.02/conanfile.py | build_env/Conan/packages/NASM/2.13.02/conanfile.py | import os
from conans import ConanFile, AutoToolsBuildEnvironment, tools
class NASM(ConanFile):
name = "NASM"
version = "2.13.02"
url = "http://www.nasm.us"
settings = {"os": ["Linux"]}
def getSubdirectories(self, d):
return [ f for f in os.listdir(d) if os.path.isdir(f) ]
def source(self):
self.output.info("")
self.output.info("---------- source ----------")
self.output.info("")
filename = "nasm-" + self.version + ".tar.bz2"
url = "http://www.nasm.us/pub/nasm/releasebuilds/" + self.version + "/" + filename
self.output.info("downloading " + url)
tools.download(url, filename, retry=3, retry_wait=10)
tools.unzip(filename, self.source_folder)
dirnames = self.getSubdirectories(self.source_folder)
if len(dirnames) < 1:
raise Exception("archive does not contain any subdirectories")
os.rename(dirnames[0], self.name)
os.remove(filename)
def build(self):
self.output.info("")
self.output.info("---------- build ----------")
self.output.info("")
with tools.chdir(self.name):
env = AutoToolsBuildEnvironment(self)
env.configure(args=["--prefix=" + self.package_folder])
env.make()
env.make(args=["install"])
def package(self):
self.output.info("")
self.output.info("---------- package ----------")
self.output.info("")
def package_info(self):
self.output.info("")
self.output.info("---------- package_info ----------")
self.output.info("")
self.env_info.PATH.append(os.path.join(self.package_folder, "bin")) | mit | Python |
|
9fb564d8f02d92432a62be02c906e3b227f48c10 | Create add_results_new.py | vortex610/mos,vortex610/mos,vortex610/mos,vortex610/mos | run_tests/shaker_run/add_results_new.py | run_tests/shaker_run/add_results_new.py | custom_res1 = [{'status_id': 5, 'content': 'Check [Operations per second Median; iops]', 'expected': '88888', 'actual': '7777'},{'status_id': 5, 'content': 'Check [deviation; %]', 'expected': '5555', 'actual': '9999'}]
res1 = {'test_id': test_4kib_read, 'status_id': 5, 'custom_test_case_steps_results': custom_res1}
res2 = {'test_id': test_4kib_write, 'status_id': 5, 'custom_test_case_steps_results': [{'status_id': 5, 'content': 'Check [Operations per second Median; iops]', 'expected': '20202', 'actual': '30303'},{'status_id': 5, 'content': 'Check [deviation; %]', 'expected': '90909', 'actual': '80808'}]}
results_list = [res1, res2]
res_all = {'results': results_list}
print client.send_post('add_results/{}'.format(run_id), res_all)
| apache-2.0 | Python |
|
729f1c5147e4d4ce242d73731c8e455b2a50fca3 | add 188 | EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler,zeyuanxy/project-euler,zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler,EdisonAlgorithms/ProjectEuler | vol4/188.py | vol4/188.py | def tetration(a, b, m):
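    # Repeatedly raise a to the previous result modulo m, stopping early once the value stops changing.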
t0 = 1
for i in range(b):
t1 = pow(a, t0, m)
if t0 == t1:
break
t0 = t1
return t0
if __name__ == "__main__":
print tetration(1777, 1855, 10 ** 8)
| mit | Python |
|
98c1ff71d57749168f0ca35d97dbe77a8a67e082 | Add module for utilities related to xgboost | rladeira/mltils | mltils/xgboost/utils.py | mltils/xgboost/utils.py |
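# Mapping from native XGBoost parameter names to their scikit-learn wrapper equivalents.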
xgb_to_sklearn = {
'eta': 'learning_rate',
'num_boost_round': 'n_estimators',
'alpha': 'reg_alpha',
'lambda': 'reg_lambda',
'seed': 'random_state',
}
def to_sklearn_api(params):
return {
xgb_to_sklearn.get(key, key): value
for key, value in params.items()
}
| mit | Python |
|
bbb10ba41db6f70512fe6bcb5207377606a22455 | Create Mordecai_Output.py | openeventdata/Focus_Locality_Extraction,openeventdata/Focus_Locality_Extraction,openeventdata/Focus_Locality_Extraction,openeventdata/Focus_Locality_Extraction,openeventdata/Focus_Locality_Extraction | Geoparser_Comparison/English/Mordecai_Output.py | Geoparser_Comparison/English/Mordecai_Output.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Download and run Mordecai from following link:
"https://github.com/openeventdata/mordecai"
To change the corpus, just change the name in main function.
"""
import xml.etree.ElementTree as et
import re
import json, sys
import requests
#reload(sys)
#sys.setdefaultencoding("utf-8")
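# Send the text to a Mordecai instance on localhost:5000 and return place names with coordinates and character offsets.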
def Mordecai(text):
headers = {'Content-Type': 'application/json'}
place=list()
data = {'text': text}
data = json.dumps(data)
out = requests.post('http://localhost:5000/places', data=data, headers=headers)
parsed_json = json.loads(out.text)
try:
for e in parsed_json:
#print e
index = [m.start() for m in re.finditer(e['placename'].strip(), text)]
for ind in index:
place.append(e['searchterm'] + ",," + e['placename'] + ",," + str(e['lat']) + ",," + str(e['lon']) + ",,"+ str(ind) +',,'+ str(ind +len(e['placename'].strip()) ))
except:
pass
return place
if __name__ == '__main__':
f = open('./data/wiki_mordecai_Original.txt' , 'w') #change it if your data is lgl.xml
tree = et.parse('./WikToR(SciPaper).xml') #change it if your data is lgl.xml
root = tree.getroot()
c = 0
for child in root:
c +=1
print c
text = child.find('text').text
place = Mordecai(text)
if (place):
for t in place:
f.write(t + "||")
f.write("\n")
f.flush()
| mit | Python |
|
9d98c3280d4e9dc6dda172d11e02922fc9958471 | add homwork01_v0.2.py | seerjk/reboot06,seerjk/reboot06 | 01/homwork01_v0.2.py | 01/homwork01_v0.2.py | #!/usr/bin/env python
#coding=utf-8
num_list = [1,2,3,2,12,3,1,3,21,2,2,3,4111,22,3333,444,111,4,5,777,65555,45,33,45]
max2 = max1 = num_list[0]
# print max1, max2
# max1 bigger than max2
# 1. n>max1 and n>max2
# 2. n<=max1 and n>max2
# 3. n<max1 and n<=max2
for n in num_list:
if n > max2:
if n > max1:
max2 = max1
max1 = n
elif n < max1:
max2 = n
print "Two large numbers are: %d, %d" % (max1, max2)
| mit | Python |
|
73bc2dbfe40db224a38725f4412e33b1b5accac6 | Add script example. | kuujo/active-redis | examples/script.py | examples/script.py | # Copyright (c) 2013 Jordan Halterman <[email protected]>
# See LICENSE for details.
import sys, os
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
# The Active Redis API provides native support for Redis server-side
# Lua scripting.
from active_redis import Script
class PushMany(Script):
"""
Push several items on to a queue.
"""
# Define keyword argument names for keys used by the script.
keys = ['key']
# Define keyword argument names for all other arguments to the script.
args = []
# In this case, we're using a variable number of arguments. Note that
# when variable arguments are used, only the last defined argument
# may have a variable number.
variable_args = True
# Finally, define the Lua script. This is just a simple example.
script = """
local key = KEYS[1]
local vals = ARGV
redis.call('RPUSH', key, unpack(vals))
"""
# Building upon the datatype example, we can extend the Queue class
# and make use of our script.
from datatype import Queue
from active_redis import registry
@registry.datatype
class BetterQueue(Queue):
"""A better version of our queue."""
type = 'better_queue'
_scripts = {
'pushmany': PushMany,
}
def push_many(self, *args):
"""Pushes many items on to the queue."""
return self._execute_script('pushmany', self.key, *args)
| mit | Python |
|
68ba389a4b6cefe70864577bcc195f14012e224d | Add UK flag example | samirelanduk/omnicanvas | examples/ukflag.py | examples/ukflag.py | import math
import omnicanvas
def create_union_flag(height):
# The union flag is twice as wide as it is high
canvas = omnicanvas.Canvas(height * 2, height, background_color="#000066")
#This is the length of the diagonal of the flag, with Pythagoras
diagonal_length = math.sqrt((height ** 2) + ((height * 2) ** 2))
# This is the angle of the diagonal strips from the horizontal
# tan(θ) = opposite / adjacent, so θ = atan(opposite / adjacent)
diagonal_angle = math.degrees(math.atan((height / 2) / height))
    # Add the diagonal white strips
canvas.add_rectangle(
height - (height * 0.1),
(height / 2) - (diagonal_length / 2),
height * 0.2,
diagonal_length,
line_width=0,
rotation=(
height, height / 2, 270 + diagonal_angle
)
)
canvas.add_rectangle(
height - (height * 0.1),
(height / 2) - (diagonal_length / 2),
height * 0.2,
diagonal_length,
line_width=0,
rotation=(
height, height / 2, 90 - diagonal_angle
)
)
# Add diagonal red strips - these'll be partly covered by the white cross
canvas.add_rectangle(
height - (height / 15),
(height / 2) - (diagonal_length / 2),
height / 15,
diagonal_length / 2,
line_width=0,
fill_color="#CC0000",
rotation=(
height, height / 2, 90 - diagonal_angle
)
)
canvas.add_rectangle(
height - (height / 15),
(height / 2) - (diagonal_length / 2),
height / 15,
diagonal_length / 2,
line_width=0,
fill_color="#CC0000",
rotation=(
height, height / 2, 270 - diagonal_angle
)
)
canvas.add_rectangle(
height - (height / 15),
(height / 2) - (diagonal_length / 2),
height / 15,
diagonal_length / 2,
line_width=0,
fill_color="#CC0000",
rotation=(
height, height / 2, 270 + diagonal_angle
)
)
canvas.add_rectangle(
height - (height / 15),
(height / 2) - (diagonal_length / 2),
height / 15,
diagonal_length / 2,
line_width=0,
fill_color="#CC0000",
rotation=(
height, height / 2, 90 + diagonal_angle
)
)
# Add the white cross
canvas.add_rectangle(
height - (height / 6),
0,
height / 3,
height,
line_width=0
)
canvas.add_rectangle(
0,
(height / 2) - (height / 6),
height * 2,
height / 3,
line_width=0
)
# Add the red cross
canvas.add_rectangle(
height - (height / 10),
0,
height / 5,
height,
line_width=0,
fill_color="#CC0000",
)
canvas.add_rectangle(
0,
(height / 2) - (height / 10),
height * 2,
height / 5,
line_width=0,
fill_color="#CC0000",
)
return canvas
# Create a flag of height 360px (and so width 720px)
create_union_flag(360).save("ukflag.svg")
| mit | Python |