commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---
b22cfb4c6b8c0c0c3751078b720313d0e2baff1d | Test API call | SaishRedkar/FilmyBot | src/filmyBot.py | src/filmyBot.py | import time,json,requests
import os
from slackclient import SlackClient
# get the Slack API token as an environment variable
SLACK_BOT_TOKEN = os.environ["SLACK_BOT_TOKEN"]
CHANNEL_NAME = "test2"
BOT_ID = "U53TE8XSS"
SLACK_BOT_NAME = "<@" + BOT_ID + ">"
def main():
    print(SLACK_BOT_NAME)
    # Create the slackclient instance
    sc = SlackClient(SLACK_BOT_TOKEN)
    response = requests.get("http://www.omdbapi.com/?t=The+Dark+Knight&plot=full")
    data = response.json()
    # Connect to slack
    if sc.rtm_connect():
        # Send first message
        #sc.rtm_send_message(CHANNEL_NAME, "I'm ALIVE!!!")
        while True:
            # Read latest messages
            for slack_message in sc.rtm_read():
                message = slack_message.get("text")
                user = slack_message.get("user")
                print(message, user)
                if(message and user):
                    if(SLACK_BOT_NAME in message):
                        print("done!")
                        sc.rtm_send_message(CHANNEL_NAME, data["Plot"])
                        sc.rtm_send_message(CHANNEL_NAME, sc.api_call("users.list"))
                    else:
                        sc.rtm_send_message(CHANNEL_NAME, "")

if __name__ == '__main__':
    main() | mit | Python |
|
6f2ab55d0b83c33fad322101e7214425efd10829 | add colors to module | adrn/GaiaPairsFollowup | comoving_rv/plot.py | comoving_rv/plot.py | colors = dict()
colors['line_marker'] = '#3182bd'
colors['gp_model'] = '#ff7f0e'
colors['not_black'] = '#333333'
colors['fit'] = '#2ca25f'
| mit | Python |
|
963b1ab24767acb5253b9fe2f29749d8656b2918 | index file added | ryanrdetzel/Mental-Cache,ryanrdetzel/Mental-Cache | index.py | index.py | #!/usr/bin/env python
import web
import page
import upload
import utils
#from google.appengine.ext import db
import logging
from Cheetah.Template import Template
import os
urls = (
    '/page', page.app_page,
    '/upload', upload.app_upload,
    '/login', "login",
    '/(\d+)-(?:[\w|-]+)\.html', "index",
    "/(.*)", "index"
)

class login:
    def GET(self):
        #utils.login()
        #return '<form action="/login" method="POST"><input type="text" name="email" value="ryan" /><input type="submit" /></form>'
        path = os.path.join(os.path.dirname(__file__), 'templates/login.html')
        template_values = { 'user':'test',}
        tmpl = Template( file = path, searchList = (template_values,) )
        return tmpl

    def POST(self):
        if (utils.login() is None):
            raise web.seeother('/login')
        else:
            raise web.seeother('/index.html')

#class Page(db.Model):
#    id = db.IntegerProperty()
#    title = db.StringProperty()
#    tags = db.StringListProperty()
#    content = db.TextProperty()
#    owner = db.IntegerProperty(default=666)

class redirect:
    def GET(self, page_name):
        if utils.set_page_id(page_name):
            web.redirect("/index.html")
        else:
            return "FAIL"

class index:
    def GET(self, page_name):
        if page_name == "w":
            return 'test'
            #page = Page()
            #page.id = 1
            #page.title = "Random Stuff"
            #page.tags = ["test","ryan","links"]
            #page.content = '{"name": "Untitled", "order": "", "components": {}, "last_id":0 }'
            #page.put()
        else:
            #path = os.path.join(os.path.dirname(__file__), 'static/index.html')
            path = os.path.join(os.path.dirname(__file__), 'templates/index.html')
            template_values = { 'page_name':page_name,}
            tmpl = Template( file = path, searchList = (template_values,) )
            return tmpl

app = web.application(urls, globals())

if __name__ == '__main__':
    logging.getLogger().setLevel(logging.DEBUG)
    app.run()
| mit | Python |
|
b91eb0b8b5bd66ea0bf090e6c6e71232c81d6e7a | Add mount.py | jakogut/KiWI | kiwi/mount.py | kiwi/mount.py | import logging
import subprocess

# Assumed module-level logger: the snippet uses `logger` without defining it.
logger = logging.getLogger(__name__)

def mountpoint(path):
    try:
        subprocess.check_call(['mountpoint', path],
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except subprocess.CalledProcessError:
        return False
    return True

def unmount(path):
    subprocess.check_call(['umount', path])

def mount(src, dst, mkdir=False, force=False):
    if mkdir: subprocess.check_call(['mkdir', '-p', dst])
    if mountpoint(dst):
        logger.warning('Destination %s is already a mountpoint' % dst)
        if force: unmount(dst)
        else: return
    subprocess.check_call(['mount', src, dst])
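
# Usage sketch (hypothetical paths; each helper shells out to the
# corresponding system utility via subprocess):
#   mount('/dev/sda1', '/mnt/target', mkdir=True, force=True)
#   unmount('/mnt/target')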
| mit | Python |
|
cc1a799da671fbbbdd0406eeebc8c5a801a099d5 | Add extension test | Hardtack/Flask-Swag,Hardtack/Flask-Swag,Hardtack/Flask-Swag | tests/test_extension.py | tests/test_extension.py | """
tests.test_extension
====================
Tests for extension
"""
import json
from flask import Flask
from flask_swag import Swag
def test_extension():
    """Basic test for flask extension."""
    app = Flask(__name__)
    app.config['SWAG_TITLE'] = "Test application."
    app.config['SWAG_API_VERSION'] = '1.0.1'
    swag = Swag(app)

    with app.test_request_context('/swagger/swagger.json'):
        swagger_json = app.generate_swagger()

    client = app.test_client()
    response = client.get('/swagger/swagger.json')
    assert 200 == response.status_code
    assert swagger_json == json.loads(response.data.decode('utf-8'))
| mit | Python |
|
4181f69bda52c4cbec7ac1d7529d44e26ede61d1 | create object base classes. | christophreimer/pygeobase | pygeobase/object_base.py | pygeobase/object_base.py | # Copyright (c) 2015, Vienna University of Technology, Department of Geodesy
# and Geoinformation. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Vienna University of Technology, Department of
# Geodesy and Geoinformation nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL VIENNA UNIVERSITY OF TECHNOLOGY,
# DEPARTMENT OF GEODESY AND GEOINFORMATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class TS(object):
    """
    The TS class represents the base object of a time series.
    """

    def __init__(self, gpi, data, metadata):
        """
        Initialization of the time series object.

        Parameters
        ----------
        gpi : int
            Grid point index associated with the time series
        data : pandas.DataFrame
            Pandas DataFrame that holds data for each variable of the time
            series
        metadata : dict
            dictionary of numpy arrays that hold the metadata
        """
        self.gpi = gpi
        self.data = data
        self.metadata = metadata


class Image(object):
    """
    The Image class represents the base object of an image.
    """

    def __init__(self, data, metadata, lon, lat, timestamp):
        """
        Initialization of the image object.

        Parameters
        ----------
        data : dict
            dictionary of numpy arrays that holds the image data for each
            variable of the dataset
        metadata : dict
            dictionary of numpy arrays that hold the metadata
        lon : numpy.array or None
            array of longitudes, if None self.grid will be assumed
        lat : numpy.array or None
            array of latitudes, if None self.grid will be assumed
        timestamp : datetime.datetime
            exact timestamp of the image
        """
        self.data = data
        self.metadata = metadata
        self.lon = lon
        self.lat = lat
        self.timestamp = timestamp
| bsd-3-clause | Python |
|
88ec4243ff78fe511331461b7563bd49f7124fe2 | Add tuple. | Vayne-Lover/Python | tuple/tuple.py | tuple/tuple.py | #!/usr/local/bin/python
x=(42,)
print x
y=3*(3,)
print y
z=tuple("hello")
i=1,2,3
print i[2]
print i[0:2]
| apache-2.0 | Python |
|
24788b106b9cdd70e7240dc3eccac82fba290c85 | Add test for yaml enviroment | lukas-hetzenecker/home-assistant,LinuxChristian/home-assistant,molobrakos/home-assistant,sffjunkie/home-assistant,titilambert/home-assistant,ewandor/home-assistant,emilhetty/home-assistant,mikaelboman/home-assistant,nkgilley/home-assistant,robbiet480/home-assistant,jawilson/home-assistant,molobrakos/home-assistant,devdelay/home-assistant,florianholzapfel/home-assistant,deisi/home-assistant,betrisey/home-assistant,jaharkes/home-assistant,qedi-r/home-assistant,postlund/home-assistant,mezz64/home-assistant,eagleamon/home-assistant,w1ll1am23/home-assistant,varunr047/homefile,balloob/home-assistant,leoc/home-assistant,jabesq/home-assistant,MungoRae/home-assistant,Cinntax/home-assistant,nugget/home-assistant,soldag/home-assistant,dmeulen/home-assistant,happyleavesaoc/home-assistant,devdelay/home-assistant,sffjunkie/home-assistant,kennedyshead/home-assistant,sffjunkie/home-assistant,sander76/home-assistant,hexxter/home-assistant,balloob/home-assistant,Teagan42/home-assistant,DavidLP/home-assistant,Danielhiversen/home-assistant,robjohnson189/home-assistant,morphis/home-assistant,Zac-HD/home-assistant,ct-23/home-assistant,w1ll1am23/home-assistant,Danielhiversen/home-assistant,aronsky/home-assistant,kyvinh/home-assistant,emilhetty/home-assistant,hexxter/home-assistant,hmronline/home-assistant,varunr047/homefile,shaftoe/home-assistant,MartinHjelmare/home-assistant,shaftoe/home-assistant,deisi/home-assistant,joopert/home-assistant,open-homeautomation/home-assistant,open-homeautomation/home-assistant,dmeulen/home-assistant,oandrew/home-assistant,HydrelioxGitHub/home-assistant,deisi/home-assistant,leoc/home-assistant,emilhetty/home-assistant,postlund/home-assistant,ct-23/home-assistant,ct-23/home-assistant,auduny/home-assistant,betrisey/home-assistant,HydrelioxGitHub/home-assistant,PetePriority/home-assistant,leppa/home-assistant,home-assistant/home-assistant,PetePriority/home-assistant,sffjunkie/home-assistant,persandstrom/home-assistant,jabesq/home-assistant,robjohnson189/home-assistant,jamespcole/home-assistant,alexmogavero/home-assistant,oandrew/home-assistant,Smart-Torvy/torvy-home-assistant,philipbl/home-assistant,tchellomello/home-assistant,GenericStudent/home-assistant,rohitranjan1991/home-assistant,stefan-jonasson/home-assistant,xifle/home-assistant,philipbl/home-assistant,auduny/home-assistant,MungoRae/home-assistant,jaharkes/home-assistant,Zac-HD/home-assistant,soldag/home-assistant,tinloaf/home-assistant,bdfoster/blumate,morphis/home-assistant,keerts/home-assistant,morphis/home-assistant,jnewland/home-assistant,Smart-Torvy/torvy-home-assistant,partofthething/home-assistant,alexmogavero/home-assistant,eagleamon/home-assistant,PetePriority/home-assistant,mKeRix/home-assistant,florianholzapfel/home-assistant,tinloaf/home-assistant,kyvinh/home-assistant,nkgilley/home-assistant,persandstrom/home-assistant,kyvinh/home-assistant,Zac-HD/home-assistant,turbokongen/home-assistant,turbokongen/home-assistant,fbradyirl/home-assistant,JshWright/home-assistant,bdfoster/blumate,sffjunkie/home-assistant,partofthething/home-assistant,emilhetty/home-assistant,kennedyshead/home-assistant,aequitas/home-assistant,emilhetty/home-assistant,pschmitt/home-assistant,jabesq/home-assistant,varunr047/homefile,srcLurker/home-assistant,Smart-Torvy/torvy-home-assistant,Julian/home-assistant,toddeye/home-assistant,GenericStudent/home-assistant,mKeRix/home-assistant,srcLurker/home-assistant,rohitranjan1991/home-assistant,robjohnson189/home-assistant,robb
iet480/home-assistant,stefan-jonasson/home-assistant,HydrelioxGitHub/home-assistant,jnewland/home-assistant,happyleavesaoc/home-assistant,adrienbrault/home-assistant,alexmogavero/home-assistant,DavidLP/home-assistant,philipbl/home-assistant,tboyce1/home-assistant,LinuxChristian/home-assistant,hmronline/home-assistant,MungoRae/home-assistant,mikaelboman/home-assistant,Duoxilian/home-assistant,hexxter/home-assistant,JshWright/home-assistant,bdfoster/blumate,fbradyirl/home-assistant,miniconfig/home-assistant,robjohnson189/home-assistant,nugget/home-assistant,Cinntax/home-assistant,betrisey/home-assistant,sdague/home-assistant,adrienbrault/home-assistant,tboyce1/home-assistant,home-assistant/home-assistant,lukas-hetzenecker/home-assistant,sdague/home-assistant,tchellomello/home-assistant,rohitranjan1991/home-assistant,toddeye/home-assistant,mKeRix/home-assistant,mKeRix/home-assistant,eagleamon/home-assistant,mikaelboman/home-assistant,aronsky/home-assistant,oandrew/home-assistant,eagleamon/home-assistant,morphis/home-assistant,ewandor/home-assistant,betrisey/home-assistant,ma314smith/home-assistant,sander76/home-assistant,balloob/home-assistant,jaharkes/home-assistant,dmeulen/home-assistant,miniconfig/home-assistant,keerts/home-assistant,aequitas/home-assistant,FreekingDean/home-assistant,Julian/home-assistant,Zac-HD/home-assistant,oandrew/home-assistant,ma314smith/home-assistant,MungoRae/home-assistant,nugget/home-assistant,open-homeautomation/home-assistant,ewandor/home-assistant,deisi/home-assistant,florianholzapfel/home-assistant,mikaelboman/home-assistant,titilambert/home-assistant,Teagan42/home-assistant,jaharkes/home-assistant,Duoxilian/home-assistant,shaftoe/home-assistant,leppa/home-assistant,keerts/home-assistant,mezz64/home-assistant,jawilson/home-assistant,deisi/home-assistant,jamespcole/home-assistant,Smart-Torvy/torvy-home-assistant,tboyce021/home-assistant,stefan-jonasson/home-assistant,stefan-jonasson/home-assistant,ct-23/home-assistant,DavidLP/home-assistant,auduny/home-assistant,pschmitt/home-assistant,ma314smith/home-assistant,persandstrom/home-assistant,varunr047/homefile,philipbl/home-assistant,joopert/home-assistant,devdelay/home-assistant,FreekingDean/home-assistant,happyleavesaoc/home-assistant,hmronline/home-assistant,xifle/home-assistant,tboyce021/home-assistant,xifle/home-assistant,miniconfig/home-assistant,aequitas/home-assistant,Duoxilian/home-assistant,bdfoster/blumate,JshWright/home-assistant,open-homeautomation/home-assistant,keerts/home-assistant,leoc/home-assistant,ma314smith/home-assistant,srcLurker/home-assistant,jamespcole/home-assistant,LinuxChristian/home-assistant,jnewland/home-assistant,varunr047/homefile,shaftoe/home-assistant,miniconfig/home-assistant,tboyce1/home-assistant,devdelay/home-assistant,happyleavesaoc/home-assistant,MungoRae/home-assistant,MartinHjelmare/home-assistant,hmronline/home-assistant,LinuxChristian/home-assistant,bdfoster/blumate,tinloaf/home-assistant,xifle/home-assistant,srcLurker/home-assistant,Julian/home-assistant,florianholzapfel/home-assistant,molobrakos/home-assistant,LinuxChristian/home-assistant,Julian/home-assistant,tboyce1/home-assistant,mikaelboman/home-assistant,leoc/home-assistant,kyvinh/home-assistant,hexxter/home-assistant,Duoxilian/home-assistant,hmronline/home-assistant,JshWright/home-assistant,MartinHjelmare/home-assistant,alexmogavero/home-assistant,qedi-r/home-assistant,dmeulen/home-assistant,fbradyirl/home-assistant,ct-23/home-assistant | tests/util/test_yaml.py | tests/util/test_yaml.py | """Test Home 
Assistant yaml loader."""
import io
import unittest
import os

from homeassistant.util import yaml


class TestYaml(unittest.TestCase):
    """Test util.yaml loader."""

    def test_simple_list(self):
        """Test simple list."""
        conf = "config:\n - simple\n - list"
        with io.StringIO(conf) as f:
            doc = yaml.yaml.safe_load(f)
        assert doc['config'] == ["simple", "list"]

    def test_simple_dict(self):
        """Test simple dict."""
        conf = "key: value"
        with io.StringIO(conf) as f:
            doc = yaml.yaml.safe_load(f)
        assert doc['key'] == 'value'

    def test_duplicate_key(self):
        """Test duplicate dict keys."""
        conf = "key: thing1\nkey: thing2"
        try:
            with io.StringIO(conf) as f:
                yaml.yaml.safe_load(f)
        except Exception:
            pass
        else:
            assert 0

    def test_enviroment_variable(self):
        """Test config file with environment variable."""
        os.environ["PASSWORD"] = "secret_password"
        conf = "password: !env_var PASSWORD"
        with io.StringIO(conf) as f:
            doc = yaml.yaml.safe_load(f)
        assert doc['password'] == "secret_password"
        del os.environ["PASSWORD"]

    def test_invalid_enviroment_variable(self):
        """Test config file with no environment variable set."""
        conf = "password: !env_var PASSWORD"
        try:
            with io.StringIO(conf) as f:
                yaml.yaml.safe_load(f)
        except Exception:
            pass
        else:
            assert 0
| """Test Home Assistant yaml loader."""
import io
import unittest

from homeassistant.util import yaml


class TestYaml(unittest.TestCase):
    """Test util.yaml loader."""

    def test_simple_list(self):
        """Test simple list."""
        conf = "config:\n - simple\n - list"
        with io.StringIO(conf) as f:
            doc = yaml.yaml.safe_load(f)
        assert doc['config'] == ["simple", "list"]

    def test_simple_dict(self):
        """Test simple dict."""
        conf = "key: value"
        with io.StringIO(conf) as f:
            doc = yaml.yaml.safe_load(f)
        assert doc['key'] == 'value'

    def test_duplicate_key(self):
        """Test duplicate dict keys."""
        conf = "key: thing1\nkey: thing2"
        try:
            with io.StringIO(conf) as f:
                yaml.yaml.safe_load(f)
        except Exception:
            pass
        else:
            assert 0
| mit | Python |
2b0a96791ad43ef1f27b610233dd34027cf83c75 | Create currency-style.py | Pouf/CodingCompetition,Pouf/CodingCompetition | CiO/currency-style.py | CiO/currency-style.py | import re
def checkio(text):
    numbers = re.findall('(?<=\$)[^ ]*\d', text)
    for old in numbers:
        new = old.replace('.', ',')
        if ',' in new and len(new.split(',')[-1]) == 2:
            new = '.'.join(new.rsplit(',', 1))
        text = text.replace(old, new)
    return text
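
# The function rewrites European-style dollar amounts into US style, e.g.:
#   checkio('It costs $1.234,55 now')  ->  'It costs $1,234.55 now'
#   checkio('Only $1.000 left')        ->  'Only $1,000 left'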
| mit | Python |
|
a46f960e811123a137e4e5fe4350f6a850e9b33e | Create average-of-levels-in-binary-tree.py | yiwen-luo/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015 | Python/average-of-levels-in-binary-tree.py | Python/average-of-levels-in-binary-tree.py | # Time: O(n)
# Space: O(h)
# Given a non-empty binary tree,
# return the average value of the nodes on each level in the form of an array.
#
# Example 1:
# Input:
#         3
#        / \
#       9  20
#         /  \
#        15   7
# Output: [3, 14.5, 11]
# Explanation:
# The average value of nodes on level 0 is 3,
# on level 1 is 14.5, and on level 2 is 11. Hence return [3, 14.5, 11].
#
# Note:
# The range of node's value is in the range of 32-bit signed integer.
# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

import collections  # needed for collections.deque (implicit in the LeetCode runtime)


class Solution(object):
    def averageOfLevels(self, root):
        """
        :type root: TreeNode
        :rtype: List[float]
        """
        result = []
        q = collections.deque([root])
        while q:
            total, count = 0, 0
            next_q = collections.deque([])
            while q:
                n = q.popleft()
                total += n.val
                count += 1
                if n.left:
                    next_q.append(n.left)
                if n.right:
                    next_q.append(n.right)
            q, next_q = next_q, q
            result.append(float(total) / count)
        return result
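
# Worked example, assuming the TreeNode class from the comment above:
#   root = TreeNode(3)
#   root.left, root.right = TreeNode(9), TreeNode(20)
#   root.right.left, root.right.right = TreeNode(15), TreeNode(7)
#   Solution().averageOfLevels(root)  # -> [3.0, 14.5, 11.0]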
| mit | Python |
|
ee076055f11638b8711658972dda8c4d4b40f666 | Enforce max length on project name (#3982) | beeftornado/sentry,gencer/sentry,jean/sentry,ifduyue/sentry,JackDanger/sentry,BuildingLink/sentry,BuildingLink/sentry,jean/sentry,mvaled/sentry,gencer/sentry,beeftornado/sentry,gencer/sentry,fotinakis/sentry,looker/sentry,ifduyue/sentry,JackDanger/sentry,alexm92/sentry,ifduyue/sentry,jean/sentry,JamesMura/sentry,looker/sentry,JackDanger/sentry,mvaled/sentry,gencer/sentry,mvaled/sentry,fotinakis/sentry,BuildingLink/sentry,jean/sentry,JamesMura/sentry,zenefits/sentry,fotinakis/sentry,BuildingLink/sentry,looker/sentry,zenefits/sentry,mvaled/sentry,JamesMura/sentry,mvaled/sentry,BuildingLink/sentry,beeftornado/sentry,alexm92/sentry,looker/sentry,ifduyue/sentry,jean/sentry,fotinakis/sentry,alexm92/sentry,looker/sentry,zenefits/sentry,JamesMura/sentry,JamesMura/sentry,ifduyue/sentry,zenefits/sentry,zenefits/sentry,gencer/sentry,mvaled/sentry | src/sentry/web/forms/add_project.py | src/sentry/web/forms/add_project.py | from __future__ import absolute_import
from django import forms
from django.utils.translation import ugettext_lazy as _
from sentry.models import AuditLogEntry, AuditLogEntryEvent, Project
from sentry.signals import project_created
from sentry.utils.samples import create_sample_event
BLANK_CHOICE = [("", "")]
class AddProjectForm(forms.ModelForm):
    name = forms.CharField(label=_('Name'), max_length=64,
        widget=forms.TextInput(attrs={
            'placeholder': _('i.e. API, Frontend, My Application Name'),
        }),
        help_text=_('Using the repository name generally works well.'),
    )

    class Meta:
        fields = ('name',)
        model = Project

    def __init__(self, organization, *args, **kwargs):
        forms.ModelForm.__init__(self, *args, **kwargs)
        self.organization = organization

    def save(self, actor, team, ip_address):
        project = super(AddProjectForm, self).save(commit=False)
        project.team = team
        project.organization = team.organization
        project.save()

        AuditLogEntry.objects.create(
            organization=project.organization,
            actor=actor,
            ip_address=ip_address,
            target_object=project.id,
            event=AuditLogEntryEvent.PROJECT_ADD,
            data=project.get_audit_log_data(),
        )

        project_created.send(project=project, user=actor, sender=self)

        create_sample_event(project, platform='javascript')

        return project
| from __future__ import absolute_import
from django import forms
from django.utils.translation import ugettext_lazy as _
from sentry.models import AuditLogEntry, AuditLogEntryEvent, Project
from sentry.signals import project_created
from sentry.utils.samples import create_sample_event
BLANK_CHOICE = [("", "")]
class AddProjectForm(forms.ModelForm):
    name = forms.CharField(label=_('Name'), max_length=200,
        widget=forms.TextInput(attrs={
            'placeholder': _('i.e. API, Frontend, My Application Name'),
        }),
        help_text=_('Using the repository name generally works well.'),
    )

    class Meta:
        fields = ('name',)
        model = Project

    def __init__(self, organization, *args, **kwargs):
        forms.ModelForm.__init__(self, *args, **kwargs)
        self.organization = organization

    def save(self, actor, team, ip_address):
        project = super(AddProjectForm, self).save(commit=False)
        project.team = team
        project.organization = team.organization
        project.save()

        AuditLogEntry.objects.create(
            organization=project.organization,
            actor=actor,
            ip_address=ip_address,
            target_object=project.id,
            event=AuditLogEntryEvent.PROJECT_ADD,
            data=project.get_audit_log_data(),
        )

        project_created.send(project=project, user=actor, sender=self)

        create_sample_event(project, platform='javascript')

        return project
| bsd-3-clause | Python |
96f224a6b80720a88fefc8530aea113f975ef110 | Add new layout window command | shaochuan/sublime-plugins | new_layout.py | new_layout.py | import sublime, sublime_plugin
class NewLayoutCommand(sublime_plugin.TextCommand):
    def run(self, edit, **args):
        self.view.window().run_command("set_layout", args)
        self.view.window().run_command("focus_group", { "group": 0 })
        self.view.window().run_command("move_to_group", { "group": 1 })
| mit | Python |
|
62ff128888bce33cf87e083a921ddac65a2f1879 | Add regression test for #3951 | explosion/spaCy,spacy-io/spaCy,explosion/spaCy,explosion/spaCy,explosion/spaCy,honnibal/spaCy,spacy-io/spaCy,spacy-io/spaCy,spacy-io/spaCy,honnibal/spaCy,explosion/spaCy,spacy-io/spaCy,honnibal/spaCy,honnibal/spaCy,explosion/spaCy,spacy-io/spaCy | spacy/tests/regression/test_issue3951.py | spacy/tests/regression/test_issue3951.py | # coding: utf8
from __future__ import unicode_literals
import pytest
from spacy.matcher import Matcher
from spacy.tokens import Doc
@pytest.mark.xfail
def test_issue3951(en_vocab):
    """Test that combinations of optional rules are matched correctly."""
    matcher = Matcher(en_vocab)
    pattern = [
        {"LOWER": "hello"},
        {"LOWER": "this", "OP": "?"},
        {"OP": "?"},
        {"LOWER": "world"},
    ]
    matcher.add("TEST", None, pattern)
    doc = Doc(en_vocab, words=["Hello", "my", "new", "world"])
    matches = matcher(doc)
    assert len(matches) == 0
| mit | Python |
|
8436253648c67205de23db8797c9fcc7c2172b3e | add the actual test | thomasvs/pychecker,thomasvs/pychecker | test/test_slice.py | test/test_slice.py | # -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
'''
Tests related to slices.
'''
import unittest
import common
class SliceTestCase:#(common.TestCase):
    '''
    test that slices work.
    '''
    def test_slice(self):
        self.check('test_slice')

if __name__ == '__main__':
    unittest.main()
| bsd-3-clause | Python |
|
8ca0e88b7df79461f401e7c46c822f16223ddd0b | Create solution.py | lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges | hackerrank/algorithms/implementation/easy/between_two_sets/py/solution.py | hackerrank/algorithms/implementation/easy/between_two_sets/py/solution.py | #!/bin/python3
import sys
# Hackerrank Python3 environment does not provide math.gcd
# as of the time of writing. We define it ourselves.
def gcd(n, m):
    while m > 0:
        n, m = m, n % m
    return n

def lcm(x, y):
    return (x * y) // gcd(x, y)

def between(s1, s2):
    import functools
    cd = functools.reduce(gcd, s2)
    cm = functools.reduce(lcm, s1)
    return tuple(x for x in range(cm, cd + 1) if cd % x == 0 and x % cm == 0)
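
# e.g. between([2, 4], [16, 32, 96]) == (4, 8, 16): the integers divisible
# by every element of the first set that also divide every element of the
# second (the HackerRank sample case).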
n, m = input().strip().split(' ')
n, m = [int(n),int(m)]
a = [int(a_temp) for a_temp in input().strip().split(' ')]
b = [int(b_temp) for b_temp in input().strip().split(' ')]
btw = between(a, b)
print(len(btw))
| mit | Python |
|
325465d18e963400b427f259547d4292a47368c9 | Use Django nose for tests. | 1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow,1flow/1flow | oneflow/settings/snippets/common_development.py | oneflow/settings/snippets/common_development.py | #
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
    'django.core.context_processors.debug',
)

ALLOWED_HOSTS += [
    'localhost',
    'chani.licorn.org',
    'leto.licorn.org',
    'gurney.licorn.org'
]
INSTALLED_APPS += ('django_nose', )
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
| #
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
    'django.core.context_processors.debug',
)

ALLOWED_HOSTS += [
    'localhost',
    'chani.licorn.org',
    'leto.licorn.org',
    'gurney.licorn.org'
]
| agpl-3.0 | Python |
df69df55cdf51da60e62226c16b30c76e2836c20 | Add initial test suite | adamjstewart/fiscalyear | test_fiscalyear.py | test_fiscalyear.py | import fiscalyear
import pytest
class TestFiscalYear:
    @pytest.fixture(scope='class')
    def a(self):
        return fiscalyear.FiscalYear(2016)

    @pytest.fixture(scope='class')
    def b(self):
        return fiscalyear.FiscalYear(2017)

    @pytest.fixture(scope='class')
    def c(self):
        return fiscalyear.FiscalYear('2017')

    def test_basic(self, a):
        assert a.fiscal_year == 2016

    def test_repr(self, a):
        assert repr(a) == 'fiscalyear.FiscalYear(2016)'

    def test_str(self, a):
        assert str(a) == 'FY2016'

    def test_less_than(self, a, b):
        assert a < b

    def test_less_than_equals(self, a, b, c):
        assert a <= b <= c

    def test_equals(self, b, c):
        assert b == c

    def test_not_equals(self, a, b):
        assert a != b

    def test_greater_than(self, a, b):
        assert b > a

    def test_greater_than_equals(self, a, b, c):
        assert c >= b >= a

    def test_from_string(self, c):
        assert c.fiscal_year == 2017

    def test_wrong_type(self):
        with pytest.raises(TypeError):
            x = fiscalyear.FiscalYear(2017.5)
        with pytest.raises(TypeError):
            y = fiscalyear.FiscalYear('hello world')

    def test_out_of_range(self):
        with pytest.raises(ValueError):
            x = fiscalyear.FiscalYear(0)
        with pytest.raises(ValueError):
            y = fiscalyear.FiscalYear(-2017)


class TestFiscalQuarter:
    @pytest.fixture(scope='class')
    def a(self):
        return fiscalyear.FiscalQuarter(2016, 4)

    @pytest.fixture(scope='class')
    def b(self):
        return fiscalyear.FiscalQuarter(2017, 1)

    @pytest.fixture(scope='class')
    def c(self):
        return fiscalyear.FiscalQuarter(2017, 2)

    @pytest.fixture(scope='class')
    def d(self):
        return fiscalyear.FiscalQuarter(2017, 3)

    @pytest.fixture(scope='class')
    def e(self):
        return fiscalyear.FiscalQuarter(2017, 4)

    @pytest.fixture(scope='class')
    def f(self):
        return fiscalyear.FiscalQuarter(2018, 1)

    @pytest.fixture(scope='class')
    def g(self):
        return fiscalyear.FiscalQuarter('2018', '1')

    def test_basic(self, a):
        assert a.fiscal_year == 2016
        assert a.quarter == 4

    def test_repr(self, a):
        assert repr(a) == 'fiscalyear.FiscalQuarter(2016, 4)'

    def test_str(self, a):
        assert str(a) == 'FY2016 Q4'

    def test_less_than(self, a, b, c, d, e, f):
        assert a < b < c < d < e < f

    def test_less_than_equals(self, a, b, c, d, e, f, g):
        assert a <= b <= c <= d <= e <= f <= g

    def test_equals(self, f, g):
        assert f == g

    def test_not_equals(self, b, c, g):
        # Same year, different quarter
        assert b != c
        # Same quarter, different year
        assert b != g

    def test_greater_than(self, a, b, c, d, e, f):
        assert f > e > d > c > b > a

    def test_greater_than_equals(self, a, b, c, d, e, f, g):
        assert g >= f >= e >= d >= c >= b >= a

    def test_from_string(self, g):
        assert g.fiscal_year == 2018
        assert g.quarter == 1

    def test_wrong_type(self):
        with pytest.raises(TypeError):
            x = fiscalyear.FiscalQuarter(2017.5, 1.2)
        with pytest.raises(TypeError):
            y = fiscalyear.FiscalQuarter('hello', 'world')

    def test_out_of_range(self):
        with pytest.raises(ValueError):
            x = fiscalyear.FiscalQuarter(2017, 0)
        with pytest.raises(ValueError):
            y = fiscalyear.FiscalQuarter(2017, 5)
        with pytest.raises(ValueError):
            z = fiscalyear.FiscalQuarter(0, 2)
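
# The suite is meant to be collected by pytest, e.g.:
#   pytest test_fiscalyear.py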
| mit | Python |
|
3e9289f142efd0769beff97cddfcbcbede40f85a | add a half written Qkkk | farseerfc/pacvis,farseerfc/pacvis,farseerfc/pacvis,farseerfc/pacvis | pacfiles/Qkkk.py | pacfiles/Qkkk.py | #!/usr/bin/env python3
import pyalpm
import pycman
import tarfile
import sys, os, os.path
pacmanconf = pycman.config.init_with_config("/etc/pacman.conf")
rootdir = pacmanconf.rootdir
def local_database():
    handle = pacmanconf
    localdb = handle.get_localdb()
    packages = localdb.pkgcache
    syncdbs = handle.get_syncdbs()
    db = dict()
    for pkg in packages:
        for syncdb in syncdbs:
            if syncdb.get_pkg(pkg.name) is not None:
                db[pkg.name] = syncdb.get_pkg(pkg.name)
    return db

def get_pkgfiles(package):
    db = local_database()
    pkg = db[package].filename
    result = []
    for d in pacmanconf.cachedirs:
        p = os.path.join(d, pkg)
        if os.path.exists(p):
            result.append(p)
    return result

def error_file(file, pkgfile, pkgname):
    # Single braces so the f-string actually interpolates the values.
    print(f'"{file}" in {pkgfile} of {pkgname} mismatch')

def check_pkgfile(pkgname, pkgfile):
    with tarfile.open(pkgfile) as tar:
        for fn in tar:
            fnpath = os.path.join(rootdir, fn.name)
            if fn.isdir():
                if not os.path.isdir(fnpath):
                    error_file(fnpath, pkgfile, pkgname)
            # else if fn.issym():
            #     if not os.path.issym(fnpath):

def main():
    for pkgname in sys.argv[1:]:
        for pkgfile in get_pkgfiles(pkgname):
            check_pkgfile(pkgname, pkgfile)

if __name__ == '__main__':
    main()
| mit | Python |
|
701acbccc764101e00eef35dfff81dda5c5437a3 | Create pages_in_dict.py | nevmenandr/thai-language | pages_in_dict.py | pages_in_dict.py | import codecs
import os
import re
letters = []
no_letters = []
number_of = {}
pages = os.listdir(".")
for page in pages:
    if page.endswith('.html'):
        if page[0:3] not in letters:
            letters.append(page[0:3])
        f = codecs.open(page, 'r', 'utf-8-sig')
        text = f.read()
        #n = re.findall('Page [0-9]*? of [0-9][0-9]', text, flags=re.U)
        #number_of[page[0:3]] = n[-1:-2]

for i in range(161, 206):
    if str(i) not in letters:
        no_letters.append(str(i))

print no_letters
| cc0-1.0 | Python |
|
39bf0b2ab6f89cfe3450102699a5bbeaf235011a | Create 4.py | gotclout/PythonJunk | 4.py | 4.py | #!/usr/bin/env python
MAX_TRI = 999999L
triangles = []
def next_pos(mn, pos):
    if mn > triangles[MAX_TRI - 1]:
        return -1
    else:
        maxv = MAX_TRI - 1
        minv = 0
        mid = minv + (maxv - minv) / 2
        while triangles[mid] != mn and minv < maxv:
            if triangles[mid] < mn:
                minv = mid + 1
            else:
                maxv = mid - 1
            mid = minv + (maxv - minv) / 2
        return mid

def gen_triangles(offset):
    triangles[:] = []
    i = 1L + offset * MAX_TRI
    bound = i + MAX_TRI
    print "Generating %i through %i " % (i, bound)
    while i <= bound:
        triangles.append((i * (i + 1L)) / 2L)
        i += 1L
    print "Max value = %i " % (triangles[MAX_TRI - 1])

def pe321():
    offset = 0L
    #total = 0L
    #count = 0L
    #pos = 0L
    n = 1L
    #mn = 0L
    gen_triangles(offset)
    # pos must start at 0 before the first next_pos() call
    offset = total = count = mn = pos = 0L
    n = 1L
    while count < 41:
        mn = 2L * n + n * n
        while mn % 3 != 0 and mn % 9 != 1:
            n += 1L
            mn = 2L * n + n * n
        pos = next_pos(mn, pos)
        if pos == -1:
            offset += 2L
            gen_triangles(offset)
            pos = 0L
        if mn == triangles[pos]:
            count += 1L
            total += n
            print "M(%i) = %i is triangular" % (n, mn)
            n += 1L
        else:
            n += 1L
    print "The sum of the first %i terms = %i" % (count, total)

pe321()
| mit | Python |
|
ac357bc1ccefe55e25bb34021772301726ceec0e | Complete P4 | medifle/python_6.00.1x | Quiz/Problem4_defMyLog.py | Quiz/Problem4_defMyLog.py | def myLog(x, b):
    '''
    x: a positive integer
    b: a positive integer; b >= 2
    returns: log_b(x), or, the logarithm of x relative to a base b.
    '''
    if x < b:
        return 0
    else:
        return 1 + myLog(x / b, b) | mit | Python |
|
e6ea8ad5b94b51d8b07dea238f2545eacba3abfe | Create Elentirmo_GUI_V0.1.py | kellogg76/ArduinoTelescopeDustCover | Elentirmo_GUI_V0.1.py | Elentirmo_GUI_V0.1.py | #!/usr/bin/python
from Tkinter import *
root = Tk()
root.title("Elentirmo Observatory Controller v0.1")
dust_cover_text = StringVar()
dust_cover_text.set('Cover Closed')
flat_box_text = StringVar()
flat_box_text.set('Flat Box Off')
def dust_cover_open():
    print "Opening"
    ## Open a serial connection with Arduino.
    import time
    import serial
    ser = serial.Serial("COM9", 9600) # Open serial port that Arduino is using
    time.sleep(3) # Wait 3 seconds for Arduino to reset
    print ser # Print serial config
    print "Sending serial command to OPEN the dust cover."
    ser.write("O")
    print "Closing serial connection."
    ser.close()
    # Reminder to close the connection when finished
    if(ser.isOpen()):
        print "Serial connection is still open."
    dust_cover_label.config(bg="Green")
    dust_cover_text.set('Cover is Open')

def dust_cover_close():
    print "Closing"
    ## Open a serial connection with Arduino.
    import time
    import serial
    ser = serial.Serial("COM9", 9600) # Open serial port that Arduino is using
    time.sleep(3) # Wait 3 seconds for Arduino to reset
    print ser # Print serial config
    print "Sending serial command to CLOSE the dust cover."
    ser.write("C")
    print "Closing serial connection."
    ser.close()
    # Reminder to close the connection when finished
    if(ser.isOpen()):
        print "Serial connection is still open."
    dust_cover_label.config(bg="red")
    dust_cover_text.set('Cover is closed')

def flat_on():
    print "Activating flat box"
    ## Open a serial connection with Arduino.
    import time
    import serial
    ser = serial.Serial("COM9", 9600) # Open serial port that Arduino is using
    time.sleep(3) # Wait 3 seconds for Arduino to reset
    print ser # Print serial config
    print "Sending serial command to turn on the flat box via relay."
    ser.write("Q")
    print "Closing serial connection."
    ser.close()
    # Reminder to close the connection when finished
    if(ser.isOpen()):
        print "Serial connection is still open."
    flat_box_label.config(bg="Green")
    flat_box_text.set('Flat Box on')

def flat_off():
    print "Deactivating flat box"
    ## Open a serial connection with Arduino.
    import time
    import serial
    ser = serial.Serial("COM9", 9600) # Open serial port that Arduino is using
    time.sleep(3) # Wait 3 seconds for Arduino to reset
    print ser # Print serial config
    print "Sending serial command to turn off the flat box via relay."
    ser.write("F")
    print "Closing serial connection."
    ser.close()
    # Reminder to close the connection when finished
    if(ser.isOpen()):
        print "Serial connection is still open."
    flat_box_label.config(bg="red")
    flat_box_text.set('Flat Box Off')
open_dust_cover_btn = Button(text=" Open Cover ", width=15, command=dust_cover_open)
open_dust_cover_btn.grid(row=0, column=0)
close_dust_cover_btn = Button(text=" Close Cover ", width=15, command=dust_cover_close)
close_dust_cover_btn.grid(row=1, column=0)
flat_box_on_btn = Button(text="Turn On Light", width=15, command=flat_on)
flat_box_on_btn.grid(row=0, column=2)
flat_box_off_btn = Button(text="Turn Off Light", width=15, command=flat_off)
flat_box_off_btn.grid(row=1, column=2)
status_label = Label(root, text=("Current Status"), width=15, fg="Black")
status_label.grid(row=2, column=1)
dust_cover_label = Label(root, textvariable=dust_cover_text, width=15, fg="Black", bg="Red")
dust_cover_label.grid(row=2, column=0)
flat_box_label = Label(root, textvariable=flat_box_text, width=15, fg="Black", bg="Red")
flat_box_label.grid(row=2, column=2)
root.mainloop()
| mit | Python |
|
51feabbc27821c5acb7f0ceb932d19c0d79f16d1 | test ssl version check functions as expected in python 2.6 | psf/requests | tests/test_help.py | tests/test_help.py | # -*- encoding: utf-8
import sys
import pytest
from requests.help import info
@pytest.mark.skipif(sys.version_info[:2] != (2,6), reason="Only run on Python 2.6")
def test_system_ssl_py26():
    """OPENSSL_VERSION_NUMBER isn't provided in Python 2.6, verify we don't
    blow up in this case.
    """
    assert info()['system_ssl'] == {'version': ''}


@pytest.mark.skipif(sys.version_info < (2,7), reason="Only run on Python 2.7+")
def test_system_ssl():
    """Verify we're actually setting system_ssl when it should be available."""
    assert info()['system_ssl']['version'] != ''
| apache-2.0 | Python |
|
ca27dc71bd814fe42282521edd97ae444d6c714b | Add test of PlotData | pfi/maf,pfi/maf | tests/test_plot.py | tests/test_plot.py | from maflib.plot import *
import unittest
class TestPlotData(unittest.TestCase):
    inputs = [
        { 'x': 1, 'y': 2, 'z': 50, 'k': 'p' },
        { 'x': 5, 'y': 3, 'z': 25, 'k': 'q' },
        { 'x': 3, 'y': 5, 'z': 10, 'k': 'q' },
        { 'x': 7, 'y': 4, 'z': 85, 'k': 'p' }
    ]

    def test_empty_inputs(self):
        pd = PlotData([])
        data = pd.get_data_1d('x')
        self.assertListEqual([], data)

    def test_get_data_1d(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_1d('x')
        self.assertListEqual([1, 3, 5, 7], data)

    def test_get_data_1d_unsorted(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_1d('x', sort=False)
        self.assertListEqual([1, 5, 3, 7], data)

    def test_get_data_1d_with_key(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_1d('x', key='k')
        self.assertDictEqual({ 'p': [1, 7], 'q': [3, 5] }, data)

    def test_get_data_1d_unsorted_with_key(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_1d('x', key='k', sort=False)
        self.assertDictEqual({ 'p': [1, 7], 'q': [5, 3] }, data)

    def test_get_data_2d(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_2d('x', 'y')
        self.assertEqual(2, len(data))
        self.assertListEqual([1, 3, 5, 7], data[0])
        self.assertListEqual([2, 5, 3, 4], data[1])

    def test_get_data_2d_unsorted(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_2d('x', 'y', sort=False)
        self.assertEqual(2, len(data))
        self.assertListEqual([1, 5, 3, 7], data[0])
        self.assertListEqual([2, 3, 5, 4], data[1])

    def test_get_data_2d_with_key(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_2d('x', 'y', key='k')
        self.assertDictEqual(
            { 'p': ([1, 7], [2, 4]), 'q': ([3, 5], [5, 3]) }, data)

    def test_get_data_2d_unsorted_with_key(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_2d('x', 'y', key='k', sort=False)
        self.assertDictEqual(
            { 'p': ([1, 7], [2, 4]), 'q': ([5, 3], [3, 5]) }, data)

    def test_get_data_3d(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_3d('x', 'y', 'z')
        self.assertEqual(3, len(data))
        self.assertListEqual([1, 3, 5, 7], data[0])
        self.assertListEqual([2, 5, 3, 4], data[1])
        self.assertListEqual([50, 10, 25, 85], data[2])

    def test_get_data_3d_unsorted(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_3d('x', 'y', 'z', sort=False)
        self.assertEqual(3, len(data))
        self.assertListEqual([1, 5, 3, 7], data[0])
        self.assertListEqual([2, 3, 5, 4], data[1])
        self.assertListEqual([50, 25, 10, 85], data[2])

    def test_get_data_3d_with_key(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_3d('x', 'y', 'z', key='k')
        self.assertDictEqual({
            'p': ([1, 7], [2, 4], [50, 85]),
            'q': ([3, 5], [5, 3], [10, 25])
        }, data)

    def test_Get_data_3d_unsorted_with_key(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_3d('x', 'y', 'z', key='k', sort=False)
        self.assertDictEqual({
            'p': ([1, 7], [2, 4], [50, 85]),
            'q': ([5, 3], [3, 5], [25, 10])
        }, data)
| bsd-2-clause | Python |
|
63804c534f23ffbe16ff539087048d99f9fcaf17 | Implement test_encoder_decoder | kenkov/seq2seq | test_encoder_decoder.py | test_encoder_decoder.py | #! /usr/bin/env python
# coding:utf-8
if __name__ == "__main__":
    import sys
    import argparse
    from seq2seq import decode
    from util import load_dictionary
    import configparser
    import os
    from chainer import serializers

    # GPU config
    parser = argparse.ArgumentParser()
    parser.add_argument('config_file', metavar='config_file', type=str,
                        help='config file')
    parser.add_argument('--gpu', '-g', default=-1, type=int,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--type', '-t', default="relu", type=str,
                        help='model type: relu or lstm')
    args = parser.parse_args()
    gpu_flag = True if args.gpu >= 0 else False

    config_file = args.config_file
    parser_config = configparser.ConfigParser()
    parser_config.read(config_file)
    config = parser_config["CONFIG"]
    # config["SEPARATOR"] = bytes(
    #     config["DEFAULT"]["SEPARATOR"], "utf-8"
    # ).decode("unicode_escape")

    # params
    model_dir = config["model_dir"]
    n_units = int(config["n_units"])

    # load conversation sentences
    dictionary = load_dictionary(config["dict_file"])

    # Prepare encoder RNN model
    dim = len(dictionary.keys())
    model_type = args.type
    if model_type == "relu":
        import relu_rnn
        model = relu_rnn.Classifier(
            relu_rnn.ReLURNN(
                embed_dim=dim,
                n_units=int(config["n_units"]),
                gpu=args.gpu
            )
        )
    elif model_type == "lstm":
        import lstm
        model = lstm.Classifier(
            lstm.LSTM(
                embed_dim=dim,
                n_units=int(config["n_units"]),
                gpu=args.gpu
            )
        )
    else:
        raise Exception("model argument should be relu or lstm")

    # load model
    init_model_name = os.path.join(
        model_dir,
        "model.npz"
    )
    if os.path.exists(init_model_name):
        serializers.load_npz(init_model_name, model)
        print("load model {}".format(init_model_name))
    else:
        raise Exception("learn model first")

    for text in (_.strip() for _ in sys.stdin):
        ws = text.split()
        print(ws)
        decoded_words = decode(
            ws,
            model,
            model,
            dictionary,
        )
        answer_text = "".join(decoded_words[1:-1])
        print(answer_text)
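
    # Example invocation (hypothetical config file; the script reads one
    # whitespace-tokenized sentence per line from stdin):
    #   echo "how are you" | python test_encoder_decoder.py config.ini --type lstm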
| mit | Python |
|
8eeb4c2db613c1354c38696ac6691cf79f66a383 | Add spider for Brookdale Senior Living | iandees/all-the-places,iandees/all-the-places,iandees/all-the-places | locations/spiders/brookdale.py | locations/spiders/brookdale.py | # -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
URL = 'https://www.brookdale.com/bin/brookdale/community-search?care_type_category=resident&loc=&finrpt=&state='
US_STATES = (
"AL", "AZ", "AR", "CA", "CO", "CT", "DE", "FL", "GA",
"ID", "IL", "IN", "IA", "KS", "KY", "LA", "MD",
"MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
"NM", "NY", "NC", "OH", "OK", "OR", "PA", "RI", "SC",
"TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY",
)
class TemplateSpider(scrapy.Spider):
name = "brookdale"
allowed_domains = ["www.brookdale.com"]
def start_requests(self):
for state in US_STATES:
url = ''.join([URL, state])
yield scrapy.Request(url, callback=self.parse_info)
def parse_info(self, response):
data = json.loads(response.body_as_unicode())
i = 0
while i < len(data):
print(data[i]['name'])
properties = {
"ref": data[i]['community_id'],
"name": data[i]['name'],
"lat": data[i]['latitude'],
"lon": data[i]['longitude'],
"addr_full": data[i]['address1'],
"city": data[i]['city'],
"state": data[i]['state'],
"country": data[i]['country_code'],
"postcode": data[i]['zip_postal_code'],
"website": data[i]['website'],
"phone": data[i]['contact_center_phone'],
}
yield GeojsonPointItem(**properties)
i += 1 | mit | Python |
|
47893c708f3b63f79a01d5ee927f4c7d3f6dff27 | Create script to delete untitled and unpublished projects | akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr | akvo/rsr/management/commands/delete_untitled_and_unpublished_projects.py | akvo/rsr/management/commands/delete_untitled_and_unpublished_projects.py | # -*- coding: utf-8 -*-
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
import datetime
from tablib import Dataset
from django.core.management.base import BaseCommand
from django.db.models import Q
from akvo.rsr.models import Project, PublishingStatus, IndicatorPeriodData, Result, IatiActivityImport
class Command(BaseCommand):
    help = """\
    Delete all Untitled and Unpublished projects created before the given date

    <script> <date:%Y-%m-%d> --delete
    """

    def add_arguments(self, parser):
        parser.add_argument('date', type=lambda date: datetime.datetime.strptime(date, '%Y-%m-%d').date())
        parser.add_argument('--delete', action='store_true', help='Actually delete projects')

    def handle(self, *args, **options):
        the_date = options['date']
        projects = Project.objects\
            .filter(created_at__lt=the_date)\
            .filter(Q(title__exact='') | Q(publishingstatus__status=PublishingStatus.STATUS_UNPUBLISHED))
        project_ids = projects.values_list('id', flat=True)

        if options['delete']:
            updates = IndicatorPeriodData.objects.filter(period__indicator__result__project__in=project_ids)
            print(f"Deleting {updates.count()} period updates")
            updates.delete()

            iati_import = IatiActivityImport.objects.filter(project__in=project_ids)
            print(f"Deleting {iati_import.count()} iati activity import")
            iati_import.delete()

            results = Result.objects.filter(project__in=project_ids)
            print(f"Deleting {results.count()} results")
            results.delete()

            print(f"Deleting {projects.count()} projects")
            projects.delete()
        else:
            data = Dataset()
            data.headers = [
                'project_id',
                'project_title',
                'is_published',
                'created_at'
            ]
            for p in projects:
                data.append([
                    p.id,
                    p.title,
                    p.is_published(),
                    p.created_at
                ])
            print(data.export('csv'))
            print(f'Found {projects.count()} projects to delete.')
| agpl-3.0 | Python |
|
03c837b0da9d7f7a6c6c54286631e9a403da3e60 | Add network scan python script - Closes #7 | OpenSecTre/sword,OpenSecTre/sword,OpenSecTre/sword | backend/net_scan.py | backend/net_scan.py | #!/usr/bin/python
import sys, getopt, nmap
def usage():
print 'sword_nmap.py -t <target> -p <port range>'
def main(argv):
target=''
port_range=''
try:
opts, args = getopt.getopt(argv,'ht:p:',['target=','ports='])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
usage()
sys.exit()
elif opt in ('-t', '--target'):
target = arg
elif opt in ('-p', '--ports'):
port_range = arg
if target == '':
usage()
sys.exit(1)
if port_range == '':
usage()
sys.exit(1)
scan(target, port_range)
def scan (target, port_range):
print ('Scanning %s %s' %(target, port_range))
nm = nmap.PortScanner()
nm.scan(target, port_range)
nm.command_line()
for host in nm.all_hosts():
print('Host : %s (%s): %s' % (host, nm[host].hostname(), nm[host].state()))
for proto in nm[host].all_protocols():
lport = nm[host][proto].keys()
lport.sort()
for port in lport:
print ('\t%s port %s %s' % (proto, port, nm[host][proto][port]['state']))
if __name__ == '__main__':
main(sys.argv[1:])
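
# Example invocation (hypothetical target and port range):
#   ./net_scan.py -t 192.168.1.0/24 -p 20-443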
| mit | Python |
|
24e4ed9f26f9803d54d37202d0d71e8f47b18aa3 | Add swix create functionality and make verifyswix test use it | aristanetworks/swi-tools,aristanetworks/swi-tools | swixtools/create.py | swixtools/create.py | #!/usr/bin/env python3
# Copyright ( c ) 2021 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
'''
This module is responsible for packaging a SWIX file.
'''
import argparse
import hashlib
import os
import shutil
import subprocess
import sys
import tempfile
def dealWithExistingOutputFile( outputSwix, force ):
    '''
    If the desired output file exists, fail unless `force` is specified.
    '''
    if os.path.exists( outputSwix ):
        if force:
            os.remove( outputSwix )
        else:
            sys.exit( f'File {outputSwix!r} exists: use --force to overwrite.\n' )

def sha1sum( filename, blockSize=65536 ):
    '''
    Compute the SHA1 sum of a file.
    We read in blocks in case of large files.
    '''
    result = hashlib.sha1()
    with open( filename, 'rb' ) as f:
        block = f.read( blockSize )
        while block:
            result.update( block )
            block = f.read( blockSize )
    return result.hexdigest()

def createManifest( tempDir, rpms ):
    '''
    Create a manifest file for the SWIX which contains:
    - The format version.
    - The name of the primary RPM.
    - The SHA1 digest of all RPMs.
    '''
    manifestBaseName = 'manifest.txt'
    manifestFileName = os.path.join( tempDir, manifestBaseName )
    basename = os.path.basename
    try:
        with open( manifestFileName, 'w' ) as manifest:
            print( 'format: 1', file=manifest )
            print( f'primaryRpm: {basename( rpms[0] )}', file=manifest )
            for rpm in rpms:
                print( f'{basename( rpm )}-sha1: {sha1sum( rpm )}', file=manifest )
    except Exception as e:
        sys.exit( f'{manifestFileName}: {e}\n' )
    return manifestFileName

def create( outputSwix=None, info=None, rpms=None, force=False, sign=False ):
    '''
    Create a SWIX file named `outputSwix` given a list of RPMs.
    `info` and `sign` are currently unused.
    '''
    dealWithExistingOutputFile( outputSwix, force )
    try:
        tempDir = tempfile.mkdtemp( suffix='.tempDir',
                                    dir='.',
                                    prefix=os.path.basename( outputSwix ) )
        manifest = createManifest( tempDir, rpms )
        filesToZip = [manifest] + rpms
        if info:
            pass  # TODO: If YAML file, verify.
        # '-0' means 'no compression'.
        # '-j' means 'use basenames'.
        subprocess.check_call( f'zip -0 -j {outputSwix}'.split() + filesToZip )
        if sign:
            pass  # TODO: Sign.
    except Exception as e:
        sys.exit( f'Error occurred during generation of SWIX file: {e}\n' )
    finally:
        shutil.rmtree( tempDir, ignore_errors=True )

def parseCommandArgs( args ):
    parser = argparse.ArgumentParser( prog='swix create' )
    add = parser.add_argument
    add( 'outputSwix', metavar='OUTFILE.swix',
         help='Name of output file' )
    add( 'rpms', metavar='PACKAGE.rpm', type=str, nargs='+',
         help='An RPM to add to the swix' )
    add( '-f', '--force', action='store_true',
         help='Overwrite OUTFILE.swix if it already exists' )
    add( '-i', '--info', metavar='manifest.yaml', action='store', type=str,
         help='Location of manifest.yaml file to add metadata to swix' )
    return parser.parse_args( args )

def main():
    args = parseCommandArgs( sys.argv[1:] )
    create( **args.__dict__ )

if __name__ == '__main__':
    main()
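
# Example invocation (hypothetical file names):
#   swix create MyExtension.swix MyExtension-1.0.0-1.rpm --force
# This zips the generated manifest.txt (format/primaryRpm/sha1 lines)
# together with the RPMs, uncompressed, into MyExtension.swix.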
| apache-2.0 | Python |
|
4699c1c301f1cb99f6c9e616b31769c01bc291d5 | change datafiles in v1.* to make it work in v2.* | heyrict/exam | v1_to_v2.py | v1_to_v2.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import optparse, pickle, exam
def main():
    opt = optparse.OptionParser()
    (options, args) = opt.parse_args()
    for i in args:
        with open(i, 'rb') as f: data = pickle.load(f)
        data = exam.QuestForm(data)
        with open('v3.'.join(i.split('.')), 'wb') as f: pickle.dump(data, f)
    return

main()
| apache-2.0 | Python |
|
0403d6f78189be3f3b22f068dad1db0c53687ef7 | Add ptch module and base PatchFile class. This class can unpack RLE-compressed patchfiles. | jleclanche/pywow,jleclanche/pywow,jleclanche/pywow,jleclanche/pywow,jleclanche/pywow,jleclanche/pywow | ptch/__init__.py | ptch/__init__.py | # -*- coding: utf-8 -*-
"""
PTCH files are a container format for Blizzard patch files.
They begin with a 72 byte header containing some metadata, immediately
followed by a RLE-packed BSDIFF40.
The original BSDIFF40 format is compressed with bzip2 instead of RLE.
"""
#from hashlib import md5
from struct import unpack
from binascii import hexlify
from cStringIO import StringIO
class PatchFile(object):
    def __init__(self, file):
        # Parse the header
        file.seek(0)
        assert file.read(4) == "PTCH"
        unk1 = file.read(4)
        self.sizeBefore, self.sizeAfter = unpack("ii", file.read(8))
        assert file.read(4) == "MD5_"
        assert unpack("i", file.read(4)) == (0x28, )
        self.md5Before, self.md5After = unpack("16s16s", file.read(32))
        self.md5Before, self.md5After = hexlify(self.md5Before), hexlify(self.md5After)
        assert file.read(4) == "XFRM"
        file.read(4)
        assert file.read(4) == "BSD0"
        self.fileSize, = unpack("i", file.read(4))
        self.compressedDiff = file.read()
        file.close()

    def __repr__(self):
        header = ("sizeBefore", "sizeAfter", "md5Before", "md5After", "fileSize")
        return "%s(%s)" % (self.__class__.__name__, ", ".join("%s=%r" % (k, getattr(self, k)) for k in header))

    def rleUnpack(self):
        """
        Read the RLE-packed data and
        return the unpacked output.
        """
        data = StringIO(self.compressedDiff)
        ret = []

        byte = data.read(1)
        while byte:
            byte = ord(byte)
            # Is it a repeat control?
            if byte & 0x80:
                count = (byte & 0x7F) + 1
                ret.append(data.read(count))
            else:
                ret.append("\0" * (byte + 1))
            byte = data.read(1)

        return "".join(ret)
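
# Usage sketch (hypothetical file name):
#   patch = PatchFile(open("interface.ptch", "rb"))
#   raw_bsdiff = patch.rleUnpack()  # yields the BSDIFF40 blob described above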
| cc0-1.0 | Python |
|
8533c93505a733980406ce655372c7742dfcfdfc | Add update policy that allows for in place upgrade of ES cluster (#1537) | cloudtools/troposphere,cloudtools/troposphere,ikben/troposphere,ikben/troposphere | troposphere/policies.py | troposphere/policies.py | from . import AWSProperty, AWSAttribute, validate_pausetime
from .validators import positive_integer, integer, boolean
class AutoScalingRollingUpdate(AWSProperty):
    props = {
        'MaxBatchSize': (positive_integer, False),
        'MinInstancesInService': (integer, False),
        'MinSuccessfulInstancesPercent': (integer, False),
        'PauseTime': (validate_pausetime, False),
        'SuspendProcesses': ([basestring], False),
        'WaitOnResourceSignals': (boolean, False),
    }


class AutoScalingScheduledAction(AWSProperty):
    props = {
        'IgnoreUnmodifiedGroupSizeProperties': (boolean, False),
    }


class AutoScalingReplacingUpdate(AWSProperty):
    props = {
        'WillReplace': (boolean, False),
    }


class CodeDeployLambdaAliasUpdate(AWSProperty):
    props = {
        'AfterAllowTrafficHook': (basestring, False),
        'ApplicationName': (boolean, True),
        'BeforeAllowTrafficHook': (basestring, False),
        'DeploymentGroupName': (boolean, True),
    }


class UpdatePolicy(AWSAttribute):
    props = {
        'AutoScalingRollingUpdate': (AutoScalingRollingUpdate, False),
        'AutoScalingScheduledAction': (AutoScalingScheduledAction, False),
        'AutoScalingReplacingUpdate': (AutoScalingReplacingUpdate, False),
        'CodeDeployLambdaAliasUpdate': (CodeDeployLambdaAliasUpdate, False),
        'UseOnlineResharding': (boolean, False),
        'EnableVersionUpgrade': (boolean, False),
    }


class ResourceSignal(AWSProperty):
    props = {
        'Count': (positive_integer, False),
        'Timeout': (validate_pausetime, False),
    }


class AutoScalingCreationPolicy(AWSProperty):
    props = {
        'MinSuccessfulInstancesPercent': (integer, False),
    }


class CreationPolicy(AWSAttribute):
    props = {
        'AutoScalingCreationPolicy': (AutoScalingCreationPolicy, False),
        'ResourceSignal': (ResourceSignal, True),
    }
| from . import AWSProperty, AWSAttribute, validate_pausetime
from .validators import positive_integer, integer, boolean
class AutoScalingRollingUpdate(AWSProperty):
    props = {
        'MaxBatchSize': (positive_integer, False),
        'MinInstancesInService': (integer, False),
        'MinSuccessfulInstancesPercent': (integer, False),
        'PauseTime': (validate_pausetime, False),
        'SuspendProcesses': ([basestring], False),
        'WaitOnResourceSignals': (boolean, False),
    }


class AutoScalingScheduledAction(AWSProperty):
    props = {
        'IgnoreUnmodifiedGroupSizeProperties': (boolean, False),
    }


class AutoScalingReplacingUpdate(AWSProperty):
    props = {
        'WillReplace': (boolean, False),
    }


class CodeDeployLambdaAliasUpdate(AWSProperty):
    props = {
        'AfterAllowTrafficHook': (basestring, False),
        'ApplicationName': (boolean, True),
        'BeforeAllowTrafficHook': (basestring, False),
        'DeploymentGroupName': (boolean, True),
    }


class UpdatePolicy(AWSAttribute):
    props = {
        'AutoScalingRollingUpdate': (AutoScalingRollingUpdate, False),
        'AutoScalingScheduledAction': (AutoScalingScheduledAction, False),
        'AutoScalingReplacingUpdate': (AutoScalingReplacingUpdate, False),
        'CodeDeployLambdaAliasUpdate': (CodeDeployLambdaAliasUpdate, False),
        'UseOnlineResharding': (boolean, False),
    }


class ResourceSignal(AWSProperty):
    props = {
        'Count': (positive_integer, False),
        'Timeout': (validate_pausetime, False),
    }


class AutoScalingCreationPolicy(AWSProperty):
    props = {
        'MinSuccessfulInstancesPercent': (integer, False),
    }


class CreationPolicy(AWSAttribute):
    props = {
        'AutoScalingCreationPolicy': (AutoScalingCreationPolicy, False),
        'ResourceSignal': (ResourceSignal, True),
    }
| bsd-2-clause | Python |
2a6527c60d09c0cbb2f1902b57ae02ddade213eb | Create communicati.py | mncmilan/eZ430-Chronos | libs/communicati.py | libs/communicati.py | # communications.py
# Mónica Milán (@mncmilan)
# [email protected]
# http://steelhummingbird.blogspot.com.es/
# Library that contains all necessary methods in order to enable communications between PC and eZ430-Chronos.
import serial
s = serial.Serial('COM4', baudrate=115200,timeout=None) # open serial port
class CommunicationManager():
def open_serial_port(self):
s.write(bytearray([255, 7, 3])) # starting communications with serial port
def send_data_request(self):
s.write(bytearray([255, 8, 7, 0, 0, 0, 0])) # acceleration data request
bytesToRead = s.inWaiting()
return bytesToRead
def read_from_labVIEW_request(self):
bytes_to_read = s.inWaiting()
inbyte = s.read(bytes_to_read)
return bytes_to_read, inbyte
def read_data(self, bytes_to_read):
inbyte = s.read(bytes_to_read)
return inbyte
def close_serial_port(self):
s.write(bytearray([255, 9, 3])) # stop transmitting
s.close()
| apache-2.0 | Python |
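A possible driver session for the class above. The COM port is hard-coded in the module and the timing of replies depends on the watch firmware, so treat the flow as a sketch rather than a tested recipe.

# Hypothetical session; assumes the eZ430 access point is plugged in.
from communicati import CommunicationManager

manager = CommunicationManager()
manager.open_serial_port()               # [255, 7, 3] starts the access point
pending = manager.send_data_request()    # [255, 8, 7, ...] asks for accel data
if pending:
    print(manager.read_data(pending))
manager.close_serial_port()              # [255, 9, 3] stops transmitting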
|
e5293f7e33740f210ab58c3c05db18829db1474d | add docstrings [skip ci] | eugene-eeo/mailthon,krysros/mailthon,ashgan-dev/mailthon | mailthon/helpers.py | mailthon/helpers.py | """
mailthon.helpers
~~~~~~~~~~~~~~~~
Implements various helper functions/utilities.
:copyright: (c) 2015 by Eeo Jun
:license: MIT, see LICENSE for details.
"""
import sys
import mimetypes
from collections import MutableMapping
from email.utils import formataddr
if sys.version_info[0] == 3:
bytes_type = bytes
else:
bytes_type = str
def guess(filename, fallback='application/octet-stream'):
"""
Using the mimetypes library, guess the mimetype and
encoding for a given *filename*. If the mimetype
cannot be guessed, *fallback* is assumed instead.
:param filename: Filename- can be absolute path.
:param fallback: A fallback mimetype.
"""
guessed, encoding = mimetypes.guess_type(filename, strict=False)
if guessed is None:
return fallback, encoding
return guessed, encoding
def format_addresses(addrs):
"""
Given an iterable of addresses or name-address
tuples *addrs*, return a header value that joins
all of them together with a space and a comma.
"""
return ', '.join(
formataddr(item) if isinstance(item, tuple) else item
for item in addrs
)
def encode_address(addr, encoding='utf-8'):
"""
Given an email address *addr*, try to encode
it with ASCII. If it's not possible, encode
the *local-part* with the *encoding* and the
*domain* with IDNA.
"""
if isinstance(addr, bytes_type):
return addr
try:
addr = addr.encode('ascii')
except UnicodeEncodeError:
if '@' in addr:
localpart, domain = addr.split('@', 1)
addr = b'@'.join([
localpart.encode(encoding),
domain.encode('idna'),
])
else:
addr = addr.encode(encoding)
return addr
class UnicodeDict(dict):
"""
A dictionary that handles unicode values
magically - that is, byte-values are
automatically decoded. Accepts a dict
or iterable *values*.
:param encoding: Default encoding used
if no encoding is specified.
"""
def __init__(self, values=(), encoding='utf-8'):
dict.__init__(self)
self.encoding = encoding
self.update(values)
def __setitem__(self, key, value):
if isinstance(value, bytes_type):
value = value.decode(self.encoding)
dict.__setitem__(self, key, value)
update = MutableMapping.update
| """
mailthon.helpers
~~~~~~~~~~~~~~~~
Implements various helper functions/utilities.
:copyright: (c) 2015 by Eeo Jun
:license: MIT, see LICENSE for details.
"""
import sys
import mimetypes
from collections import MutableMapping
from email.utils import formataddr
if sys.version_info[0] == 3:
bytes_type = bytes
else:
bytes_type = str
def guess(filename, fallback='application/octet-stream'):
"""
Using the mimetypes library, guess the mimetype and
encoding for a given *filename*. If the mimetype
cannot be guessed, *fallback* is assumed instead.
:param filename: Filename- can be absolute path.
:param fallback: A fallback mimetype.
"""
guessed, encoding = mimetypes.guess_type(filename, strict=False)
if guessed is None:
return fallback, encoding
return guessed, encoding
def format_addresses(addrs):
"""
Given an iterable of addresses or name-address
tuples *addrs*, return a header value that joins
all of them together with a space and a comma.
"""
return ', '.join(
formataddr(item) if isinstance(item, tuple) else item
for item in addrs
)
def encode_address(addr, encoding='utf-8'):
"""
Given an email address *addr*, try to encode
it with ASCII. If it's not possible, encode
the *local-part* with the *encoding* and the
*domain* with IDNA.
"""
if isinstance(addr, bytes_type):
return addr
try:
addr = addr.encode('ascii')
except UnicodeEncodeError:
if '@' in addr:
localpart, domain = addr.split('@', 1)
addr = b'@'.join([
localpart.encode(encoding),
domain.encode('idna'),
])
else:
addr = addr.encode(encoding)
return addr
class UnicodeDict(dict):
def __init__(self, values=(), encoding='utf-8'):
dict.__init__(self)
self.encoding = encoding
self.update(values)
def __setitem__(self, key, value):
if isinstance(value, bytes_type):
value = value.decode(self.encoding)
dict.__setitem__(self, key, value)
update = MutableMapping.update
| mit | Python |
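The helpers are small and pure, so their behaviour can be pinned down with direct checks; the expected values below follow from the stdlib mimetypes and email.utils modules the code delegates to.

from mailthon.helpers import guess, format_addresses

assert guess('report.pdf') == ('application/pdf', None)
assert guess('archive.unknownext') == ('application/octet-stream', None)
assert format_addresses([('Me', 'me@example.com'), 'you@example.com']) == \
    'Me <me@example.com>, you@example.com'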
ea652c892219d1ed08a0453a3b2ede3efb452e23 | Create __init__.py | bistaray/odoo-8.0-public,raycarnes/odoo-8.0-public,raycarnes/odoo-8.0-public,bistaray/odoo-8.0-public | ui_techmenu/__init__.py | ui_techmenu/__init__.py | # -*- coding: utf-8 -*-
######################################################################
#
# ui_techmenu - Explode Technical Menu for Odoo
# Copyright (C) 2012 - TODAY, Ursa Information Systems (<http://ursainfosystems.com>)
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>)
# [email protected]
#
#
# Ursa is willing to revoke copyright claims in the future if Odoo wishes to certify this module.
#
######################################################################
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#
# For clients with an annual support contract with Ursa, this program is warranted within the guidelines of that contract.
#
# For ALL OTHERS, this program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY (including the absence
# of implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE). See the GNU Affero General Public License for
# more information.
#
######################################################################
#
# You should have received a copy of the GNU Affero General Public License along with this program. The license is in the file
# named LICENSE in the top level directory and also provided on the web at <http://www.gnu.org/licenses/>.
#
######################################################################
# python dependencies (either files or classes) are designated below
# import <file_dependency>
# import <class_dependency>
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | Python |
|
841e8fe236eab35b803cb9d8bec201306ce4642e | Add script to generate big RUM_* files | itmat/rum,itmat/rum,itmat/rum | util/repeat_rum_file.py | util/repeat_rum_file.py | from rum_mapping_stats import aln_iter
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--times', type=int)
parser.add_argument('--max-seq', type=int)
parser.add_argument('rum_file', type=file)
args = parser.parse_args()
alns = list(aln_iter(args.rum_file))
for t in range(args.times):
for aln in alns:
old_read_num = aln.read_num
aln.read_num = old_read_num + t * args.max_seq
aln.write(sys.stdout)
aln.read_num = old_read_num
| mit | Python |
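A plausible invocation, with the file name and counts as assumptions: read RUM_Unique, whose read numbers stay below one million, and write ten shifted copies to stdout.

    python repeat_rum_file.py --times 10 --max-seq 1000000 RUM_Unique > RUM_Unique.x10

Each pass t offsets every read number by t * max_seq, so the concatenated output keeps its read ids unique and increasing.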
|
3ab98baaf2b81ffa1afef808f27608f06bc946d3 | Create commands.py | anvanza/invenavi,anvanza/invenavi,anvanza/invenavi | web/commands.py | web/commands.py | #
# Commands for RPC interface
#
from twisted.protocols.amp import AMP, Boolean, Integer, String, Float, Command
class Sum(Command):
arguments = [('a', Integer()),
('b', Integer())]
response = [('status', Integer())]
class HeartbeatCmd(Command):
arguments = [('enabled', Boolean())]
response = [('status', Boolean())]
requiresAnswer = False
class HaltCmd(Command):
arguments = []
response = [('status', Boolean())]
requiresAnswer = False
class ModeCmd(Command):
arguments = [('mode', String())]
response = [('status', String())]
requiresAnswer = False
class QueryStatus(Command):
arguments = []
response = [('fix', Boolean()),
('lat', Float()),
('lon', Float()),
('gps_heading', Float()),
('gps_speed', Float()),
('altitude', Float()),
('num_sat', Integer()),
('timestamp', String()),
('datestamp', String()),
('compass_heading', Float()),
('temperature', Float())]
class NavigationCmd(Command):
arguments = [('speed', Float()), ('heading', Float())]
response = [('status', Boolean())]
requiresAnswer = False
class ManualDriveCmd(Command):
arguments = [('throttle', Float()), ('steering', Float())]
response = [('status', Boolean())]
requiresAnswer = False
class ExitCmd(Command):
arguments = []
response = []
requiresAnswer = False
| mit | Python |
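These Command subclasses are shared contracts between the two peers; a client side could invoke one with Twisted's stock AMP protocol roughly as below. The host and port are assumptions.

# Sketch: call QueryStatus over AMP; 127.0.0.1:1234 is an assumed endpoint.
from twisted.internet import reactor
from twisted.internet.protocol import ClientCreator
from twisted.protocols.amp import AMP
from commands import QueryStatus

def show(result):
    print(result)   # dict keyed by the response fields declared above
    reactor.stop()

d = ClientCreator(reactor, AMP).connectTCP('127.0.0.1', 1234)
d.addCallback(lambda proto: proto.callRemote(QueryStatus))
d.addCallback(show)
reactor.run()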
|
b5f8299cbe539cf2a01988ca25e0c7638400bc8c | Create stuff.py | danshorstein/cpatoolz,danshorstein/cpatoolz,danshorstein/cpatoolz,dtiz/bottomline | bottomline/stuff.py | bottomline/stuff.py | # Testing
print 'heck yeah!'
| mit | Python |
|
5777877d1ed71ed21f67e096b08ad495ff844ed8 | add testexample/simpleTestwithPython.py | light940929/niagadsofinquery,light940929/niagadsofinquery,light940929/niagadsofinquery,light940929/niagadsofinquery | testexample/simpleTestwithPython.py | testexample/simpleTestwithPython.py | import os
import re
import json
import sys
import getopt
import argparse
from docopt import docopt
from urllib2 import urlopen, Request
import urllib
import urllib2
import requests
url_phenotypes = 'http://localhost:9000/api/phenotypes'
url_genotypes = 'http://localhost:9000/api/genotypes'
token = 'Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJJRCI6Ik5JQUdBRFMiLCJleHAiOjE0NjEzNjI0NTV9.-Roix0YvuPy9VHaWm9wE83yB7NiSunyVXsVlR74lu2Y'
headers = {'Authorization': '%s' % token}
request_phenotypes = Request(url_phenotypes, headers=headers)
request_genotypes = Request(url_genotypes, headers=headers)
response_phenotypes = urlopen(request_phenotypes)
response_genotypes = urlopen(request_genotypes)
data_phenotypes = json.loads(response_phenotypes.read())
data_genotypes = json.loads(response_genotypes.read())
def loadPhenotypes(data_phenotypes):
phenotypes_list = data_phenotypes['phenotypes']
for phenotype in phenotypes_list:
print(phenotype['title'])
print(phenotype['family_id'])
print(phenotype['individual_id'])
print(phenotype['paternal_id'])
print(phenotype['maternal_id'])
def loadGenotypes(data_genotypes):
genotypes_list = data_genotypes['genotypes']
for genotype in genotypes_list:
print(genotype['title'])
print(genotype['chr'])
print(genotype['coordinate'])
print(genotype['variant_id'])
def postGenotypes(url_genotypes, token, headers):
values = {"title":"test","chr":"2","variant_id":"snp4","location":"0","coordinate":"1111830","call":"G T G T G G T T G T T T"}
data = json.dumps(values)
req = requests.post(url_genotypes, data, headers=headers)
print req.status_code
loadPhenotypes(data_phenotypes)
loadGenotypes(data_genotypes)
postGenotypes(url_genotypes, token, headers)
| mit | Python |
|
606020fbb7c3e608c8eab19ca143919003ea4f7d | add some first tests. | alexanderjulo/triptan | test_triptan.py | test_triptan.py | import os
from unittest import TestCase
from tempfile import TemporaryDirectory
from triptan.core import Triptan
class TriptanInitializationTest(TestCase):
"""
    Asserts that triptan can set up the necessary data correctly.
"""
def test_init_file_structure(self):
"""
Assert the file structure is created correctly.
"""
with TemporaryDirectory() as tmpd:
Triptan.setup(
tmpd,
'triptan.yml',
{'revisions_location': 'revisions'}
)
assert os.path.exists(os.path.join(tmpd, 'triptan.yml'))
assert os.path.exists(os.path.join(tmpd, 'revisions'))
class TriptanTest(TestCase):
"""
Assert the core functionality is working.
"""
def setUp(self):
"""
Create a temporary directory and set triptan up with it.
"""
self.path = TemporaryDirectory()
Triptan.setup(
self.path.name,
'triptan.yml',
{'revisions_location': 'revisions'}
)
self.triptan = Triptan(self.path.name, 'triptan.yml')
def test_default_revision(self):
"""
Assert the default revision is -1.
"""
assert self.triptan.current_revision == -1
def test_revision_creation(self):
"""
Assert that revisions are correctly created.
"""
self.triptan.new_revision("test revision")
rev_path = os.path.join(self.path.name, 'revisions/revision-000.py')
assert os.path.exists(rev_path)
self.triptan.new_revision("another")
rev_path = os.path.join(self.path.name, 'revisions/revision-001.py')
assert os.path.exists(rev_path)
| mit | Python |
|
1803ec42e2eaad689dd51d3afb0b943e411f10d5 | Add breath first search algorithm | tobegit3hub/tobe-algorithm-manual | breath_first_search/breath_first_search.py | breath_first_search/breath_first_search.py | #!/usr/bin/env python
from collections import deque
class BreathFirstSearchGame(object):
def __init__(self):
        # The node indexes run from 0 to 7, e.g. 0, 1, 2, 3, 4
self.node_number = 8
# The edges to connect each node
self.edges = [(0, 1), (0, 3), (1, 2), (1, 5), (2, 7), (3, 4), (3, 6),
(4, 5), (5, 7)]
# The 8 * 8 matrix of boolean values, only updated by the edges
self.graph = [[False for j in range(self.node_number)]
for i in range(self.node_number)]
#print(self.graph)
        # The queue for the open set (a FIFO deque)
self.open_set = deque()
# The source and destination nodes for this game
self.source_node = 0
self.destination_node = 7
# The 8 array of boolean which means this node is visited
self.is_visit_node_array = [False for i in range(self.node_number)]
# The 8 array of int which means this node's best parent node id
self.best_parent_node_array = [-1 for i in range(self.node_number)]
self.initialize_internal_variables()
#print(self.graph)
self.travel_and_update_variables()
self.travel_desination_path(self.destination_node)
def initialize_internal_variables(self):
# Update the graph with edges
for i, j in self.edges:
self.graph[i][j] = True
self.graph[j][i] = True
# Update the open set with the source nodes
self.open_set.append(self.source_node)
self.is_visit_node_array[self.source_node] = True
self.best_parent_node_array[self.source_node] = self.source_node
def travel_and_update_variables(self):
# Travel if some nodes in open set
while len(self.open_set) > 0:
current_node = self.open_set.popleft()
for other_node in range(self.node_number):
#import ipdb;ipdb.set_trace()
# Check if these two nodes are connected
if self.graph[current_node][other_node]:
# Check if the other node is visited
if self.is_visit_node_array[other_node] == False:
# Update the open set and visited array
self.open_set.append(other_node)
self.best_parent_node_array[other_node] = current_node
self.is_visit_node_array[other_node] = True
def travel_desination_path(self, destination_node):
if destination_node == self.source_node:
print(destination_node)
else:
self.travel_desination_path(
self.best_parent_node_array[destination_node])
print(destination_node)
def main():
print("Start breath first search")
game = BreathFirstSearchGame()
if __name__ == "__main__":
main()
| apache-2.0 | Python |
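For the hard-coded graph the run is deterministic: popping 0 discovers 1 and 3, popping 1 discovers 2 and 5, popping 3 discovers 4 and 6, and popping 2 reaches 7. Walking the best-parent chain back from node 7 therefore prints the shortest path, one node per line:

    0
    1
    2
    7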
|
c98eff8545c90563246a53994fe8f65faaf76b0a | Add fetch recipe for the open source infra repo. | svn2github/chromium-depot-tools,svn2github/chromium-depot-tools,svn2github/chromium-depot-tools | recipes/infra.py | recipes/infra.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class Infra(recipe_util.Recipe):
"""Basic Recipe class for the Infrastructure repositories."""
@staticmethod
def fetch_spec(props):
url = 'https://chromium.googlesource.com/infra/infra.git'
solution = {
'name' : 'infra',
'url' : url,
'deps_file': 'DEPS',
'managed' : False,
}
spec = {
'solutions': [solution],
}
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'infra'
def main(argv=None):
return Infra().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause | Python |
|
5b3b5bb145eea8a71c81a383d2bdac7ecf13f98e | Add sys module tests | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | tests/integration/modules/sysmod.py | tests/integration/modules/sysmod.py | # Import python libs
import os
# Import salt libs
import integration
class SysModuleTest(integration.ModuleCase):
'''
Validate the sys module
'''
def test_list_functions(self):
'''
sys.list_functions
'''
funcs = self.run_function('sys.list_functions')
self.assertTrue('hosts.list_hosts' in funcs)
self.assertTrue('pkg.install' in funcs)
def test_list_modules(self):
'''
        sys.list_modules
'''
mods = self.run_function('sys.list_modules')
self.assertTrue('hosts' in mods)
self.assertTrue('pkg' in mods)
| apache-2.0 | Python |
|
7d800a0fc2d94cad14e825faa27e1f5b2d2cbed8 | Create new package (#6648) | matthiasdiener/spack,krafczyk/spack,mfherbst/spack,EmreAtes/spack,LLNL/spack,krafczyk/spack,mfherbst/spack,EmreAtes/spack,EmreAtes/spack,iulian787/spack,matthiasdiener/spack,iulian787/spack,mfherbst/spack,matthiasdiener/spack,LLNL/spack,tmerrick1/spack,EmreAtes/spack,tmerrick1/spack,krafczyk/spack,matthiasdiener/spack,krafczyk/spack,iulian787/spack,krafczyk/spack,tmerrick1/spack,iulian787/spack,mfherbst/spack,LLNL/spack,LLNL/spack,tmerrick1/spack,iulian787/spack,tmerrick1/spack,mfherbst/spack,EmreAtes/spack,matthiasdiener/spack,LLNL/spack | var/spack/repos/builtin/packages/breseq/package.py | var/spack/repos/builtin/packages/breseq/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Breseq(AutotoolsPackage):
"""breseq is a computational pipeline for finding mutations relative to a
reference sequence in short-read DNA re-sequencing data for haploid
microbial-sized genomes."""
homepage = "http://barricklab.org/breseq"
url = "https://github.com/barricklab/breseq/archive/v0.31.1.tar.gz"
version('0.31.1', 'a4e602d5481f8692833ba3d5a3cd0394')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('bedtools2', type='run')
depends_on('r', type='run')
| lgpl-2.1 | Python |
|
19cf7a2833ba2ffcff46bd4543ed93fd80c1d8ea | fix trying to run configure on an already configured directory fixes #2959 (#2961) | EmreAtes/spack,krafczyk/spack,mfherbst/spack,krafczyk/spack,matthiasdiener/spack,tmerrick1/spack,LLNL/spack,mfherbst/spack,LLNL/spack,iulian787/spack,krafczyk/spack,skosukhin/spack,TheTimmy/spack,skosukhin/spack,krafczyk/spack,iulian787/spack,tmerrick1/spack,tmerrick1/spack,matthiasdiener/spack,lgarren/spack,EmreAtes/spack,matthiasdiener/spack,iulian787/spack,EmreAtes/spack,krafczyk/spack,TheTimmy/spack,EmreAtes/spack,skosukhin/spack,LLNL/spack,TheTimmy/spack,mfherbst/spack,tmerrick1/spack,LLNL/spack,matthiasdiener/spack,mfherbst/spack,tmerrick1/spack,TheTimmy/spack,iulian787/spack,lgarren/spack,lgarren/spack,skosukhin/spack,lgarren/spack,EmreAtes/spack,lgarren/spack,LLNL/spack,mfherbst/spack,TheTimmy/spack,matthiasdiener/spack,skosukhin/spack,iulian787/spack | var/spack/repos/builtin/packages/libmng/package.py | var/spack/repos/builtin/packages/libmng/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libmng(AutotoolsPackage):
"""libmng -THE reference library for reading, displaying, writing
and examining Multiple-Image Network Graphics. MNG is the animation
extension to the popular PNG image-format."""
homepage = "http://sourceforge.net/projects/libmng/"
url = "http://downloads.sourceforge.net/project/libmng/libmng-devel/2.0.2/libmng-2.0.2.tar.gz"
version('2.0.2', '1ffefaed4aac98475ee6267422cbca55')
depends_on("jpeg")
depends_on("zlib")
depends_on("lcms")
def patch(self):
        # jpeg requires stdio to be included before its headers.
filter_file(r'^(\#include \<jpeglib\.h\>)',
'#include<stdio.h>\n\\1', 'libmng_types.h')
@run_before('configure')
def clean_configure_directory(self):
make('distclean')
| ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libmng(AutotoolsPackage):
"""libmng -THE reference library for reading, displaying, writing
and examining Multiple-Image Network Graphics. MNG is the animation
extension to the popular PNG image-format."""
homepage = "http://sourceforge.net/projects/libmng/"
url = "http://downloads.sourceforge.net/project/libmng/libmng-devel/2.0.2/libmng-2.0.2.tar.gz"
version('2.0.2', '1ffefaed4aac98475ee6267422cbca55')
depends_on("jpeg")
depends_on("zlib")
depends_on("lcms")
def patch(self):
        # jpeg requires stdio to be included before its headers.
filter_file(r'^(\#include \<jpeglib\.h\>)',
'#include<stdio.h>\n\\1', 'libmng_types.h')
| lgpl-2.1 | Python |
94f2ea927d9e218f2b5065456275d407164ddf0a | Add anidub.com tracker support | idlesign/deluge-updatorr,idlesign/deluge-updatorr | updatorr/tracker_handlers/handler_anidub.py | updatorr/tracker_handlers/handler_anidub.py | from updatorr.handler_base import BaseTrackerHandler
from updatorr.utils import register_tracker_handler
import urllib2
class AnidubHandler(BaseTrackerHandler):
"""This class implements .torrent files downloads
for http://tr.anidub.com tracker."""
logged_in = False
# Stores a number of login attempts to prevent recursion.
login_counter = 0
def get_torrent_file(self):
"""This is the main method which returns
a filepath to the downloaded file."""
torrent_file = None
download_link = self.get_download_link()
if download_link is None:
self.dump_error('Cannot find torrent file download link at %s' % self.resource_url)
else:
self.debug('Torrent download link found: %s' % download_link)
torrent_file = self.download_torrent(download_link)
return torrent_file
def get_id_from_link(self):
"""Returns forum thread identifier from full thread URL."""
return self.resource_url.split('=')[1]
def login(self, login, password):
"""Implements tracker login procedure."""
self.logged_in = False
if login is None or password is None:
return False
self.login_counter += 1
# No recursion wanted.
if self.login_counter > 1:
return False
login_url = 'http://tr.anidub.com/takelogin.php'
self.debug('Trying to login at %s ...' % login_url)
form_data = {
'username': login,
'password': password,
}
self.get_resource(login_url, form_data)
cookies = self.get_cookies()
# Login success check.
if cookies.get('uid') is not None:
self.logged_in = True
return self.logged_in
def get_download_link(self):
"""Tries to find .torrent file download link at forum thread page
and return that one."""
response, page_html = self.get_resource(self.resource_url)
page_links = self.find_links(page_html)
download_link = None
for page_link in page_links:
if 'login.php?returnto=' in page_link:
download_link = None
self.debug('Login is required to download torrent file.')
if self.login(self.get_settings('login'), self.get_settings('password')):
download_link = self.get_download_link()
if 'download.php?id=' in page_link:
download_link = 'http://tr.anidub.com/'+urllib2.unquote(page_link).replace("&", "&")
return download_link
def download_torrent(self, url):
"""Gets .torrent file contents from given URL and
stores that in a temporary file within a filesystem.
Returns a path to that file.
"""
self.debug('Downloading torrent file from %s ...' % url)
# That was a check that user himself visited torrent's page ;)
cookies = self.get_cookies()
#self.set_cookie('uid', self.get_id_from_link())
contents = self.get_resource(url, {})[1]
return self.store_tmp_torrent(contents)
# With that one we tell Updatorr to handle links to the `tr.anidub.com` domain with the AnidubHandler class.
register_tracker_handler('tr.anidub.com', AnidubHandler)
| bsd-3-clause | Python |
|
b02ec9a16689bf2814e85f0edb01c7f4a5926214 | Add pre-migration script for account module. | bwrsandman/OpenUpgrade,hifly/OpenUpgrade,pedrobaeza/OpenUpgrade,Endika/OpenUpgrade,mvaled/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,kirca/OpenUpgrade,pedrobaeza/OpenUpgrade,sebalix/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,grap/OpenUpgrade,csrocha/OpenUpgrade,csrocha/OpenUpgrade,bwrsandman/OpenUpgrade,mvaled/OpenUpgrade,mvaled/OpenUpgrade,sebalix/OpenUpgrade,sebalix/OpenUpgrade,blaggacao/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,Endika/OpenUpgrade,hifly/OpenUpgrade,blaggacao/OpenUpgrade,Endika/OpenUpgrade,blaggacao/OpenUpgrade,OpenUpgrade/OpenUpgrade,damdam-s/OpenUpgrade,pedrobaeza/OpenUpgrade,hifly/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,Endika/OpenUpgrade,hifly/OpenUpgrade,blaggacao/OpenUpgrade,kirca/OpenUpgrade,damdam-s/OpenUpgrade,csrocha/OpenUpgrade,grap/OpenUpgrade,pedrobaeza/OpenUpgrade,grap/OpenUpgrade,damdam-s/OpenUpgrade,0k/OpenUpgrade,grap/OpenUpgrade,pedrobaeza/OpenUpgrade,Endika/OpenUpgrade,sebalix/OpenUpgrade,damdam-s/OpenUpgrade,sebalix/OpenUpgrade,0k/OpenUpgrade,blaggacao/OpenUpgrade,kirca/OpenUpgrade,kirca/OpenUpgrade,mvaled/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,0k/OpenUpgrade,bwrsandman/OpenUpgrade,OpenUpgrade/OpenUpgrade,OpenUpgrade/OpenUpgrade,0k/OpenUpgrade,csrocha/OpenUpgrade,OpenUpgrade/OpenUpgrade,csrocha/OpenUpgrade,bwrsandman/OpenUpgrade,mvaled/OpenUpgrade,sebalix/OpenUpgrade,bwrsandman/OpenUpgrade,kirca/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,0k/OpenUpgrade,csrocha/OpenUpgrade,hifly/OpenUpgrade,grap/OpenUpgrade,OpenUpgrade/OpenUpgrade,hifly/OpenUpgrade,mvaled/OpenUpgrade,OpenUpgrade/OpenUpgrade,hifly/OpenUpgrade,0k/OpenUpgrade,blaggacao/OpenUpgrade,grap/OpenUpgrade,sebalix/OpenUpgrade,kirca/OpenUpgrade,pedrobaeza/OpenUpgrade,damdam-s/OpenUpgrade,Endika/OpenUpgrade,OpenUpgrade/OpenUpgrade,mvaled/OpenUpgrade,grap/OpenUpgrade,kirca/OpenUpgrade,Endika/OpenUpgrade,blaggacao/OpenUpgrade,bwrsandman/OpenUpgrade,csrocha/OpenUpgrade,bwrsandman/OpenUpgrade,damdam-s/OpenUpgrade,damdam-s/OpenUpgrade,pedrobaeza/OpenUpgrade | addons/account/migrations/8.0.1.1/pre-migration.py | addons/account/migrations/8.0.1.1/pre-migration.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Akretion (http://www.akretion.com/)
# @author: Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.openupgrade import openupgrade
def migrate(cr, version):
if not version:
return
cr.execute(
"""SELECT id FROM account_analytic_journal WHERE type='purchase' """)
res = cr.fetchone()
print "mig account res=", res
if res:
openupgrade.add_xmlid(
cr, 'account', 'exp', 'account.analytic.journal', res[0], True)
| agpl-3.0 | Python |
|
07841312d062fd0dd48baa0d3bc0d92989e05841 | add script mp3-file-renamer.py | charlesbos/my-scripts,charlesbos/my-scripts | mp3-file-renamer.py | mp3-file-renamer.py | #!/usr/bin/python
#Python script to rename mp3 files according to the format
#"Track-number Track-name.mp3", for example: 02 Self Control.mp3
#Note: Tracks must have valid ID3 data for this to work - python-mutagen is required.
#By Charles Bos
import os
import sys
from mutagen.id3 import ID3, ID3NoHeaderError
def usage() :
print('''Usage:
mp3-file-renamer.py <path to music>''')
#Get music directory
args = sys.argv
if (len(args) != 2) or (args[1] == '-h') or (args[1] == '--help') :
usage()
os._exit(0)
else :
if os.path.exists(args[1]) : musicDir = args[1]
else :
usage()
os._exit(0)
#Get titles and track numbers for songs
musicFiles = []
tracknums = []
titles = []
for root, dirs, files in os.walk(musicDir, topdown=False):
for name in files:
if name.find(".mp3") != -1 :
musicFiles.append(os.path.join(root, name))
for x in musicFiles[:] :
    try :
        audio = ID3(x)
        title = str(audio["TIT2"].text[0])
        tracknum = str(audio["TRCK"].text[0])
    except (ID3NoHeaderError, KeyError) :
        # iterate over a copy so removing from the original list is safe
        musicFiles.remove(x)
    else :
        titles.append(title)
        tracknums.append(tracknum)
#Add leading 0 if missing
for x in tracknums :
if len(x) == 1 : tracknums[tracknums.index(x)] = "0" + x
if (len(tracknums) != len(titles)) or (len(tracknums) == len(titles) == 0) :
print("No valid music files found. Nothing to do. Exiting...")
os._exit(0)
else :
#Start renaming
def getPath(origSong) :
return origSong.rfind("/") + 1
counter = 0
for x in musicFiles :
path = x[:getPath(x)]
os.rename(x, path + tracknums[counter] + " " + titles[counter] + ".mp3")
counter += 1 | agpl-3.0 | Python |
|
cc76c00efa919f8532e21365606f38431093cc22 | Write inversion counting algorithm | stephtzhang/algorithms | count_inversions.py | count_inversions.py | def count_inversions(list, inversion_count = 0):
"""
recursively counts inversions of halved lists
where inversions are instances where a larger el occurs before a smaller el
merges the halved lists and increments the inversion count at each level
:param list list: list containing comparable elements
    :param int inversion_count: running count of inversions carried through the recursion
:returns: tuple w merged list and number of inversions
"""
if len(list) < 2:
return (list, inversion_count)
mid_point = len(list) / 2
# recursively count inversions in 1st half of input
first_half = count_inversions(list[0:mid_point], inversion_count)
# recursively count inversions in 2nd half of input
second_half = count_inversions(list[mid_point:len(list)], inversion_count)
# TODO: indexing into the returned tuple is confusing
# consider returning a dict instead
running_inversion_count = first_half[1] + second_half[1]
return merge_and_count_inversions(first_half[0], second_half[0], running_inversion_count)
def merge_and_count_inversions(a, b, inversion_count):
"""
steps through indexes in both input lists, appending the smaller val to the merged list at each step
increments the inversion count when els from list b are appended to the output before a is exhausted
:param list a: ordered list
:param list b: ordered list
:returns: tuple w merged list and number of inversions
"""
i = 0
j = 0
total_len = len(a) + len(b)
merged = []
for k in range(total_len):
try:
a[i]
except IndexError:
# concat merged w remainder of b if a's finished
merged = merged + b[j:len(b)]
j += 1
return (merged, inversion_count)
try:
b[j]
except IndexError:
# concat merged w remainder of a if b's finished
merged = merged + a[i:len(a)]
i += 1
return (merged, inversion_count)
if a[i] < b[j]:
merged.append(a[i])
i += 1
        else:
            merged.append(b[j])
            j += 1
            # b[j] was placed before everything still waiting in a, so each remaining el of a is an inversion
            inversion_count = inversion_count + (len(a) - i)
    return (merged, inversion_count)
list = [ 1, 2, 9, -1, 0]
print count_inversions(list)[1]
# a = [1, 3, 5, 6]
# b = [2, 4, 7, 8, 9]
# print merge_and_count_inversions(a, b) | mit | Python |
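With the merge step counting only when an element of b overtakes the remainder of a, a brute-force cross-check is straightforward; for the sample list above the inversion count is 6.

def brute_force_inversions(xs):
    # O(n^2) reference: count pairs that are out of order
    return sum(1 for p in range(len(xs))
                 for q in range(p + 1, len(xs))
                 if xs[p] > xs[q])

sample = [1, 2, 9, -1, 0]
assert brute_force_inversions(sample) == 6
assert count_inversions(sample)[1] == 6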
|
3331a9a6b8ada075aaefef021a8ad24a49995931 | Add test for prepare_instance_slug #92 | Princeton-CDH/derrida-django,Princeton-CDH/derrida-django,Princeton-CDH/derrida-django,Princeton-CDH/derrida-django | derrida/books/tests/test_search_indexes.py | derrida/books/tests/test_search_indexes.py | from unittest.mock import patch
from django.test import TestCase
from derrida.books.models import Reference, Instance
from derrida.books.search_indexes import ReferenceIndex
class TestReferenceIndex(TestCase):
fixtures = ['test_references.json']
def setUp(self):
        '''None of the Instance fixtures have slugs, so generate them'''
for instance in Instance.objects.all():
instance.slug = instance.generate_safe_slug()
instance.save()
def test_prepare_instance_slug(self):
# create a ReferenceIndex object
refindex = ReferenceIndex()
# get a reference
reference = Reference.objects.first()
# not a book section (none in test set are)
# should return the slug of its instance
slug = refindex.prepare_instance_slug(reference)
assert slug == reference.instance.slug
# create a work as a 'collected in'
ecrit = Instance.objects.get(slug__icontains='lecriture-et-la')
debat = Instance.objects.get(slug__icontains='le-debat-sur')
# make ecrit a 'section' of debat
ecrit.collected_in = debat
ecrit.save()
# get a reference from ecrit
reference = Reference.objects.filter(instance=ecrit).first()
# should return the slug for debat not ecrit
slug = refindex.prepare_instance_slug(reference)
assert slug == debat.slug
| apache-2.0 | Python |
|
451799f126afcdda70138dc348b9e1f276b1f86f | Add setting file for later use. | aocks/ox_herd,aocks/ox_herd,aocks/ox_herd | ox_herd/settings.py | ox_herd/settings.py | """Module to represent basic settings for ox_herd package.
"""
| bsd-2-clause | Python |
|
ec5136b86cce92a49cf2eea852f1d8f2d7110cf0 | Create element_search.py | lcnodc/codes,lcnodc/codes | 09-revisao/practice_python/element_search.py | 09-revisao/practice_python/element_search.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Exercise 20: Element Search
Write a function that takes an ordered list of numbers (a list where the
elements are in order from smallest to largest) and another number. The
function decides whether or not the given number is inside the list and
returns (then prints) an appropriate boolean.
Extras:
Use binary search.
"""
def in_list(a_list, number):
return True if [True for i in a_list if i == number] else False
def in_list2(a_list, number):
if len(a_list) == 1:
return a_list[0] == number
elif a_list[len(a_list) // 2] > number:
return in_list2(a_list[:len(a_list) // 2], number)
else:
return in_list2(a_list[len(a_list) // 2:], number)
if __name__ == "__main__":
a_list = [1, 3, 4, 5, 6, 7, 8, 12, 15, 20, 23, 33, 45, 64]
number = int(input("Enter a number: "))
print(
"The number %i is in the list %s: %s" %
(number, a_list, in_list(a_list, number)))
print(
"The number %i is in the list %s: %s" %
(number, a_list, in_list2(a_list, number)))
| mit | Python |
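The slices in in_list2 copy half the list at every level, so each call still does O(n) work despite the halving; an index-based variant built on the standard library keeps the search logarithmic. A sketch:

from bisect import bisect_left

def in_list3(a_list, number):
    idx = bisect_left(a_list, number)
    return idx < len(a_list) and a_list[idx] == number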
|
cc06421fb4250640b2c9eef75480a3627a339473 | Add a script to normalize Gerrit ACLs | anbangr/osci-project-config,coolsvap/project-config,openstack-infra/project-config,dongwenjuan/project-config,citrix-openstack/project-config,Tesora/tesora-project-config,dongwenjuan/project-config,citrix-openstack/project-config,open-switch/infra_project-config,noorul/os-project-config,osrg/project-config,coolsvap/project-config,anbangr/osci-project-config,openstack-infra/project-config,noorul/os-project-config,Tesora/tesora-project-config,open-switch/infra_project-config,osrg/project-config | tools/normalize_acl.py | tools/normalize_acl.py | #!/usr/bin/env python
# Usage: normalize_acl.py acl.config [transformation [transformation [...]]]
#
# Transformations:
# 0 - dry run (default, print to stdout rather than modifying file in place)
# 1 - strip/condense whitespace and sort (implied by any other transformation)
# 2 - get rid of unneeded create on refs/tags
# 3 - remove any project.stat{e,us} = active since it's a default or a typo
# 4 - strip default *.owner = group Administrators permissions
# 5 - sort the exclusiveGroupPermissions group lists
import re
import sys
aclfile = sys.argv[1]
# a slice of sys.argv never raises, so no exception handling is needed here
transformations = sys.argv[2:]
def tokens(data):
"""Human-order comparison
This handles embedded positive and negative integers, for sorting
strings in a more human-friendly order."""
data = data.replace('.', ' ').split()
for n in range(len(data)):
try:
data[n] = int(data[n])
except ValueError:
pass
return data
acl = {}
out = ''
if '0' in transformations or not transformations:
dry_run = True
else:
dry_run = False
aclfd = open(aclfile)
for line in aclfd:
# condense whitespace to single spaces and get rid of leading/trailing
line = re.sub('\s+', ' ', line).strip()
# skip empty lines
if not line:
continue
# this is a section heading
if line.startswith('['):
section = line.strip(' []')
# use a list for this because some options can have the same "key"
acl[section] = []
# key=value lines
elif '=' in line:
acl[section].append(line)
# WTF
else:
raise Exception('Unrecognized line!')
aclfd.close()
if '2' in transformations:
try:
acl['access "refs/tags/*"'] = [
x for x in acl['access "refs/tags/*"']
if not x.startswith('create = ')]
except KeyError:
pass
if '3' in transformations:
try:
acl['project'] = [x for x in acl['project'] if x not in
('state = active', 'status = active')]
except KeyError:
pass
if '4' in transformations:
for section in acl.keys():
acl[section] = [x for x in acl[section] if x !=
'owner = group Administrators']
if '5' in transformations:
for section in acl.keys():
newsection = []
for option in acl[section]:
            key, value = [x.strip() for x in option.split('=', 1)]
if key == 'exclusiveGroupPermissions':
newsection.append('%s = %s' % (
key, ' '.join(sorted(value.split()))))
else:
newsection.append(option)
acl[section] = newsection
for section in sorted(acl.keys()):
if acl[section]:
out += '\n[%s]\n' % section
for option in sorted(acl[section], key=tokens):
out += '%s\n' % option
if dry_run:
print(out[1:-1])
else:
aclfd = open(aclfile, 'w')
aclfd.write(out[1:])
aclfd.close()
| apache-2.0 | Python |
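Typical runs name the file and then the transformations to apply, e.g. `./normalize_acl.py project.config 2 3 4 5` to rewrite in place, or no transformation (an implicit 0) for a dry run to stdout. The human-order key also compares embedded integers numerically rather than lexically:

>>> tokens('stable-2.10')
['stable-2', 10]
>>> sorted(['stable-2.10', 'stable-2.2'], key=tokens)
['stable-2.2', 'stable-2.10']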
|
7060b82030d719cdcbdcecdb5eb7d34b405aa805 | Make the migration for previous commit | Turupawn/website,lutris/website,lutris/website,Turupawn/website,Turupawn/website,lutris/website,lutris/website,Turupawn/website | platforms/migrations/0003_auto_20150718_0050.py | platforms/migrations/0003_auto_20150718_0050.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('platforms', '0002_auto_20150718_0042'),
]
operations = [
migrations.AlterField(
model_name='platform',
name='default_installer',
field=jsonfield.fields.JSONField(null=True),
),
]
| agpl-3.0 | Python |
|
3b42e348987294602440c3c1d4aa4361afcdc298 | Add problem 14 | dimkarakostas/matasano-cryptochallenges | problem_14.py | problem_14.py | from problem_12 import new_encryption_oracle, find_blocksize
import random
from string import printable
RANDOM_PREFIX = ''.join(random.choice(printable) for _ in range(random.randrange(0, 20)))
# print len(RANDOM_PREFIX)
def oracle(adversary_input):
return new_encryption_oracle(RANDOM_PREFIX + adversary_input)
def find_oracle_added_length(blocksize):
adversary_input = ''
previous_length = len(oracle(adversary_input))
while True:
adversary_input += '0'
current_length = len(oracle(adversary_input))
if current_length > previous_length:
return current_length - len(adversary_input) - blocksize
def find_padding_length(blocksize):
adversary_input = '0'*64
zero_encrypted_block = oracle(adversary_input)[2*blocksize:3*blocksize]
change_counter = 1
while True:
adversary_input = change_counter*'1' + '0'*(64-change_counter)
current_second_block = oracle(adversary_input)[2*blocksize:3*blocksize]
if current_second_block != zero_encrypted_block:
return 2*blocksize - change_counter + 1
change_counter += 1
def find_single_ecb_character(blocksize, decrypted, start_padding_length, unknown_text_length):
bypass_start_padding = '0'*(2*blocksize - start_padding_length)
input_padding = bypass_start_padding + '0'*(blocksize*(unknown_text_length/blocksize + 1) - len(decrypted) - 1)
test_padding = input_padding + decrypted
block_position = (len(test_padding) - len(bypass_start_padding))/blocksize
ciphertext = oracle(input_padding)[2*blocksize:]
cipher_blocks = [ciphertext[i*blocksize:(i+1)*blocksize] for i in range(len(ciphertext)/blocksize)]
for test_char in printable:
test_character = test_padding + test_char
test_character_ciphertext = oracle(test_character)[2*blocksize:]
test_blocks = [test_character_ciphertext[i*blocksize:(i+1)*blocksize] for i in range(len(test_character_ciphertext)/blocksize)]
if test_blocks[block_position] == cipher_blocks[block_position]:
return test_char
if __name__ == '__main__':
blocksize = find_blocksize(oracle)
oracle_added_length = find_oracle_added_length(blocksize)
start_padding_length = find_padding_length(blocksize)
unknown_text_length = oracle_added_length - start_padding_length
decrypted = ''
while len(decrypted) < unknown_text_length:
decrypted += find_single_ecb_character(blocksize, decrypted, start_padding_length, unknown_text_length)
print decrypted.decode('base64')
| mit | Python |
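The prefix arithmetic is the delicate part; a worked instance under assumed values makes it concrete.

# Assume blocksize B = 16 and a hidden random prefix of p = 11 bytes.
# find_padding_length: the third ciphertext block first changes when the '1's
#   reach into it, i.e. at change_counter = 2*B - p + 1 = 22, and the function
#   returns 2*B - 22 + 1 = 11 == p.
# find_single_ecb_character: bypass_start_padding = '0' * (2*B - p) = 21 zeros,
#   so prefix + bypass = 11 + 21 = 32 bytes = exactly two blocks, which is why
#   every oracle output is sliced at [2*blocksize:].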
|
49b1de4a68133e618723f96f2dc922b311bdd982 | Add Script to encode raw RGB565 | SmartArduino/openmv,SmartArduino/openmv,iabdalkader/openmv,kwagyeman/openmv,kwagyeman/openmv,tianzhihen/openmv,kwagyeman/openmv,SmartArduino/openmv,openmv/openmv,openmv/openmv,tianzhihen/openmv,iabdalkader/openmv,kwagyeman/openmv,tianzhihen/openmv,iabdalkader/openmv,iabdalkader/openmv,SmartArduino/openmv,openmv/openmv,openmv/openmv,tianzhihen/openmv | util/encode_raw.py | util/encode_raw.py | #!/usr/bin/env python
# Converts raw RGB565 video to MP4/AVI
from sys import argv, exit
from array import array
from subprocess import call
buf=None
TMP_FILE = "/tmp/video.raw"
if (len(argv) != 4):
print("Usage: encode_raw input.raw output.avi fps")
exit(1)
with open(argv[1], "rb") as f:
buf = array("H", f.read())
#Swap not needed if rgb565be is supported
buf.byteswap()
with open(TMP_FILE, "wb") as f:
f.write(buf.tostring())
cmd = "ffmpeg -vcodec rawvideo -r %d -f rawvideo -pix_fmt rgb565 -s 160x120 -i %s -vcodec mpeg4 %s"%(int(argv[3]), TMP_FILE, argv[2])
call(cmd.split())
| mit | Python |
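An example run, assuming capture.raw holds little-endian 160x120 RGB565 frames grabbed at 30 fps and ffmpeg is on the PATH:

    ./encode_raw.py capture.raw capture.avi 30

The byteswap exists only because this ffmpeg build lacks the rgb565be pixel format; with big-endian support the raw buffer could be written through untouched.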
|
5dba86b3a68c27a01eb143a6dfdb35d01c3c99e8 | add app_test | wecatch/app-turbo,tao12345666333/app-turbo,tao12345666333/app-turbo,tao12345666333/app-turbo | turbo/test/app_test.py | turbo/test/app_test.py | from __future__ import absolute_import, division, print_function, with_statement
import os
import signal
import sys
import unittest
import random
import time
import threading
import logging
import requests
import multiprocessing
from turbo import app
from turbo.conf import app_config
from turbo import register
app_config.app_name = 'app_test'
app_config.web_application_setting = {}
logger = logging.getLogger()
print(logger.level)
class HomeHandler(app.BaseBaseHandler):
def get(self):
logger.info('get')
def run_server():
register.register_url('/', HomeHandler)
app.start()
class AppTest(unittest.TestCase):
def setUp(self):
server = multiprocessing.Process(target=run_server)
server.start()
self.localhost = 'http://localhost:8888'
self.pid = server.pid
logger.warning(self.pid)
def tearDown(self):
os.kill(self.pid, signal.SIGKILL)
def test_get(self):
resp = requests.get(self.localhost)
logger.warning(resp.status_code)
if __name__ == '__main__':
unittest.main() | apache-2.0 | Python |
|
be0331e64726d659b824187fbc91b54ce0405615 | add initial implementation of weighted EM PCA | jakevdp/wpca | wpca/test/test_empca.py | wpca/test/test_empca.py | import numpy as np
from numpy.testing import assert_allclose
from ..empca import orthonormalize, random_orthonormal, pca, empca
def norm_sign(X):
i_max_abs = np.argmax(abs(X), 0)
sgn = np.sign(X[i_max_abs, range(X.shape[1])])
return X * sgn
def assert_columns_allclose_upto_sign(A, B, *args, **kwargs):
assert_allclose(norm_sign(A), norm_sign(B), *args, **kwargs)
def test_orthonormalize():
rand = np.random.RandomState(42)
X = rand.randn(3, 4)
X2 = orthonormalize(X)
assert_allclose(X[0] / np.linalg.norm(X[0]), X2[0])
assert_allclose(np.dot(X2, X2.T), np.eye(X2.shape[0]), atol=1E-15)
def test_random_orthonormal():
def check_random_orthonormal(n_samples, n_features):
X = random_orthonormal(n_samples, n_features, 42)
assert X.shape == (n_samples, n_features)
assert_allclose(np.dot(X, X.T), np.eye(X.shape[0]), atol=1E-15)
for n_samples in range(1, 6):
yield check_random_orthonormal, n_samples, 5
def test_empca_vs_pca():
rand = np.random.RandomState(42)
X = rand.randn(50, 5)
W = np.ones_like(X)
evecs1, coeff1 = empca(X, W, 5, niter=100)
evecs2, coeff2 = pca(X, 5)
assert_columns_allclose_upto_sign(evecs1.T, evecs2.T, rtol=1E-6)
assert_columns_allclose_upto_sign(coeff1, coeff2, rtol=1E-6)
| bsd-3-clause | Python |
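norm_sign removes the sign ambiguity of eigenvectors by flipping each column so its largest-magnitude entry is positive, which is what lets two decompositions be compared columnwise. A tiny concrete check, with norm_sign as defined above:

import numpy as np

X = np.array([[1., -2.],
              [-3., 4.]])
# column 0: dominant entry is -3, so the column flips; column 1: 4 is already positive
expected = np.array([[-1., -2.],
                     [3., 4.]])
assert np.allclose(norm_sign(X), expected)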
|
7ccfc89a51a76764c36b009dd9b5fc55570e3f56 | Add forgot password test | renalreg/radar,renalreg/radar,renalreg/radar,renalreg/radar | api/radar_api/tests/test_forgot_password.py | api/radar_api/tests/test_forgot_password.py | import json
from radar_api.tests.fixtures import get_user
from radar.database import db
def test_forgot_password(app):
user = get_user('admin')
client = app.test_client()
assert user.reset_password_token is None
assert user.reset_password_date is None
response = client.post('/forgot-password', data={
'username': user.username,
'email': user.email
})
assert response.status_code == 200
db.session.refresh(user)
assert user.reset_password_token is not None
assert user.reset_password_date is not None
def test_missing_username(app):
user = get_user('admin')
client = app.test_client()
response = client.post('/forgot-password', data={
'email': user.email,
})
assert response.status_code == 422
data = json.loads(response.data)
assert data == {
'errors': {
'username': ['This field is required.']
}
}
def test_missing_email(app):
user = get_user('admin')
client = app.test_client()
response = client.post('/forgot-password', data={
'username': user.username,
})
assert response.status_code == 422
data = json.loads(response.data)
assert data == {
'errors': {
'email': ['This field is required.']
}
}
def test_user_not_found(app):
client = app.test_client()
response = client.post('/forgot-password', data={
'username': '404',
'email': '[email protected]',
})
assert response.status_code == 422
data = json.loads(response.data)
assert data == {
'errors': {
'username': ['No user found with that username and email.']
}
}
| agpl-3.0 | Python |
|
b4bf757a15c404080679335bcce04ba45a7e4eae | Update fix_nonwarehouse_ledger_gl_entries_for_transactions.py | indictranstech/erpnext,indictranstech/erpnext,indictranstech/erpnext,njmube/erpnext,gsnbng/erpnext,Aptitudetech/ERPNext,gsnbng/erpnext,gsnbng/erpnext,njmube/erpnext,geekroot/erpnext,indictranstech/erpnext,geekroot/erpnext,geekroot/erpnext,geekroot/erpnext,njmube/erpnext,gsnbng/erpnext,njmube/erpnext | erpnext/patches/v7_0/fix_nonwarehouse_ledger_gl_entries_for_transactions.py | erpnext/patches/v7_0/fix_nonwarehouse_ledger_gl_entries_for_transactions.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
if not frappe.db.get_single_value("Accounts Settings", "auto_accounting_for_stock"):
return
frappe.reload_doctype("Account")
warehouses = frappe.db.sql_list("""select name from tabAccount
where account_type = 'Stock' and is_group = 0
and (warehouse is null or warehouse = '')""")
if warehouses:
warehouses = set_warehouse_for_stock_account(warehouses)
stock_vouchers = frappe.db.sql("""select distinct sle.voucher_type, sle.voucher_no
from `tabStock Ledger Entry` sle
where sle.warehouse in (%s) and creation > '2016-05-01'
and not exists(select name from `tabGL Entry`
where account=sle.warehouse and voucher_type=sle.voucher_type and voucher_no=sle.voucher_no)
order by sle.posting_date""" %
', '.join(['%s']*len(warehouses)), tuple(warehouses))
rejected = []
for voucher_type, voucher_no in stock_vouchers:
try:
frappe.db.sql("""delete from `tabGL Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no))
voucher = frappe.get_doc(voucher_type, voucher_no)
voucher.make_gl_entries()
frappe.db.commit()
except Exception, e:
print frappe.get_traceback()
rejected.append([voucher_type, voucher_no])
frappe.db.rollback()
print rejected
def set_warehouse_for_stock_account(warehouse_account):
for account in warehouse_account:
if frappe.db.exists('Warehouse', account):
frappe.db.set_value("Account", account, "warehouse", account)
else:
warehouse_account.remove(account)
return warehouse_account
| # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
if not frappe.db.get_single_value("Accounts Settings", "auto_accounting_for_stock"):
return
frappe.reload_doctype("Account")
warehouses = frappe.db.sql_list("""select name from tabAccount
where account_type = 'Stock' and is_group = 0
and (warehouse is null or warehouse = '')""")
if warehouses:
warehouses = set_warehouse_for_stock_account(warehouses)
stock_vouchers = frappe.db.sql("""select distinct sle.voucher_type, sle.voucher_no
from `tabStock Ledger Entry` sle
where sle.warehouse in (%s) and creation > '2016-05-01'
and not exists(select name from `tabGL Entry`
where account=sle.warehosue and voucher_type=sle.voucher_type and voucher_no=sle.voucher_no)
order by sle.posting_date""" %
', '.join(['%s']*len(warehouses)), tuple(warehouses))
rejected = []
for voucher_type, voucher_no in stock_vouchers:
try:
frappe.db.sql("""delete from `tabGL Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no))
voucher = frappe.get_doc(voucher_type, voucher_no)
voucher.make_gl_entries()
frappe.db.commit()
except Exception, e:
print frappe.get_traceback()
rejected.append([voucher_type, voucher_no])
frappe.db.rollback()
print rejected
def set_warehouse_for_stock_account(warehouse_account):
for account in warehouse_account:
if frappe.db.exists('Warehouse', account):
frappe.db.set_value("Account", account, "warehouse", account)
else:
warehouse_account.remove(account)
return warehouse_account | agpl-3.0 | Python |
ae52e3e4dc1fc254b7e1c258caa1fe00317bb9a5 | Add migrate script. | mehdisadeghi/mehdix.ir,mehdisadeghi/mehdix.ir,mehdisadeghi/mehdix.ir,mehdisadeghi/mehdix.ir,mehdisadeghi/mehdix.ir | disqus_converter.py | disqus_converter.py | '''Convert Disqus XML comments to YAML.'''
import os
import copy
import pathlib
import hashlib
import yaml
import iso8601
import xmltodict
from postsinfo import mapping
from rebuild_comments import encrypt
COMMENT_DIR = os.environ.get('COMMENT_DIR', './_data/comments')
def get_disqus_threads(infile):
with open(infile, 'r', encoding='utf-8') as file:
disqus = xmltodict.parse(file.read())['disqus']
threads = {}
for trd in disqus['thread']:
if not is_local_thread(trd):
threads[trd['@dsq:id']] = trd
threads[trd['@dsq:id']]['posts'] = []
for pst in disqus['post']:
key = pst['thread']['@dsq:id']
if key in threads:
threads[key]['posts'].append(pst)
return threads
def is_local_thread(thread):
return '0.0.0.0' in thread['link'] or '://localhost' in thread['link']
def write(thread, post_info):
uid = post_info['page_id'][1:]
comments = transform(thread, post_info)
if comments:
with open(os.path.join(COMMENT_DIR, f'{uid}.yml'), 'a+', encoding='utf8') as file:
yaml.dump(comments,
file,
default_flow_style=False,
allow_unicode=True)
def transform(thread, post_info):
'''Convert disqus form data to a normal comment.'''
comments = []
for post in thread['posts']:
comment = copy.copy(post_info)
comment.update(
{'date': iso8601.parse_date(post['createdAt']),
'name': post['author']['name'],
'email': hashlib.md5(post['author']['email'].encode('ascii')).hexdigest(),
'bucket': encrypt(post['author']['email']),
'website': make_profile_url(post),
'message': post['message']})
comments.append(comment)
return comments
def make_profile_url(post):
return 'https://disqus.com/by/{}/'.format(post['author']['username']) if post['author']['isAnonymous'] == 'false' else ''
def main():
# Load disqus
disqus_threads = get_disqus_threads(infile='db.xml')
# Make sure the comment directory exists
pathlib.Path(COMMENT_DIR).mkdir(parents=True, exist_ok=True)
# Convert disqus to current comment format. Use posts mapping.
for trd in disqus_threads.values():
# Update comment files with converted disqus comments
if trd['link'] in mapping:
write(trd, mapping[trd['link']])
if __name__ == '__main__':
main() | cc0-1.0 | Python |
|
588d49ef47cb4fa0848e44775a0102a7bd3f492a | add hdfs utils to distributed | amosonn/distributed,mrocklin/distributed,dask/distributed,broxtronix/distributed,broxtronix/distributed,dask/distributed,dask/distributed,mrocklin/distributed,amosonn/distributed,blaze/distributed,blaze/distributed,amosonn/distributed,mrocklin/distributed,broxtronix/distributed,dask/distributed | distributed/hdfs.py | distributed/hdfs.py | import os
from dask.utils import funcname  # used below; assumes dask's long-standing funcname helper
from .utils import ignoring
with ignoring(ImportError):
import snakebite.protobuf.ClientNamenodeProtocol_pb2 as client_proto
from snakebite.client import Client
def get_locations(filename, name_host, name_port):
client = Client(name_host, name_port, use_trash=False)
files = list(client.ls([filename]))
return [pair for file in files for pair in find(file, client)]
def find(f, client, data_root='/data/dfs/dn'):
request = client_proto.GetBlockLocationsRequestProto()
request.src = f['path']
request.length = long(f['length'])
request.offset = long(0)
response = client.service.getBlockLocations(request)
return [{'block': block,
'path': get_local_path(block, data_root),
'hosts': [location.id.ipAddr for location in block.locs]}
for block in response.locations.blocks]
def get_local_path(block, data_root='/data/dfs/dn'):
pool = block.b.poolId
Id = block.b.blockId
loc = idtoBlockdir(Id)
return "{}/current/{}/current/finalized/{}/blk_{}".format(
data_root, pool, loc, Id)
BLOCK_SUBDIR_PREFIX = 'subdir'
def idtoBlockdir(blockId):
d1 = str(((blockId >> 16) & 0xff))
d2 = str(((blockId >> 8) & 0xff))
pathd1 = BLOCK_SUBDIR_PREFIX+d1
pathd2 = BLOCK_SUBDIR_PREFIX+d2
path = os.path.join(pathd1, pathd2)
return path
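# Example (illustrative): blockId 66051 == 0x010203 -> 'subdir1/subdir2', matching
# the two-level finalized-block directory layout used by the HDFS DataNode.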
def get_data_root():
confd = os.environ.get('HADOOP_CONF_DIR', os.environ.get('HADOOP_INSTALL',
'') + '/hadoop/conf')
conf = os.sep.join([confd, 'hdfs-site.xml'])
    # 'import xml' alone does not bind xml.etree; import the submodule explicitly
    import xml.etree.ElementTree
x = xml.etree.ElementTree.fromstring(open(conf).read())
for e in x:
if e.find('name').text == 'dfs.datanode.data.dir':
return e.find('value').text
def hdfs_map_blocks(executor, func, location, namenode_host, namenode_port):
""" Map a function over blocks of a location in HDFS
    >>> L = hdfs_map_blocks(executor, pd.read_csv, '/data/nyctaxi/',
    ...                     '192.168.1.100', 9000)  # doctest: +SKIP
    >>> type(L[0])  # doctest: +SKIP
Future
"""
blocks = get_locations(location, namenode_host, namenode_port)
paths = [blk['path'] for blk in blocks]
hosts = [blk['hosts'] for blk in blocks]
return executor.map(func, paths, workers=hosts)
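# Design note: passing each block's datanode IPs via workers= asks the executor to
# schedule every task on a machine that already holds that block, keeping reads local.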
def hdfs_dask_graph(executor, func, location, namenode_host, namenode_port):
""" Produce dask graph mapping function over blocks in HDFS
Inserts HDFS host restrictions into the executor.
Returns a graph and keys corresponding to function applied to blocks.
Does not trigger execution.
    >>> dsk, keys = hdfs_dask_graph(executor, pd.read_csv, '/data/nyctaxi/',
    ...                             '192.168.1.100', 9000)  # doctest: +SKIP
"""
blocks = get_locations(location, namenode_host, namenode_port)
paths = [blk['path'] for blk in blocks]
hosts = [blk['hosts'] for blk in blocks]
names = [(funcname(func), path) for path in paths]
restrictions = dict(zip(names, hosts))
dsk = {name: (func, path) for name, path in zip(names, paths)}
executor.scheduler_queue.put_nowait({'op': 'update-graph',
'dsk': {},
'keys': [],
'restrictions': restrictions})
return dsk, names
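# Note: only the worker restrictions are pushed to the scheduler here; the returned
# dsk/names are handed back to the caller, so nothing executes until the graph is
# actually submitted.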
| bsd-3-clause | Python |
|
c8ae682ff98f2c5b5733ae4b299970c820e46630 | Add regression test for #636 | explosion/spaCy,Gregory-Howard/spaCy,spacy-io/spaCy,spacy-io/spaCy,recognai/spaCy,recognai/spaCy,explosion/spaCy,raphael0202/spaCy,aikramer2/spaCy,raphael0202/spaCy,spacy-io/spaCy,aikramer2/spaCy,oroszgy/spaCy.hu,raphael0202/spaCy,recognai/spaCy,oroszgy/spaCy.hu,Gregory-Howard/spaCy,raphael0202/spaCy,explosion/spaCy,oroszgy/spaCy.hu,aikramer2/spaCy,spacy-io/spaCy,Gregory-Howard/spaCy,explosion/spaCy,recognai/spaCy,honnibal/spaCy,honnibal/spaCy,Gregory-Howard/spaCy,Gregory-Howard/spaCy,oroszgy/spaCy.hu,oroszgy/spaCy.hu,honnibal/spaCy,honnibal/spaCy,spacy-io/spaCy,recognai/spaCy,spacy-io/spaCy,raphael0202/spaCy,recognai/spaCy,Gregory-Howard/spaCy,explosion/spaCy,aikramer2/spaCy,aikramer2/spaCy,aikramer2/spaCy,raphael0202/spaCy,explosion/spaCy,oroszgy/spaCy.hu | spacy/tests/regression/test_issue636.py | spacy/tests/regression/test_issue636.py | # coding: utf8
from __future__ import unicode_literals
from ...tokens.doc import Doc
import pytest
@pytest.mark.xfail
@pytest.mark.models
@pytest.mark.parametrize('text', ["I cant do this."])
def test_issue636(EN, text):
"""Test that to_bytes and from_bytes don't change the token lemma."""
doc1 = EN(text)
doc2 = Doc(EN.vocab)
doc2.from_bytes(doc1.to_bytes())
print([t.lemma_ for t in doc1], [t.lemma_ for t in doc2])
assert [t.lemma_ for t in doc1] == [t.lemma_ for t in doc2]
| mit | Python |
|
423707ea25e88b2454a9541eb52f900da87e95b2 | allow external backends, specified via ZMQ_BACKEND env | dash-dash/pyzmq,caidongyun/pyzmq,dash-dash/pyzmq,Mustard-Systems-Ltd/pyzmq,Mustard-Systems-Ltd/pyzmq,yyt030/pyzmq,yyt030/pyzmq,Mustard-Systems-Ltd/pyzmq,swn1/pyzmq,yyt030/pyzmq,ArvinPan/pyzmq,caidongyun/pyzmq,swn1/pyzmq,ArvinPan/pyzmq,swn1/pyzmq,caidongyun/pyzmq,dash-dash/pyzmq,ArvinPan/pyzmq | zmq/backend/__init__.py | zmq/backend/__init__.py | """Import basic exposure of libzmq C API as a backend"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
from .select import public_api, select_backend
if 'PYZMQ_BACKEND' in os.environ:
backend = os.environ['PYZMQ_BACKEND']
if backend in ('cython', 'cffi'):
backend = 'zmq.backend.%s' % backend
_ns = select_backend(backend)
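    # e.g. PYZMQ_BACKEND=cffi resolves to 'zmq.backend.cffi'; any other value is
    # treated as an importable module path supplying an external backend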
else:
# default to cython, fallback to cffi
try:
_ns = select_backend('zmq.backend.cython')
except ImportError:
_ns = select_backend('zmq.backend.cffi')
globals().update(_ns)
__all__ = public_api
| """Import basic exposure of libzmq C API as a backend"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .select import public_api, select_backend
try:
_ns = select_backend('zmq.backend.cython')
except ImportError:
_ns = select_backend('zmq.backend.cffi')
globals().update(_ns)
__all__ = public_api
| bsd-3-clause | Python |
6e501f2cbfe6b53eca72389c9a1c98a3c3d098c9 | Add redhat official helper | morucci/repoxplorer,morucci/repoxplorer,morucci/repoxplorer,morucci/repoxplorer | bin/helpers/redhatofficial/redhatoffical.py | bin/helpers/redhatofficial/redhatoffical.py | #!/usr/bin/env python
# Copyright 2018, Red Hat
# Copyright 2018, Fabien Boucher
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import yaml
import github3
import argparse
import requests
# This is a small tool to read the redhatofficial project file
# and create a repoXplorer compatible projects.yaml file.
INFO_URI = (
"https://raw.githubusercontent.com/"
"RedHatOfficial/RedHatOfficial.github.io/"
"dev/app/data/projects.json")
parser = argparse.ArgumentParser(
description='Read/Index RedhatOffical projects file')
parser.add_argument(
'--output-path', type=str,
help='yaml file path to register organization repositories details')
args = parser.parse_args()
def fetch_repos(org, template, repo=None, query=None):
# anon = github3.GitHub()
anon = github3.GitHub('', token='')
orga = anon.organization(org)
data = {}
if not orga:
print(
"Org %s not found, try to find single"
" user's repos ..." % org)
repos = anon.repositories_by(org)
else:
repos = orga.repositories()
for r in repos:
if repo and r.name != repo:
continue
if r.fork:
continue
if query and query not in r.name:
continue
data[r.name] = {
"branches": [r.default_branch],
}
data[r.name]["template"] = template
return data
if __name__ == "__main__":
gp = yaml.safe_load(requests.get(INFO_URI).text)
projects = {}
templates = {}
struct = {'projects': projects,
'project-templates': templates}
c = len(gp)
for project in gp:
print(project)
print("Remain: %d" % c)
c -= 1
uri = project['projectRepository'].rstrip('/')
if '?q=' in uri:
query = uri.split('?q=')[1]
uri = uri.split('?q=')[0]
print("There is a query on %s for %s" % (uri, query))
else:
query = None
uris = uri.split('/')
if uris[-2] == 'github.com':
# It is a github org
org = uris[-1]
repo = None
orguri = uri
else:
# It is a single github repo
org = uris[-2]
repo = uris[-1]
orguri = "/".join(uris[0:-1])
projects[project['projectName']] = {
'repos': fetch_repos(org, project['projectName'], repo, query),
'description': project['projectDescription'],
}
templates[project['projectName']] = {
"branches": ["master"],
"uri": orguri + "/%(name)s",
"gitweb": orguri + "/%(name)s/commit/%%(sha)s",
"tags": [project['category']]
}
path = 'redhatoffical.yaml'
if args.output_path:
path = os.path.join(os.path.expanduser(args.output_path), path)
with open(path, 'w') as fd:
fd.write(yaml.safe_dump(struct,
default_flow_style=False))
print("")
print("RedHatOffical source repositories details"
" has been written to %s" % path)
print("Please edit the yaml file if needed (like adding additional"
" branches to index, defines custom releases, ...)")
sys.exit(0)
| apache-2.0 | Python |
|
cb9166c4564c4e763e1214355dc76cbe6d466258 | Add data migration for section | supermitch/simple-author | books/migrations/0009_auto_20141127_1718.py | books/migrations/0009_auto_20141127_1718.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_sections(apps, schema_editor):
# Don't just use books.models.Section, that could be out of date
Section = apps.get_model('books', 'Section')
FRONT_MATTER_CHOICES = [
#('db_value', 'human readable'),
('half_title', 'Half title'),
('title_page', 'Title Page'),
('colophon', 'Colophon'),
('contents', 'Contents'),
('foreward', 'Foreward'),
('preface', 'Preface'),
('acknowledgment', 'Acknowlegment'),
('introduction', 'Introduction'),
('dedication', 'Dedication'),
('epigraph', 'Epigraph'),
('prologue', 'Prologue'),
]
BACK_MATTER_CHOICES = [
('epilogue', 'Epilogue'),
('afterward', 'Afterward'),
('conclusion', 'Conclusion'),
('postscript', 'Postscript'),
('appendix', 'Appendix'),
('glossary', 'Glossary'),
('bibliography', 'Bibliography'),
('index', 'Index'),
('colophon', 'Colophon'),
]
for order, (sect_name, _) in enumerate(FRONT_MATTER_CHOICES):
sect = Section(name=sect_name, order=order, location='front')
sect.save()
for order, (sect_name, _) in enumerate(BACK_MATTER_CHOICES):
sect = Section(name=sect_name, order=order, location='back')
sect.save()
def remove_sections(apps, schema_editor):
""" Just make the migration reversible, by calling this function. """
Section = apps.get_model('books', 'Section')
for section in Section.objects.all():
section.delete()
class Migration(migrations.Migration):
dependencies = [
('books', '0008_auto_20141127_1657'),
]
operations = [
migrations.RunPython(add_sections, remove_sections),
]
| mit | Python |
|
161feec0d3764c7cdeebfdc7cd62e5901a89666a | Add initial implementation | Isaac-W/cpr-vision-measurement,Isaac-W/cpr-vision-measurement,Isaac-W/cpr-vision-measurement | runtracker.py | runtracker.py | import cv2
import numpy as np
import imutils
PI = 3.141592654
AREA_ERROR_THRESH = 0.05 # Error away from the mean area
# Color ranges
#CALIB_COLOR_MIN = ( 70, 40, 61)
#CALIB_COLOR_MAX = (110, 175, 255)
CALIB_COLOR_MIN = ( 52, 24, 56)
CALIB_COLOR_MAX = ( 98, 169, 178)
TRACK_COLOR_MIN = ( 0, 0, 0)
TRACK_COLOR_MAX = (255, 225, 255)
prevCalib = []
prevTrack = None
def ellipseArea(ellipse):
return ellipse[1][0] * ellipse[1][1] * PI / 4
def main():
# Open webcam
cap = cv2.VideoCapture(0)
while True:
# Get frame
ret, frame = cap.read()
output = frame.copy()
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Convert to HSV (for color range)
# Apply morphological filtering
k_x = cv2.getGaussianKernel(8, 0)
k_y = cv2.getGaussianKernel(8, 0)
kernel = k_x * np.transpose(k_y)
filt = cv2.inRange(hsv, CALIB_COLOR_MIN, CALIB_COLOR_MAX)
filt = cv2.morphologyEx(filt, cv2.MORPH_OPEN, kernel, iterations=2)
# Find contours
_, contours, _ = cv2.findContours(filt, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(output, contours, -1, (0, 0, 255), 3)
matches = [] # Contours that match a marker
for c in contours:
e = cv2.fitEllipse(c)
area_c = cv2.contourArea(c)
area_e = ellipseArea(e)
if abs(area_c - area_e) < (AREA_ERROR_THRESH * (area_c + area_e) / 2): # Is within error
matches.append((c, e))
# Sort by size
matches.sort(key=lambda x: ellipseArea(x[1]), reverse=True)
# Get 2 best ellipses
for i in range(0, min(len(matches), 2)):
c = matches[i][0]
e = matches[i][1]
cv2.ellipse(output, e, (0, 255, 0), 2)
cv2.putText(output, 'C: ' + str(cv2.contourArea(c)) + ' | E: ' + str(ellipseArea(e)), (int(e[0][0]), int(e[0][1])), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
# Show frame
cv2.imshow('Frame', frame)
cv2.imshow('Filtered', filt)
cv2.imshow('Output', output)
if cv2.waitKey(1) == 27:
break
if __name__ == '__main__':
main() | mit | Python |
|
07825b7f80a12619c847de49f0f2b991faeea7b4 | Add a simple handler cookie_wsh.py useful for cookie test | bpsinc-native/src_third_party_pywebsocket_src,bpsinc-native/src_third_party_pywebsocket_src,bpsinc-native/src_third_party_pywebsocket_src,bpsinc-native/src_third_party_pywebsocket_src | example/cookie_wsh.py | example/cookie_wsh.py | # Copyright 2014 Google Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the COPYING file or at
# https://developers.google.com/open-source/licenses/bsd
import urlparse
def _add_set_cookie(request, value):
request.extra_headers.append(('Set-Cookie', value))
def web_socket_do_extra_handshake(request):
components = urlparse.urlparse(request.uri)
command = components[4]
ONE_DAY_LIFE = 'Max-Age=86400'
if command == 'set':
_add_set_cookie(request, '; '.join(['foo=bar', ONE_DAY_LIFE]))
elif command == 'set_httponly':
_add_set_cookie(request,
'; '.join(['httpOnlyFoo=bar', ONE_DAY_LIFE, 'httpOnly']))
elif command == 'clear':
_add_set_cookie(request, 'foo=0; Max-Age=0')
_add_set_cookie(request, 'httpOnlyFoo=0; Max-Age=0')
def web_socket_transfer_data(request):
pass
| bsd-3-clause | Python |
|
39019e998da2c1f73f82e0eb446df78ffc95c134 | Create safe_steps.py | uktechreviews/minecraft_workshops | safe_steps.py | safe_steps.py | import mcpi.minecraft as minecraft
import mcpi.block as block
mc = minecraft.Minecraft.create()
while True:
p = mc.player.getTilePos()
b = mc.getBlock(p.x, p.y-1, p.z)
if b == block.AIR.id or b == block.WATER_FLOWING.id or b==block.WATER_STATIONARY.id:
mc.setBlock(pos.x, pos.y-1, pos.z, block.WOOD_PLANKS.id)
| cc0-1.0 | Python |
|
c78480fc1f566bb6d266705336dbe9cd90d07996 | Create 476_number_complement.py | jsingh41/algos | 476_number_complement.py | 476_number_complement.py | """
https://leetcode.com/problems/number-complement/description/
Given a positive integer, output its complement number. The complement strategy is to flip the bits of its binary representation.
Note:
The given integer is guaranteed to fit within the range of a 32-bit signed integer.
You could assume no leading zero bit in the integer’s binary representation.
Example 1:
Input: 5
Output: 2
Explanation: The binary representation of 5 is 101 (no leading zero bits), and its complement is 010. So you need to output 2.
Example 2:
Input: 1
Output: 0
Explanation: The binary representation of 1 is 1 (no leading zero bits), and its complement is 0. So you need to output 0.
"""
class Solution(object):
def findComplement(self, num):
"""
:type num: int
:rtype: int
"""
# Solution: Extract binary of the number using bin() function. For every character in the binary, get complement, append.
c = ''
bin_c = ''
bin_num = bin(num)[2:]
print bin_num
for i in range(0,len(bin_num)):
print bin_num[i]
if bin_num[i] == '0':
# print bin_num[i]
c = '1'
elif bin_num[i] == '1':
# print bin_num[i]
c = '0'
bin_c = bin_c+c
print "bin output: ",(bin_c)
return(int(bin_c,2))
| mit | Python |
|
0104600fe32b2b676974f29df37d10cc86a7441a | enable CMake build (with HTTP/3) -- take 2 | facebook/bistro,facebook/bistro,facebook/bistro,facebook/bistro,facebook/bistro,facebook/bistro | build/fbcode_builder/specs/proxygen_quic.py | build/fbcode_builder/specs/proxygen_quic.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import specs.folly as folly
import specs.fizz as fizz
import specs.mvfst as mvfst
import specs.sodium as sodium
import specs.wangle as wangle
import specs.zstd as zstd
def fbcode_builder_spec(builder):
builder.add_option("proxygen/proxygen:cmake_defines", {"BUILD_QUIC": "ON"})
return {
"depends_on": [folly, wangle, fizz, sodium, zstd, mvfst],
"steps": [builder.fb_github_cmake_install("proxygen/proxygen", "..")],
}
| mit | Python |
|
37e74416a090342c18cfad87df74dd958400145d | Add 'Others' category. | enjaz/enjaz,osamak/student-portal,enjaz/enjaz,osamak/student-portal,osamak/student-portal,osamak/student-portal,enjaz/enjaz,enjaz/enjaz,enjaz/enjaz,osamak/student-portal | bulb/migrations/0009_add_others_category.py | bulb/migrations/0009_add_others_category.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_categories(apps, schema_editor):
Category = apps.get_model('bulb', 'Category')
Category.objects.create(code_name="others", name="أخرى")
def remove_categories(apps, schema_editor):
Category = apps.get_model('bulb', 'Category')
Category.objects.filter(code_name="others").delete()
class Migration(migrations.Migration):
dependencies = [
('bulb', '0008_improve_status'),
]
operations = [
migrations.RunPython(
add_categories,
reverse_code=remove_categories),
]
| agpl-3.0 | Python |
|
317160665a58a2e0433202e4605710b09a71de9d | add scrub script to remove solution tags, thanks https://gist.github.com/minrk/3836889 | amcdawes/QMlabs | scrub_sols.py | scrub_sols.py | #!/usr/bin/env python
"""
simple example script for scrubping solution code cells from IPython notebooks
Usage: `scrub_code.py foo.ipynb [bar.ipynb [...]]`
Marked code cells are scrubbed from the notebook
"""
import io
import os
import sys
from IPython.nbformat.current import read, write
def scrub_code_cells(nb):
scrubbed = 0
cells = 0
for ws in nb.worksheets:
for cell in ws.cells:
if cell.cell_type != 'code':
continue
cells += 1
# scrub cells marked with initial '# Solution' comment
# any other marker will do, or it could be unconditional
if cell.input.startswith("# Solution"):
cell.input = u'# Solution goes here'
scrubbed += 1
cell.outputs = []
print
print("scrubbed %i/%i code cells from notebook %s" % (scrubbed, cells, nb.metadata.name))
if __name__ == '__main__':
for ipynb in sys.argv[1:]:
print("scrubbing %s" % ipynb)
with io.open(ipynb, encoding='utf8') as f:
nb = read(f, 'json')
scrub_code_cells(nb)
base, ext = os.path.splitext(ipynb)
new_ipynb = "%s_blank%s" % (base, ext)
with io.open(new_ipynb, 'w', encoding='utf8') as f:
write(nb, f, 'json')
print("wrote %s" % new_ipynb)
| mit | Python |
|
3bafceba383125475d5edb895bc9d88b0dfc5042 | Add status to Role | barberscore/barberscore-api,dbinetti/barberscore,barberscore/barberscore-api,barberscore/barberscore-api,dbinetti/barberscore-django,dbinetti/barberscore-django,barberscore/barberscore-api,dbinetti/barberscore | project/apps/api/migrations/0093_role_status.py | project/apps/api/migrations/0093_role_status.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-05 23:28
from __future__ import unicode_literals
from django.db import migrations
import django_fsm
class Migration(migrations.Migration):
dependencies = [
('api', '0092_auto_20160305_1514'),
]
operations = [
migrations.AddField(
model_name='role',
name='status',
field=django_fsm.FSMIntegerField(choices=[(0, b'New')], default=0),
),
]
| bsd-2-clause | Python |
|
4735ee97aa36920e811edc450d8b6e8a09b5caf5 | add utility for explode bam | jason-weirather/Au-public,jason-weirather/Au-public,jason-weirather/Au-public,jason-weirather/Au-public | iron/utilities/explode_bam.py | iron/utilities/explode_bam.py | #!/usr/bin/python
import sys, argparse
from subprocess import Popen, PIPE
from SamBasics import SamStream
from multiprocessing import cpu_count, Pool
def main():
parser = argparse.ArgumentParser(description="Break a bam into evenly sized chunks print the number of chunks",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input',help="Use - for STDIN sam or directly name bamfile")
parser.add_argument('output_base',help="output base name myout will go to myout.1.bam")
parser.add_argument('-k',type=int,required=True,help="Number per chunk")
parser.add_argument('--threads',type=int,default=cpu_count(),help="Number of threads")
args = parser.parse_args()
inf = None
if args.input == '-':
inf = sys.stdin
else:
cmd = "samtools view -h "+args.input
p = Popen(cmd.split(),stdout=PIPE)
inf = p.stdout
v = SamStream(inf)
buffer = []
i = 0
if args.threads > 1:
poo= Pool(processes=args.threads)
while True:
e = v.read_entry()
if not e: break
buffer.append(e)
if len(buffer) >= args.k:
i+=1
if args.threads > 1:
poo.apply_async(do_output,args=(buffer,v.header[:],i,args.output_base))
else:
do_output(buffer,v.header[:],i,args.output_base)
buffer = []
if len(buffer) > 0:
i+=1
if args.threads > 1:
poo.apply_async(do_output,args=(buffer,v.header[:],i,args.output_base))
else:
do_output(buffer,v.header[:],i,args.output_base)
if args.threads > 1:
poo.close()
poo.join()
if args.input != '-':
p.communicate()
print i
def do_output(buffer,header,i,output_base):
of = open(output_base+'.'+str(i)+'.bam','w')
cmd = 'samtools view - -Sb'
p = Popen(cmd.split(),stdin=PIPE,stdout=of)
for e in header:
p.stdin.write(e)
for e in buffer:
p.stdin.write(e)
p.communicate()
of.close()
if __name__=="__main__":
main()
| apache-2.0 | Python |
|
77a031fd34d73047a529fe9e06d7781ba0d4c56d | add basic structure of python ui | wilseypa/warped2-models,wilseypa/warped2-models,wilseypa/warped2-models | models/synthetic/ui/synthetic.py | models/synthetic/ui/synthetic.py |
from Tkinter import *
# initial
root = Tk()
root.title("Synthetic Model")
label = Label(root, text = 'Synthetic Model', font = (None, 20))
label.pack()
m1 = PanedWindow()
m1.pack(fill = BOTH, expand = 1)
m2 = PanedWindow(m1, orient = VERTICAL)
m1.add(m2)
m3 = PanedWindow(m1, orient = VERTICAL)
m1.add(m3)
m4 = PanedWindow(m1, orient = VERTICAL)
m1.add(m4)
m5 = PanedWindow(m1, orient = VERTICAL)
m1.add(m5)
m6 = PanedWindow(m1, orient = VERTICAL)
m1.add(m6)
# network
nt2 = Label(m2, text = 'Netwrok Selection')
nt3 = Label(m3, text = ' ')
nt4 = Label(m4, text = ' ')
nt5 = Label(m5, text = ' ')
nt6 = Label(m6, text = ' ')
m2.add(nt2)
m3.add(nt3)
m4.add(nt4)
m5.add(nt5)
m6.add(nt6)
network = IntVar()
R1 = Radiobutton(m2, text = "Watts-strogattz", font = 30, fg = "red", variable = network, value = 1)
ws_mT = Label(m3, text = 'mean degree', fg = 'red')
ws_mV = Entry(m4, bd = 2)
ws_pT = Label(m5, text = 'probability', fg = 'red')
ws_pV = Entry(m6, bd = 2)
R2 = Radiobutton(m2, text = "Barabsi-Albert", font = 30, fg = "red", variable = network, value = 2)
ba_mT = Label(m3, text = 'mean degree', fg = 'red')
ba_mV = Entry(m4, bd = 2)
ba_pT = Label(m5, text = 'probability', fg = 'red')
ba_pV = Entry(m6, bd = 2)
m2.add(R1)
m3.add(ws_mT)
m4.add(ws_mV)
m5.add(ws_pT)
m6.add(ws_pV)
m2.add(R2)
m3.add(ba_mT)
m4.add(ba_mV)
m5.add(ba_pT)
m6.add(ba_pV)
# Node selection
DIST = [("Exponential", "1"),
("Geometric", "2"),
("Binomial", "3"),
("Normal", "4"),
("Uniform", "5"),
("Poisson", "6"),
("Lognormal", "7"),
]
ns = IntVar()
ns.set("1")
for dist, c in DIST:
b = Radiobutton(m2, text = dist, variable = ns, value = c, font = 30, fg = "blue")
m2.add(b)
# Event send
es = IntVar()
for dist, c in DIST:
e = Radiobutton(m2, text = dist, variable = ns, value = c, font = 30, fg = "red")
m2.add(e)
# nodes
node1 = Label(m2, text = "Number of Nodes", font = 30, fg = "blue")
nodevar = Entry(m2, bd = 4)
nodevar.insert(0, "100000")
m2.add(node1)
m2.add(nodevar)
# state size
s1 = Label(m2, text = "State Size: Min", font = 30, fg = "red")
s2 = Label(m2, text = "Max", font = 30, fg = "red")
s_min = Entry(m2, bd = 4)
s_max = Entry(m2, bd = 4)
s_min.insert(0,"100")
s_max.insert(0,"100")
m2.add(s1)
m2.add(s_min)
m2.add(s2)
m2.add(s_max)
# floating point operation count
fp1 = Label(m2, text = "Floating Point: Min", font = 30, fg = "blue")
fp2 = Label(m2, text = "Max", font = 30, fg = "blue")
fp_min = Entry(m2, bd = 4)
fp_max = Entry(m2, bd = 4)
fp_min.insert(0,"1000")
fp_max.insert(0,"1000")
m2.add(fp1)
m2.add(fp_min)
m2.add(fp2)
m2.add(fp_max)
mainloop()
| mit | Python |
|
ebd62eac70d5589b0b7f593009024868f981e658 | Add actor with behavior similar to old-style Delay | les69/calvin-base,EricssonResearch/calvin-base,les69/calvin-base,EricssonResearch/calvin-base,EricssonResearch/calvin-base,les69/calvin-base,EricssonResearch/calvin-base,les69/calvin-base | calvin/actorstore/systemactors/std/ClassicDelay.py | calvin/actorstore/systemactors/std/ClassicDelay.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, ActionResult, manage, condition, guard
class ClassicDelay(Actor):
"""
After first token, pass on token once every 'delay' seconds.
Input :
token: anything
Outputs:
token: anything
"""
@manage(['delay'])
def init(self, delay=0.1):
self.delay = delay
self.use('calvinsys.events.timer', shorthand='timer')
self.timer = None
def setup(self):
self.timer = self['timer'].repeat(self.delay)
def will_migrate(self):
self.timer.cancel()
def did_migrate(self):
self.setup()
@condition(['token'], ['token'])
@guard(lambda self, _: not self.timer)
def start_timer(self, token):
self.setup()
return ActionResult(production=(token, ))
@condition(['token'], ['token'])
@guard(lambda self, _: self.timer and self.timer.triggered)
def passthrough(self, token):
self.timer.ack()
return ActionResult(production=(token, ))
action_priority = (start_timer, passthrough)
requires = ['calvinsys.events.timer']
| apache-2.0 | Python |
|
a88cf930a5c0e67a7aef93ab5c4eb705ad7aad32 | Fix ‘permissions_classes’ typos | benjaoming/kolibri,benjaoming/kolibri,jonboiser/kolibri,lyw07/kolibri,learningequality/kolibri,mrpau/kolibri,jonboiser/kolibri,mrpau/kolibri,indirectlylit/kolibri,benjaoming/kolibri,learningequality/kolibri,jonboiser/kolibri,christianmemije/kolibri,benjaoming/kolibri,learningequality/kolibri,mrpau/kolibri,jonboiser/kolibri,indirectlylit/kolibri,indirectlylit/kolibri,lyw07/kolibri,lyw07/kolibri,christianmemije/kolibri,christianmemije/kolibri,DXCanas/kolibri,learningequality/kolibri,lyw07/kolibri,DXCanas/kolibri,mrpau/kolibri,DXCanas/kolibri,DXCanas/kolibri,christianmemije/kolibri,indirectlylit/kolibri | kolibri/core/lessons/tests.py | kolibri/core/lessons/tests.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
# Create your tests here.
| mit | Python |
4bbd622921fcef6a07d5d87c0640a9eb4e48cf12 | Add nurseryTherm python file | mattmole/nursery-temperature-touch | nurseryTherm.py | nurseryTherm.py | #!/usr/bin/python
#CamJam Edukit 2 - Sensors
# Worksheet 3 - Temperature
# Import Libraries
import os
import glob
import time
import paho.mqtt.client as paho
import json
# Initialize the GPIO Pins
os.system('modprobe w1-gpio') # Turns on the GPIO module
os.system('modprobe w1-therm') # Turns on the Temperature module
# Finds the correct device file that holds the temperature data
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
# A function that reads the sensors data
def read_temp_raw():
f = open(device_file, 'r') # Opens the temperature device file
lines = f.readlines() # Returns the text
f.close()
return lines
# Convert the value of the sensor into a temperature
def read_temp():
lines = read_temp_raw() # Read the temperature 'device file'
# While the first line does not contain 'YES', wait for 0.2s
# and then read the device file again.
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw()
# Look for the position of the '=' in the second line of the
# device file.
equals_pos = lines[1].find('t=')
# If the '=' is found, convert the rest of the line after the
# '=' into degrees Celsius, then degrees Fahrenheit
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
temp_f = temp_c * 9.0 / 5.0 + 32.0
return temp_c, temp_f
def on_connect(client, userdata, flags, rc):
m="Connected flags"+str(flags)+"result code "\
+str(rc)+"client1_id "+str(client)
print(m)
def on_message(client1, userdata, message):
print("message received " ,str(message.payload.decode("utf-8")))
# Print out the temperature until the program is stopped.
#Connect to an MQTT server
client = paho.Client()
#client.on_connect= on_connect #attach function to callback
#client.on_message=on_message #attach function to callback
time.sleep(1)
client.connect("192.168.1.104") #connect to broker
#client.loop_start() #start the loop
#client.subscribe("house/nursery")
#>>> from time import gmtime, strftime
#>>> strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
# While running print the time and temperature
# Optionally write to a CSV file
# Publish the temperature to the MQTT server
try:
while True:
strTime = time.strftime("%H:%M:%S", time.localtime())
strDate = time.strftime("%Y-%m-%d",time.localtime())
temp = read_temp()
print(strTime,temp)
client.publish("/house/nursery/temp","%0.1f"%temp[0])
# f = open("/home/pi/nurseryTemp.csv","a")
# f.write("%s,%s,%s\n"%(strDate,strTime,temp[0]))
# f.close()
time.sleep(30)
except:
client.disconnect()
#client.loop_stop()
print("Closing")
| mit | Python |
|
e363aac46c9a5b607c7b32bcc5546c5a2728d750 | Add migration which fixes missing message IDs. | qubs/data-centre,qubs/data-centre,qubs/climate-data-api,qubs/climate-data-api | climate_data/migrations/0029_auto_20170628_1527.py | climate_data/migrations/0029_auto_20170628_1527.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-28 15:27
from __future__ import unicode_literals
from django.db import migrations
from datetime import timedelta
# noinspection PyUnusedLocal
def add_message_id_to_reading(apps, schema_editor):
# noinspection PyPep8Naming
Reading = apps.get_model('climate_data', 'Reading')
# noinspection PyPep8Naming
Message = apps.get_model('climate_data', 'Message')
for reading in Reading.objects.filter(message_id=None):
reading.message = Message.objects.filter(
station=reading.station,
arrival_time__gt=reading.read_time,
arrival_time__lt=(reading.read_time + timedelta(minutes=52))
).first()
reading.save()
class Migration(migrations.Migration):
dependencies = [
('climate_data', '0028_auto_20170627_1914'),
]
operations = [
migrations.RunPython(add_message_id_to_reading),
]
| apache-2.0 | Python |
|
840bc57e7120ae67e84c1c7bca94cfef34c8d2a8 | Copy old script from @erinspace which added identifiers to existing preprints. | erinspace/osf.io,chennan47/osf.io,Johnetordoff/osf.io,cslzchen/osf.io,felliott/osf.io,baylee-d/osf.io,saradbowman/osf.io,leb2dg/osf.io,caseyrollins/osf.io,sloria/osf.io,Johnetordoff/osf.io,TomBaxter/osf.io,crcresearch/osf.io,mattclark/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,felliott/osf.io,mfraezz/osf.io,adlius/osf.io,mfraezz/osf.io,adlius/osf.io,icereval/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,mfraezz/osf.io,pattisdr/osf.io,sloria/osf.io,CenterForOpenScience/osf.io,Johnetordoff/osf.io,TomBaxter/osf.io,aaxelb/osf.io,baylee-d/osf.io,CenterForOpenScience/osf.io,icereval/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,baylee-d/osf.io,laurenrevere/osf.io,chennan47/osf.io,pattisdr/osf.io,crcresearch/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,chennan47/osf.io,cslzchen/osf.io,laurenrevere/osf.io,HalcyonChimera/osf.io,adlius/osf.io,binoculars/osf.io,cslzchen/osf.io,crcresearch/osf.io,leb2dg/osf.io,TomBaxter/osf.io,pattisdr/osf.io,mattclark/osf.io,binoculars/osf.io,erinspace/osf.io,icereval/osf.io,sloria/osf.io,cslzchen/osf.io,caseyrollins/osf.io,mattclark/osf.io,felliott/osf.io,HalcyonChimera/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,saradbowman/osf.io,erinspace/osf.io,leb2dg/osf.io,laurenrevere/osf.io,leb2dg/osf.io,HalcyonChimera/osf.io,adlius/osf.io,binoculars/osf.io,caseyrollins/osf.io,Johnetordoff/osf.io | scripts/add_missing_identifiers_to_preprints.py | scripts/add_missing_identifiers_to_preprints.py | import sys
import time
import logging
from scripts import utils as script_utils
from django.db import transaction
from website.app import setup_django
from website.identifiers.utils import request_identifiers_from_ezid, parse_identifiers
setup_django()
logger = logging.getLogger(__name__)
def add_identifiers_to_preprints(dry=True):
from osf.models import PreprintService
preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)
logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))
for preprint in preprints_without_identifiers:
logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))
if not dry:
ezid_response = request_identifiers_from_ezid(preprint)
id_dict = parse_identifiers(ezid_response)
preprint.set_identifier_values(doi=id_dict['doi'], ark=id_dict['ark'])
preprint.save()
doi = preprint.get_identifier('doi')
assert preprint._id.upper() in doi.value
logger.info('Created DOI {} for Preprint with guid {} from service {}'.format(doi.value, preprint._id, preprint.provider.name))
time.sleep(1)
else:
logger.info('Dry run - would have created identifier for preprint {} from service {}'.format(preprint._id, preprint.provider.name))
logger.info('Finished Adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))
def main(dry=True):
# Start a transaction that will be rolled back if any exceptions are un
add_identifiers_to_preprints(dry)
if dry:
# When running in dry mode force the transaction to rollback
raise Exception('Dry Run complete -- not actually saved')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
# If we're not running in dry mode log everything to a file
script_utils.add_file_logger(logger, __file__)
# Allow setting the log level just by appending the level to the command
if '--debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif '--warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif '--info' in sys.argv:
logger.setLevel(logging.INFO)
elif '--error' in sys.argv:
logger.setLevel(logging.ERROR)
# Finally run the migration
main(dry=dry)
| apache-2.0 | Python |
|
3a9ec86e4b996912b1a47abe07c70116be14b3f8 | Create hello.py | BhaveshSGupta/LearnPy | hello.py | hello.py | print "Hello all"
| mit | Python |
|
d73b2108358c8aa43509b6def6879fc70b138fb5 | add objects | LumPenPacK/NetworkExtractionFromImages,LumPenPacK/NetworkExtractionFromImages,LumPenPacK/NetworkExtractionFromImages,LumPenPacK/NetworkExtractionFromImages,LumPenPacK/NetworkExtractionFromImages | nefi2_main/nefi2/view/test2.py | nefi2_main/nefi2/view/test2.py | from PyQt4 import QtGui, QtCore
import sys
class Main(QtGui.QMainWindow):
def __init__(self, parent = None):
super(Main, self).__init__(parent)
# main button
self.addButton = QtGui.QPushButton('button to add other widgets')
self.addButton.clicked.connect(self.addWidget)
# scroll area widget contents - layout
self.scrollLayout = QtGui.QFormLayout()
# scroll area widget contents
self.scrollWidget = QtGui.QWidget()
self.scrollWidget.setLayout(self.scrollLayout)
# scroll area
self.scrollArea = QtGui.QScrollArea()
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setWidget(self.scrollWidget)
# main layout
self.mainLayout = QtGui.QVBoxLayout()
# add all main to the main vLayout
self.mainLayout.addWidget(self.addButton)
self.mainLayout.addWidget(self.scrollArea)
# central widget
self.centralWidget = QtGui.QWidget()
self.centralWidget.setLayout(self.mainLayout)
# set central widget
self.setCentralWidget(self.centralWidget)
def addWidget(self):
self.scrollLayout.addRow(Test())
class Test(QtGui.QWidget):
def __init__( self, parent=None):
super(Test, self).__init__(parent)
self.pushButton = QtGui.QPushButton('I am in Test widget')
layout = QtGui.QHBoxLayout()
layout.addWidget(self.pushButton)
self.setLayout(layout)
app = QtGui.QApplication(sys.argv)
myWidget = Main()
myWidget.show()
app.exec_() | bsd-2-clause | Python |
|
98bf1c67b95d40888e26068015e4abf1b94d0640 | add ddns state module | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/states/ddns.py | salt/states/ddns.py | '''
Dynamic DNS updates.
====================
Ensure a DNS record is present or absent utilizing RFC 2136
type dynamic updates. Requires dnspython module.
.. code-block:: yaml
webserver:
ddns.present:
- zone: example.com
- ttl: 60
'''
def __virtual__():
return 'ddns' if 'ddns.update' in __salt__ else False
def present(name, zone, ttl, data, rdtype='A'):
'''
Ensures that the named DNS record is present with the given ttl.
name
The host portion of the DNS record, e.g., 'webserver'
zone
The zone to check/update
ttl
TTL for the record
data
Data for the DNS record. E.g., the IP addres for an A record.
rdtype
DNS resource type. Default 'A'.
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if __opts__['test']:
ret['result'] = None
ret['comment'] = '{0} record "{1}" will be updated'.format(rdtype, name)
return ret
status = __salt__['ddns.update'](zone, name, ttl, rdtype, data)
if status is None:
ret['result'] = True
ret['comment'] = '{0} record "{1}" already present with ttl of {2}'.format(
rdtype, name, ttl)
elif status:
ret['result'] = True
ret['comment'] = 'Updated {0} record for "{1}"'.format(rdtype, name)
ret['changes'] = {'name': name,
'zone': zone,
'ttl': ttl,
'rdtype': rdtype,
'data': data
}
else:
ret['result'] = False
ret['comment'] = 'Failed to create or update {0} record for "{1}"'.format(rdtype, name)
return ret
def absent(name, zone, data=None, rdtype=None):
'''
Ensures that the named DNS record is absent.
name
The host portion of the DNS record, e.g., 'webserver'
zone
The zone to check
data
Data for the DNS record. E.g., the IP addres for an A record. If omitted,
all records matching name (and rdtype, if provided) will be purged.
rdtype
DNS resource type. If omitted, all types will be purged.
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if __opts__['test']:
ret['result'] = None
ret['comment'] = '{0} record "{1}" will be deleted'.format(rdtype, name)
return ret
status = __salt__['ddns.delete'](zone, name, rdtype, data)
if status is None:
ret['result'] = True
ret['comment'] = 'No matching DNS record(s) present'
elif status:
ret['result'] = True
ret['comment'] = 'Deleted DNS record(s)'
ret['changes'] = True
else:
ret['result'] = False
ret['comment'] = 'Failed to delete DNS record(s)'
return ret
| apache-2.0 | Python |
|
4ea54e24948356b039ad961c857e685c30bb0737 | Solve task #500 | Zmiecer/leetcode,Zmiecer/leetcode | 500.py | 500.py | class Solution(object):
def findWords(self, words):
"""
:type words: List[str]
:rtype: List[str]
"""
rows = ['qwertyuiop', 'asdfghjkl', 'zxcvbnm']
def inOneRow(word):
mask = [0, 0, 0]
for i in range(len(rows)):
for ch in word:
if ch in rows[i]:
mask[i] = 1
return sum(mask) == 1
ans = []
for word in words:
wordl = word.lower()
if inOneRow(wordl):
ans.append(word)
return ans
| mit | Python |
|
ce3eef2c749f7d9f7bcd1d439497121e89e3727b | Add notification | devicehive/devicehive-python | devicehive/notification.py | devicehive/notification.py | from devicehive.api_object import ApiObject
class Notification(ApiObject):
"""Notification class."""
DEVICE_ID_KEY = 'deviceId'
ID_KEY = 'id'
NOTIFICATION_KEY = 'notification'
PARAMETERS_KEY = 'parameters'
TIMESTAMP_KEY = 'timestamp'
def __init__(self, transport, token, notification):
ApiObject.__init__(self, transport)
self._token = token
self._device_id = notification[self.DEVICE_ID_KEY]
self._id = notification[self.ID_KEY]
self._notification = notification[self.NOTIFICATION_KEY]
self._parameters = notification[self.PARAMETERS_KEY]
self._timestamp = notification[self.TIMESTAMP_KEY]
def device_id(self):
return self._device_id
def id(self):
return self._id
def notification(self):
return self._notification
def parameters(self):
return self._parameters
def timestamp(self):
return self._timestamp
| apache-2.0 | Python |
|
a02a46752d954c29a65bf8bc5b88fa3545315175 | Add unit tests for timestr() | OakNinja/svtplay-dl,qnorsten/svtplay-dl,dalgr/svtplay-dl,iwconfig/svtplay-dl,dalgr/svtplay-dl,leakim/svtplay-dl,selepo/svtplay-dl,spaam/svtplay-dl,olof/svtplay-dl,leakim/svtplay-dl,OakNinja/svtplay-dl,qnorsten/svtplay-dl,selepo/svtplay-dl,iwconfig/svtplay-dl,olof/svtplay-dl,spaam/svtplay-dl,OakNinja/svtplay-dl,leakim/svtplay-dl | lib/svtplay_dl/tests/utils.py | lib/svtplay_dl/tests/utils.py | #!/usr/bin/python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
# The unittest framwork doesn't play nice with pylint:
# pylint: disable-msg=C0103
from __future__ import absolute_import
import unittest
import svtplay_dl.utils
class timestrTest(unittest.TestCase):
def test_1(self):
self.assertEqual(svtplay_dl.utils.timestr(1), "00:00:00,00")
def test_100(self):
self.assertEqual(svtplay_dl.utils.timestr(100), "00:00:00,10")
def test_3600(self):
self.assertEqual(svtplay_dl.utils.timestr(3600), "00:00:03,60")
def test_3600000(self):
self.assertEqual(svtplay_dl.utils.timestr(3600000), "01:00:00,00")
| mit | Python |
|
46c036cad1323d55c61f546b5cd6174739ab1b42 | add helper functions for data persistence | usc-isi-i2/mydig-webservice,usc-isi-i2/mydig-webservice,usc-isi-i2/mydig-webservice,usc-isi-i2/mydig-webservice | ws/data_persistence.py | ws/data_persistence.py | # https://github.com/usc-isi-i2/dig-etl-engine/issues/92
import json
import threading
import os
import codecs
# 1.acquire file write lock
# 2.write to file.new
# 3.acquire replace lock
# 4.rename file to file.old
# 5.rename file.new to file
# 6.release replace lock and write lock
# 7.remove file.old
def dump_data(data, file_path, write_lock, replace_lock):
new_path = file_path + '.new'
old_path = file_path + '.old'
try:
write_lock.acquire()
with codecs.open(new_path, 'w') as f:
f.write(data)
replace_lock.acquire()
# https://docs.python.org/2/library/os.html#os.rename
# On Unix, if dst exists and is a file,
# it will be replaced silently if the user has permission.
os.rename(file_path, old_path)
os.rename(new_path, file_path)
os.remove(old_path)
except Exception as e:
print e
finally:
write_lock.release()
replace_lock.release()
# when starting:
# if only file exists, correct.
# if both file.new and file.old exist, ignore file.old and rename file.new to file (shut down in the middle of replacing, file.new is complete)
# if both file.new and file exist, ignore file.new (shut down in the middle of generating file.new).
# if only file.new exists, error (user deletion)
# if only file.old exists, error (user deletion)
# if three of them exists, error (user operation, system error
def read_data(file_path):
new_path = file_path + '.new'
old_path = file_path + '.old'
has_file = os.path.exists(file_path)
has_new = os.path.exists(new_path)
has_old = os.path.exists(old_path)
if has_file and not has_new and not has_old:
pass
elif not has_file and has_old and has_new:
os.remove(old_path)
os.rename(new_path, file_path)
elif has_file and not has_old and has_new:
os.remove(new_path)
else:
return
with codecs.open(file_path, 'r') as f:
return f.read()
| mit | Python |
|
327b74d5e0328e6415520b907e4c43ed8cb54cf2 | add sample that fetches the graph and renders it as an ascii tree | tariqdaouda/pyArango,tariqdaouda/pyArango | examples/fetchDebianDependencyGraph.py | examples/fetchDebianDependencyGraph.py | #!/usr/bin/python
import sys
from pyArango.connection import *
from pyArango.graph import *
from asciitree import *
conn = Connection(username="root", password="")
db = conn["ddependencyGrahp"]
if not db.hasGraph('debian_dependency_graph'):
raise Exception("didn't find the debian dependency graph, please import first!")
ddGraph = db.graphs['debian_dependency_graph']
graphQuery = '''
FOR package, depends, path IN
1..2 ANY
@startPackage Depends RETURN path
'''
startNode = sys.argv[1]
bindVars = { "startPackage": "packages/" + startNode }
queryResult = db.AQLQuery(graphQuery, bindVars=bindVars, rawResults=True)
# sub iterateable object to build up the tree for draw_tree:
class Node(object):
def __init__(self, name, children):
self.name = name
self.children = children
def getChild(self, searchName):
for child in self.children:
if child.name == searchName:
return child
return None
def __str__(self):
return self.name
def iteratePath(path, depth, currentNode):
pname = path[depth]['name']
subNode = currentNode.getChild(pname)
if subNode == None:
subNode = Node(pname, [])
currentNode.children.append(subNode)
if len(path) > depth + 1:
iteratePath(path, depth + 1, subNode)
# Now we fold the paths substructure into the tree:
rootNode = Node(startNode, [])
for path in queryResult:
p = path['edges']
iteratePath(p, 0, rootNode)
print draw_tree(rootNode)
| apache-2.0 | Python |
|
8e73752e9242796a933d3566eb4a5e4470f13d5e | Create sequences.py | edwardjiang7/sequences_project | sequences.py | sequences.py | import random
import sys
import os
# User input
user_input = input("Type in 5 integers of any sequence separated by commas. Example: 1,2,3,4,5: ")
list_input = user_input.split(",")
# Convert numbered strings into integers in list
list_int = list(map(int, list_input))
# Check Arithmetic Sequence
list_arith = list_int[1] - list_int[0]
if list_int[1] == list_int[0] + list_arith and list_int[2] == list_int[1] + list_arith:
print("Arithmetic Sequence")
# Check Geometric Sequence
if list_int[1] == list_int[0] * 2 and list_int[2] == list_int[1] * 2 and list_int[3] == list_int[2] * 2:
print("This is a Geometric Sequence")
# Check Quadratic Sequence
list_quad1 = list_int[1] - list_int[0]
list_quad2 = list_int[2] - list_int[1]
list_diff = list_quad2 - list_quad1
if list_int[1] == list_int[0] + list_quad1 and list_int[2] == list_int[1] + list_quad2:
print("This is a Quadratic Sequence")
# Check Cubic Sequence
cub1 = list_int[1] - list_int[0] # Subtraction Process
cub2 = list_int[2] - list_int[1] # Subtraction Process
cub3 = list_int[3] - list_int[2] # Subtraction Process
cub_r1 = cub3 - cub2 # Subtraction Process
cub_r2 = cub2 - cub1 # Subtraction Process
# "if" comparison
if cub_r1 == cub_r2:
print("This is a Cubic Sequence")
# Check Fibonacci Sequence
fib_chck1 = list_int[0] + list_int[1]
fib_chck2 = list_int[1] + list_int[2]
if list_int[2] == fib_chck1 and list_int[3] == fib_chck2:
print("Fibonacci Sequence")
| mit | Python |
|
ea40075f8924c2d61da8f92fe9ecf74045bbe6cc | add script to convert Tandem Repeats Finder dat format to bed format required for STRetch | hdashnow/STRetch,Oshlack/STRetch,Oshlack/STRetch,hdashnow/STRetch,Oshlack/STRetch | scripts/TRFdat_to_bed.py | scripts/TRFdat_to_bed.py | #!/usr/bin/env python
from argparse import (ArgumentParser, FileType)
def parse_args():
"Parse the input arguments, use '-h' for help"
parser = ArgumentParser(description='Convert Tandem Repeat Finder (TRF) dat file to bed format with repeat units for microsatellite genotyping')
parser.add_argument(
'--dat', type=str, required=True,
help='Input dat file produced by Tandem Repeat Finder (TRF) using the -d option')
parser.add_argument(
'--bed', type=str, required=True,
help='Output bed file containing genomic locations and repeat units of microsatellites.')
return parser.parse_args()
### Main
def main():
# Parse command line arguments
args = parse_args()
datfile = args.dat
bedfile = args.bed
with open(bedfile, 'w') as bed:
chrom = ""
with open(datfile, 'r') as dat:
for line in dat:
splitline = line.split()
if line.startswith("Sequence:"):
chrom = line.split()[1]
else:
# Catch index errors when line is blank
try:
# Check if in header sequence (all non-header lines start with an int: start pos)
try:
int(splitline[0])
except ValueError:
continue
start = splitline[0]
end = splitline[1]
motif = splitline[13]
copynum = splitline[3]
bed.write('\t'.join([chrom,start,end,motif,copynum]) + '\n')
except IndexError:
pass
if __name__ == '__main__':
main()
| mit | Python |
|
272eceebbc44bd7dc44498233a7dca5ab9c2bdd8 | add iplookup | cxhernandez/fah-map,cxhernandez/fah-map | scripts/iplookup.py | scripts/iplookup.py | import sys
import json
import numpy as np
import pandas as pd
import geoip2.database
if len(sys.argv) != 3:
sys.exit('Please specify a GeoLite DB and an ip table.')
reader = geoip2.database.Reader(sys.argv[1])
def get_name(entry, lang):
if hasattr(entry, 'names') and lang in entry.names:
return entry.names[lang]
return 'unknown'
def get_location(addr):
response = reader.city(addr)
city = get_name(response.city, 'en')
lat = response.location.latitude
lng = response.location.longitude
return (city, lat, lng)
ip = np.loadtxt(sys.argv[2], dtype=str)
locations = map(get_location, ip)
series = pd.Series(locations)
ucounts = series.value_counts()
info = []
for location, count in zip(ucounts.keys(), ucounts.get_values()):
if location:
info.append({'city_name': location[0],
'lat': location[1],
'long': location[-1],
'nb_visits': count})
print json.dumps(info)
| mit | Python |
|
462cdfaf93f23e227b8da44e143a5ff9e8c047be | test futil for files | hobson/pug-nlp,hobson/pug-nlp,hobson/pug-nlp | tests/test_futil.py | tests/test_futil.py | """Run doctests in pug.nlp.futil."""
from __future__ import print_function, absolute_import
import doctest
import pug.nlp.futil
from unittest import TestCase
class DoNothingTest(TestCase):
"""A useless TestCase to encourage Django unittests to find this module and run `load_tests()`."""
def test_example(self):
self.assertTrue(True)
def load_tests(loader, tests, ignore):
"""Run doctests for the pug.nlp.futil module"""
tests.addTests(doctest.DocTestSuite(pug.nlp.futil, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE))
return tests
| mit | Python |
|
a1039c2e38243b64d2027621aa87ee020636f23b | Add initial test for routes. | jonathanchu/fpo,jonathanchu/fpo | tests/test_views.py | tests/test_views.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import website
import unittest
import tempfile
class FPOTestCase(unittest.TestCase):
def test_homepage(self):
self.app = website.app.test_client()
resp = self.app.get('/')
self.assertEqual(resp.status_code, 200)
def test_admin(self):
self.app = website.app.test_client()
resp = self.app.get('/admin/')
self.assertEqual(resp.status_code, 200)
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
6cebbd302556469dd4231d6252ec29c5d7c1165c | add script to convert data from Rime/luna-pinyin | gumblex/pyime | data/convertdict.py | data/convertdict.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
def uniq(seq): # Dave Kirby
# Order preserving
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def pinyin(word):
N = len(word)
pos = 0
result = []
while pos < N:
for i in range(N, pos, -1):
frag = word[pos:i]
if frag in chdict:
result.append(sorted(chdict[frag], key=lambda x: -prob.get((frag, x), 0))[0])
break
pos = i
return ' '.join(result)
chdict = {}
prob = {}
started = False
# Pass 1: Load Pinyin and its probability from dict
with open('luna_pinyin.dict.yaml', 'r', encoding='utf-8') as f:
for ln in f:
ln = ln.strip()
if started and ln and ln[0] != '#':
l = ln.split('\t')
w, c = l[0], l[1]
if w in chdict:
chdict[w].append(c)
else:
chdict[w] = [c]
if len(l) == 3:
if l[2][-1] == '%':
p = float(l[2][:-1]) / 100
else:
p = float(l[2])
prob[(w, c)] = p
elif ln == '...':
started = True
essay = {}
# Pass 2: Load more words and word frequency
with open('essay.txt', 'r', encoding='utf-8') as f:
for ln in f:
word, freq = ln.strip().split('\t')
# add-one smoothing
essay[word] = int(freq) + 1
if len(word) > 1:
c = pinyin(word)
if word not in chdict:
chdict[word] = [c]
# Pass 3: Calculate (word, pinyin) pair frequency
final = []
for word, codes in chdict.items():
for code in codes:
freq = max(int(essay.get(word, 1) * prob.get((word, code), 1)), 1)
final.append((word, code, freq))
final.sort()
with open('pinyin_rime.txt', 'w', encoding='utf-8') as f:
for item in final:
f.write('%s\t%s\t%s\n' % item)
| mit | Python |
|
7553a438672ab68206f30204d572f89bd088e744 | Add files via upload | adigoel/Team-Flux-2017 | pylab.py | pylab.py | import numpy as np
import matplotlib.pyplot as plt
import sympy as sympy
import csv
masterDataList = []
with open('candata.csv', 'r') as f:
reader = csv.reader(f)
for row in reader:
commitList = list(reader)
masterDataList.append(commitList)
print(masterDataList[0][1][0])
"""number of lines of data from csv = n """
dataKind = 2
n = 4
integers = []
for r in range(0,n):
integers.append(r)
for a in integers:
print(masterDataList[0][a][dataKind])
""" note there is an extra shell list so format is for any item... masterDatList[0][time in s][type of item]
dataKind = input("Data measured {options are: gps time, gps lat, gps long, gps alt, gps speed, gps course, gyroX, gyroy, gyroZ, compass, temp, humidity, pressure, long dist, lat dist, alt dist} = ")
withRespectTo = input("Measuring with respect to _____ {options are: time, altitude}")
"""
"""once input mechanism known, can automate data input - for now random data"""
x = np.array([0, 1, 2, 3, 4,
5, 6, 7, 8, 9])
y = np.array([0, 9.8, 16, 17, 20,
23, 27, 34, 47, 70])
"""acceleration for y for windform"""
order = int(input("order = "))
"""order is the order of polynomial we can try to estimate the data, can oscillate
I've made it throw up a warning if the data and the order mismatch loads
and making an order > 100 polynomial will like start screwing up computer"""
coefficients = np.polyfit(x, y, order)
polynomial = np.poly1d(coefficients)
functionToIntegrate = ""
for coefficient in coefficients:
if str(coefficient).find("-") == -1:
coefficient = str(coefficient)
coefficient = "+ "+str(coefficient)
if str(coefficient).find("-") == 1:
coefficient = str(coefficient)
coefficient = "- "+str(coefficient)
functionToIntegrate = functionToIntegrate + str(coefficient)+"z "
print(functionToIntegrate)
""" NEED TO MAKE EXCEPTION FOR LAST COEFFICIENT """
powerArray = []
for p,l in enumerate(functionToIntegrate):
if l == "z":
powerArray.append(p)
print(powerArray)
exponentLengthCount = 0
for power in powerArray:
exponent = "**"+str(order)
exponentLengthCount = exponentLengthCount + len(exponent)
functionToIntegrate = functionToIntegrate[:(power+1+exponentLengthCount-(len(exponent)))] + exponent + functionToIntegrate[((power+1+exponentLengthCount-(len(exponent)))):]
order = order-1
print(functionToIntegrate)
xs = np.arange(0, 9, 1)
ys = polynomial(xs)
func = "z**3 + z"
sympy.init_printing(use_unicode=False, wrap_line=False, no_global=True)
z = sympy.Symbol('z')
indefiniteIntegral = sympy.integrate(func, z)
print(indefiniteIntegral)
plt.plot(x, y, 'x')
plt.plot(xs, ys)
plt.ylabel('y')
plt.xlabel('x')
plt.show()
| mit | Python |
|
41e3d696967b523d0d031a0a17d18c9804f455ee | Change G+ default type | creimers/djangocms-blog,DjangoBeer/djangocms-blog,marty3d/djangocms-blog,mistalaba/djangocms-blog,vnavascues/djangocms-blog,jedie/djangocms-blog,dapeng0802/djangocms-blog,sephii/djangocms-blog,EnglishConnection/djangocms-blog,EnglishConnection/djangocms-blog,jedie/djangocms-blog,DjangoBeer/djangocms-blog,ImaginaryLandscape/djangocms-blog,kriwil/djangocms-blog,motleytech/djangocms-blog,skirsdeda/djangocms-blog,skirsdeda/djangocms-blog,sephii/djangocms-blog,nephila/djangocms-blog,britny/djangocms-blog,DjangoBeer/djangocms-blog,nephila/djangocms-blog,vnavascues/djangocms-blog,motleytech/djangocms-blog,dapeng0802/djangocms-blog,britny/djangocms-blog,skirsdeda/djangocms-blog,mistalaba/djangocms-blog,nephila/djangocms-blog,marty3d/djangocms-blog,ImaginaryLandscape/djangocms-blog,kriwil/djangocms-blog,creimers/djangocms-blog | djangocms_blog/settings.py | djangocms_blog/settings.py | # -*- coding: utf-8 -*-
from django.conf import settings
from meta_mixin import settings as meta_settings
BLOG_IMAGE_THUMBNAIL_SIZE = getattr(settings, 'BLOG_IMAGE_THUMBNAIL_SIZE', {
'size': '120x120',
'crop': True,
'upscale': False
})
BLOG_IMAGE_FULL_SIZE = getattr(settings, 'BLOG_IMAGE_FULL_SIZE', {
'size': '640x120',
'crop': True,
'upscale': False
})
BLOG_TAGCLOUD_MIN = getattr(settings, 'BLOG_TAGCLOUD_MIN', 1)
BLOG_TAGCLOUD_MAX = getattr(settings, 'BLOG_TAGCLOUD_MAX', 10)
BLOG_PAGINATION = getattr(settings, 'BLOG_PAGINATION', 10)
BLOG_LATEST_POSTS = getattr(settings, 'BLOG_LATEST_POSTS', 5)
BLOG_POSTS_LIST_TRUNCWORDS_COUNT = getattr(settings, 'BLOG_POSTS_LIST_TRUNCWORDS_COUNT', 100)
BLOG_TYPE = getattr(settings, 'BLOG_TYPE', 'Article')
BLOG_FB_TYPE = getattr(settings, 'BLOG_FB_TYPE', 'Article')
BLOG_FB_APPID = getattr(settings, 'BLOG_FB_APPID', meta_settings.FB_APPID)
BLOG_FB_PROFILE_ID = getattr(settings, 'BLOG_FB_PROFILE_ID', meta_settings.FB_PROFILE_ID)
BLOG_FB_PUBLISHER = getattr(settings, 'BLOG_FB_PUBLISHER', meta_settings.FB_PUBLISHER)
BLOG_FB_AUTHOR_URL = getattr(settings, 'BLOG_FB_AUTHOR_URL', 'get_author_url')
BLOG_FB_AUTHOR = getattr(settings, 'BLOG_FB_AUTHOR', 'get_author_name')
BLOG_TWITTER_TYPE = getattr(settings, 'BLOG_TWITTER_TYPE', 'Summary')
BLOG_TWITTER_SITE = getattr(settings, 'BLOG_TWITTER_SITE', meta_settings.TWITTER_SITE)
BLOG_TWITTER_AUTHOR = getattr(settings, 'BLOG_TWITTER_AUTHOR', 'get_author_twitter')
BLOG_GPLUS_TYPE = getattr(settings, 'BLOG_GPLUS_SCOPE_CATEGORY', 'Blog')
BLOG_GPLUS_AUTHOR = getattr(settings, 'BLOG_GPLUS_AUTHOR', 'get_author_gplus')
BLOG_ENABLE_COMMENTS = getattr(settings, 'BLOG_ENABLE_COMMENTS', True)
BLOG_USE_PLACEHOLDER = getattr(settings, 'BLOG_USE_PLACEHOLDER', True)
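# Usage sketch (belongs in a Django project's settings module, not here): any
# of these defaults can be overridden, e.g. BLOG_PAGINATION = 20. Note the
# lookup key for BLOG_GPLUS_TYPE above is 'BLOG_GPLUS_SCOPE_CATEGORY'.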
| # -*- coding: utf-8 -*-
from django.conf import settings
from meta_mixin import settings as meta_settings
BLOG_IMAGE_THUMBNAIL_SIZE = getattr(settings, 'BLOG_IMAGE_THUMBNAIL_SIZE', {
'size': '120x120',
'crop': True,
'upscale': False
})
BLOG_IMAGE_FULL_SIZE = getattr(settings, 'BLOG_IMAGE_FULL_SIZE', {
'size': '640x120',
'crop': True,
'upscale': False
})
BLOG_TAGCLOUD_MIN = getattr(settings, 'BLOG_TAGCLOUD_MIN', 1)
BLOG_TAGCLOUD_MAX = getattr(settings, 'BLOG_TAGCLOUD_MAX', 10)
BLOG_PAGINATION = getattr(settings, 'BLOG_PAGINATION', 10)
BLOG_LATEST_POSTS = getattr(settings, 'BLOG_LATEST_POSTS', 5)
BLOG_POSTS_LIST_TRUNCWORDS_COUNT = getattr(settings, 'BLOG_POSTS_LIST_TRUNCWORDS_COUNT', 100)
BLOG_TYPE = getattr(settings, 'BLOG_TYPE', 'Article')
BLOG_FB_TYPE = getattr(settings, 'BLOG_FB_TYPE', 'Article')
BLOG_FB_APPID = getattr(settings, 'BLOG_FB_APPID', meta_settings.FB_APPID)
BLOG_FB_PROFILE_ID = getattr(settings, 'BLOG_FB_PROFILE_ID', meta_settings.FB_PROFILE_ID)
BLOG_FB_PUBLISHER = getattr(settings, 'BLOG_FB_PUBLISHER', meta_settings.FB_PUBLISHER)
BLOG_FB_AUTHOR_URL = getattr(settings, 'BLOG_FB_AUTHOR_URL', 'get_author_url')
BLOG_FB_AUTHOR = getattr(settings, 'BLOG_FB_AUTHOR', 'get_author_name')
BLOG_TWITTER_TYPE = getattr(settings, 'BLOG_TWITTER_TYPE', 'Summary')
BLOG_TWITTER_SITE = getattr(settings, 'BLOG_TWITTER_SITE', meta_settings.TWITTER_SITE)
BLOG_TWITTER_AUTHOR = getattr(settings, 'BLOG_TWITTER_AUTHOR', 'get_author_twitter')
BLOG_GPLUS_TYPE = getattr(settings, 'BLOG_GPLUS_SCOPE_CATEGORY', 'Article')
BLOG_GPLUS_AUTHOR = getattr(settings, 'BLOG_GPLUS_AUTHOR', 'get_author_gplus')
BLOG_ENABLE_COMMENTS = getattr(settings, 'BLOG_ENABLE_COMMENTS', True)
BLOG_USE_PLACEHOLDER = getattr(settings, 'BLOG_USE_PLACEHOLDER', True)
| bsd-3-clause | Python |
ab2b2c6f12e2e5ec53ac6d140919a343a74b7e3c | Update migration | hobarrera/django-afip,hobarrera/django-afip | django_afip/migrations/0017_receipt_issued_date.py | django_afip/migrations/0017_receipt_issued_date.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-10 13:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('afip', '0016_auto_20170529_2012'),
]
operations = [
migrations.AlterField(
model_name='receipt',
name='issued_date',
field=models.DateField(
help_text=(
'Can diverge up to 5 days for good, or 10 days otherwise'
),
verbose_name='issued date',
),
),
]
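# Note: help_text/verbose_name changes live in Django's model state, so
# applying this migration should not issue schema-altering SQL.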
| isc | Python |
|
a52dd9d66ff7d9a29f6d635e5ca1a2a0584c267b | Add rosetta utils | lotrekagency/djlotrek,lotrekagency/djlotrek | rosetta_utils.py | rosetta_utils.py | # From: https://github.com/mbi/django-rosetta/issues/50
# Gunicorn may work with --reload option but it needs
# https://pypi.python.org/pypi/inotify package for performances
from django.dispatch import receiver
from rosetta.signals import post_save
import os
@receiver(post_save)
def restart_server(sender, **kwargs):
    # gunicorn.sh is this project's wrapper script; the hook only stops the
    # server here, presumably relying on a process supervisor to start it again
    os.system("./gunicorn.sh stop")
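# A fuller restart could be sketched as (the "restart" argument to gunicorn.sh
# is an assumption, not confirmed by this repo):
#   os.system("./gunicorn.sh restart")
# or a graceful reload via gunicorn's documented HUP signal handling, assuming
# a pidfile at ./gunicorn.pid:
#   os.system("kill -HUP $(cat gunicorn.pid)")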
| mit | Python |
|
4de410b1ea93665f22874826ceebcea68737dde7 | Add permissions list | TransportLayer/TLBot-Core | tlbot/permission.py | tlbot/permission.py | ###############################################################################
# TransportLayerBot: Permission List - All-in-one modular bot for Discord #
# Copyright (C) 2017 TransportLayer #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published #
# by the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
# Permissions as of 01 Aug 2017
# Values: https://discordapp.com/developers/docs/topics/permissions
CREATE_INSTANT_INVITE = 0x00000001 # Allows creation of instant invites
KICK_MEMBERS = 0x00000002 # Allows kicking members
BAN_MEMBERS = 0x00000004 # Allows banning members
ADMINISTRATOR = 0x00000008 # Allows all permissions and bypasses channel permission overwrites
MANAGE_CHANNELS = 0x00000010 # Allows management and editing of channels
MANAGE_GUILD = 0x00000020 # Allows management and editing of the guild
ADD_REACTIONS = 0x00000040 # Allows for the addition of reactions to messages
VIEW_AUDIT_LOG = 0x00000080 # Allows for viewing of audit logs
READ_MESSAGES = 0x00000400 # Allows reading messages in a channel. The channel will not appear for users without this permission
SEND_MESSAGES = 0x00000800 # Allows for sending messages in a channel
SEND_TTS_MESSAGES = 0x00001000 # Allows for sending of /tts messages
MANAGE_MESSAGES = 0x00002000 # Allows for deletion of other users messages
EMBED_LINKS = 0x00004000 # Links sent by this user will be auto-embedded
ATTACH_FILES = 0x00008000 # Allows for uploading images and files
READ_MESSAGE_HISTORY = 0x00010000 # Allows for reading of message history
MENTION_EVERYONE = 0x00020000 # Allows for using the @everyone tag to notify all users in a channel, and the @here tag to notify all online users in a channel
USE_EXTERNAL_EMOJIS = 0x00040000 # Allows the usage of custom emojis from other servers
CONNECT = 0x00100000 # Allows for joining of a voice channel
SPEAK = 0x00200000 # Allows for speaking in a voice channel
MUTE_MEMBERS = 0x00400000 # Allows for muting members in a voice channel
DEAFEN_MEMBERS = 0x00800000 # Allows for deafening of members in a voice channel
MOVE_MEMBERS = 0x01000000 # Allows for moving of members between voice channels
USE_VAD = 0x02000000 # Allows for using voice-activity-detection in a voice channel
CHANGE_NICKNAME = 0x04000000 # Allows for modification of own nickname
MANAGE_NICKNAMES = 0x08000000 # Allows for modification of other users nicknames
MANAGE_ROLES = 0x10000000 # Allows management and editing of roles
MANAGE_WEBHOOKS = 0x20000000 # Allows management and editing of webhooks
MANAGE_EMOJIS = 0x40000000 # Allows management and editing of emojis
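# Example (sketch, not part of the original module): the constants form a
# bitfield, so permissions are combined with OR and tested with AND:
#   moderator = KICK_MEMBERS | BAN_MEMBERS | MANAGE_MESSAGES
#   can_ban = bool(moderator & BAN_MEMBERS)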
| agpl-3.0 | Python |