commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---
e581eb8af860456b0ff46e99398002b3df0f0677 | add Julia magic for IPython | JuliaLang/pyjulia,PallHaraldsson/pyjulia,JuliaPy/pyjulia,JuliaPy/pyjulia | julia/magic.py | julia/magic.py | """
==========================
Julia magics for IPython
==========================
{JULIAMAGICS_DOC}
Usage
=====
``%%julia``
{JULIA_DOC}
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import sys
from IPython.core.magic import Magics, magics_class, line_cell_magic
from julia import Julia
#-----------------------------------------------------------------------------
# Main classes
#-----------------------------------------------------------------------------
@magics_class
class JuliaMagics(Magics):
"""A set of magics useful for interactive work with Julia.
"""
def __init__(self, shell):
"""
Parameters
----------
shell : IPython shell
"""
super(JuliaMagics, self).__init__(shell)
print("Initializing Julia interpreter. This may take some time...",
end='')
# Flush, otherwise the Julia startup will keep stdout buffered
sys.stdout.flush()
self.julia = Julia(init_julia=True)
print()
@line_cell_magic
def julia(self, line, cell=None):
"""
Execute code in Julia, and pull some of the results back into the
Python namespace.
"""
src = str(line if cell is None else cell)
return self.julia.eval(src)
# Add to the global docstring the class information.
__doc__ = __doc__.format(
JULIAMAGICS_DOC = ' '*8 + JuliaMagics.__doc__,
JULIA_DOC = ' '*8 + JuliaMagics.julia.__doc__,
)
#-----------------------------------------------------------------------------
# IPython registration entry point.
#-----------------------------------------------------------------------------
def load_ipython_extension(ip):
"""Load the extension in IPython."""
ip.register_magics(JuliaMagics)
| mit | Python |
|
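For illustration, a minimal sketch of how the magic above might be registered and invoked inside a live IPython session (a working Julia install reachable by the `julia` package is assumed):

```python
# Hypothetical IPython session; get_ipython() is None outside IPython.
from IPython import get_ipython

ip = get_ipython()
load_ipython_extension(ip)           # registers JuliaMagics (defined above)
ip.run_line_magic('julia', '1 + 1')  # line-magic form; evaluates to 2 in Julia
```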
e830ce7115ea417feb00c62bf68a7d1829815630 | Create UAV_State class | ProgrammingRobotsStudyGroup/robo_magellan,ProgrammingRobotsStudyGroup/robo_magellan,ProgrammingRobotsStudyGroup/robo_magellan,ProgrammingRobotsStudyGroup/robo_magellan | scripts/uav_state.py | scripts/uav_state.py | #!/usr/bin/env python
#
# UAV State Model:
# Encapsulates UAV state and abstracts communication
# States:
# - Setpoint pose
# - local_position
# - MAV mode
# - arm
# import ROS libraries
import rospy
import mavros
from mavros.utils import *
from mavros import setpoint as SP
import mavros_msgs.msg
import mavros_msgs.srv
#
import time
from datetime import datetime
import enum
class AutoNumber(enum.Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
class MODE(AutoNumber):
MANUAL = ()
RTL = ()
class ARM(AutoNumber):
ARMED = ()
DISARMED = ()
#
class _coord:
def __init__(self):
self.x = 0
self.y = 0
self.z = 0
#
class UAV_State:
def __init__(self):
self.current_pose = _coord()
self.setpoint_pose = _coord()
self.mode = "None"
self.arm = "None"
self.guided = "None"
self.timestamp = float(datetime.utcnow().strftime('%S.%f'))
self.connection_delay = 0.0
mavros.set_namespace("/mavros")
# Subscribers
self.local_position_sub = rospy.Subscriber(mavros.get_topic('local_position', 'pose'),
SP.PoseStamped, self.__local_position_cb)
self.setpoint_local_sub = rospy.Subscriber(mavros.get_topic('setpoint_raw', 'target_local'),
mavros_msgs.msg.PositionTarget, self.__setpoint_position_cb)
self.state_sub = rospy.Subscriber(mavros.get_topic('state'),
mavros_msgs.msg.State, self.__state_cb)
def __local_position_cb(self, topic):
self.current_pose.x = topic.pose.position.x
self.current_pose.y = topic.pose.position.y
self.current_pose.z = topic.pose.position.z
def __setpoint_position_cb(self, topic):
self.setpoint_pose.x = topic.position.x
self.setpoint_pose.y = topic.position.y
self.setpoint_pose.z = topic.position.z
def __state_cb(self, topic):
self.__calculate_delay()
self.mode = topic.mode
self.guided = topic.guided
self.arm = topic.armed
def __calculate_delay(self):
tmp = float(datetime.utcnow().strftime('%S.%f'))
if tmp<self.timestamp:
# over a minute
self.connection_delay = 60.0 - self.timestamp + tmp
else:
self.connection_delay = tmp - self.timestamp
self.timestamp = tmp
####
def get_mode(self):
return self.mode
def set_mode(self, new_mode):
rospy.wait_for_service('/mavros/set_mode')
try:
flightModeService = rospy.ServiceProxy('/mavros/set_mode', mavros_msgs.srv.SetMode)
isModeChanged = flightModeService(custom_mode=new_mode)
except rospy.ServiceException, e:
rospy.loginfo("Service set_mode call failed: %s. Mode %s could not be set. Check that GPS is enabled.",e,new_mode)
####
def get_arm(self):
return self.arm
def set_arm(self, new_arm):
rospy.wait_for_service('/mavros/cmd/arming')
try:
armService = rospy.ServiceProxy('/mavros/cmd/arming', mavros_msgs.srv.CommandBool)
armService(new_arm)
except rospy.ServiceException, e:
rospy.loginfo("Service arm call failed: %s. Attempted to set %s",e,new_arm)
####
def get_current_pose(self):
return self.current_pose
def get_setpoint_pose(self):
return self.setpoint_pose
def get_guided(self):
return self.guided
def get_delay(self):
return self.connection_delay
| apache-2.0 | Python |
|
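A hedged usage sketch of the class above (the node name, sleep duration, and mode string are illustrative; a running mavros instance and `scripts/` on the Python path are assumed):

```python
# Hypothetical Python 2 ROS node exercising UAV_State.
import rospy
from uav_state import UAV_State

rospy.init_node('uav_state_demo')
state = UAV_State()
rospy.sleep(2.0)  # give the subscribers time to receive a few messages
print state.get_mode(), state.get_arm(), state.get_delay()
state.set_mode('AUTO.RTL')  # mode strings are autopilot-specific
```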
12691d47c4dbbaac42d2c9a8fe04e70cb5a94e98 | add Yaspin.write usage example | pavdmyt/yaspin | examples/write_method.py | examples/write_method.py | # -*- coding: utf-8 -*-
"""
examples.write_method
~~~~~~~~~~~~~~~~~~~~~
Basic usage of ``write`` method.
"""
import time
from yaspin import yaspin
def main():
with yaspin(text='Downloading images') as sp:
# task 1
time.sleep(1)
sp.write('> image 1 download complete')
# task 2
time.sleep(2)
sp.write('> image 2 download complete')
# finalize
sp.ok()
if __name__ == '__main__':
main()
| mit | Python |
|
324bc6f72deef0349f0da48366ab11b749a231b5 | Make AzureKeyVaultBackend backwards-compatible (#12626) | sekikn/incubator-airflow,mistercrunch/airflow,Acehaidrey/incubator-airflow,sekikn/incubator-airflow,bolkedebruin/airflow,Acehaidrey/incubator-airflow,airbnb/airflow,Acehaidrey/incubator-airflow,apache/airflow,danielvdende/incubator-airflow,mistercrunch/airflow,DinoCow/airflow,bolkedebruin/airflow,lyft/incubator-airflow,danielvdende/incubator-airflow,mistercrunch/airflow,apache/incubator-airflow,airbnb/airflow,apache/incubator-airflow,apache/incubator-airflow,Acehaidrey/incubator-airflow,lyft/incubator-airflow,apache/airflow,dhuang/incubator-airflow,apache/airflow,lyft/incubator-airflow,nathanielvarona/airflow,danielvdende/incubator-airflow,dhuang/incubator-airflow,Acehaidrey/incubator-airflow,nathanielvarona/airflow,apache/incubator-airflow,dhuang/incubator-airflow,cfei18/incubator-airflow,bolkedebruin/airflow,dhuang/incubator-airflow,cfei18/incubator-airflow,DinoCow/airflow,DinoCow/airflow,nathanielvarona/airflow,DinoCow/airflow,mrkm4ntr/incubator-airflow,mistercrunch/airflow,apache/airflow,nathanielvarona/airflow,cfei18/incubator-airflow,danielvdende/incubator-airflow,cfei18/incubator-airflow,airbnb/airflow,sekikn/incubator-airflow,bolkedebruin/airflow,danielvdende/incubator-airflow,apache/airflow,mrkm4ntr/incubator-airflow,lyft/incubator-airflow,sekikn/incubator-airflow,cfei18/incubator-airflow,danielvdende/incubator-airflow,nathanielvarona/airflow,Acehaidrey/incubator-airflow,mrkm4ntr/incubator-airflow,airbnb/airflow,apache/airflow,nathanielvarona/airflow,cfei18/incubator-airflow,bolkedebruin/airflow,mrkm4ntr/incubator-airflow | airflow/contrib/secrets/azure_key_vault.py | airflow/contrib/secrets/azure_key_vault.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.microsoft.azure.secrets.azure_key_vault`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.microsoft.azure.secrets.azure_key_vault import AzureKeyVaultBackend # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.microsoft.azure.secrets.azure_key_vault`.",
DeprecationWarning,
stacklevel=2,
)
| apache-2.0 | Python |
|
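As a sketch of what this shim provides, importing from the old contrib path should still succeed while emitting a `DeprecationWarning` (assuming a fresh interpreter, since the module-level warning only fires on first import):

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from airflow.contrib.secrets.azure_key_vault import AzureKeyVaultBackend  # noqa: F401
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```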
165d6795c2e3b173282736127c092ede57ae8f55 | Create create_recurring_for_failed.py | ShashaQin/erpnext,ShashaQin/erpnext,ShashaQin/erpnext,ShashaQin/erpnext | erpnext/patches/v6_27/create_recurring_for_failed.py | erpnext/patches/v6_27/create_recurring_for_failed.py | import frappe
from erpnext.controllers.recurring_document import manage_recurring_documents
def execute():
frappe.db.sql("""update `tabSales Invoice`
set is_recurring=1 where (docstatus=1 or docstatus=0) and next_date='2016-06-26' and is_recurring=0""")
manage_recurring_documents("Sales Invoice", "2016-06-26")
| agpl-3.0 | Python |
|
93d1d4cc446cd13affaf1b467e39845c5dc437a5 | Add missing migration | kooditiimi/linkedevents,kooditiimi/linkedevents,tuomas777/linkedevents,kooditiimi/linkedevents,tuomas777/linkedevents,kooditiimi/linkedevents,aapris/linkedevents,aapris/linkedevents,tuomas777/linkedevents,City-of-Helsinki/linkedevents,City-of-Helsinki/linkedevents,aapris/linkedevents,City-of-Helsinki/linkedevents | events/migrations/0002_auto_20150119_2138.py | events/migrations/0002_auto_20150119_2138.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='offer',
name='price',
field=models.CharField(max_length=512),
preserve_default=True,
),
migrations.AlterField(
model_name='offer',
name='price_en',
field=models.CharField(null=True, max_length=512),
preserve_default=True,
),
migrations.AlterField(
model_name='offer',
name='price_fi',
field=models.CharField(null=True, max_length=512),
preserve_default=True,
),
migrations.AlterField(
model_name='offer',
name='price_sv',
field=models.CharField(null=True, max_length=512),
preserve_default=True,
),
]
| mit | Python |
|
a8b46224dfda38173ea130d820411aad6a47acfc | Add Commander.py | molly/GorillaBot,quanticle/GorillaBot,molly/GorillaBot,quanticle/GorillaBot | src/Commander.py | src/Commander.py | # Copyright (c) 2013 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
from Bot import Bot
from time import strftime
import logging
def main():
print ("Starting GorillaBot.\n")
desc = "This is the command-line utility for setting up and running GorillaBot, "
"a simple IRC bot."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("-default", action="store_true")
logger = logging.getLogger("GB")
logger.info("LOG!")
GorillaBot = Bot()
parser.parse_args()
if __name__ == "__main__":
main()
| mit | Python |
|
52c9a8ab10934c7acf8bcc404dccd2524199acb7 | support for qualifying keys with dot('.') in JSON reference | ujjawalmisra/json-ws-test | src/DictUtils.py | src/DictUtils.py | import collections
class DictUtils:
@staticmethod
def __retrieveFromDict(t, key):
if None != t:
found = True
if str == type(key):
keys = key.split('.')
else:
keys = key
for k in keys:
if k in t:
t = t[k]
else:
found = False
break
if found:
return t
return None
@staticmethod
def defaultIfNone(theDict, defaultDict, key):
if None == key:
return None
val = DictUtils.__retrieveFromDict(theDict, key)
if None != val:
return val
return DictUtils.__retrieveFromDict(defaultDict, key)
@staticmethod
def convert(data):
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(DictUtils.convert, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(DictUtils.convert, data))
else:
return data | import collections
class DictUtils:
@staticmethod
def __retrieveFromDict(t, key):
if None != t:
found = True
if str == type(key):
keys = [key]
else:
keys = key
for k in keys:
if k in t:
t = t[k]
else:
found = False
break
if found:
return t
return None
@staticmethod
def defaultIfNone(theDict, defaultDict, key):
if None == key:
return None
val = DictUtils.__retrieveFromDict(theDict, key)
if None != val:
return val
return DictUtils.__retrieveFromDict(defaultDict, key)
@staticmethod
def convert(data):
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(DictUtils.convert, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(DictUtils.convert, data))
else:
return data | mit | Python |
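A small demonstration of the dot-qualified lookup this commit adds (first block, the new_contents column); the dictionary here is made up for illustration:

```python
# 'a.b.c' is split on '.' and walked level by level (Python 2, as the
# class relies on basestring).
conf = {'a': {'b': {'c': 42}}}
print DictUtils.defaultIfNone(conf, None, 'a.b.c')    # -> 42
print DictUtils.defaultIfNone(conf, {'x': 1}, ['x'])  # missing key falls back to the default dict -> 1
```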
fe36fd79c1981c489fd1db548c7468acbf98fff5 | add test for s3 filename unquote | bcgov/gwells,bcgov/gwells,bcgov/gwells,bcgov/gwells | app/backend/gwells/tests/test_documents.py | app/backend/gwells/tests/test_documents.py | from django.test import TestCase
from gwells.documents import MinioClient
class DocumentsTestCase(TestCase):
def test_document_url_with_space(self):
minio_client = MinioClient(disable_private=True)
test_document = {
"bucket_name": "test_bucket",
"object_name": "test key"
}
test_url = minio_client.create_url(test_document, "example.com", test_document.get("bucket_name"))
self.assertEqual(test_url, "https://example.com/test_bucket/test key")
def test_document_url_with_plus(self):
minio_client = MinioClient(disable_private=True)
test_document = {
"bucket_name": "test_bucket",
# if this was a real plus in the filename it should be %2B in the listing.
# spaces get encoded into + (so in this test case, this object_name originally had a space).
"object_name": "test+key"
}
test_url = minio_client.create_url(test_document, "example.com", test_document.get("bucket_name"))
self.assertEqual(test_url, "https://example.com/test_bucket/test key")
| apache-2.0 | Python |
|
4b06b5ec929af3466bfe9f03892b6c68259a2e3e | add gunicorn app | Answeror/aip,Answeror/aip | gunicorn_app.py | gunicorn_app.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
DATA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
from logbook.compat import redirect_logging
redirect_logging()
from aip import make
from aip.log import RedisPub
with RedisPub():
app = make(
instance_path=DATA_PATH,
instance_relative_config=True
)
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
| mit | Python |
|
52236b1ad285683d828b248e462a7b984d31e636 | Add example of connecting OGR to matplotlib through shapely and numpy | jdmcbr/Shapely,mindw/shapely,jdmcbr/Shapely,mouadino/Shapely,abali96/Shapely,abali96/Shapely,mouadino/Shapely,mindw/shapely | examples/world.py | examples/world.py | import ogr
import pylab
from numpy import asarray
from shapely.wkb import loads
source = ogr.Open("/var/gis/data/world/world_borders.shp")
borders = source.GetLayerByName("world_borders")
fig = pylab.figure(1, figsize=(4,2), dpi=300)
while 1:
feature = borders.GetNextFeature()
if not feature:
break
geom = loads(feature.GetGeometryRef().ExportToWkb())
a = asarray(geom)
pylab.plot(a[:,0], a[:,1])
pylab.show()
| bsd-3-clause | Python |
|
bc871956d492a3bc34e28847de136e1b4ad82035 | Create codechallenge.py | jve2kor/MSD_Hackathon | codechallenge.py | codechallenge.py | mit | Python |
||
2044e3b018595e45cc2969d0675d5006ea02ccf5 | update to use new struct data of g_project | develersrl/rooms,develersrl/rooms,develersrl/rooms,develersrl/rooms,develersrl/rooms,develersrl/rooms,develersrl/rooms | trunk/editor/savefilerooms.py | trunk/editor/savefilerooms.py | #!/usr/bin/env python
from xml.dom import minidom
from xml.etree import ElementTree
#to use OrderedDict in python < 2.7
try:
from collections import OrderedDict
except ImportError:
from misc.dict import OrderedDict
from structdata.project import g_project
def prettify(content):
"""
Return a pretty-printed XML string for the Element.
"""
rough_string = ElementTree.tostring(content, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
def saveData(top, tag, dictionary):
tag_dict = {}
dict_todo = []
    #loop over all the elements of the dictionary;
    #if it finds lists, it stores them so the function can call
    #itself on them later to save their data
for key, value in dictionary.items():
if not isinstance(value, list):
tag_dict[key] = value
else:
dict_todo.append(value)
father_tag = ElementTree.SubElement(top, tag, tag_dict)
for el in dict_todo:
for single_el in el:
saveData(father_tag, single_el.tag_name, single_el.dictionary())
def saveFileRooms(path_file):
"""
    function that saves the data structure to a .rooms file;
    takes as input the file path and the structure containing all the data
    to be saved
"""
top = ElementTree.Element("world",
g_project.data['world'].dictionary())
for data_key, data_value in g_project.data.items():
if data_key != "world":
father = ElementTree.SubElement(top, data_key)
for key, value in data_value:
saveData(father, value.tag_name,
value.dictionary())
write_file = open(path_file, 'w')
write_file.write(prettify(top))
| #!/usr/bin/env python
from xml.dom import minidom
from xml.etree import ElementTree
#to use OrderedDict in python < 2.7
try:
from collections import OrderedDict
except ImportError:
from misc.dict import OrderedDict
from structdata.world import g_world
def prettify(content):
"""
Return a pretty-printed XML string for the Element.
"""
rough_string = ElementTree.tostring(content, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
def saveData(top, tag, dictionary):
tag_dict = {}
dict_todo = []
    #loop over all the elements of the dictionary;
    #if it finds lists, it stores them so the function can call
    #itself on them later to save their data
for key, value in dictionary.items():
if not isinstance(value, list):
tag_dict[key] = value
else:
dict_todo.append(value)
father_tag = ElementTree.SubElement(top, tag, tag_dict)
for el in dict_todo:
for single_el in el:
saveData(father_tag, single_el.tag_name, single_el.dictionary())
def saveFileRooms(path_file):
"""
funzione che salva la struttura dati su un file .rooms
prende in ingresso il path del file e la struttura che contiene tutti i dati
da salvare
"""
top = ElementTree.Element("world",
g_world.informations.dictionary())
for key_information in g_world.dictionary():
if key_information != "informations":
father = ElementTree.SubElement(top, key_information)
for key in g_world.__dict__[key_information]:
saveData(father, g_world.__dict__[key_information][key].tag_name,
g_world.__dict__[key_information][key].dictionary())
write_file = open(path_file, 'w')
write_file.write(prettify(top))
| mit | Python |
93d91ba059a7037281f6a5e4d6afd5e071668d81 | Create freebook.py | hkamran80/python-projects | freebook/reddit/freebook.py | freebook/reddit/freebook.py | # Get free ebooks from Reddit
from bs4 import BeautifulSoup
import feedparser
import requests
url = "https://www.reddit.com/r/freebooks.rss"
headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:59.0) Gecko/20100101 Firefox/59.0"}
urls = []
books = []
book_data_all = []
d = feedparser.parse(requests.get(url, headers=headers).text)
print(len(d.entries))
for e in d.entries:
for l in BeautifulSoup(e.description, "html.parser").find_all("a"):
if l.string == "[link]" and "reddit" not in l["href"]:
print(e.title)
print(l["href"])
urls.append(l["href"])
print()
print(urls)
print("***GETTING BOOK DATA***")
for u in urls:
if "amazon" in u:
book_data = BeautifulSoup(requests.get(u, headers=headers).text, "html.parser")
print(u)
title = book_data.find("span", attrs={"id":"ebooksProductTitle"}).string
if "Visit" in book_data.find("span", attrs={"class":"author notFaded"}).find("a", attrs={"class":"a-link-normal"}).string:
author = book_data.find("span", {"class":"a-size-medium"}).text.replace("\n", "").replace("\t", "").replace("(Author)", "").strip()
else:
author = book_data.find("span", attrs={"class":"author notFaded"}).find("a", attrs={"class":"a-link-normal"}).string
try:
price = str(book_data.find("td", attrs={"class":"a-color-price"})).replace("\n", "").replace(" ", "").split(">")[1].split("<")[0]
except TypeError:
price = book_data.find("td", attrs={"class":"a-color-base a-align-bottom a-text-strike"}).string.strip()
try:
book_data_all.append([title, author, price, u])
except Exception as e:
print(e)
continue
print(book_data_all)
print(len(book_data_all))
for b in book_data_all:
if b[2] == "$0.00":
books.append(b)
else:
continue
print(len(books))
print(str(len(book_data_all) - len(books)) + " paid books")
print(books)
| unlicense | Python |
|
6edadeb278be9b776845a12954871386ead270d4 | add tests for log rotation | evernym/plenum,evernym/zeno | plenum/test/test_log_rotation.py | plenum/test/test_log_rotation.py | import pytest
import os
import logging
import shutil
import time
from plenum.common.logging.TimeAndSizeRotatingFileHandler \
import TimeAndSizeRotatingFileHandler
def cleanFolder(path):
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path, exist_ok=True)
return path
def test_time_log_rotation():
logDirPath = cleanFolder("/tmp/plenum/test_time_log_rotation")
logFile = os.path.join(logDirPath, "log")
logger = logging.getLogger('test_time_log_rotation-logger')
logger.setLevel(logging.DEBUG)
handler = TimeAndSizeRotatingFileHandler(logFile, interval=1, when='s')
logger.addHandler(handler)
for i in range(3):
time.sleep(1)
logger.debug("line")
assert len(os.listdir(logDirPath)) == 4 # initial + 3 new
def test_size_log_rotation():
logDirPath = cleanFolder("/tmp/plenum/test_size_log_rotation")
logFile = os.path.join(logDirPath, "log")
logger = logging.getLogger('test_time_log_rotation-logger')
logger.setLevel(logging.DEBUG)
handler = TimeAndSizeRotatingFileHandler(logFile, maxBytes=21)
logger.addHandler(handler)
for i in range(20):
logger.debug("line")
assert len(os.listdir(logDirPath)) == 5
def test_time_and_size_log_rotation():
logDirPath = cleanFolder("/tmp/plenum/test_time_and_size_log_rotation")
logFile = os.path.join(logDirPath, "log")
logger = logging.getLogger('test_time_and_size_log_rotation-logger')
logger.setLevel(logging.DEBUG)
handler = TimeAndSizeRotatingFileHandler(logFile, maxBytes=21, interval=1, when="s")
logger.addHandler(handler)
for i in range(20):
logger.debug("line")
for i in range(3):
time.sleep(1)
logger.debug("line")
assert len(os.listdir(logDirPath)) == 8 | apache-2.0 | Python |
|
fd75ee4a96eddc1e71eb85dd36a2c8f5b13807ca | Create RemoveLinkedListElement.py | lingcheng99/LeetCode | RemoveLinkedListElement.py | RemoveLinkedListElement.py | """Remove Linked List Elements
Remove all elements from a linked list of integers that have value val.
Example
Given: 1 --> 2 --> 6 --> 3 --> 4 --> 5 --> 6, val = 6
Return: 1 --> 2 --> 3 --> 4 --> 5
"""
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def removeElements(self, head, val):
"""
:type head: ListNode
:type val: int
:rtype: ListNode
"""
if not head:
return None
while head and head.val==val:
head=head.next
pos=head
while pos and pos.next:
if pos.next.val==val:
pos.next=pos.next.next
else:
pos=pos.next
return head
| mit | Python |
|
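A quick self-check of `removeElements` on the example from the docstring; the `build` helper is illustrative, not part of the solution:

```python
def build(vals):
    dummy = ListNode(0)
    cur = dummy
    for v in vals:
        cur.next = ListNode(v)
        cur = cur.next
    return dummy.next

head = Solution().removeElements(build([1, 2, 6, 3, 4, 5, 6]), 6)
out = []
while head:
    out.append(head.val)
    head = head.next
print(out)  # [1, 2, 3, 4, 5]
```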
fd33fadc260cda2bd2395f027457f990ab05480b | Add migration for Registration changed | pythonkr/pyconapac-2016,pythonkr/pyconapac-2016,pythonkr/pyconapac-2016 | registration/migrations/0008_auto_20160418_2250.py | registration/migrations/0008_auto_20160418_2250.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-18 13:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0007_auto_20160416_1217'),
]
operations = [
migrations.AlterField(
model_name='registration',
name='payment_status',
field=models.CharField(choices=[('ready', 'Ready'), ('paid', 'Paid'), ('deleted', 'Deleted')], default='ready', max_length=10),
),
migrations.AlterField(
model_name='registration',
name='transaction_code',
field=models.CharField(blank=True, max_length=36),
),
]
| mit | Python |
|
3c1be9f8fb362699737b6dd867398e734057c300 | Add main entry point. | rave-engine/rave | rave/__main__.py | rave/__main__.py | import argparse
import sys
from os import path
def parse_arguments():
parser = argparse.ArgumentParser(description='A modular and extensible visual novel engine.', prog='rave')
parser.add_argument('-b', '--bootstrapper', help='Select bootstrapper to bootstrap the engine with. (default: autoselect)')
parser.add_argument('-B', '--game-bootstrapper', metavar='BOOTSTRAPPER', help='Select bootstrapper to bootstrap the game with. (default: autoselect)')
parser.add_argument('-d', '--debug', action='store_true', help='Enable debug logging.')
parser.add_argument('game', metavar='GAME', nargs='?', help='The game to run. Format dependent on used bootstrapper.')
arguments = parser.parse_args()
return arguments
def main():
args = parse_arguments()
if args.debug:
from . import log
log.Logger.LEVEL |= log.DEBUG
from . import bootstrap
bootstrap.bootstrap_engine(args.bootstrapper)
bootstrap.bootstrap_game(args.game_bootstrapper, args.game)
main()
| bsd-2-clause | Python |
|
592b3dda603dec0765825fc8dc03fb623906cb63 | Add migration | Code4SA/municipal-data,Code4SA/municipal-data,Code4SA/municipal-data,Code4SA/municipal-data | infrastructure/migrations/0018_auto_20210928_1642.py | infrastructure/migrations/0018_auto_20210928_1642.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-09-28 14:42
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('infrastructure', '0017_auto_20210928_1329'),
]
operations = [
migrations.AlterField(
model_name='project',
name='latest_implementation_year',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='infrastructure.FinancialYear'),
),
]
| mit | Python |
|
05659cd132a5dfb54b50ec38ff1d405697de251a | Add crawler for superpoop | klette/comics,klette/comics,datagutten/comics,jodal/comics,datagutten/comics,jodal/comics,datagutten/comics,jodal/comics,jodal/comics,klette/comics,datagutten/comics | comics/crawler/crawlers/superpoop.py | comics/crawler/crawlers/superpoop.py | from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Superpoop'
language = 'en'
url = 'http://www.superpoop.com/'
start_date = '2008-01-01'
history_capable_days = 30
schedule = 'Mo,Tu,We,Th'
time_zone = -5
rights = 'Drew'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.parse_feed('http://www.superpoop.com/rss/rss.php')
for entry in self.feed.entries:
if self.timestamp_to_date(entry.updated_parsed) == self.pub_date:
self.title = entry.title
pieces = entry.summary.split('"')
for i, piece in enumerate(pieces):
if piece.count('src='):
self.url = pieces[i + 1]
return
| agpl-3.0 | Python |
|
6da1f28296a8db0c18c0726dcfdc0067bebd9114 | add a script to test learned DQN | Xaxetrov/OSCAR,Xaxetrov/OSCAR | learning_tools/keras-rl/dqn/dqn_tester.py | learning_tools/keras-rl/dqn/dqn_tester.py | import numpy as np
import gym
import os
import pickle
import argparse
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy, LinearAnnealedPolicy
from rl.memory import SequentialMemory
from oscar.env.envs.general_learning_env import GeneralLearningEnv
CONFIG_FILE = 'config/learning_complex.json'
WEIGHT_FILE = 'ML_homework/results/2018-04-22_16/duel_dqn_learning_complex_weights.h5f'
# Get the environment and extract the number of actions.
env = GeneralLearningEnv(CONFIG_FILE, True, log_file_path=None, publish_stats=False)
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions, activation='linear'))
print(model.summary())
memory = SequentialMemory(limit=50000, window_length=1)
boltzmann_policy = BoltzmannQPolicy(tau=1.0, clip=(0.0, 500.0))
# enable the dueling network
# you can specify the dueling_type to one of {'avg','max','naive'}
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10, policy=boltzmann_policy,
enable_dueling_network=True, dueling_type='avg', target_model_update=1e-2)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
dqn.load_weights(WEIGHT_FILE)
# Finally, evaluate our algorithm for 5 episodes.
dqn.test(env, nb_episodes=1, visualize=False)
env.close()
del env
| apache-2.0 | Python |
|
9f6f6b727458eb331d370443074a58d1efa6d755 | Add migration for blank true. | mrpau/kolibri,benjaoming/kolibri,indirectlylit/kolibri,benjaoming/kolibri,christianmemije/kolibri,lyw07/kolibri,benjaoming/kolibri,learningequality/kolibri,rtibbles/kolibri,indirectlylit/kolibri,indirectlylit/kolibri,jonboiser/kolibri,MingDai/kolibri,DXCanas/kolibri,rtibbles/kolibri,benjaoming/kolibri,rtibbles/kolibri,indirectlylit/kolibri,lyw07/kolibri,jonboiser/kolibri,christianmemije/kolibri,mrpau/kolibri,lyw07/kolibri,MingDai/kolibri,DXCanas/kolibri,learningequality/kolibri,learningequality/kolibri,lyw07/kolibri,jonboiser/kolibri,DXCanas/kolibri,learningequality/kolibri,christianmemije/kolibri,jonboiser/kolibri,mrpau/kolibri,MingDai/kolibri,DXCanas/kolibri,rtibbles/kolibri,christianmemije/kolibri,MingDai/kolibri,mrpau/kolibri | kolibri/logger/migrations/0003_auto_20170531_1140.py | kolibri/logger/migrations/0003_auto_20170531_1140.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-31 18:40
from __future__ import unicode_literals
import kolibri.core.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('logger', '0002_auto_20170518_1031'),
]
operations = [
migrations.AlterField(
model_name='usersessionlog',
name='last_interaction_timestamp',
field=kolibri.core.fields.DateTimeTzField(blank=True, null=True),
),
]
| mit | Python |
|
d68a89b73e6ff47a2ebd169c06070815d9fd859c | Add example tests for REST API | mikebryant/rapid-router,mikebryant/rapid-router,mikebryant/rapid-router,CelineBoudier/rapid-router,CelineBoudier/rapid-router,CelineBoudier/rapid-router,mikebryant/rapid-router,CelineBoudier/rapid-router | game/tests/test_api.py | game/tests/test_api.py | # -*- coding: utf-8 -*-
# Code for Life
#
# Copyright (C) 2015, Ocado Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ADDITIONAL TERMS – Section 7 GNU General Public Licence
#
# This licence does not grant any right, title or interest in any “Ocado” logos,
# trade names or the trademark “Ocado” or any other trademarks or domain names
# owned by Ocado Innovation Limited or the Ocado group of companies or any other
# distinctive brand features of “Ocado” as may be secured from time to time. You
# must not distribute any modification of this program using the trademark
# “Ocado” or claim any affiliation or association with Ocado or its employees.
#
# You are not authorised to use the name Ocado (or any of its trade names) or
# the names of any author or contributor in advertising or for publicity purposes
# pertaining to the distribution of this program, without the prior written
# authorisation of Ocado.
#
# Any propagation, distribution or conveyance of this program must include this
# copyright notice and these terms. You must not misrepresent the origins of this
# program; modified versions of the program must be marked as such and not
# identified as the original program.
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from hamcrest import *
from hamcrest.core.base_matcher import BaseMatcher
from game.models import Decor
class APITests(APITestCase):
def test_list_decors(self):
url = reverse('decor-list')
response = self.client.get(url)
assert_that(response, has_status_code(status.HTTP_200_OK))
assert_that(response.data, has_length(len(Decor.objects.all())))
def test_known_decor_detail(self):
decor_id = 1
url = reverse('decor-detail', kwargs={'pk': decor_id})
response = self.client.get(url)
assert_that(response, has_status_code(status.HTTP_200_OK))
assert_that(response.data['id'], equal_to(decor_id))
def test_unknown_decor_detail(self):
decor_id = 0
url = reverse('decor-detail', kwargs={'pk': decor_id})
response = self.client.get(url)
assert_that(response, has_status_code(status.HTTP_404_NOT_FOUND))
def test_levels_for_known_episode(self):
episode_id = 1
url = reverse('level-for-episode', kwargs={'pk': episode_id})
response = self.client.get(url)
assert_that(response, has_status_code(status.HTTP_200_OK))
assert_that(response.data, has_length(greater_than(0)))
def test_levels_for_unknown_episode(self):
episode_id = 0
url = reverse('level-for-episode', kwargs={'pk': episode_id})
response = self.client.get(url)
assert_that(response, has_status_code(status.HTTP_200_OK))
assert_that(response.data, has_length(0))
def has_status_code(status_code):
return HasStatusCode(status_code)
class HasStatusCode(BaseMatcher):
def __init__(self, status_code):
self.status_code = status_code
def _matches(self, response):
return response.status_code == self.status_code
def describe_to(self, description):
description.append_text('has status code ').append_text(self.status_code)
def describe_mismatch(self, response, mismatch_description):
mismatch_description.append_text('had status code ').append_text(response.status_code)
| agpl-3.0 | Python |
|
b04e3787de29d4bee68854e15a7e783cbe3c3bd0 | Add test for microstructure generator | awhite40/pymks,davidbrough1/pymks,davidbrough1/pymks | pymks/tests/test_microstructure_generator.py | pymks/tests/test_microstructure_generator.py | import pytest
import numpy as np
from pymks.datasets import make_microstructure
@pytest.mark.xfail
def test_size_and_grain_size_failure():
make_microstructure(n_samples=1, size=(7, 7), grain_size=(8, 1))
@pytest.mark.xfail
def test_volume_fraction_failure():
make_microstructure(n_samples=1, volume_fraction=(0.3, 0.6))
@pytest.mark.xfail
def test_volume_fraction_with_n_phases_failure():
make_microstructure(n_samples=1, size=(7, 7), n_phases=3,
volume_fraction=(0.5, 0.5))
@pytest.mark.xfail
def test_percent_variance_exceeds_limit_failure():
make_microstructure(n_samples=1, size=(7, 7), n_phases=3,
volume_fraction=(0.3, 0.3, 0.4), percent_variance=0.5)
def test_volume_fraction():
X = make_microstructure(n_samples=1, n_phases=3,
volume_fraction=(0.3, 0.2, 0.5))
assert np.allclose(np.sum(X == 1) / float(X.size), 0.2, rtol=1e-4)
assert np.allclose(np.sum(X == 2) / float(X.size), 0.5, atol=1e-4)
def test_percent_variance():
X = make_microstructure(n_samples=1, n_phases=3,
volume_fraction=(0.3, 0.2, 0.5),
percent_variance=.2)
print np.sum(X == 1) / float(X.size)
print np.sum(X == 2) / float(X.size)
assert np.allclose(np.sum(X == 1) / float(X.size), 0.09, atol=1e-2)
assert np.allclose(np.sum(X == 2) / float(X.size), 0.57, atol=1e-2)
if __name__ == '__main__':
test_volume_fraction()
test_percent_variance()
| mit | Python |
|
74ede836ad6572c9e6c7865e5d29671a994629af | Create ManifoldWR.py | AlexHung780312/SmilNN | SmilNN/ManifoldWR.py | SmilNN/ManifoldWR.py | # -*- coding: utf-8 -*-
import numpy as np
import keras.backend as K
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense
from keras.layers.noise import GaussianDropout
from keras.models import Sequential
from keras.optimizers import SGD
from sklearn.datasets import load_svmlight_file
from keras.regularizers import WeightRegularizer
class ManifoldWeightRegularizer(WeightRegularizer):
def __init__(self, m=0., **kwargs):
self.m = K.cast_to_floatx(m)
super(ManifoldWeightRegularizer, self).__init__(**kwargs)
def __call__(self, loss):
if not hasattr(self, 'p'):
raise Exception('Need to call `set_param` on '
'WeightRegularizer instance '
'before calling the instance. '
'Check that you are not passing '
'a WeightRegularizer instead of an '
'ActivityRegularizer '
'(i.e. activity_regularizer="l2" instead '
'of activity_regularizer="activity_l2".')
regularized_loss = loss + K.sum(K.abs(self.p)) * self.l1
regularized_loss += K.sum(K.square(self.p)) * self.l2
#
out_dim = self.p.shape.eval()[-1]
diff_mat = np.eye(out_dim) - np.eye(out_dim, k=1)
diff_mat[-1, -1] = 0
d = K.variable(diff_mat)
regularized_loss += K.sum(K.square(K.dot(self.p, d))) * self.m
return K.in_train_phase(regularized_loss, loss)
# 讀檔
def read_data(file):
x, y = load_svmlight_file(file, dtype=np.float32)
return x.todense(), y - 1 # y要從0開始
train_X, train_y = read_data('Data/training_data_libsvm.txt')
test_X, test_y = read_data('Data/testing_data_libsvm.txt')
feat_dim = train_X.shape[-1]
num_class = np.max(train_y) + 1
print 'feat_dim=%d, num_class=%d' % (feat_dim, num_class)
#
model = Sequential()
model.add(Dense(1024, activation='relu', input_dim=feat_dim, init='uniform'))
model.add(GaussianDropout(0.5))
model.add(Dense(num_class, activation='softmax', W_regularizer=ManifoldWeightRegularizer(m=0.1)))
model.compile(optimizer='Adadelta',
loss='sparse_categorical_crossentropy', # 因為label直接是class id
metrics=['accuracy'])
mdlchk = ModelCheckpoint(filepath='weights.best.hdf5', save_best_only=True, monitor='val_acc')
model.fit(train_X, train_y, validation_data=(test_X, test_y), batch_size=100, nb_epoch=200, verbose=2, callbacks=[mdlchk]) # starts training
model.load_weights('weights.best.hdf5')
model.compile(optimizer=SGD(lr=1e-7, momentum=0.9),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_X, train_y, validation_data=(test_X, test_y), batch_size=1, nb_epoch=3, verbose=1, callbacks=[mdlchk]) # starts training
model.load_weights('weights.best.hdf5')
loss, acc = model.evaluate(test_X, test_y, batch_size=5000)
print "Loss=%.4f, ACC=%.4f" % (loss, acc)
| mit | Python |
|
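A numeric sketch of the penalty the custom regularizer adds: with `D` built exactly like `diff_mat` in `__call__` above, the extra loss term is `m * sum((W.dot(D)) ** 2)`, i.e. squared differences of adjacent output columns plus boundary terms (Python 2 prints, matching the file):

```python
import numpy as np

out_dim = 4
D = np.eye(out_dim) - np.eye(out_dim, k=1)
D[-1, -1] = 0
W = np.arange(8.0).reshape(2, out_dim)
print np.sum(W.dot(D) ** 2) * 0.1  # contribution for m = 0.1 -> 6.0
```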
04dcdadf4f8b18405754683af0138ddc8363580e | Create followExpression.py | momotarou-zamurai/kibidango | maya/python/animation/followExpression.py | maya/python/animation/followExpression.py | ctrlShape = cmds.createNode('locator')
ctrlTransform = cmds.listRelatives(ctrlShape,p=True,f=True)
if isinstance(ctrlTransform,list):
ctrlTransform = ctrlTransform[0]
jt = cmds.createNode('joint',n='followJoint')
attrName = 'follow'
if not cmds.attributeQuery(attrName,n=ctrlTransform,ex=True):
cmds.addAttr(ctrlTransform,ln=attrName,at='double',min=0.0,max=1.0,dv=0.1)
cmds.setAttr('%s.%s'%(ctrlTransform,attrName),e=True,k=True)
exp = '{\n\t$tx1 = %s.translateX;\n'%ctrlTransform
exp += '\t$ty1 = %s.translateY;\n'%ctrlTransform
exp += '\t$tz1 = %s.translateZ;\n'%ctrlTransform
exp += '\t$tx2 = %s.translateX;\n'%jt
exp += '\t$ty2 = %s.translateY;\n'%jt
exp += '\t$tz2 = %s.translateZ;\n'%jt
exp += '\t\n\t$f = %s.follow;\n'%ctrlTransform
exp += '\t$dx = $tx1;\n'
exp += '\t$dy = $ty1;\n'
exp += '\t$dz = $tz1;\n'
exp += '\tif ($f > 0.0)\n\t{\n\t\t$dx = ($tx1-$tx2)*$f;\n'
exp += '\t\t$dy = ($ty1-$ty2)*$f;\n'
exp += '\t\t$dz = ($tz1-$tz2)*$f;\n'
exp += '\t}\n\t%s.translateX += $dx;\n'%jt
exp += '\t%s.translateY += $dy;\n'%jt
exp += '\t%s.translateZ += $dz;\n'%jt
exp += '}'
cmds.expression(s=exp)
| mit | Python |
|
58c62061c0c02682f96d6793b0570b455887d392 | Add pytest tools | matthew-brett/delocate,matthew-brett/delocate,matthew-brett/delocate | delocate/tests/pytest_tools.py | delocate/tests/pytest_tools.py | import pytest
def assert_true(condition):
__tracebackhide__ = True
assert condition
def assert_false(condition):
__tracebackhide__ = True
assert not condition
def assert_raises(expected_exception, *args, **kwargs):
__tracebackhide__ = True
return pytest.raises(expected_exception, *args, **kwargs)
def assert_equal(first, second):
__tracebackhide__ = True
assert first == second
def assert_not_equal(first, second):
__tracebackhide__ = True
assert first != second
| bsd-2-clause | Python |
|
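For context, a sketch of how these nose-style helpers read inside a pytest test; `__tracebackhide__` keeps the helper frames out of failure tracebacks:

```python
def test_example():
    assert_equal(1 + 1, 2)
    assert_true(isinstance([], list))
    with assert_raises(ZeroDivisionError):
        1 / 0
```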
dd93995a119323d9b67dce1f8797eb72788a044a | solve 12704 | arash16/prays,arash16/prays,arash16/prays,arash16/prays,arash16/prays,arash16/prays | UVA/vol-127/12704.py | UVA/vol-127/12704.py | from sys import stdin, stdout
I = list(map(int, stdin.read().split()))
for i in range(0, I[0]):
[x, y, r] = I[3*i + 1: 3*i + 4]
cd = (x*x + y*y) ** 0.5
stdout.write('{:.2f} {:.2f}\n'.format(r-cd, r+cd))
| mit | Python |
|
3ad0f9ee142e3a08e82749f47003870f14029bff | Fix urls.py to point to web version of view | nirmeshk/oh-mainline,onceuponatimeforever/oh-mainline,eeshangarg/oh-mainline,ehashman/oh-mainline,vipul-sharma20/oh-mainline,campbe13/openhatch,mzdaniel/oh-mainline,moijes12/oh-mainline,moijes12/oh-mainline,campbe13/openhatch,mzdaniel/oh-mainline,ehashman/oh-mainline,SnappleCap/oh-mainline,SnappleCap/oh-mainline,openhatch/oh-mainline,Changaco/oh-mainline,SnappleCap/oh-mainline,willingc/oh-mainline,openhatch/oh-mainline,sudheesh001/oh-mainline,willingc/oh-mainline,campbe13/openhatch,willingc/oh-mainline,openhatch/oh-mainline,sudheesh001/oh-mainline,jledbetter/openhatch,nirmeshk/oh-mainline,willingc/oh-mainline,willingc/oh-mainline,Changaco/oh-mainline,mzdaniel/oh-mainline,Changaco/oh-mainline,campbe13/openhatch,ehashman/oh-mainline,vipul-sharma20/oh-mainline,nirmeshk/oh-mainline,Changaco/oh-mainline,Changaco/oh-mainline,vipul-sharma20/oh-mainline,vipul-sharma20/oh-mainline,ehashman/oh-mainline,eeshangarg/oh-mainline,sudheesh001/oh-mainline,eeshangarg/oh-mainline,campbe13/openhatch,onceuponatimeforever/oh-mainline,SnappleCap/oh-mainline,jledbetter/openhatch,vipul-sharma20/oh-mainline,ojengwa/oh-mainline,jledbetter/openhatch,jledbetter/openhatch,waseem18/oh-mainline,ojengwa/oh-mainline,heeraj123/oh-mainline,ojengwa/oh-mainline,moijes12/oh-mainline,heeraj123/oh-mainline,openhatch/oh-mainline,nirmeshk/oh-mainline,heeraj123/oh-mainline,waseem18/oh-mainline,onceuponatimeforever/oh-mainline,sudheesh001/oh-mainline,waseem18/oh-mainline,eeshangarg/oh-mainline,waseem18/oh-mainline,ehashman/oh-mainline,onceuponatimeforever/oh-mainline,SnappleCap/oh-mainline,mzdaniel/oh-mainline,heeraj123/oh-mainline,openhatch/oh-mainline,waseem18/oh-mainline,sudheesh001/oh-mainline,mzdaniel/oh-mainline,moijes12/oh-mainline,mzdaniel/oh-mainline,jledbetter/openhatch,ojengwa/oh-mainline,mzdaniel/oh-mainline,moijes12/oh-mainline,heeraj123/oh-mainline,onceuponatimeforever/oh-mainline,ojengwa/oh-mainline,nirmeshk/oh-mainline,eeshangarg/oh-mainline | mysite/urls.py | mysite/urls.py | from django.conf.urls.defaults import *
import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^$', 'mysite.search.views.fetch_bugs'),
(r'^search/$', 'mysite.search.views.fetch_bugs'),
(r'^admin/(.*)', admin.site.root),
(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.STATIC_DOC_ROOT}),
(r'^people/add_contribution$', 'mysite.profile.views.add_contribution_web'),
(r'^people/$', 'mysite.profile.views.display_person_web'),
(r'^people/get_data_for_email$', 'mysite.profile.views.get_data_for_email'),
(r'^people/change_what_like_working_on$',
'mysite.profile.views.change_what_like_working_on_web'),
(r'^people/add_tag_to_project_exp$',
'mysite.profile.views.add_tag_to_project_exp_web'),
(r'^people/project_exp_tag__remove$',
'mysite.profile.views.project_exp_tag__remove__web'),
(r'^people/make_favorite_project_exp$',
'mysite.profile.views.make_favorite_project_exp_web'),
(r'^people/make_favorite_exp_tag$',
'mysite.profile.views.make_favorite_exp_tag_web'),
(r'^people/add_contrib$',
'mysite.profile.views.display_person_old'),
(r'^people/sf_projects_by_person$',
'mysite.profile.views.sf_projects_by_person_web'),
# Experience scraper
(r'^people/exp_scraper$',
'mysite.profile.views.exp_scraper_display_input_form'),
(r'^people/exp_scrape$',
'mysite.profile.views.exp_scraper_scrape_web'),
# Get a list of suggestions for the search input, formatted the way that
# the jQuery autocomplete plugin wants it.
(r'^search/get_suggestions$', 'mysite.search.views.request_jquery_autocompletion_suggestions'),
)
# vim: set ai ts=4 sts=4 et sw=4:
| from django.conf.urls.defaults import *
import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^$', 'mysite.search.views.fetch_bugs'),
(r'^search/$', 'mysite.search.views.fetch_bugs'),
(r'^admin/(.*)', admin.site.root),
(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.STATIC_DOC_ROOT}),
(r'^people/add_contribution$', 'mysite.profile.views.add_contribution_web'),
(r'^people/$', 'mysite.profile.views.display_person_web'),
(r'^people/get_data_for_email$', 'mysite.profile.views.get_data_for_email'),
(r'^people/change_what_like_working_on$',
'mysite.profile.views.change_what_like_working_on_web'),
(r'^people/add_tag_to_project_exp$',
'mysite.profile.views.add_tag_to_project_exp_web'),
(r'^people/project_exp_tag__remove$',
'mysite.profile.views.project_exp_tag__remove__web'),
(r'^people/make_favorite_project_exp$',
'mysite.profile.views.make_favorite_project_exp_web'),
(r'^people/make_favorite_exp_tag$',
'mysite.profile.views.make_favorite_exp_tag_web'),
(r'^people/add_contrib$',
'mysite.profile.views.display_person_old'),
(r'^people/sf_projects_by_person$',
'mysite.profile.views.sf_projects_by_person_web'),
# Experience scraper
(r'^people/exp_scraper$',
'mysite.profile.views.exp_scraper_display_input_form'),
(r'^people/exp_scrape$',
'mysite.profile.views.exp_scraper_scrape'),
# Get a list of suggestions for the search input, formatted the way that
# the jQuery autocomplete plugin wants it.
(r'^search/get_suggestions$', 'mysite.search.views.request_jquery_autocompletion_suggestions'),
)
# vim: set ai ts=4 sts=4 et sw=4:
| agpl-3.0 | Python |
9d058f4b324dabf4f2cdd2ea88f40c9aabe2d622 | Add test for the Py binding of Hash. | rectang/lucy-clownfish,nwellnhof/lucy-clownfish,apache/lucy-clownfish,apache/lucy-clownfish,rectang/lucy-clownfish,rectang/lucy-clownfish,rectang/lucy-clownfish,apache/lucy-clownfish,rectang/lucy-clownfish,rectang/lucy-clownfish,nwellnhof/lucy-clownfish,nwellnhof/lucy-clownfish,apache/lucy-clownfish,nwellnhof/lucy-clownfish,nwellnhof/lucy-clownfish,nwellnhof/lucy-clownfish,nwellnhof/lucy-clownfish,apache/lucy-clownfish,rectang/lucy-clownfish,rectang/lucy-clownfish,apache/lucy-clownfish,apache/lucy-clownfish,rectang/lucy-clownfish,apache/lucy-clownfish,nwellnhof/lucy-clownfish | runtime/python/test/test_hash.py | runtime/python/test/test_hash.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import inspect
import clownfish
class TestHash(unittest.TestCase):
def testStoreFetch(self):
h = clownfish.Hash()
h.store("foo", "bar")
h.store("foo", "bar")
self.assertEqual(h.fetch("foo"), "bar")
h.store("nada", None)
self.assertEqual(h.fetch("nada"), None)
def testDelete(self):
h = clownfish.Hash()
h.store("foo", "bar")
got = h.delete("foo")
self.assertEqual(h.get_size(), 0)
self.assertEqual(got, "bar")
def testClear(self):
h = clownfish.Hash()
h.store("foo", 1)
h.clear()
self.assertEqual(h.get_size(), 0)
def testHasKey(self):
h = clownfish.Hash()
h.store("foo", 1)
h.store("nada", None)
self.assertTrue(h.has_key("foo"))
self.assertFalse(h.has_key("bar"))
self.assertTrue(h.has_key("nada"))
def testKeys(self):
h = clownfish.Hash()
h.store("a", 1)
h.store("b", 1)
keys = sorted(h.keys())
self.assertEqual(keys, ["a", "b"])
def testValues(self):
h = clownfish.Hash()
h.store("foo", "a")
h.store("bar", "b")
got = sorted(h.values())
self.assertEqual(got, ["a", "b"])
def testGetCapacity(self):
h = clownfish.Hash(capacity=1)
self.assertGreater(h.get_capacity(), 0)
def testGetSize(self):
h = clownfish.Hash()
self.assertEqual(h.get_size(), 0)
h.store("meep", "moop")
self.assertEqual(h.get_size(), 1)
def testEquals(self):
h = clownfish.Hash()
other = clownfish.Hash()
h.store("a", "foo")
other.store("a", "foo")
self.assertTrue(h.equals(other))
other.store("b", "bar")
self.assertFalse(h.equals(other))
self.assertTrue(h.equals({"a":"foo"}),
"equals() true against a Python dict")
vec = clownfish.Vector()
self.assertFalse(h.equals(vec),
"equals() false against conflicting Clownfish type")
self.assertFalse(h.equals(1),
"equals() false against conflicting Python type")
def testIterator(self):
h = clownfish.Hash()
h.store("a", "foo")
i = clownfish.HashIterator(h)
self.assertTrue(i.next())
self.assertEqual(i.get_key(), "a")
self.assertEqual(i.get_value(), "foo")
self.assertFalse(i.next())
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python |
|
8706ec4678bc4740b64265ced63fb12d837e0297 | Add Basic Histogram Example | altair-viz/altair,ellisonbg/altair,jakevdp/altair | altair/vegalite/v2/examples/histogram.py | altair/vegalite/v2/examples/histogram.py | """
Histogram
-----------------
This example shows how to make a basic histogram, based on the vega-lite docs
https://vega.github.io/vega-lite/examples/histogram.html
"""
import altair as alt
movies = alt.load_dataset('movies')
chart = alt.Chart(movies).mark_bar().encode(
x=alt.X("IMDB_Rating",
type='quantitative',
bin=alt.BinTransform(
maxbins=10,
)),
y='count(*):Q',
)
| bsd-3-clause | Python |
|
0dc2417894ef1b6bd3f5386f7dfa0bb3d34a594c | Add contest calendar generation code & styles; #55 | Minkov/site,Phoenix1369/site,monouno/site,Phoenix1369/site,monouno/site,DMOJ/site,DMOJ/site,monouno/site,monouno/site,Phoenix1369/site,Minkov/site,DMOJ/site,Minkov/site,Minkov/site,Phoenix1369/site,DMOJ/site,monouno/site | judge/contest_calendar.py | judge/contest_calendar.py | import calendar, datetime
from judge.models import Contest, ContestParticipation, ContestProblem, Profile
class MyCal(calendar.HTMLCalendar):
def __init__(self, x):
super(MyCal, self).__init__(x)
self.today = datetime.datetime.date(datetime.datetime.now())
def formatweekday(self, day):
return '<th class="%s">%s</th>' % (self.cssclasses[day], calendar.day_name[day])
def formatday(self, day, weekday):
        if day == 0:
            return '<td class="noday">&nbsp;</td>'  # day outside month
        elif day == self.today.day:
            return '<td class="%s today"><span class="num">%d</span></td>' % (self.cssclasses[weekday], day)
        else:
            # build the list of contests starting/ending on this day; use a
            # separate accumulator so the loop variable does not clobber it
            items = '<ul>'
            for contest in Contest.objects.filter(start_time__month=self.today.month, start_time__day=day):
                items += '<li class=\'%s\'><a href=\'#\'>%s</a></li>' % (
                    'oneday' if (contest.end_time.day == day and contest.end_time.month == self.today.month) else 'starting',
                    contest.name)
            for contest in Contest.objects.filter(end_time__month=self.today.month, end_time__day=day):
                items += '<li class=\'ending\'><a href=\'#\'>%s</a></li>' % contest.name
            items += '</ul>'
            return '<td class="%s"><span class="num">%d</span>%s</td>' % (self.cssclasses[weekday], day, items)
today = datetime.datetime.date(datetime.datetime.now())
print '''
<head>
<style>
th.sun, th.mon, th.tue, th.wed, th.thu, th.fri, th.sat {
font-size:0.95em;
border-right:1px solid #aaa;
background:#f2f2f2;
}
th.sun {
border-left:1px solid #aaa;
}
td .num {
font-size:1.1em;
font-weight:bold;
display:block;
border-bottom:1px dashed #ccc;
padding-right:0.2em;
margin-bottom:0.4em;
}
td ul li a {
text-decoration: none;
color:#222;
}
td:hover ul li a {
font-weight: normal;
}
td ul li a:hover {
text-decoration: underline;
}
td ul {
text-decoration: none;
list-style-type: none;
text-align: left;
padding:0;
margin:0;
}
td ul li {
background-image: url('http://dev.ivybits.tk/images/bullet_diamond.png'); background-repeat: no-repeat;
background-position: 1px 1px;
padding-left:17px;
margin-bottom:0.2em;
}
td {
height:110px;
width:161px;
color:#000;
vertical-align:top;
text-align:right;
font-size:0.75em;
}
td {
border-right:1px solid #aaa;
border-bottom:1px solid #aaa;
transition-duration:0.2s;
}
td:hover {
background: rgba(0,0,255,0.3);
color:white;
}
td:hover .num {
font-weight: bold;
}
tr td:first-child {
border-left:1px solid #aaa;
}
th {
border-bottom:1px solid #aaa;
}
.noday {
background:#f1f1f1;
}
.today {
background: rgba(255,255,100,0.5);
}
</style></head>'''
cal = MyCal(calendar.SUNDAY)
print cal.formatmonth(today.year, today.month) | agpl-3.0 | Python |
|
4f1ddebb0fc185dfe4cd5167c67be8f6cea78273 | Create listenCmd.py | stephaneAG/Python_tests,stephaneAG/Python_tests,stephaneAG/Python_tests,stephaneAG/Python_tests | listenCmd.py | listenCmd.py | #!/usr/bin/python
#impoer the necessary modules
import re # the regexp module
# listen command test python file
# // THE FCNS //
# the fcn that iterates through the recognized command list to find a match with the received speech command
def listenForCommand( theCommand ):
    for cmd, fcn in commandsParams.items():
        # run the matching process against the current recognized command
        matchingCmd = re.search( cmd, theCommand )
        # check if the match was successful and end the iteration / handle it
        if matchingCmd:
            print "Matching command found:"
            print matchingCmd.group(0)
            print "Associated function:"
            print fcn
            # end the iteration as we found the command
            break
        else:
            # keep looping until 'commandsParams' has been fully iterated over
            continue
# the settings ( commands recognized and associated functions )
cmdsList = ["say hello", "repeat after me", "do the cleaning", "do my work"]
fcnsList = ['sayHello', 'repeatAfterMe', 'doTheCleaning', 'doMyWork']
commandsParams = {"say hello" : "sayHello", "repeat after me" : "repeatAfterMe", "do the cleaning" : "doTheCleaning", "do my work" : "doMyWork"} # this is a dictionary
# // THE PRGM //
print "\n PRGORAM BEGIN \n"
# fake received speech on wich we iterate to find a matching command
receivedCmd = "say hello"
# try to find a match with a fake command
listenForCommand( receivedCmd )
| mit | Python |
|
fd1b2885057512d6b91a2b2ed4df183e66093e61 | Create extended_iter_with_peek.py | AdityaSoni19031997/Machine-Learning,AdityaSoni19031997/Machine-Learning | lld_practice/extended_iter_with_peek.py | lld_practice/extended_iter_with_peek.py |
class ExtendedIter:
"""An extended iterator that wraps around an existing iterators.
It provides extra methods:
- `has_next()`: checks if we can still yield items.
- `peek()`: returns the next element of our iterator, but doesn't pass by it.
If there's nothing more to return, raises `StopIteration` error.
"""
def __init__(self, i):
self._myiter = iter(i)
self._next_element = None
self._has_next = 0
self._prime()
def has_next(self):
"""Returns true if we can call next() without raising a
StopException."""
return self._has_next
    def peek(self):
        """Nonexhaustively returns the next element in our iterator.
        Raises StopIteration if there is nothing more to return, as
        promised in the class docstring."""
        if not self._has_next:
            raise StopIteration
        return self._next_element
def next(self):
"""Returns the next element in our iterator."""
if not self._has_next:
raise StopIteration
result = self._next_element
self._prime()
return result
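
    def __iter__(self):
        # convenience addition (an assumed nicety, not shown elsewhere in
        # this file): supports the iterator protocol so the wrapper can
        # drive for-loops directly
        return self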
def _prime(self):
"""Private function to initialize the states of
self._next_element and self._has_next. We poke our
self._myiter to see if it's still alive and kicking."""
try:
self._next_element = self._myiter.next()
self._has_next = 1
except StopIteration:
            self._next_element = None
self._has_next = 0
| mit | Python |
|
77e980157f51af421eceb7c7b7a84945d8d33a91 | Convert caffemodel of FCN8s to chainer model | wkentaro/fcn | scripts/caffe_to_chainermodel.py | scripts/caffe_to_chainermodel.py | #!/usr/bin/env python
from __future__ import print_function
import argparse
import os.path as osp
import caffe
import chainer.functions as F
import chainer.serializers as S
import fcn
from fcn.models import FCN8s
data_dir = fcn.get_data_dir()
caffemodel = osp.join(data_dir, 'voc-fcn8s/fcn8s-heavy-pascal.caffemodel')
caffe_prototxt = osp.join(data_dir, 'voc-fcn8s/deploy.prototxt')
chainermodel = osp.join(data_dir, 'fcn8s.chainermodel')
net = caffe.Net(caffe_prototxt, caffemodel, caffe.TEST)
# TODO(pfnet): chainer CaffeFunction not support some layers
# from chainer.functions.caffe import CaffeFunction
# func = CaffeFunction(caffemodel)
model = FCN8s()
for name, param in net.params.iteritems():
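    # caffe stores each layer's parameters as [weights, bias]; a
    # single-element list means the layer has no bias blob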
layer = getattr(model, name)
has_bias = True
if len(param) == 1:
has_bias = False
print('{0}:'.format(name))
# weight
print(' - W:', param[0].data.shape, layer.W.data.shape)
assert param[0].data.shape == layer.W.data.shape
layer.W.data = param[0].data
# bias
if has_bias:
print(' - b:', param[1].data.shape, layer.b.data.shape)
assert param[1].data.shape == layer.b.data.shape
layer.b.data = param[1].data
S.save_hdf5(chainermodel, model)
| mit | Python |
|
a8423d5759a951b7f8d765203e3a02a6d3211f35 | add body task generator | stephenplaza/NeuTu,stephenplaza/NeuTu,stephenplaza/NeuTu,stephenplaza/NeuTu,stephenplaza/NeuTu,stephenplaza/NeuTu,stephenplaza/NeuTu,stephenplaza/NeuTu | neurolabi/python/flyem/BodyTaskManager.py | neurolabi/python/flyem/BodyTaskManager.py | '''
Created on Sep 18, 2013
@author: zhaot
'''
import os;
class ExtractBodyTaskManager:
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
self.commandPath = '';
self.minSize = 0;
self.maxSize = -1;
self.overwriteLevel = 1
self.zOffset = 0
self.bodyMapDir = ''
self.output = ''
self.bodysizeFile = ''
self.jobNumber = 5
        self.usingCluster = False
def setCommandPath(self, path):
self.commandPath = path;
def setRange(self, bodySizeRange):
self.minSize = bodySizeRange[0];
self.maxSize = bodySizeRange[1];
def setOverwriteLevel(self, level):
self.overwriteLevel = level;
def setZOffset(self, offset):
self.zOffset = offset;
def setJobNumber(self, n):
self.jobNumber = n;
def setOutput(self, output):
self.output = output;
def setBodyMapDir(self, inputBodyMap):
self.bodyMapDir = inputBodyMap
def setBodySizeFile(self, filePath):
self.bodysizeFile = filePath
def useCluster(self, using):
self.usingCluster = using;
def getFullCommand(self):
command = self.commandPath + ' ' + self.bodyMapDir + ' -o ' + self.output + \
' --sobj ' + ' --minsize ' + str(self.minSize);
if self.maxSize >= self.minSize:
command += ' --maxsize ' + str(self.maxSize)
command += ' --overwrite_level ' + str(self.overwriteLevel);
if self.bodysizeFile:
command += ' --bodysize_file ' + self.bodysizeFile
command += ' --z_offset ' + str(self.zOffset)
return command;
    def generateScript(self, outputDir):
        # write the full command into a shell script under the given
        # output directory (the script file name is an assumed convention)
        script = os.path.join(outputDir, 'extract_body.sh')
        scriptFile = open(script, 'w')
        if scriptFile:
            scriptFile.write(self.getFullCommand())
            scriptFile.close()
if __name__ == '__main__':
from os.path import expanduser
home = expanduser("~")
taskManager = ExtractBodyTaskManager()
taskManager.setBodyMapDir('../body_maps')
taskManager.setOutput('.')
taskManager.setRange([100000, -1])
taskManager.setOverwriteLevel(1)
taskManager.setBodySizeFile('bodysize.txt')
taskManager.setZOffset(1490)
taskManager.setCommandPath(home + '/Work/neutube/neurolabi/cpp/'
'extract_body-build-Qt_4_8_1_gcc-Debug/extract_body');
print taskManager.getFullCommand();
| bsd-3-clause | Python |
|
18935881745b7bc65741837d63ec60e9d62583f1 | Split the big file into smaller pieces | opencog/ros-behavior-scripting,opencog/ros-behavior-scripting | face_track/sound_track.py | face_track/sound_track.py | #
# sound_track.py - Tracking of sound sources
# Copyright (C) 2014,2015,2016 Hanson Robotics
# Copyright (C) 2015,2016 Linas Vepstas
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import rospy
import logging
from std_msgs.msg import Int32
from face_atomic import FaceAtomic
from geometry_msgs.msg import PoseStamped # for sound localization
logger = logging.getLogger('hr.eva_behavior.sound_track')
# Thin python wrapper, to subscribe to ManyEars sound-source ROS
# messages, and then re-wrap these as opencog atoms, via FaceAtomic,
# and forward them on into the OpenCog space-time server.
#
class SoundTrack:
def __init__(self):
rospy.init_node("OpenCog_Facetracker")
logger.info("Starting OpenCog Face Tracker ROS Node")
# The OpenCog API. This is used to send face data to OpenCog.
self.atomo = FaceAtomic()
# Sound localization
        parameter_name = "sound_localization/mapping_matrix"
        if rospy.has_param(parameter_name):
            self.sl_matrix = rospy.get_param(parameter_name)
            rospy.Subscriber("/manyears/source_pose", PoseStamped, \
                self.sound_cb)
        else:
            logger.warning("%s not set; sound localization disabled",
                parameter_name)
# ---------------------------------------------------------------
# Store the location of the strongest sound-source in the
# OpenCog space server. This data arrives at a rate of about
# 30 Hz, currently, from ManyEars.
def sound_cb(self, msg):
# Convert to camera coordinates, using an affine matrix
# (which combines a rotation and translation).
#
# A typical sl_matrix looks like this:
#
# 0.943789 0.129327 0.304204 0.00736024
# -0.131484 0.991228 -0.0134787 0.00895614
# -0.303278 -0.0272767 0.952513 0.0272001
# 0 0 0 1
#
vs = [msg.pose.position.x, \
msg.pose.position.y, \
msg.pose.position.z, \
1]
r = [0, 0, 0, 0]
        # apply the full affine transform: the fourth column is the
        # translation, picked up via the homogeneous 1 in vs[3]
        for i in range(0,3):
            for j in range(0,4):
                r[i] += self.sl_matrix[i][j] * vs[j]
self.atomo.update_sound(r[0], r[1], r[2])
# ----------------------------------------------------------
| agpl-3.0 | Python |
|
b41444b5f7c48c4bc46a49405f7b053dcb8ea66c | rename resource function | ywang007/odo,alexmojaki/odo,Dannnno/odo,cpcloud/odo,cowlicks/odo,mrocklin/into,blaze/odo,quantopian/odo,cpcloud/odo,ywang007/odo,quantopian/odo,alexmojaki/odo,blaze/odo,mrocklin/into,ContinuumIO/odo,Dannnno/odo,cowlicks/odo,ContinuumIO/odo | into/backends/sas.py | into/backends/sas.py | from __future__ import absolute_import, division, print_function
import sas7bdat
from sas7bdat import SAS7BDAT
import datashape
from datashape import discover, dshape
from collections import Iterator
import pandas as pd
import sqlalchemy as sa
from .sql import dshape_to_alchemy, dshape_to_table
from ..append import append
from ..convert import convert
from ..resource import resource
SAS_type_map = {'number': 'float64',
'string': 'string'}
@resource.register('.+\.(sas7bdat)')
def resource_sas(uri, **kwargs):
return SAS7BDAT(uri, **kwargs)
@discover.register(SAS7BDAT)
def discover_sas(f, **kwargs):
cols = [col.name.decode("utf-8") for col in f.header.parent.columns]
types = [SAS_type_map[col.type] for col in f.header.parent.columns]
measure = ",".join(col + ":" + _type for col, _type in zip(cols, types))
ds = "var * {" + measure + "}"
return dshape(ds)
@convert.register(pd.DataFrame, SAS7BDAT, cost=4.0)
def sas_to_DataFrame(s, dshape=None, **kwargs):
return s.to_data_frame()
@convert.register(list, SAS7BDAT, cost=8.0)
def sas_to_list(s, dshape=None, **kwargs):
s.skip_header = True
return list(s.readlines())
@convert.register(Iterator, SAS7BDAT, cost=1.0)
def sas_to_iterator(s):
s.skip_header = True
return s.readlines()
@append.register(sa.Table, SAS7BDAT)
def append_sas_to_table(t, s, **kwargs):
append(t, sas_to_iterator(s), **kwargs)
def sas_to_table(s, metadata=None):
ds = discover_sas(s)
name = s.header.properties.name.decode("utf-8")
return dshape_to_table(name, ds, metadata)
| from __future__ import absolute_import, division, print_function
import sas7bdat
from sas7bdat import SAS7BDAT
import datashape
from datashape import discover, dshape
from collections import Iterator
import pandas as pd
import sqlalchemy as sa
from .sql import dshape_to_alchemy, dshape_to_table
from ..append import append
from ..convert import convert
from ..resource import resource
SAS_type_map = {'number': 'float64',
'string': 'string'}
@resource.register('.+\.(sas7bdat)')
def resource_csv(uri, **kwargs):
return SAS7BDAT(uri, **kwargs)
@discover.register(SAS7BDAT)
def discover_sas(f, **kwargs):
cols = [col.name.decode("utf-8") for col in f.header.parent.columns]
types = [SAS_type_map[col.type] for col in f.header.parent.columns]
measure = ",".join(col + ":" + _type for col, _type in zip(cols, types))
ds = "var * {" + measure + "}"
return dshape(ds)
@convert.register(pd.DataFrame, SAS7BDAT, cost=4.0)
def sas_to_DataFrame(s, dshape=None, **kwargs):
return s.to_data_frame()
@convert.register(list, SAS7BDAT, cost=8.0)
def sas_to_list(s, dshape=None, **kwargs):
s.skip_header = True
return list(s.readlines())
@convert.register(Iterator, SAS7BDAT, cost=1.0)
def sas_to_iterator(s):
s.skip_header = True
return s.readlines()
@append.register(sa.Table, SAS7BDAT)
def append_sas_to_table(t, s, **kwargs):
append(t, sas_to_iterator(s), **kwargs)
def sas_to_table(s, metadata=None):
ds = discover_sas(s)
name = s.header.properties.name.decode("utf-8")
return dshape_to_table(name, ds, metadata)
| bsd-3-clause | Python |
4e1d611a06874d478e91185a0349cfc3747e36ab | Create __init__.py | suzannerohrback/somaticCNVpipeline,suzannerohrback/somaticCNVpipeline | bin/map/__init__.py | bin/map/__init__.py | mit | Python |
||
7f4079c30bf5a693f1ccad38109bbfc83a076f22 | Add palette utilities | axt/bingraphvis | bingraphvis/util.py | bingraphvis/util.py | #generated using palettable
PALETTES = {
'grays' : ['#FFFFFD', '#D6D6D4', '#B1B1B0', '#908F8F', '#727171', '#545453', '#373737', '#1A1919', '#000000'],
'greens' : ['#F7FCF5', '#E5F5E0', '#C7E9C0', '#A1D99B', '#74C476', '#41AB5D', '#238B45', '#006D2C', '#00441B'],
'purples': ['#FCFBFD', '#EFEDF5', '#DADAEB', '#BCBDDC', '#9E9AC8', '#807DBA', '#6A51A3', '#54278F', '#3F007D'],
'blues' : ['#F7FBFF', '#DEEBF7', '#C6DBEF', '#9ECAE1', '#6BAED6', '#4292C6', '#2171B5', '#08519C', '#08306B'],
'reds' : ['#FFF5F0', '#FEE0D2', '#FCBBA1', '#FC9272', '#FB6A4A', '#EF3B2C', '#CB181D', '#A50F15', '#67000D']
}
try:
from palettable.colorbrewer.sequential import *
from palettable.cmocean.sequential import *
PALETTES.update({
'greens' : Greens_9.hex_colors,
'blues' : Blues_9.hex_colors,
'purples': Purples_9.hex_colors,
'reds' : Reds_9.hex_colors,
'grays' : Gray_9_r.hex_colors,
'algae' : Algae_8.hex_colors,
'solar' : Solar_9_r.hex_colors
})
except Exception,e:
print e
pass
def get_palette(name):
return PALETTES[name]
def get_palette_names():
return PALETTES.keys()
| bsd-2-clause | Python |
|
aed1f0e4e33dd956f4499ecffd6bf50bb58e7df4 | Add fermi.py | rartino/ENVISIoN,rartino/ENVISIoN,rartino/ENVISIoN,rartino/ENVISIoN,rartino/ENVISIoN,rartino/ENVISIoN | scripts/fermi.py | scripts/fermi.py | # This example file is part of the ENVISIoN Electronic structure visualization studio
#
# Load this file into the Inviwo Python Editor (which you can access under the menu Python,
# which is available if Inviwo has been compiled with the Python module on)
#
# For Copyright and License information see the file LICENSE distributed alongside ENVISIoN
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os, sys
# Configuration
PATH_TO_ENVISION=os.path.expanduser("~/ENVISIoN/envision")
PATH_TO_VASP_CALC=os.path.expanduser("~/ENVISIoN/data/Cu/1/11")
PATH_TO_HDF5=os.path.expanduser("/tmp/envision_demo.hdf5")
sys.path.insert(0, os.path.expanduser(PATH_TO_ENVISION)) # Or `pip install --editable`.
import envision
import envision.inviwo
envision.parser.vasp.fermi(PATH_TO_HDF5, PATH_TO_VASP_CALC)
xpos=0
envision.inviwo.fermi(PATH_TO_HDF5, xpos)
| bsd-2-clause | Python |
|
b4fd94008fa5b1dcdb6dd61651d8776dfb41f2d6 | Make sure we return a list. | WadeYuChen/django-oscar,vovanbo/django-oscar,jinnykoo/wuyisj.com,ka7eh/django-oscar,ahmetdaglarbas/e-commerce,jmt4/django-oscar,thechampanurag/django-oscar,jmt4/django-oscar,faratro/django-oscar,thechampanurag/django-oscar,kapari/django-oscar,adamend/django-oscar,rocopartners/django-oscar,DrOctogon/unwash_ecom,bnprk/django-oscar,nickpack/django-oscar,QLGu/django-oscar,manevant/django-oscar,MatthewWilkes/django-oscar,mexeniz/django-oscar,taedori81/django-oscar,DrOctogon/unwash_ecom,jinnykoo/christmas,amirrpp/django-oscar,Jannes123/django-oscar,manevant/django-oscar,rocopartners/django-oscar,sonofatailor/django-oscar,jinnykoo/wuyisj.com,bschuon/django-oscar,django-oscar/django-oscar,Bogh/django-oscar,nickpack/django-oscar,josesanch/django-oscar,lijoantony/django-oscar,okfish/django-oscar,okfish/django-oscar,machtfit/django-oscar,elliotthill/django-oscar,pasqualguerrero/django-oscar,jlmadurga/django-oscar,nickpack/django-oscar,eddiep1101/django-oscar,WillisXChen/django-oscar,solarissmoke/django-oscar,jmt4/django-oscar,elliotthill/django-oscar,bschuon/django-oscar,ka7eh/django-oscar,jlmadurga/django-oscar,ahmetdaglarbas/e-commerce,pasqualguerrero/django-oscar,pasqualguerrero/django-oscar,dongguangming/django-oscar,ademuk/django-oscar,lijoantony/django-oscar,amirrpp/django-oscar,adamend/django-oscar,faratro/django-oscar,marcoantoniooliveira/labweb,sonofatailor/django-oscar,jinnykoo/wuyisj,saadatqadri/django-oscar,thechampanurag/django-oscar,adamend/django-oscar,Jannes123/django-oscar,WillisXChen/django-oscar,ademuk/django-oscar,elliotthill/django-oscar,monikasulik/django-oscar,Bogh/django-oscar,ahmetdaglarbas/e-commerce,jlmadurga/django-oscar,michaelkuty/django-oscar,solarissmoke/django-oscar,itbabu/django-oscar,pdonadeo/django-oscar,nickpack/django-oscar,amirrpp/django-oscar,ka7eh/django-oscar,django-oscar/django-oscar,sonofatailor/django-oscar,vovanbo/django-oscar,jinnykoo/christmas,amirrpp/django-oscar,saadatqadri/django-oscar,marcoantoniooliveira/labweb,WadeYuChen/django-oscar,nfletton/django-oscar,michaelkuty/django-oscar,michaelkuty/django-oscar,manevant/django-oscar,WillisXChen/django-oscar,eddiep1101/django-oscar,john-parton/django-oscar,spartonia/django-oscar,pdonadeo/django-oscar,kapari/django-oscar,QLGu/django-oscar,nfletton/django-oscar,MatthewWilkes/django-oscar,vovanbo/django-oscar,nfletton/django-oscar,john-parton/django-oscar,anentropic/django-oscar,spartonia/django-oscar,itbabu/django-oscar,binarydud/django-oscar,binarydud/django-oscar,manevant/django-oscar,anentropic/django-oscar,jinnykoo/wuyisj,QLGu/django-oscar,jinnykoo/wuyisj.com,WadeYuChen/django-oscar,bnprk/django-oscar,jinnykoo/wuyisj,faratro/django-oscar,ahmetdaglarbas/e-commerce,jmt4/django-oscar,kapt/django-oscar,saadatqadri/django-oscar,john-parton/django-oscar,jinnykoo/christmas,kapt/django-oscar,bnprk/django-oscar,kapt/django-oscar,taedori81/django-oscar,Jannes123/django-oscar,john-parton/django-oscar,django-oscar/django-oscar,WillisXChen/django-oscar,pdonadeo/django-oscar,mexeniz/django-oscar,mexeniz/django-oscar,sasha0/django-oscar,eddiep1101/django-oscar,anentropic/django-oscar,ka7eh/django-oscar,vovanbo/django-oscar,pasqualguerrero/django-oscar,MatthewWilkes/django-oscar,bnprk/django-oscar,anentropic/django-oscar,WillisXChen/django-oscar,okfish/django-oscar,Bogh/django-oscar,kapari/django-oscar,bschuon/django-oscar,monikasulik/django-oscar,taedori81/django-oscar,okfish/django-oscar,MatthewWilkes/django-oscar,marcoantoniooliveira/labweb,pdon
adeo/django-oscar,nfletton/django-oscar,Jannes123/django-oscar,faratro/django-oscar,rocopartners/django-oscar,adamend/django-oscar,dongguangming/django-oscar,jinnykoo/wuyisj.com,lijoantony/django-oscar,lijoantony/django-oscar,mexeniz/django-oscar,machtfit/django-oscar,ademuk/django-oscar,DrOctogon/unwash_ecom,dongguangming/django-oscar,solarissmoke/django-oscar,bschuon/django-oscar,machtfit/django-oscar,taedori81/django-oscar,rocopartners/django-oscar,solarissmoke/django-oscar,spartonia/django-oscar,dongguangming/django-oscar,eddiep1101/django-oscar,itbabu/django-oscar,jinnykoo/wuyisj,QLGu/django-oscar,sonofatailor/django-oscar,django-oscar/django-oscar,jlmadurga/django-oscar,WillisXChen/django-oscar,Bogh/django-oscar,saadatqadri/django-oscar,marcoantoniooliveira/labweb,WadeYuChen/django-oscar,josesanch/django-oscar,ademuk/django-oscar,josesanch/django-oscar,michaelkuty/django-oscar,sasha0/django-oscar,itbabu/django-oscar,kapari/django-oscar,thechampanurag/django-oscar,binarydud/django-oscar,monikasulik/django-oscar,spartonia/django-oscar,monikasulik/django-oscar,sasha0/django-oscar,sasha0/django-oscar,binarydud/django-oscar | oscar/apps/dashboard/catalogue/widgets.py | oscar/apps/dashboard/catalogue/widgets.py | import six
from django.forms.util import flatatt
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django import forms
class ProductSelect(forms.Widget):
is_multiple = False
css = 'select2 input-xlarge'
def format_value(self, value):
return six.text_type(value or '')
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
if value is None:
return value
else:
return six.text_type(value)
def render(self, name, value, attrs=None, choices=()):
attrs = self.build_attrs(attrs, **{
'type': 'hidden',
'class': self.css,
'name': name,
'data-ajax-url': reverse('dashboard:catalogue-product-lookup'),
'data-multiple': 'multiple' if self.is_multiple else '',
'value': self.format_value(value),
'data-required': 'required' if self.is_required else '',
})
return mark_safe(u'<input %s>' % flatatt(attrs))
class ProductSelectMultiple(ProductSelect):
is_multiple = True
css = 'select2 input-xxlarge'
def format_value(self, value):
if value:
return ','.join(map(six.text_type, filter(bool, value)))
else:
return ''
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
if value is None:
return []
else:
return list(filter(bool, value.split(',')))
| import six
from django.forms.util import flatatt
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django import forms
class ProductSelect(forms.Widget):
is_multiple = False
css = 'select2 input-xlarge'
def format_value(self, value):
return six.text_type(value or '')
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
if value is None:
return value
else:
return six.text_type(value)
def render(self, name, value, attrs=None, choices=()):
attrs = self.build_attrs(attrs, **{
'type': 'hidden',
'class': self.css,
'name': name,
'data-ajax-url': reverse('dashboard:catalogue-product-lookup'),
'data-multiple': 'multiple' if self.is_multiple else '',
'value': self.format_value(value),
'data-required': 'required' if self.is_required else '',
})
return mark_safe(u'<input %s>' % flatatt(attrs))
class ProductSelectMultiple(ProductSelect):
is_multiple = True
css = 'select2 input-xxlarge'
def format_value(self, value):
if value:
return ','.join(map(six.text_type, filter(bool, value)))
else:
return ''
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
if value is None:
return []
else:
return filter(bool, value.split(','))
| bsd-3-clause | Python |
7e449b0267f47ee08327d9d76976c5e1b197501b | Add missing migration (#9504) | Johnetordoff/osf.io,brianjgeiger/osf.io,mfraezz/osf.io,adlius/osf.io,brianjgeiger/osf.io,felliott/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,adlius/osf.io,cslzchen/osf.io,aaxelb/osf.io,cslzchen/osf.io,felliott/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,baylee-d/osf.io,mfraezz/osf.io,Johnetordoff/osf.io,cslzchen/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,mfraezz/osf.io,adlius/osf.io,brianjgeiger/osf.io,baylee-d/osf.io,baylee-d/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,felliott/osf.io,Johnetordoff/osf.io,adlius/osf.io,aaxelb/osf.io | osf/migrations/0219_auto_20201020_1836.py | osf/migrations/0219_auto_20201020_1836.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-10-20 18:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('osf', '0218_auto_20200929_1850'),
]
operations = [
migrations.AlterField(
model_name='draftregistration',
name='machine_state',
field=models.CharField(choices=[('initial', 'Initial'), ('pending', 'Pending'), ('accepted', 'Accepted'), ('rejected', 'Rejected'), ('withdrawn', 'Withdrawn'), ('pending_embargo', 'Pending_Embargo'), ('embargo', 'Embargo'), ('pending_embargo_termination', 'Pending_Embargo_Termination'), ('pending_withdraw_request', 'Pending_Withdraw_Request'), ('pending_withdraw', 'Pending_Withdraw')], db_index=True, default='initial', max_length=30),
),
migrations.AlterField(
model_name='registrationaction',
name='from_state',
field=models.CharField(choices=[('initial', 'Initial'), ('pending', 'Pending'), ('accepted', 'Accepted'), ('rejected', 'Rejected'), ('withdrawn', 'Withdrawn'), ('pending_embargo', 'Pending_Embargo'), ('embargo', 'Embargo'), ('pending_embargo_termination', 'Pending_Embargo_Termination'), ('pending_withdraw_request', 'Pending_Withdraw_Request'), ('pending_withdraw', 'Pending_Withdraw')], max_length=31),
),
migrations.AlterField(
model_name='registrationaction',
name='to_state',
field=models.CharField(choices=[('initial', 'Initial'), ('pending', 'Pending'), ('accepted', 'Accepted'), ('rejected', 'Rejected'), ('withdrawn', 'Withdrawn'), ('pending_embargo', 'Pending_Embargo'), ('embargo', 'Embargo'), ('pending_embargo_termination', 'Pending_Embargo_Termination'), ('pending_withdraw_request', 'Pending_Withdraw_Request'), ('pending_withdraw', 'Pending_Withdraw')], max_length=31),
),
migrations.AlterField(
model_name='registrationaction',
name='trigger',
field=models.CharField(choices=[('submit', 'Submit'), ('accept', 'Accept'), ('reject', 'Reject'), ('edit_comment', 'Edit_Comment'), ('embargo', 'Embargo'), ('withdraw', 'Withdraw'), ('request_withdraw', 'Request_Withdraw'), ('withdraw_request_fails', 'Withdraw_Request_Fails'), ('withdraw_request_pass', 'Withdraw_Request_Pass'), ('reject_withdraw', 'Reject_Withdraw'), ('force_withdraw', 'Force_Withdraw'), ('request_embargo', 'Request_Embargo'), ('request_embargo_termination', 'Request_Embargo_Termination'), ('terminate_embargo', 'Terminate_Embargo')], max_length=31),
),
]
| apache-2.0 | Python |
|
c137028a98cd762a4e93950fbde085969500999e | Build tagger | mbits-os/JiraDesktop,mbits-os/JiraDesktop,mbits-os/JiraDesktop | installer/build_tag.py | installer/build_tag.py | #!/usr/bin/python
import os
from subprocess import call, check_output
ver = check_output([ "python", "version.py", "../apps/Tasks/src/version.h",
"PROGRAM_VERSION_MAJOR,PROGRAM_VERSION_MINOR,PROGRAM_VERSION_PATCH,PROGRAM_VERSION_BUILD",
"PROGRAM_VERSION_BUILD"])
VERSION = ver.strip()
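# version.py (assumed to live next to this script) prints the dotted
# version string assembled from the version.h macros listed above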
call(["git","add","../apps/Tasks/src/version.h"])
call(["git","commit","-m","Build for version %s" % VERSION])
call(["git","tag","tag-build-v%s" % VERSION]) | mit | Python |
|
866b1c634c4fc6dc27ad953ccde6b6dcd11dcc91 | Add mood light script | etic/MayaMoodLight | moodlight.py | moodlight.py | from maya.utils import executeDeferred
import pymel.core as pm
import threading
import time
_active_mood_light = None
_running = False
class MoodLightThread(threading.Thread):
def __init__(self, speed):
self.speed = speed
super(MoodLightThread, self).__init__()
def run(self):
while _running:
time.sleep(0.05)
color = pm.dt.Color()
hue = time.time() * self.speed % 1 * 360
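            # `speed` is full color cycles per second; take the fractional
            # part of the current cycle and map it onto 0-360 degrees of hue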
color.set('HSV', hue, 1, 0.3)
executeDeferred(
pm.mel.displayRGBColor,
'backgroundBottom',
color.r,
color.g,
color.b
)
color.set('HSV', hue, 0.3, 1)
executeDeferred(
pm.mel.displayRGBColor,
'backgroundTop',
color.r,
color.g,
color.b
)
def is_running():
global _active_mood_light, _running
return _active_mood_light is not None and _running
def start(speed=0.05):
global _active_mood_light, _running
stop()
_running = True
_active_mood_light = MoodLightThread(speed)
_active_mood_light.start()
def stop():
global _active_mood_light, _running
if is_running():
_running = False
_active_mood_light.join()
_active_mood_light = None | mit | Python |
|
b0d699066799d0309e7af3f8892f56a6feaac778 | Write tests for new functionality; several destinations | JakobGM/robotarm-optimization | new_tests.py | new_tests.py | from numpy import testing
import unittest
import numpy as np
from numpy import pi
from robot_arm import RobotArm
class TestRobotArm(unittest.TestCase):
def setUp(self):
self.lengths = (3, 2, 2,)
self.destinations = (
(5, 0,),
(4, 2,),
(6, 0.5),
(4, -2),
(5, -1),
)
self.theta = (pi, pi/2, 0,)
def test_init_all_arguments(self):
RobotArm(self.lengths, self.destinations, self.theta)
def test_init_without_theta(self):
RobotArm(self.lengths, self.destinations)
def test_wrong_lengths_type(self):
self.assertRaises(
AssertionError,
RobotArm,
np.array(self.lengths),
self.destinations,
self.theta)
def test_wrong_destinations_type(self):
self.assertRaises(
AssertionError,
RobotArm,
self.lengths,
np.array(self.destinations),
self.theta)
def test_wrong_theta_type(self):
self.assertRaises(
AssertionError,
RobotArm,
self.lengths,
self.destinations,
np.array(self.theta))
| mit | Python |
|
50f698c2fdd90bc4b3e60a583c196381fc23e099 | Implement a rudimentary API for LLTK | lltk/lltk-restful | lltk-restful/base.py | lltk-restful/base.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import lltk
import lltk.generic
import lltk.caching
import lltk.exceptions
from flask import Flask
from flask import abort, jsonify, request
__author__ = 'Markus Beuckelmann'
__author_email__ = '[email protected]'
__version__ = '0.1.0'
DEBUG = True
CACHING = True
NAME = 'lltk-restful'
HOST = '127.0.0.1'
PORT = 5000
app = Flask(NAME)
if DEBUG:
app.debug = True
lltk.config['debug'] = True
if not CACHING:
lltk.caching.disable()
@app.route('/lltk/<string:language>/<string:method>/<string:word>', methods = ['GET'])
@app.route('/lltk/<string:language>/<string:method>/<path:extraargs>/<string:word>', methods = ['GET'])
def lltkapi(language, method, word, extraargs = tuple()):
''' Returns LLTK's results as a JSON document. '''
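    # Path segments between the method and the word are passed through as
    # positional arguments; query-string items become keyword arguments.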
data = dict()
data['language'] = language
data['method'] = method
data['word'] = word
data['result'] = None
if hasattr(lltk.generic, method) and callable(getattr(lltk.generic, method)):
function = getattr(lltk.generic, method)
if not isinstance(extraargs, tuple):
extraargs = tuple(extraargs.split('/'))
kwargs = request.args.to_dict()
data['result'] = function(language, word, *extraargs, **kwargs)
    else:
        abort(404)
return jsonify(data)
if __name__ == '__main__':
app.run(
host = HOST,
port = PORT
)
| agpl-3.0 | Python |
|
aef4998354ee5872557392be4bc635e015e5d76d | add serial decoder | zpiman/golemScripts | serialDecoder.py | serialDecoder.py | #!/usr/bin/python2.7
import signal
import sys
import time
import serial
import io
import getopt
interval = '0.1'
device = '/dev/cu.usbserial'
try:
port=serial.Serial(port=device,
baudrate=2400,
bytesize=serial.EIGHTBITS,
stopbits=serial.STOPBITS_ONE,
parity=serial.PARITY_NONE,
timeout=None)
if not port.isOpen():
port.open()
except IOError,err:
print '\nError:' + str(err) + '\n'
sys.exit(1)
## Close open port gracefully
def closePort(signal, frame):
sys.stderr.write('\n\nYou pressed Ctrl+C!\n\n')
port.flushInput()
port.close()
sys.exit(0)
signal.signal(signal.SIGINT, closePort)
# Every packet is 14 bytes long.
def getPacket():
i = 0
substr = ''
while i<14:
byte = port.read(1)
# converting every byte to binary format keeping the low nibble.
substr += '{0:08b}'.format(ord(byte))[4:]
i += 1
return substr
def stream_decode(substr):
ac = int(substr[0:1])
dc = int(substr[1:2])
auto = int(substr[2:3])
pclink = substr[3:4]
minus = int(substr[4:5])
digit1 = substr[5:12]
dot1 = int(substr[12:13])
digit2 = substr[13:20]
dot2 = int(substr[20:21])
digit3 = substr[21:28]
dot3 = int(substr[28:29])
digit4 = substr[29:36]
micro = int(substr[36:37])
nano = int(substr[37:38])
kilo = int(substr[38:39])
diotst = int(substr[39:40])
mili = int(substr[40:41])
percent = int(substr[41:42])
mega = int(substr[42:43])
contst = int(substr[43:44])
cap = int(substr[44:45])
ohm = int(substr[45:46])
rel = int(substr[46:47])
hold = int(substr[47:48])
amp = int(substr[48:49])
volts = int(substr[49:50])
hertz = int(substr[50:51])
lowbat = int(substr[51:52])
minm = int(substr[52:53])
fahrenh = substr[53:54]
celcius = int(substr[54:55])
maxm = int(substr[55:56])
digit = {"1111101":"0",
"0000101":"1",
"1011011":"2",
"0011111":"3",
"0100111":"4",
"0111110":"5",
"1111110":"6",
"0010101":"7",
"1111111":"8",
"0111111":"9",
"0000000":"",
"1101000":"L"}
valueStr = ("-" if minus else " ") +\
digit.get(digit1,"") + ("." if dot1 else "") +\
digit.get(digit2,"") + ("." if dot2 else "") +\
digit.get(digit3,"") + ("." if dot3 else "") +\
digit.get(digit4,"")
try:
valueNum = float(valueStr)
except ValueError:
valueNum = None
flags = ",".join(["AC" if ac else "",
"DC" if dc else "",
"Auto" if auto else "",
"Diode test" if diotst else "",
"Conti test" if contst else "",
"Capacity" if cap else "",
"Rel" if rel else "",
"Hold" if hold else "",
"Min" if minm else "",
"Max" if maxm else "",
"LowBat" if lowbat else ""])
    if valueNum is None:
        pass
    elif nano:
        valueNum *= 1e-9
    elif micro:
        valueNum *= 1e-6
    elif mili:
        valueNum *= 1e-3
    elif kilo:
        valueNum *= 1e3
    elif mega:
        valueNum *= 1e6
units = ("%" if percent else "") +\
("Ohm" if ohm else "") +\
("Amp" if amp else "") +\
("Volt" if volts else "") +\
("Hz" if hertz else "") +\
("C" if celcius else "")
return (valueNum, units, flags)
#while 1:
# substr = getPacket()
# data = stream_decode(substr)
# print data
# time.sleep(float(interval))
# port.flushInput()
| mit | Python |
|
04f19b29c79e1ab624d7ce596730ad9b4fd500fd | add lcdb.helpers.py | lcdb/lcdb-workflows,lcdb/lcdb-workflows,lcdb/lcdb-workflows | lcdb/helpers.py | lcdb/helpers.py | import yaml
from jsonschema import validate, ValidationError
def validate_config(config, schema):
schema = yaml.load(open(schema))
cfg = yaml.load(open(config))
try:
validate(cfg, schema)
except ValidationError as e:
msg = '\nPlease fix %s: %s\n' % (config, e.message)
raise ValidationError(msg)
| mit | Python |
|
f4944256092b085b1546eaec114e0987da6697bc | add simple cli client | mrtazz/InstapaperLibrary | instapaper_cli.py | instapaper_cli.py | #!/opt/local/bin/python2.6
from instapaper import Instapaper
from optparse import OptionParser
from getpass import getpass
def usage():
print "Usage: instapaper.py [-h] username password url"
print "Options:"
print "-h Print this help"
def main():
# initialize parser
usage = "usage: %prog -u USER [-t TITLE] url"
parser = OptionParser(usage)
parser.add_option("-u", "--user", action="store", dest="user",metavar="USER",
help="instapaper username")
parser.add_option("-t", "--title", action="store", dest="title",metavar="TITLE",
help="title of the link to add")
(options, args) = parser.parse_args()
if not options.user:
parser.error("No instapaper user given.")
else:
title = ""
if options.title:
title = options.title
pw = getpass()
inst = Instapaper(options.user,pw)
result = inst.addItem(args[0],title)
if (result == -1):
print "Uh-Oh, something went wrong."
if __name__ == "__main__":
main()
| mit | Python |
|
77886d170cba5c2427982992f3ff54f6357e3a07 | add basic inverted index tool | jasonwbw/NLPbasic | inverted_index.py | inverted_index.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# @author: Jason Wu ([email protected])
# This is a simple Inverted Index library to pretreatment for PMI compute or similar way
import math
import re
from operator import itemgetter
class InvertedIndex:
'''
Inverted Index class for docs
The library constructs an inverted index corpus from documents specified by the client or reading from input files.
It saves the document appear and handle some count for PMI or other algorithm.
'''
def __init__(self, stopword_filename = None):
'''
Initialize the index.
If a stopword file is specified, reads the stopword list from it, in
the format of one stopword per line.
Attributes:
stopword_filename: file with one stopword in one line
'''
self.num_docs = 0
self.term_doc = {} # term : [docnum]
self.stopwords = []
if stopword_filename:
stopword_file = open(stopword_filename, "r")
self.stopwords = [line.strip() for line in stopword_file]
    def get_tokens(self, _str):
        '''
        Break a string into tokens, preserving URL tags as an entire token.
        This implementation does not preserve case.
        Stopwords (if any were loaded) are filtered out.
        Clients may wish to override this behavior with their own tokenization.
        Attributes:
            _str: the string to split
        '''
        return [t for t in _str.strip().split() if t not in self.stopwords]
def add_input_document(self, _input):
'''
Add terms in the specified document to the inverted index.
Attributes:
_input: the input content
'''
        words = set(self.get_tokens(_input))
        for word in words:
            if word in self.term_doc:
                self.term_doc[word].append(self.num_docs)
            else:
                self.term_doc[word] = [self.num_docs]
        self.num_docs += 1
def save_corpus_to_file(self, index_filename):
'''
Save the inverted index to the specified file.
Attributes:
index_filename: the specified file
'''
output_file = open(index_filename, "w")
output_file.write(str(self.num_docs) + "\n")
        for key, value in self.term_doc.items():
            output_file.write(key + "\t" + "\t".join(map(str, value)) + "\n")
output_file.close()
def load_corpus_from_file(self, index_filename):
'''
Load corpus from index file, this file must builded from this class by save_corpus_to_file method
Attributes:
index_filename: build by save_corpus_to_file
'''
        self.num_docs = 0
        self.term_doc = {} # term : [docnum]
        with open(index_filename) as fp:
            # the first line holds the document count written by
            # save_corpus_to_file; remaining lines are "term\tdoc\tdoc..."
            self.num_docs = int(fp.readline())
            for line in fp:
                word, docs = line.rstrip("\n").split("\t", 1)
                self.term_doc[word] = map(int, docs.split("\t"))
def get_num_docs(self):
'''
Return the total number of documents added.
'''
return self.num_docs
def concurrence(self, w1, w2):
'''
Return the concurrence of w1 and w2 in one document
Attributes:
w1: one word
w2: another word
'''
count = 0
try:
for item in self.term_doc[w1]:
if item in self.term_doc[w2] : count += 1
except:
pass
return count
def get_word_appear(self, word):
'''
Return the count of the document word appeared
Attributes:
word: the check word
'''
try:
return len(self.term_doc[word])
except:
return 0
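
    def pmi(self, w1, w2):
        '''
        Pointwise mutual information of w1 and w2 over the indexed
        documents -- a convenience sketch built on the counts above
        (an assumed helper, not part of the original interface).
        Returns 0.0 when either word is unseen or they never co-occur.
        Attributes:
            w1: one word
            w2: another word
        '''
        both = self.concurrence(w1, w2)
        c1 = self.get_word_appear(w1)
        c2 = self.get_word_appear(w2)
        if both == 0 or c1 == 0 or c2 == 0:
            return 0.0
        return math.log(float(both) * self.num_docs / (c1 * c2))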
| mit | Python |
|
4d740138dc7101e2816837c070d3051835977d75 | Add lc0621_task_scheduler.py | bowen0701/algorithms_data_structures | lc0621_task_scheduler.py | lc0621_task_scheduler.py | """Leetcode 621. Task Scheduler
Medium
URL: https://leetcode.com/problems/task-scheduler/
Given a char array representing tasks CPU need to do. It contains capital letters
A to Z where different letters represent differenttasks. Tasks could be done
without original order. Each task could be done in one interval. For each
interval, CPU could finish one task or just be idle.
However, there is a non-negative cooling interval n that means between two same
tasks, there must be at least n intervals that CPU are doing different tasks or
just be idle.
You need to return the least number of intervals the CPU will take to finish all
the given tasks.
Example:
Input: tasks = ["A","A","A","B","B","B"], n = 2
Output: 8
Explanation: A -> B -> idle -> A -> B -> idle -> A -> B.
Note:
- The number of tasks is in the range [1, 10000].
- The integer n is in the range [0, 100].
"""
import collections


class Solution(object):
    def leastInterval(self, tasks, n):
        """
        :type tasks: List[str]
        :type n: int
        :rtype: int
        """
        # One standard greedy argument: schedule around the most frequent
        # task. Its max_count occurrences force (max_count - 1) gaps of
        # length (n + 1); tasks tied for the maximum each take one extra
        # slot at the end. The answer can never be less than len(tasks).
        counts = collections.Counter(tasks)
        max_count = max(counts.values())
        num_max = list(counts.values()).count(max_count)
        return max(len(tasks), (max_count - 1) * (n + 1) + num_max)
def main():
    # quick check against the example above: expect 8
    print(Solution().leastInterval(["A", "A", "A", "B", "B", "B"], 2))
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
|
af8f7a09c6cf8a96b716d016fc3a983340760869 | Create problem10.py | ptwales/Project-Euler,ptwales/Project-Euler | python/problem10.py | python/problem10.py | import primes
def problem10(limit):
ps = itertools.takewhile(lambda x: x < limit, primes.Eppstein_Sieve())
# ps = primes.Eratosthenes(limit) # memory error
return sum(ps)
| mit | Python |
|
5b276622f570adac64eda9932c7da47bf4bcd25c | Add PPM sample | ymyzk/ex4cg,ymyzk/ex4cg,ymyzk/ex4cg | ppm_practice.py | ppm_practice.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class PpmImage(object):
"""PPM 画像を表すクラス"""
def __init__(self, name, width, height, image, depth=8):
"""
:param name:
:param width:
:param height:
:param image:
:param depth depth: 各色の階調数 (bit)
:return:
"""
self.name = name
self.width = width
self.height = height
self.image = image
self.depth = depth
def dump(self, fp):
"""ファイルに画像データを書き込む処理"""
fp.write('P3\n')
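        # 'P3' selects the plain (ASCII) PPM variant: text header lines,
        # then one "R G B" triple per line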
fp.write('# ' + self.name + '\n')
fp.write('{0:d} {1:d}\n'.format(self.width, self.height))
fp.write('{0:d}\n'.format(2 ** self.depth - 1))
        # raise if the image height doesn't match
if len(self.image) != self.height:
raise IndexError()
for row in self.image:
            # raise if the row width doesn't match
if len(row) != 3 * self.width:
raise IndexError()
for x in range(0, self.width * 3, 3):
fp.write('{0:3d} {1:3d} {2:3d}\n'.format(*row[x:x+3]))
if __name__ == '__main__':
    # create a simple test image
name = "test.ppm"
depth = 8
width = height = 64
data = [[(i + j) % 2 ** depth for i in range(3 * width)]
for j in range(height)]
image = PpmImage(name, width, height, data, depth=depth)
    # save to a file
with open("test.ppm", 'w') as f:
image.dump(f) | mit | Python |
|
4f404a71cb7ee912bca8184fe94c97d6cfba1186 | Add script to rotate a solid angle in the xz plane | barbagroup/pygbe,barbagroup/pygbe,barbagroup/pygbe | preprocessing_tools/solid_rotation_y.py | preprocessing_tools/solid_rotation_y.py | '''
Rotates the protein by a solid angle on the plane xz
'''
import numpy
import os
from argparse import ArgumentParser
from move_prot_helper import (read_vertex, read_pqr, rotate_y,
modify_pqr)
def read_inputs():
"""
Parse command-line arguments to run move_protein.
User should provide:
-inMesh : str, mesh file you want to rotate.
-inpqr : str, pqr of the object you want to rotate.
-alpha_y: float [degrees], rotation angle, about the dipole moment.
-name : str, output file name.
"""
parser = ArgumentParser(description='Manage solid_rotation_y command line arguments')
parser.add_argument('-im', '--inMesh', dest='im', type=str, default=None,
help="mesh file you want to rotate")
parser.add_argument('-ip', '--inpqr', dest='ip', type=str, default=None,
help="pqr of the object you want to rotate")
parser.add_argument('-angy', '--angle_y', dest='angy', type=float, default=None,
help="rotation angle in the plane xz")
parser.add_argument('-n', '--name', dest='name', type=str, default='',
help="output file name")
return parser.parse_args()
args = read_inputs()
inMesh = args.im
inpqr = args.ip
angle_y = float(args.angy)*numpy.pi/180.
name = args.name
outMesh = inMesh + name
outpqr = inpqr + name
#Read mesh and pqr
#vert = read_vertex(inMesh+'.vert', float)
vert = numpy.loadtxt(inMesh+'.vert', dtype=float)
xq, q, Nq = read_pqr(inpqr+'.pqr', float)
xq_new = rotate_y(xq, angle_y)
vert_new = rotate_y(vert, angle_y)
ctr = numpy.average(vert_new, axis=0)
r_min_last = numpy.min(numpy.linalg.norm(vert_new, axis=1))
idx_rmin_last = numpy.argmin(numpy.linalg.norm(vert_new, axis=1))
print ('Desired configuration:')
print ('\tProtein is centered, {}'.format(ctr))
print ('\tProtein r minimum is {}, located at {}'.format(r_min_last,
vert_new[idx_rmin_last, :]))
#### Save to file
numpy.savetxt(outMesh+'.vert', vert_new)
cmd = 'cp '+inMesh+'.face '+outMesh+'.face'
os.system(cmd)
modify_pqr(inpqr+'.pqr', outpqr+'.pqr', xq_new)
print ('\nWritten to '+outMesh+'.vert(.face) and '+outpqr+'.pqr')
| bsd-3-clause | Python |
|
daf23cbb6d6015a2819de5d089a35903cbce9441 | Create katakan.py | agusmakmun/Some-Examples-of-Simple-Python-Script,agusmakmun/Some-Examples-of-Simple-Python-Script | list/katakan.py | list/katakan.py | """
4
2 belas
seratus 4 puluh 0
9 ribu seratus 2 puluh 1
2 puluh 1 ribu 3 puluh 0
9 ratus 5 ribu 0
8 puluh 2 juta 8 ratus 8 belas ribu seratus 8 puluh 8
3 ratus 1 juta 4 puluh 8 ribu 5 ratus 8 puluh 8
"""
def kata(n):
    angka = range(12)  # indexable for every n < 12 in the branch below
temp = ""
if n < 12:
temp += str(angka[n])
elif n < 20:
temp += str(n-10)+" belas"
elif n < 100:
temp += str(kata(n/10)) + " puluh "+ str(kata(n%10))
elif n < 200:
temp += "seratus "+ str(kata(n-100))
elif n < 1000:
temp += str(kata(n/100))+ " ratus " + str(kata(n%100))
elif n < 2000:
temp += "seribu "+str(kata(n-1000))
elif n < 1000000:
temp += str(kata(n/1000))+ " ribu "+ str(kata(n%1000))
elif n < 1000000000:
temp += str(kata(n/1000000)) +" juta " + str(kata(n%1000000))
return temp
print kata(4)
print kata(12)
print kata(140)
print kata(9121)
print kata(21030)
print kata(905000)
print kata(82818188)
print kata(301048588)
| agpl-3.0 | Python |
|
1555164ff275436de580a33735a2d8c6e6893b42 | Create lab4.py | JOSUEXLION/prog3-uip,JOSUEXLION/prog3-uip | laboratorios/lab4.py | laboratorios/lab4.py | #lab 4
#josue dde leon
for i in range (1, 4):
nombre = input("\n\nintroduce nombre: ")
n1 = input ("Introduce nota 1: ")
n2 = input ("Introduce nota 2: ")
n3 = input ("Introduce nota 3: ")
n4 = input ("Introduce nota 4: ")
n5 = input ("Introduce nota 5: ")
prom=(float(n1)+float(n2)+float(n3)+float(n4)+float(n5))/5
print ("\nNombre: " + str(nombre))
print ("\nQuiz 1: " + str(n1))
print ("Quiz 2: " + str(n2))
print ("Quiz 3: " + str(n3))
print ("Quiz 4: " + str(n4))
print ("Quiz 5: " + str(n5))
print ("\n\nEl promedio de " + str(nombre) + " es " + str(prom))
archivo = open(nombre, 'w')
archivo.write("Nombre: " + str(nombre))
archivo.write("\nquiz 1: " + n1)
archivo.write("\nquiz 2: " + n2)
archivo.write("\nquiz 3: " + n3)
archivo.write("\nquiz 4: " + n4)
archivo.write("\nquiz 5: " + n5)
archivo.write("\nEl promedio de " + str(nombre) + " es " + str(prom))
archivo.close()
| mit | Python |
|
7b06edf37a630d4582fc84832cd1d40b790e4aa3 | Add server | openlawlibrary/pygls,openlawlibrary/pygls,openlawlibrary/pygls | pygls/server.py | pygls/server.py | import asyncio
import logging
from .protocol import LanguageServerProtocol
logger = logging.getLogger(__name__)
class Server:
def __init__(self, protocol_cls):
assert issubclass(protocol_cls, asyncio.Protocol)
self.loop = asyncio.get_event_loop()
self.lsp = protocol_cls(self)
self.server = None
def shutdown(self):
self.server.close()
# TODO: Gracefully shutdown event loops
def start_tcp(self, host, port):
self.server = self.loop.run_until_complete(
self.loop.create_server(self.lsp, host, port)
)
self.loop.run_forever()
class LanguageServer(Server):
def __init__(self):
super().__init__(LanguageServerProtocol)
def command(self, command_name):
'''
Registers new command (delegating to FeatureManager).
Args:
command_name(str): Name of the command to register
'''
return self.lsp.fm.command(command_name)
def feature(self, *feature_names, **options):
'''
Registers one or more LSP features (delegating to FeatureManager).
Args:
*feature_names(tuple): One or more features to register
NOTE: All possible LSP features are listed in lsp module
**options(dict): Options for registered feature
E.G. triggerCharacters=['.']
'''
return self.lsp.fm.feature(*feature_names, **options)
def thread(self):
return self.lsp.thread()
| apache-2.0 | Python |
|
357ce31d1f28fbc5d12a23dfd3bb2aa40a4e27a3 | Add serialdumpbytexor.py | jj1bdx/avrhwrng,jj1bdx/avrhwrng,jj1bdx/avrhwrng | serialdumpbytexor.py | serialdumpbytexor.py | #!/usr/bin/env python
import sys, serial
if __name__ == '__main__':
ser = serial.Serial('/dev/cu.usbserial-A8004ISG', 115200, timeout=10, xonxoff=0, rtscts=0)
# ser.open()
bb = bytearray(512)
while 1:
ba = bytearray(ser.read(1024))
for i in range(512):
j = i * 2
bb[i] = ba[j] ^ ba[j+1]
sys.stdout.write(bb)
sys.stdout.flush()
| mit | Python |
|
ca99e80e04a1d7fb3ff3698f23cdc19c8ec16113 | add refresh test | scylladb/scylla-longevity-tests,amoskong/scylla-cluster-tests,scylladb/scylla-cluster-tests,scylladb/scylla-longevity-tests,amoskong/scylla-cluster-tests,amoskong/scylla-cluster-tests,scylladb/scylla-cluster-tests,amoskong/scylla-cluster-tests,scylladb/scylla-cluster-tests,scylladb/scylla-longevity-tests,scylladb/scylla-cluster-tests,amoskong/scylla-cluster-tests,scylladb/scylla-cluster-tests | refresh_test.py | refresh_test.py | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2017 ScyllaDB
import time
from avocado import main
from sdcm.tester import ClusterTester
from sdcm.nemesis import RefreshMonkey
from sdcm.nemesis import RefreshBigMonkey
class RefreshTest(ClusterTester):
"""
Nodetool refresh after uploading lot of data to a cluster with running load in the background.
:avocado: enable
"""
def test_refresh_small_node(self):
self.db_cluster.add_nemesis(nemesis=RefreshMonkey,
loaders=self.loaders,
monitoring_set=self.monitors)
# run a write workload
stress_queue = self.run_stress_thread(stress_cmd=self.params.get('stress_cmd'),
stress_num=2,
keyspace_num=1)
time.sleep(30)
self.db_cluster.start_nemesis()
self.db_cluster.stop_nemesis(timeout=None)
self.get_stress_results(queue=stress_queue, stress_num=2, keyspace_num=1)
def test_refresh_big_node(self):
self.db_cluster.add_nemesis(nemesis=RefreshBigMonkey,
loaders=self.loaders,
monitoring_set=self.monitors)
# run a write workload
stress_queue = self.run_stress_thread(stress_cmd=self.params.get('stress_cmd'),
stress_num=2,
keyspace_num=1)
time.sleep(30)
self.db_cluster.start_nemesis()
self.db_cluster.stop_nemesis(timeout=None)
self.get_stress_results(queue=stress_queue, stress_num=2, keyspace_num=1)
if __name__ == '__main__':
main()
| agpl-3.0 | Python |
|
b440872f71d37cc5bf110eb0c7c13a4a2dcb7f6c | create utils package, field_template_read update var name to template render | YACOWS/opps,jeanmask/opps,opps/opps,YACOWS/opps,jeanmask/opps,williamroot/opps,opps/opps,YACOWS/opps,YACOWS/opps,williamroot/opps,jeanmask/opps,opps/opps,jeanmask/opps,opps/opps,williamroot/opps,williamroot/opps | opps/fields/utils.py | opps/fields/utils.py | # -*- coding: utf-8 -*-
def field_template_read(obj):
"""Use replace because the django template can't read variable with "-"
"""
fields = {}
for o in obj:
fields[o.replace("-", "_")] = obj[o]
return fields
| mit | Python |
|
2804024fbee6b825dec512ff13d7b28a1fee5b25 | Add root Api object. | pozytywnie/RouterOS-api,socialwifi/RouterOS-api,kramarz/RouterOS-api | routeros_api/api.py | routeros_api/api.py | import hashlib
import binascii
from routeros_api import api_communicator
from routeros_api import api_socket
from routeros_api import base_api
def connect(host, username='admin', password='', port=8728):
socket = api_socket.get_socket(host, port)
base = base_api.Connection(socket)
communicator = api_communicator.ApiCommunicator(base)
login(communicator, username, password)
return RouterOsApi(communicator)
def login(communicator, login, password):
communicator.send_command('/', 'login')
response = communicator.receive_single_response()
token = binascii.unhexlify(response.attributes['ret'])
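    # RouterOS challenge-response login: answer with
    # "00" + hex(md5(NUL byte + password + challenge))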
hasher = hashlib.md5()
hasher.update(b'\x00')
hasher.update(password.encode())
hasher.update(token)
hashed = b'00' + hasher.hexdigest().encode('ascii')
communicator.call('/', 'login', {'name': login, 'response': hashed})
class RouterOsApi(object):
def __init__(self, communicator):
self.communicator = communicator
def get_resource(self, path):
return RouterOsResource(self.communicator, path)
def get_binary_resource(self, path):
return RouterOsResource(self.communicator, path, binary=True)
class RouterOsResource(object):
def __init__(self, communicator, path, binary=False):
self.communicator = communicator
self.path = path
self.binary = binary
def get(self, **kwargs):
return self.call('print', {}, kwargs)
def get_async(self, **kwargs):
return self.call_async('print', {}, kwargs)
def detailed_get(self, **kwargs):
return self.call('print', {'detail': ''}, kwargs)
def detailed_get_async(self, **kwargs):
return self.call_async('print', {'detail': ''}, kwargs)
def set(self, **kwargs):
return self.call('set', kwargs)
def set_async(self, **kwargs):
        return self.call_async('set', kwargs)
def add(self, **kwargs):
return self.call('add', kwargs)
def add_async(self, **kwargs):
return self.call_async('add', kwargs)
def remove(self, **kwargs):
return self.call('remove', kwargs)
def remove_async(self, **kwargs):
return self.call_async('remove', kwargs)
def call(self, command, arguments=None, queries=None,
additional_queries=()):
return self.communicator.call(
self.path, command, arguments=arguments, queries=queries,
additional_queries=additional_queries, binary=self.binary)
def call_async(self, command, arguments=None, queries=None,
additional_queries=()):
return self.communicator.call_async(
self.path, command, arguments=arguments, queries=queries,
additional_queries=additional_queries, binary=self.binary)
| mit | Python |
|
52f715af4b1cf6dd964e71cafdf807d1133fe717 | add a basic script that tests nvlist_in and nvlist_out functionality | ClusterHQ/pyzfs | tests/test_nvlist.py | tests/test_nvlist.py | import json
import math
from libzfs_core.nvlist import *
from libzfs_core.nvlist import _lib
props_in = {
"key1": "str",
"key2": 10,
"key3": {
"skey1": True,
"skey2": None,
"skey3": [
True,
False,
True
]
},
"key4": [
"ab",
"bc"
],
"key5": [
int(math.pow(2, 62)),
1,
2,
3
],
"key6": [
uint32_t(10),
uint32_t(11)
],
"key7": [
{
"skey71": "a",
"skey72": "b",
},
{
"skey71": "c",
"skey72": "d",
},
{
"skey71": "e",
"skey72": "f",
}
]
}
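# Note on types above: uint32_t (imported from libzfs_core.nvlist) pins
# 'key6' to a 32-bit integer array, while the bare 2**62 in 'key5' should
# round-trip as 64-bit values -- a reading of the wrapper, not verified.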
props_out = {}
with nvlist_in(props_in) as x:
print "Dumping a C nvlist_t produced from a python dictionary:"
_lib.dump_nvlist(x, 2)
with nvlist_out(props_out) as y:
_lib.nvlist_dup(x, y, 0)
print "\n\n"
print "Dumping a dictionary reconstructed from the nvlist_t:"
print json.dumps(props_out, sort_keys=True, indent=4)
| apache-2.0 | Python |
|
5348379759caa9576c3194ae0795e2fcc6ed3308 | add unit tests | mirnylab/cooler | tests/test_region.py | tests/test_region.py | # -*- coding: utf-8 -*-
from cooler.region import *
import nose
def test_bool_ops():
a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 15, 20))
assert comes_before(a, b) == True
assert comes_after(a, b) == False
assert contains(a, b) == False
assert overlaps(a, b) == False
a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 10, 20))
assert comes_before(a, b) == True
assert comes_after(a, b) == False
assert contains(a, b) == False
assert overlaps(a, b) == False
a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 6, 10))
assert comes_before(a, b) == True
assert comes_after(a, b) == False
assert contains(a, b) == False
assert overlaps(a, b) == True
a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 5, 10))
assert comes_before(a, b) == False
assert comes_after(a, b) == False
assert contains(a, b) == False
assert overlaps(a, b) == True
a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 0, 6))
assert comes_before(a, b) == False
assert comes_after(a, b) == True
assert contains(a, b) == False
assert overlaps(a, b) == True
a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 0, 5))
assert comes_before(a, b) == False
assert comes_after(a, b) == True
assert contains(a, b) == False
assert overlaps(a, b) == False
a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 0, 15))
assert comes_before(a, b) == False
assert comes_after(a, b) == False
assert contains(a, b) == False
assert overlaps(a, b) == True
def test_set_ops():
a, b = parse_region(('chr1', 5, 15)), parse_region(('chr1', 10, 20))
assert intersection(a, b) == Region('chr1', 10, 15)
a, b = parse_region(('chr1', 5, 15)), parse_region(('chr1', 10, 20))
assert union(a, b) == Region('chr1', 5, 20)
a, b = parse_region(('chr1', 5, 10)), parse_region(('chr1', 15, 20))
assert hull(a, b) == Region('chr1', 5, 20)
a, b = parse_region(('chr1', 5, 15)), parse_region(('chr1', 10, 20))
assert diff(a, b) == Region('chr1', 5, 10)
a, b = parse_region(('chr1', 5, 15)), parse_region(('chr1', 10, 20))
x, y, z = partition(a, b)
assert x == Region('chr1', 5, 10)
assert y == Region('chr1', 10, 15)
assert z == Region('chr1', 15, 20)
| bsd-3-clause | Python |
|
c9fc6d4f98ba102d94fa54eedae6a50d38459d71 | add test_invalid_files to test_schema | NREL/hescore-hpxml | tests/test_schema.py | tests/test_schema.py | import os
import jsonschema
import json
import pathlib
import copy
def get_example_json(filebase):
rootdir = pathlib.Path(__file__).resolve().parent.parent
jsonfilepath = str(rootdir / 'examples' / f'{filebase}.json')
with open(jsonfilepath) as f:
js = json.load(f)
return js
def get_json_schema():
this_path = os.path.dirname(os.path.abspath(__file__))
schema_path = os.path.join(os.path.dirname(this_path), 'hescorehpxml', 'schemas', 'hescore_json.schema.json')
with open(schema_path, 'r') as js:
schema = json.loads(js.read())
return schema
def get_error_messages(jsonfile, jsonschema):
errors = []
for error in sorted(jsonschema.iter_errors(jsonfile), key=str):
errors.append(error.message)
return errors
def test_schema_version_validation():
schema = get_json_schema()
error = jsonschema.Draft7Validator.check_schema(schema)
assert error is None
def test_invalid_files():
hpxml_filebase = 'townhouse_walls'
schema = get_json_schema()
js_schema = jsonschema.Draft7Validator(schema)
js = get_example_json(hpxml_filebase)
js1 = copy.deepcopy(js)
del js1['building']['about']['town_house_walls']
errors = get_error_messages(js1, js_schema)
assert "'town_house_walls' is a required property" in errors
js2 = copy.deepcopy(js)
js2_about = copy.deepcopy(js['building']['about'])
del js2['building']['about']
js2['building']['about'] = []
js2['building']['about'].append(js2_about)
js2['building']['about'].append(js2_about)
errors = get_error_messages(js2, js_schema)
assert any(error.startswith("[{'assessment_date': '2014-12-02', 'shape': 'town_house'") and
error.endswith("is not of type 'object'") for error in errors)
js3 = copy.deepcopy(js)
js3_zone = copy.deepcopy(js['building']['zone'])
del js3['building']['zone']
js3['building']['zone'] = []
js3['building']['zone'].append(js3_zone)
js3['building']['zone'].append(js3_zone)
errors = get_error_messages(js3, js_schema)
assert any(error.startswith("[{'zone_roof': [{'roof_name': 'roof1', 'roof_area': 1200.0") and
error.endswith("is not of type 'object'") for error in errors)
# TODO: Add more tests | bsd-2-clause | Python |
|
5e4fd7fb37f9e16d27a7751221f6e3725509f2fc | Prepare to use unittests | thomnico/fortiosapi,thomnico/fortigateconf,thomnico/fortiosapi | tests/testapi.py | tests/testapi.py | #!/usr/bin/python
from fortigateconf import FortiOSConf
import sys
import json
import pprint
import json
from argparse import Namespace
import logging
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
logger = logging.getLogger('fortinetconflib')
hdlr = logging.FileHandler('/var/tmp/testapi.log')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
logger.debug('often makes a very good meal of %s', 'visiting tourists')
fgt = FortiOSConf()
def json2obj(data):
return json.loads(data, object_hook=lambda d: Namespace(**d))
def main():
# Login to the FGT ip
fgt.debug('on')
fgt.login('192.168.40.8','admin','')
data = {
# "action" : "add",
"seq-num" :"8",
"dst": "10.10.30.0 255.255.255.0",
"device": "port2",
"gateway": "192.168.40.254",
}
pp = pprint.PrettyPrinter(indent=4)
d=json2obj(json.dumps(data))
pp.pprint(fgt.get_name_path_dict( vdom="root"))
# resp = fgt.schema('diagnose__tree__','debug', vdom="root")
# pp.pprint(resp)
resp = fgt.post('diagnose__tree__','debug', vdom="root", mkey="enable")
pp.pprint(resp)
fgt.logout()
if __name__ == '__main__':
main()
| apache-2.0 | Python |
|
cd653c3657aa14d3845a253d916e9f0d336910ce | add logger convenience class | AlekSi/loggerglue | loggerglue/logger.py | loggerglue/logger.py | # -*- coding: utf-8 -*-
"""
An rfc5424/rfc5425 syslog server implementation
Copyright © 2011 Evax Software <[email protected]>
"""
import socket,os,sys
from datetime import datetime
from loggerglue.rfc5424 import DEFAULT_PRIVAL,SyslogEntry
from loggerglue.emitter import UNIXSyslogEmitter
class Logger(object):
"""
Convenience class to log RFC5424 messages to the
local syslog daemon.
"""
def __init__(self, emitter=None, hostname=None, app_name=None, procid=None):
"""
Create a new logger object.
Keyword arguments:
emitter -- Emitter object to send syslog messages, default to Unix socket /dev/log
hostname -- Hostname to send with log messages, defaults to current hostname
app_name -- Application name to send with log messages, defaults to application name
procid -- Process ID to send with log messages, default to current process ID
"""
if hostname is None:
# Compute host name to submit to syslog
hostname = socket.gethostname()
if app_name is None:
# Compute default app name from name of executable,
# without extension.
app_name = os.path.basename(sys.argv[0])
(app_name, _, _) = app_name.partition(".")
if procid is None:
procid = os.getpid()
if emitter is None:
emitter = UNIXSyslogEmitter()
self.hostname = hostname
self.app_name = app_name
self.procid = procid
self.emitter = emitter
def log(self, msg=None, msgid=None, structured_data=None, prival=DEFAULT_PRIVAL,
timestamp=None):
"""
Log a message.
Example:
>>> logger.log("test", prival=LOG_DEBUG|LOG_MAIL)
Keyword arguments:
msg -- Human readable message to log
msgid -- Message identifier
structured_data -- Structured data to attach to log message
prival -- Priority and facility of message (defaults to INFO|USER)
timestamp -- UTC time of log message (default to current time)
"""
if timestamp is None:
timestamp = datetime.utcnow()
msg = SyslogEntry(
            prival=prival, timestamp=timestamp,  # use the computed timestamp, not a second utcnow()
hostname=self.hostname, app_name=self.app_name, procid=self.procid, msgid=msgid,
structured_data=structured_data,
msg=msg
)
self.emitter.emit(msg)
def close(self):
"""
Close connection to logger.
"""
self.emitter.close()
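# Minimal usage sketch (illustrative, not part of the original module;
# assumes the default UNIX emitter can open the local /dev/log socket):
#
#   logger = Logger(app_name='example')
#   logger.log('hello world', msgid='DEMO')
#   logger.close()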
| mit | Python |
|
1ac75fafc9c67e0fc1f898f4653593730ed66326 | Create uber.py | jasuka/pyBot,jasuka/pyBot | modules/uber.py | modules/uber.py | def uber(self):
    self.send_chan("Prkl, toimii!")  # Finnish: "Damn, it works!"
| mit | Python |
|
7b8d7bf81b094f554f3d820b1e0df5d54917f4c0 | Create getCITask.py | xebialabs-community/xlr-xldeploy-plugin,xebialabs-community/xlr-xldeploy-plugin,xebialabs-community/xlr-xldeploy-plugin,xebialabs-community/xlr-xldeploy-plugin | src/main/resources/xlr_xldeploy/getCITask.py | src/main/resources/xlr_xldeploy/getCITask.py | #
# Copyright 2017 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from xlr_xldeploy.XLDeployClientUtil import XLDeployClientUtil
xld_client = XLDeployClientUtil.create_xldeploy_client(xldeployServer, username, password)
test = xld_client.check_ci_exist(ciID)
if throwOnFail and not test:
raise Exception(ciID + " does not exist")
else:
response = xld_client.get_ci(ciID,accept)
| mit | Python |
|
116babc38e2e4023eb0b45eabc02050ed433e240 | Include a helpful MOD analyser script | keirf/Amiga-Stuff,keirf/Amiga-Stuff | scripts/mod_info.py | scripts/mod_info.py | # mod_info.py
#
# Display information about a Protracker module.
#
# Written & released by Keir Fraser <[email protected]>
#
# This is free and unencumbered software released into the public domain.
# See the file COPYING for more details, or visit <http://unlicense.org>.
import struct, sys
with open(sys.argv[1], "rb") as f:
dat = f.read()
dlen = len(dat)
tname, = struct.unpack("20s", dat[:20])
print("Name: '%s'" % tname.decode('utf-8'))
dat = dat[20:]
samples_len = 0
for i in range(31):
name, wordlen, finetune, volume, repstart, replen = struct.unpack(
">22sH2B2H", dat[:30])
dat = dat[30:]
if wordlen == 0:
continue
samples_len += wordlen*2
print("Sample Data: %u" % samples_len)
songlen, pad = struct.unpack("2B", dat[:2])
dat = dat[2:]
#assert pad == 127
assert songlen <= 128
print("Song Length: %u" % songlen)
patterns = list(struct.unpack("128B", dat[:128]))
dat = dat[128:]
patterns = patterns[:songlen]
nr_patterns = max(patterns)+1
print("Nr Patterns: %u (%u bytes)" % (nr_patterns, nr_patterns*1024))
mksig, = struct.unpack("4s", dat[:4])
dat = dat[4:]
assert mksig == b'M.K.'
totlen = 1084 + nr_patterns*1024 + samples_len
print("Total Bytes: %u (0x%x)" % (totlen, totlen))
assert totlen <= dlen
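# Example invocation (illustrative; any Protracker module with an
# 'M.K.' signature will do):
#   python mod_info.py song.mod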
| unlicense | Python |
|
9e7acd4e7d80cffb0274e3a01aee517fb63d3db9 | Create Josuel_Concordance.py | Joshlix95/Juntos | Josuel_Concordance.py | Josuel_Concordance.py | # Author: Josuel Musambaghani
# library that breaks text into parts
import nltk
import string
with open('c:/Python27/fileIn.txt', 'r') as in_file:
text = in_file.read()
f = nltk.sent_tokenize(text)
# This code deals with the problem of sentences that were split at parentheses
item = 0
while item < len(f) - 1:
    if '(' in f[item] and ')' in f[item + 1]:
        # merge the two halves; deleting by index avoids both the IndexError
        # the shrinking range-loop could hit and removing a duplicate sentence
        f[item] += ' ' + f[item + 1]
        del f[item + 1]
    else:
        item += 1
'''
# This code solves the problem of having punctuation marks appended to words
# when running. For example 'english:' and 'english' might be considered
# as different because of the punctuation mark
punctuations = ['.', ':', ':', "'", ',', '...', '?', '!', '~']
g = []
for elt in f:
for mark in punctuations:
if mark in elt:
z = elt.split(mark)
new = z[0] + z[1]
g.append(new)
print g
################################################################
for elt in f:
for let in elt[len(elt)-2:]:
if let in string.punctuation:
elt = elt.replace(let, "")
for elt in f:
for let in elt[:1]:
if let in string.punctuation:
elt = elt.replace(let, "")
print f
'''
# count and display results of counted words
myDict = {}
linenum = -1
for line in f:
line = line.strip()
line = line.lower()
line = line.split()
linenum += 1
for word in line:
###################################################
        # Trying to eliminate punctuation marks that are appended to words
if word in string.punctuation:
line.remove(word)
for elt in word[len(word)-2:]:
if "e.g." in word:
continue
elif elt in string.punctuation:
word = word.replace(elt, "")
for elt in word[:1]:
if elt in string.punctuation:
word = word.replace(elt, "")
###################################################
# the code continues as normal ...
word = word.strip()
word = word.lower()
if not word in myDict:
myDict[word] = []
myDict[word].append(linenum)
print "%-15s %5s %s" %("Word", 'Count', "Line Numbers")
print "%-15s %5s %s" %("====", '=====', "============")
for key in sorted(myDict):
print '%-15s %5d: %s' % (key, len(myDict[key]), myDict[key])
| mit | Python |
|
2032a823b2dad6f7cebb63ee276bcfb6ea02b7a0 | improve notes | parrt/msan692,parrt/msan692,parrt/msan692 | notes/code/lolviz.py | notes/code/lolviz.py | import graphviz
def lolviz(table):
"""
Given a list of lists such as:
[ [('a','3')], [], [('b',230), ('c',21)] ]
return the dot/graphviz to display as a two-dimensional
structure.
"""
s = """
digraph G {
nodesep=.05;
rankdir=LR;
node [shape=record,width=.1,height=.1];
"""
# Make outer list as vertical
labels = []
for i in range(len(table)):
bucket = table[i]
if len(bucket)==0: labels.append(str(i))
else: labels.append("<f%d> %d" % (i,i))
s += ' mainlist [color="#444443", fontsize="9", fontcolor="#444443", fontname="Helvetica", style=filled, fillcolor="#D9E6F5", label = "'+'|'.join(labels)+'"];\n'
# define inner lists
for i in range(len(table)):
bucket = table[i]
if not bucket or len(bucket)==0: continue
elements = []
for j, el in enumerate(bucket):
if type(el)==tuple and len(el)==2: els = "%s→%s" % el
else: els = repr(el)
elements.append('<table BORDER="0" CELLBORDER="1" CELLSPACING="0"><tr><td cellspacing="0" bgcolor="#FBFEB0" border="1" sides="b" valign="top"><font color="#444443" point-size="9">%d</font></td></tr><tr><td bgcolor="#FBFEB0" border="0" align="center">%s</td></tr></table>' % (j, els))
s += 'node%d [color="#444443", fontname="Helvetica", margin="0.01", space="0.0", shape=record label=<{%s}>];\n' % (i, '|'.join(elements))
# Do edges
for i in range(len(table)):
bucket = table[i]
if not bucket or len(bucket)==0: continue
s += 'mainlist:f%d -> node%d [arrowsize=.5]\n' % (i,i)
s += "}\n"
print s
return s
x = [ [('a','3')], [], [('b',230), ('c',21)] ]
dot = lolviz(x)
g = graphviz.Source(dot)
g.render(view=True) | mit | Python |
|
70b6fde787018daf5b87f485e60c9a26fa542f2e | add basic affine 3D transforms | FeodorM/Computer-Graphics | lab_3/affine_transform.py | lab_3/affine_transform.py | from util.matrix import Matrix
from math import cos, sin
def translation(x, y, z):
return Matrix([
[1, 0, 0, x],
[0, 1, 0, y],
[0, 0, 1, z],
[0, 0, 0, 1]
])
# den = (phi ** 2 + psi ** 2) ** .5
# phi /= den
# psi /= den
# return Matrix([
# [phi, -psi, 0],
# [psi, phi, 0],
# [0, 0, 1]
# ])
def rotation_x(phi):
c = cos(phi)
s = sin(phi)
return Matrix([
[1, 0, 0, 0],
[0, c, -s, 0],
[0, s, c, 0],
[0, 0, 0, 1]
])
def rotation_y(phi):
c = cos(phi)
s = sin(phi)
return Matrix([
[c, 0, s, 0],
[0, 1, 0, 0],
[-s, 0, c, 0],
[0, 0, 0, 1]
])
def rotation_z(phi):
c = cos(phi)
s = sin(phi)
return Matrix([
[c, -s, 0, 0],
[s, c, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
])
def scaling(kx, ky=None, kz=None):
if ky is None and kz is None:
ky = kz = kx
return Matrix([
[kx, 0, 0, 0],
[0, ky, 0, 0],
[0, 0, kz, 0],
[0, 0, 0, 1]
])
mirroring_x = Matrix([
[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1]
])
mirroring_y = Matrix([
[-1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1]
])
mirroring_z = Matrix([
[-1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
])
| mit | Python |
|
786f75be946427024fa96ae8dcd06d8d1ecd49cc | Add the init method to the node model. | yiyangyi/cc98-tornado | model/node.py | model/node.py | class NodeModel(Query):
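    # NOTE: this snippet assumes the project's base ``Query`` class is
    # importable here; no import statement is shown in this file.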
def __init__(self, db):
self.db = db
self.table_name = "node"
super(NodeModel, self).__init__() | mit | Python |
|
ebc6368c11048a9182d848cff7f47e3dd8532933 | Add files via upload | huntercbx/games-with-python | my_game_04.py | my_game_04.py | import pygame
import os
# width and height of the game screen
WIDTH = 640
HEIGHT = 480
# frame rate
FPS = 60
# path to the images
game_folder = os.path.dirname(__file__)
img_folder = os.path.join(game_folder, "images")
# class for the player's ship
class PlayerShip(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(os.path.join(img_folder, "player_ship.png")).convert()
self.image.set_colorkey((0, 0, 0))
self.rect = self.image.get_rect()
self.rect.x = 10
self.rect.centery = HEIGHT / 2
class Meteor(pygame.sprite.Sprite):
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(os.path.join(img_folder, "meteor.png")).convert()
self.image.set_colorkey((0, 0, 0))
self.rect = self.image.get_rect()
self.rect.left = x
self.rect.top = y
def update(self):
self.rect.left -= 3
# initialize the pygame library
pygame.init()
# create an object to keep track of time
clock = pygame.time.Clock()
# create the game screen
screen = pygame.display.set_mode((WIDTH, HEIGHT))
# change the window title
pygame.display.set_caption("My Game")
# all sprites will be stored here
sprites = pygame.sprite.Group()
sprites.add(PlayerShip())
sprites.add(Meteor(WIDTH - 50, 40))
sprites.add(Meteor(WIDTH - 100, 200))
# event loop
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
    # update movement
sprites.update()
    # clear the background and draw the sprites
screen.fill((0, 0, 80))
sprites.draw(screen)
    # swap the display buffers
pygame.display.flip()
    # sets how often the loop runs
clock.tick(FPS)
# shut down the pygame library
pygame.quit()
| mit | Python |
|
07467664b699612e10b51bbeafdce79a9d1e0127 | Write unit test for utility functions | unnonouno/cudnnenv | test/test_util.py | test/test_util.py | from __future__ import unicode_literals
try:
import io
StringIO = io.StringIO
except ImportError:
import StringIO
StringIO = StringIO.StringIO
import os
import shutil
import sys
import tempfile
import unittest
import cudnnenv
class TestSafeTempDir(unittest.TestCase):
def test_safe_temp_dir(self):
with cudnnenv.safe_temp_dir() as path:
self.assertTrue(os.path.exists(path))
self.assertFalse(os.path.exists(path))
def test_safe_temp_dir_error(self):
try:
with cudnnenv.safe_temp_dir() as path:
raise Exception
except Exception:
pass
self.assertFalse(os.path.exists(path))
class TestSafeDir(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.path, ignore_errors=True)
def test_safe_dir(self):
path = os.path.join(self.path, 'd')
with cudnnenv.safe_dir(path) as p:
self.assertTrue(os.path.exists(p))
self.assertTrue(os.path.exists(path))
def test_safe_dir_error(self):
path = os.path.join(self.path, 'd')
try:
with cudnnenv.safe_dir(path) as p:
raise Exception
except Exception:
pass
self.assertFalse(os.path.exists(p))
self.assertFalse(os.path.exists(path))
class TestYesNo(unittest.TestCase):
def tearDown(self):
sys.stdin = sys.__stdin__
def test_yes(self):
sys.stdin = StringIO('y\n')
self.assertTrue(cudnnenv.yes_no_query('q'))
def test_no(self):
sys.stdin = StringIO('n\n')
self.assertFalse(cudnnenv.yes_no_query('q'))
def test_invalid(self):
sys.stdin = StringIO('a\nb\nc\nd\ny\nn\n')
self.assertTrue(cudnnenv.yes_no_query('q'))
| mit | Python |
|
448f18769d7c701d9dd03ff65489656380513d07 | Add test init. | FelixLoether/flask-image-upload-thing,FelixLoether/flask-uploads | tests/__init__.py | tests/__init__.py | from flexmock import flexmock
from flask.ext.storage import MockStorage
from flask_uploads import init
created_objects = []
added_objects = []
deleted_objects = []
committed_objects = []
class MockModel(object):
def __init__(self, **kw):
created_objects.append(self)
for key, val in kw.iteritems():
setattr(self, key, val)
db_mock = flexmock(
Column=lambda *a, **kw: ('column', a, kw),
Integer=('integer', [], {}),
Unicode=lambda *a, **kw: ('unicode', a, kw),
Model=MockModel,
session=flexmock(
add=added_objects.append,
commit=lambda: committed_objects.extend(
added_objects + deleted_objects
),
delete=deleted_objects.append,
),
)
class TestCase(object):
def setup_method(self, method, resizer=None):
init(db_mock, MockStorage, resizer)
self.db = db_mock
self.Storage = MockStorage
self.resizer = resizer
| mit | Python |
|
256648ad4effd9811d7c35ed6ef45de67f108926 | Add pytest option for specifying the typing module to use | bintoro/overloading.py | tests/conftest.py | tests/conftest.py | import sys
def pytest_addoption(parser):
parser.addoption('--typing', action='store', default='typing')
def pytest_configure(config):
if config.option.typing == 'no':
sys.modules['typing'] = None
elif config.option.typing != 'typing':
sys.modules['typing'] = __import__(config.option.typing)
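# Usage sketch (hypothetical command lines):
#   py.test --typing=no        # run the suite with 'typing' blocked
#   py.test --typing=backport  # substitute an alternative typing module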
| mit | Python |
|
08f6d31feb493b24792eaabfa11d08faea68c62b | add textample plug | Rouji/Yui,Rj48/ircbot | plugins/textample/textample.py | plugins/textample/textample.py | # coding=utf-8
import gzip
import os
import random
import re
def search(regex, base_dir, file_contains=''):
reg = re.compile(regex, re.IGNORECASE)
for root, _, files in os.walk(base_dir):
for file in files:
if file.endswith('.gz'):
file_path = os.path.join(root, file)
if file_contains not in file_path:
continue
with gzip.open(file_path) as f:
for line in f:
line = line.decode('utf-8')
if reg.search(line):
yield (file_path[len(base_dir) + 1:-3], ' '.join(line.split()))
@yui.threaded
@yui.command('example', 'ex')
def example(argv):
"""Regex search for sentences. Usage: example <regex> [file]"""
if len(argv) < 2:
return
base = os.path.join(os.path.dirname(__file__), 'texts')
if not os.path.isdir(base):
return 'Directory %s does not exist' % base
se = search(argv[1], base, file_contains=argv[2] if len(argv) > 2 else '')
try:
return '%s: %s' % random.choice(list(se))
except IndexError as e:
return 'No matching sentences found'
| mit | Python |
|
da3248f782d83c46b698c31736b29a42d380511c | Add the playground | thewizardplusplus/micro,thewizardplusplus/micro,thewizardplusplus/micro | micro/_playground.py | micro/_playground.py | CODE = '''
out str + 2 3
'''
if __name__ == '__main__':
import lexer
import preparser
import parser
import builtin_functions
import sys
import evaluate
specific_lexer = lexer.Lexer()
specific_preparser = preparser.Preparser(specific_lexer)
preast = specific_preparser.preparse(CODE)
specific_parser = parser.Parser()
ast = specific_parser.parse(preast, builtin_functions.BUILTIN_FUNCTIONS)
errors = specific_lexer.get_errors() + specific_preparser.get_errors() + specific_parser.get_errors()
for some_error in errors:
some_error.detect_position(CODE)
print(some_error)
if errors:
sys.exit()
evaluate.evaluate(ast, builtin_functions.BUILTIN_FUNCTIONS)
| mit | Python |
|
686c0d0c8f2e520375315c84e2320b087b9a3831 | add scan dir test | hirokihamasaki/irma,quarkslab/irma,deloittem/irma-frontend,quarkslab/irma,deloittem/irma-frontend,quarkslab/irma,deloittem/irma-frontend,hirokihamasaki/irma,hirokihamasaki/irma,hirokihamasaki/irma,quarkslab/irma,hirokihamasaki/irma,deloittem/irma-frontend | tests/scan_dir.py | tests/scan_dir.py | import os
import json
import datetime
import random
import hashlib
import signal
import sys
from frontend.cli.irma import _scan_new, _scan_add, _scan_launch, \
_scan_progress, _scan_cancel, IrmaScanStatus, _scan_result
import time
RES_PATH = "."
SRC_PATH = "."
SCAN_TIMEOUT_SEC = 300
BEFORE_NEXT_PROGRESS = 5
DEBUG = False
Probelist = [u'ClamAV', u'VirusTotal', u'Kaspersky', u'Sophos',
u'McAfeeVSCL', u'Symantec', u'StaticAnalyzer']
scanner = None
def handler(signum, frame):
print 'Cancelling...'
if scanner is not None:
scanner.cancel()
sys.exit(0)
class ScannerError(Exception):
pass
class Scanner(object):
def __init__(self):
# test setup
date_str = str(datetime.datetime.now().date())
date_str = date_str.replace('-', '')
self.res_dir = os.path.join(RES_PATH, date_str)
self.scanid = None
try:
if not os.path.exists(self.res_dir):
os.mkdir(self.res_dir)
except OSError:
raise ScannerError("Can't create [{0}]".format(self.res_dir))
def cancel(self):
if self.scanid is not None:
_scan_cancel(self.scanid, DEBUG)
def scan_files(self, files,
force=False,
probe=None,
timeout=SCAN_TIMEOUT_SEC):
self.scanid = _scan_new(DEBUG)
_scan_add(self.scanid, files, DEBUG)
probelist = _scan_launch(self.scanid, force, probe, DEBUG)
scanid = self.scanid
nb = len(files)
probes = " - ".join(sorted(probelist))
print ("launching scan {0}".format(scanid) +
" of {0} files on {1}".format(scanid, nb, probes))
start = time.time()
while True:
time.sleep(BEFORE_NEXT_PROGRESS)
(status, fin, tot, suc) = _scan_progress(self.scanid, DEBUG)
if fin is not None:
# write in place
sys.stdout.write("\r\tjobs {0}({1})/{2}".format(fin, suc, tot))
sys.stdout.flush()
if status == IrmaScanStatus.label[IrmaScanStatus.finished]:
break
now = time.time()
if now > (start + timeout):
_scan_cancel(self.scanid, DEBUG)
raise ScannerError("Results Timeout")
return _scan_result(self.scanid, DEBUG)
def _write_result(self, res):
print "Writing results"
for (sha256, results) in res.items():
res_file = os.path.join(self.res_dir, sha256)
with open(res_file, "w") as dst:
dst.write(json.dumps(results))
return
def _write_timeout_result(self, file_list):
print "Timeout results"
for tf in file_list:
with open(tf) as t:
sha256 = hashlib.sha256(t.read()).hexdigest()
res_file = os.path.join(self.res_dir, sha256)
with open(res_file, "w") as dst:
dst.write("timeout")
def scan_dir(self, dirname, nb_files_per_scan):
if not os.path.exists(dirname):
raise ScannerError("dir to scan does not exits")
# get all files in dir
filenames = []
for _, _, filename in os.walk(dirname):
for f in filename:
filenames.append(os.path.join(dirname, f))
random.shuffle(filenames)
for i in xrange(0, len(filenames), nb_files_per_scan):
file_list = filenames[i:i + nb_files_per_scan]
try:
res = self.scan_files(file_list, force=True)
except ScannerError:
self._write_timeout_result(file_list)
res = _scan_result(self.scanid, DEBUG)
self._write_result(res)
return
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGINT, handler)
scanner = Scanner()
scanner.scan_dir("samples", 5)
| apache-2.0 | Python |
|
682b064f29c7a6cfea0c9866da03703822e70cb3 | Add machinery to slurp dhcpd.leases journal into usable format. | ceph/propernoun,ceph/propernoun | propernoun/leases.py | propernoun/leases.py | from . import parser
from . import watch
def gen_leases(path):
"""
Keep track of currently valid leases for ISC dhcpd.
Yields dictionaries that map ``ip`` to information about the
lease. Will block until new information is available.
"""
g = watch.watch_dhcp_leases(path)
for _ in g:
with file(path) as f:
s = f.read()
leases = {}
for l in parser.parse(s):
assert 'ip' in l
leases[l['ip']] = l
yield leases
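# Illustrative consumer (the path is an assumption; dhcpd commonly
# writes /var/lib/dhcp/dhcpd.leases):
#   for leases in gen_leases('/var/lib/dhcp/dhcpd.leases'):
#       print leases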
| mit | Python |
|
ba6c50d0b2fd973c34f2df3779d78df11f671598 | Create mongo_import_keywords.py | ecohealthalliance/EpiTator | mongo_import_keywords.py | mongo_import_keywords.py | """
Load mongo database with keywords for annie annotation.
The keyword_array pickle is packaged with the GRITS classifier.
"""
import sys
import re
import pickle
from pymongo import MongoClient
def load_keyword_array(file_path):
with open(file_path) as f:
keyword_array = pickle.load(f)
return keyword_array
def insert_set(names_set, collection):
"""Insert a list of names into a collection"""
for name in names_set:
collection.insert({'_id': name})
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--mongo_url", default='localhost'
)
parser.add_argument(
"--db_name", default='annotation'
)
args = parser.parse_args()
client = MongoClient(args.mongo_url)
db = client[args.db_name]
category_labels = {
'doid/diseases': 'diseases',
'eha/disease': 'diseases',
'pm/disease': 'diseases',
'hm/disease': 'diseases',
'biocaster/diseases': 'diseases',
'eha/symptom': 'symptoms',
'biocaster/symptoms': 'symptoms',
'doid/has_symptom': 'symptoms',
'pm/symptom': 'symptoms',
'symp/symptoms': 'symptoms',
'wordnet/hosts': 'hosts',
'eha/vector': 'hosts',
'wordnet/pathogens': 'pathogens',
'biocaster/pathogens': 'pathogens',
'pm/mode of transmission': 'modes',
'doid/transmitted_by': 'modes',
'eha/mode of transmission': 'modes'
}
collection_labels = set(category_labels.values())
for collection in collection_labels:
db[collection].drop()
keyword_array = load_keyword_array('current_classifier/keyword_array.p')
for keyword in keyword_array:
if keyword['category'] in category_labels:
collection = category_labels[keyword['category']]
db[collection].insert(
{ '_id': keyword['keyword'],
'source': keyword['category'],
'linked_keywords': keyword['linked_keywords'],
'case_sensitive': keyword['case_sensitive']} )
| apache-2.0 | Python |
|
a21ed2d12b763d93722b6c8e9f6d6ff39d15938c | add utility to fetch satellites and corresponding TLEs | valpo-sats/scheduling-bazaar,valpo-sats/scheduling-bazaar | python-files/get-satellites.py | python-files/get-satellites.py | #!/usr/bin/env python3
"""
Utility to get the satellite information from a SatNOGS DB server.
Stores the satellite list as JSON and records each satellite's latest
TLE in a local SQLite database.
"""
import json
import sqlite3
import requests
import orbit
# override the default 24-hour cache expiry with 6 hours
orbit.tle.requests_cache.configure(expire_after=60*60*6)
URL = 'https://db.satnogs.org/api/satellites'
SATELLITES_JSON = 'satellites.json'
TLE_DB = 'tle.db'
# fetch known satellites
r = requests.get(URL)
satellites = r.json()
with open(SATELLITES_JSON, 'w') as fp:
json.dump(satellites, fp)
conn = sqlite3.connect('file:' + TLE_DB, uri=True,
detect_types=sqlite3.PARSE_DECLTYPES)
cur = conn.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS tle
(norad integer,
epoch timestamp,
line0 text,
line1 text,
line2 text,
unique(norad, epoch)
);''')
for sat in satellites:
norad = sat['norad_cat_id']
print(norad, end='')
try:
tle = orbit.satellite(norad)
except KeyError:
print(' ** not at CelesTrak')
continue
try:
cur.execute(
'INSERT INTO tle VALUES (?,?,?,?,?);',
(norad, tle.epoch(), tle.tle_raw[0], tle.tle_raw[1], tle.tle_raw[2]))
# 'INSERT OR IGNORE INTO ...' will suppress the exception
except sqlite3.IntegrityError:
pass
else:
print(' TLE updated', end='')
finally:
print()
conn.commit()
conn.close()
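# Example follow-up query (illustrative) for the newest TLE of one satellite:
#   SELECT line0, line1, line2 FROM tle
#   WHERE norad = ? ORDER BY epoch DESC LIMIT 1;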
| agpl-3.0 | Python |
|
550873226ec0879a86fea2527b56535a329981b1 | Add upcoming_match.py | the-blue-alliance/the-blue-alliance-android,the-blue-alliance/the-blue-alliance-android,Adam8234/the-blue-alliance-android,nwalters512/the-blue-alliance-android,1fish2/the-blue-alliance-android,nwalters512/the-blue-alliance-android,1fish2/the-blue-alliance-android,phil-lopreiato/the-blue-alliance-android,nwalters512/the-blue-alliance-android,Zero2848/the-blue-alliance-android,Zero2848/the-blue-alliance-android,Adam8234/the-blue-alliance-android,phil-lopreiato/the-blue-alliance-android,1fish2/the-blue-alliance-android,phil-lopreiato/the-blue-alliance-android,the-blue-alliance/the-blue-alliance-android | upcoming_match.py | upcoming_match.py | #! /usr/bin/env python
#
# Tests sending an upcoming_match notification via adb to The Blue Alliance
# Android app.
import test_notification
json_data = {"match_key": "2007cmp_sf1m3",
"event_name": "Championship - Einstein Field",
"team_keys": ["frc173","frc1319","frc1902","frc177","frc987","frc190"],
"scheduled_time":12345,
"predicted_time":122345}
if __name__ == '__main__':
test_notification.upcoming_match_command(json_data)
| mit | Python |
|
1d8cbf94f127571358aee97677a09f7cea3bf3a7 | Add helper functions for to/from bytes/unicode | rh314/p23serialize | p23serialize/util.py | p23serialize/util.py | from . import str_mode
if str_mode == 'bytes':
unicode_type = unicode
else: # str_mode == 'unicode'
unicode_type = str
def recursive_unicode(obj):
if isinstance(obj, bytes):
return obj.decode('latin1')
elif isinstance(obj, list):
return [recursive_unicode(_) for _ in obj]
else:
return obj
def recursive_bytes(obj):
if isinstance(obj, unicode_type):
return obj.encode('latin1')
elif isinstance(obj, list):
return [recursive_bytes(_) for _ in obj]
else:
return obj
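# Round-trip sketch (illustrative):
#   recursive_unicode([b'ab', [b'cd']])  ->  ['ab', ['cd']] (text)
#   recursive_bytes(['ab', ['cd']])      ->  [b'ab', [b'cd']]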
| mit | Python |
|
01f21a16e4bcecccf51a565b51222ab18b79adb4 | Add tests for shell utils. | tonybaloney/st2,pixelrebel/st2,Plexxi/st2,emedvedev/st2,pixelrebel/st2,grengojbo/st2,armab/st2,StackStorm/st2,StackStorm/st2,lakshmi-kannan/st2,Itxaka/st2,emedvedev/st2,punalpatel/st2,alfasin/st2,punalpatel/st2,dennybaa/st2,pinterb/st2,dennybaa/st2,StackStorm/st2,peak6/st2,armab/st2,tonybaloney/st2,Plexxi/st2,Plexxi/st2,dennybaa/st2,armab/st2,jtopjian/st2,emedvedev/st2,jtopjian/st2,Itxaka/st2,nzlosh/st2,Plexxi/st2,peak6/st2,StackStorm/st2,lakshmi-kannan/st2,Itxaka/st2,pixelrebel/st2,pinterb/st2,grengojbo/st2,alfasin/st2,jtopjian/st2,alfasin/st2,peak6/st2,punalpatel/st2,nzlosh/st2,nzlosh/st2,nzlosh/st2,grengojbo/st2,tonybaloney/st2,lakshmi-kannan/st2,pinterb/st2 | st2common/tests/unit/test_util_shell.py | st2common/tests/unit/test_util_shell.py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from st2common.util.shell import quote_unix
from st2common.util.shell import quote_windows
class ShellUtilsTestCase(unittest2.TestCase):
def test_quote_unix(self):
arguments = [
'foo',
'foo bar',
'foo1 bar1',
'"foo"',
'"foo" "bar"',
"'foo bar'"
]
expected_values = [
"""
foo
""",
"""
'foo bar'
""",
"""
'foo1 bar1'
""",
"""
'"foo"'
""",
"""
'"foo" "bar"'
""",
"""
''"'"'foo bar'"'"''
"""
]
for argument, expected_value in zip(arguments, expected_values):
actual_value = quote_unix(value=argument)
expected_value = expected_value.lstrip()
self.assertEqual(actual_value, expected_value.strip())
def test_quote_windows(self):
arguments = [
'foo',
'foo bar',
'foo1 bar1',
'"foo"',
'"foo" "bar"',
"'foo bar'"
]
expected_values = [
"""
foo
""",
"""
"foo bar"
""",
"""
"foo1 bar1"
""",
"""
\\"foo\\"
""",
"""
"\\"foo\\" \\"bar\\""
""",
"""
"'foo bar'"
"""
]
for argument, expected_value in zip(arguments, expected_values):
actual_value = quote_windows(value=argument)
expected_value = expected_value.lstrip()
self.assertEqual(actual_value, expected_value.strip())
| apache-2.0 | Python |
|
f56a902f2e7ca45bb4bf1dfa7dacefd3fefff524 | Create config.sample | Xi-Plus/Xiplus-Wikipedia-Bot,Xi-Plus/Xiplus-Wikipedia-Bot | zhwikt-broken-file-links/config.sample.py | zhwikt-broken-file-links/config.sample.py | # -*- coding: utf-8 -*-
cfg = {
"category": "Category:含有受损文件链接的页面"
}
| mit | Python |
|
076f65b4d67cb44cd48ee5eedc134a83ab01ca4a | Add unit test for md.pair.lj1208 (duplicated from test_pair_lj.py) | joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue | hoomd/md/test-py/test_pair_lj1208.py | hoomd/md/test-py/test_pair_lj1208.py | # -*- coding: iso-8859-1 -*-
# Maintainer: unassigned
from hoomd import *
from hoomd import deprecated
from hoomd import md;
context.initialize()
import unittest
import os
# md.pair.lj1208
class pair_lj1208_tests (unittest.TestCase):
def setUp(self):
print
self.s = deprecated.init.create_random(N=100, phi_p=0.05);
self.nl = md.nlist.cell()
context.current.sorter.set_params(grid=8)
# basic test of creation
def test(self):
lj1208 = md.pair.lj1208(r_cut=3.0, nlist = self.nl);
lj1208.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0, alpha=1.0, r_cut=2.5, r_on=2.0);
lj1208.update_coeffs();
# test missing coefficients
def test_set_missing_epsilon(self):
lj1208 = md.pair.lj1208(r_cut=3.0, nlist = self.nl);
lj1208.pair_coeff.set('A', 'A', sigma=1.0, alpha=1.0);
self.assertRaises(RuntimeError, lj1208.update_coeffs);
# test missing coefficients
def test_set_missing_sigma(self):
lj1208 = md.pair.lj1208(r_cut=3.0, nlist = self.nl);
lj1208.pair_coeff.set('A', 'A', epsilon=1.0, alpha=1.0);
self.assertRaises(RuntimeError, lj1208.update_coeffs);
# test missing coefficients
def test_missing_AA(self):
lj1208 = md.pair.lj1208(r_cut=3.0, nlist = self.nl);
self.assertRaises(RuntimeError, lj1208.update_coeffs);
# test set params
def test_set_params(self):
lj1208 = md.pair.lj1208(r_cut=3.0, nlist = self.nl);
lj1208.set_params(mode="no_shift");
lj1208.set_params(mode="shift");
lj1208.set_params(mode="xplor");
self.assertRaises(RuntimeError, lj1208.set_params, mode="blah");
# test default coefficients
def test_default_coeff(self):
lj1208 = md.pair.lj1208(r_cut=3.0, nlist = self.nl);
# (alpha, r_cut, and r_on are default)
lj1208.pair_coeff.set('A', 'A', sigma=1.0, epsilon=1.0)
lj1208.update_coeffs()
# test max rcut
def test_max_rcut(self):
lj1208 = md.pair.lj1208(r_cut=2.5, nlist = self.nl);
lj1208.pair_coeff.set('A', 'A', sigma=1.0, epsilon=1.0)
self.assertAlmostEqual(2.5, lj1208.get_max_rcut());
lj1208.pair_coeff.set('A', 'A', r_cut = 2.0)
self.assertAlmostEqual(2.0, lj1208.get_max_rcut());
# test specific nlist subscription
def test_nlist_subscribe(self):
lj1208 = md.pair.lj1208(r_cut=2.5, nlist = self.nl);
lj1208.pair_coeff.set('A', 'A', sigma = 1.0, epsilon=1.0)
self.nl.update_rcut();
self.assertAlmostEqual(2.5, self.nl.r_cut.get_pair('A','A'));
lj1208.pair_coeff.set('A', 'A', r_cut = 2.0)
self.nl.update_rcut();
self.assertAlmostEqual(2.0, self.nl.r_cut.get_pair('A','A'));
# test coeff list
def test_coeff_list(self):
lj1208 = md.pair.lj1208(r_cut=3.0, nlist = self.nl);
lj1208.pair_coeff.set(['A', 'B'], ['A', 'C'], epsilon=1.0, sigma=1.0, alpha=1.0, r_cut=2.5, r_on=2.0);
lj1208.update_coeffs();
# test adding types
def test_type_add(self):
lj1208 = md.pair.lj1208(r_cut=3.0, nlist = self.nl);
lj1208.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0);
self.s.particles.types.add('B')
self.assertRaises(RuntimeError, lj1208.update_coeffs);
lj1208.pair_coeff.set('A', 'B', epsilon=1.0, sigma=1.0)
lj1208.pair_coeff.set('B', 'B', epsilon=1.0, sigma=1.0)
lj1208.update_coeffs();
def tearDown(self):
del self.s, self.nl
context.initialize();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
| bsd-3-clause | Python |
|
18d129613c5a576b770a812f18ff05873925fb2c | refactor to a shorter version. | UWIT-IAM/uw-restclients,uw-it-aca/uw-restclients,UWIT-IAM/uw-restclients,jeffFranklin/uw-restclients,jeffFranklin/uw-restclients,uw-it-cte/uw-restclients,jeffFranklin/uw-restclients,uw-it-aca/uw-restclients,uw-it-cte/uw-restclients,uw-it-cte/uw-restclients,UWIT-IAM/uw-restclients | restclients/digitlib/curric.py | restclients/digitlib/curric.py | """
This is the interface for interacting with the UW Libraries Web Service.
"""
import logging
from restclients.digitlib import get_resource
url_prefix = "/php/currics/service.php?code="
sln_prefix = "&sln="
quarter_prefix = "&quarter="
year_prefix = "&year="
logger = logging.getLogger(__name__)
def get_subject_guide(course_code, sln, quarter, year):
"""
:param sln: positive integer
:param year: four digit number
Return the string representing the url of
the Library subject guide page
"""
url = "%s%s%s%s%s%s%s%s" % (url_prefix,
course_code.replace(" ", "%20"),
sln_prefix, sln,
quarter_prefix, quarter,
year_prefix, year)
return _extract_url(get_resource(url))
def _extract_url(data_in_resp):
"""
:param data_in_resp: dict
Return the string representing the url
"""
if data_in_resp is not None:
if "Location" in data_in_resp:
return data_in_resp["Location"]
if "location" in data_in_resp:
return data_in_resp["location"]
logger.warn("Invalid library curric response: %s" % data_in_resp)
return None
| """
This is the interface for interacting with the UW Libraries Web Service.
"""
import logging
from restclients.digitlib import get_resource
url_prefix = "/php/currics/service.php?code="
sln_prefix = "&sln="
quarter_prefix = "&quarter="
year_prefix = "&year="
logger = logging.getLogger(__name__)
def get_subject_guide(course_code, sln, quarter, year):
"""
:param sln: positive integer
:param year: four digit number
Return the string representing the url of
the Library subject guide page
"""
url = "%s%s%s%s%s%s%s%s" % (url_prefix,
course_code.replace(" ", "%20"),
sln_prefix, sln,
quarter_prefix, quarter,
year_prefix, year)
return _extract_url(get_resource(url))
def _extract_url(data_in_resp):
"""
:param data_in_resp: dict
Return the string representing the url
"""
if data_in_resp is not None:
if data_in_resp.get("Location") is not None:
return data_in_resp.get("Location")
if data_in_resp.get("location") is not None:
return data_in_resp.get("location")
logger.warn("Invalid library curric response: %s" % data_in_resp)
return None
| apache-2.0 | Python |
f22f833efb45bdfe0458d045cfd300721185dc84 | Revert "bug fix" | phtagn/sickbeard_mp4_automator,phtagn/sickbeard_mp4_automator,Filechaser/sickbeard_mp4_automator,Filechaser/sickbeard_mp4_automator,Collisionc/sickbeard_mp4_automator,Collisionc/sickbeard_mp4_automator | sabToSickBeardwithConverter.py | sabToSickBeardwithConverter.py | import os
import sys
import autoProcessTV
from readSettings import ReadSettings
from mkvtomp4 import MkvtoMp4
from extensions import valid_input_extensions
settings = ReadSettings(os.path.dirname(sys.argv[0]), "autoProcess.ini")
path = str(sys.argv[1])
for r, d, f in os.walk(path):
for files in f:
if os.path.splitext(files)[1][1:] in valid_input_extensions:
file = os.path.join(r, files)
convert = MkvtoMp4(path, FFMPEG_PATH=settings.ffmpeg, FFPROBE_PATH=settings.ffprobe, delete=settings.delete, output_extension=settings.output_extension, relocate_moov=settings.relocate_moov, iOS=settings.iOS)
"""Contents of sabToSickbeard.py"""
if len(sys.argv) < 2:
print "No folder supplied - is this being called from SABnzbd?"
sys.exit()
elif len(sys.argv) >= 3:
autoProcessTV.processEpisode(sys.argv[1], sys.argv[2])
else:
autoProcessTV.processEpisode(sys.argv[1])
| import os
import sys
import autoProcessTV
from readSettings import ReadSettings
from mkvtomp4 import MkvtoMp4
from extensions import valid_input_extensions
settings = ReadSettings(os.path.dirname(sys.argv[0]), "autoProcess.ini")
path = str(sys.argv[1])
for r, d, f in os.walk(path):
for files in f:
if os.path.splitext(files)[1][1:] in valid_input_extensions:
file = os.path.join(r, files)
convert = MkvtoMp4(file, FFMPEG_PATH=settings.ffmpeg, FFPROBE_PATH=settings.ffprobe, delete=settings.delete, output_extension=settings.output_extension, relocate_moov=settings.relocate_moov, iOS=settings.iOS)
"""Contents of sabToSickbeard.py"""
if len(sys.argv) < 2:
print "No folder supplied - is this being called from SABnzbd?"
sys.exit()
elif len(sys.argv) >= 3:
autoProcessTV.processEpisode(sys.argv[1], sys.argv[2])
else:
autoProcessTV.processEpisode(sys.argv[1])
| mit | Python |
d3a9824ea2f7675e9e0008b5d914f02e63e19d85 | Add new package. (#22639) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/liblbfgs/package.py | var/spack/repos/builtin/packages/liblbfgs/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Liblbfgs(AutotoolsPackage):
"""libLBFGS is a C port of the implementation of Limited-memory
Broyden-Fletcher-Goldfarb-Shanno (L-BFGS) method written by Jorge Nocedal.
The L-BFGS method solves the unconstrainted minimization problem:
minimize F(x), x = (x1, x2, ..., xN),
only if the objective function F(x) and its gradient G(x) are computable."""
homepage = "http://www.chokkan.org/software/liblbfgs/"
url = "https://github.com/downloads/chokkan/liblbfgs/liblbfgs-1.10.tar.gz"
git = "https://github.com/chokkan/liblbfgs.git"
maintainers = ['RemiLacroix-IDRIS']
version('master', branch='master')
version('1.10', sha256='4158ab7402b573e5c69d5f6b03c973047a91e16ca5737d3347e3af9c906868cf')
depends_on('autoconf', type='build', when='@master')
depends_on('automake', type='build', when='@master')
depends_on('libtool', type='build', when='@master')
depends_on('m4', type='build', when='@master')
| lgpl-2.1 | Python |
|
a568663ebcf8b45a801df2cf2185dd3e7c969a79 | Fix fragile command description | RianFuro/vint,Kuniwak/vint,Kuniwak/vint,RianFuro/vint | vint/linting/policy/prohibit_command_rely_on_user.py | vint/linting/policy/prohibit_command_rely_on_user.py | import re
from vint.ast.node_type import NodeType
from vint.linting.level import Level
from vint.linting.policy.abstract_policy import AbstractPolicy
from vint.linting.policy.reference.googlevimscriptstyleguide import get_reference_source
from vint.linting.policy_loader import register_policy
PROHIBITED_COMMAND_PATTERN = re.compile(r'norm(al)?\s|'
r's(u(bstitute)?)?/')
@register_policy
class ProhibitCommandRelyOnUser(AbstractPolicy):
def __init__(self):
super(ProhibitCommandRelyOnUser, self).__init__()
self.description = 'Avoid commands that rely on user settings'
self.reference = get_reference_source('FRAGILE')
self.level = Level.WARNING
def listen_node_types(self):
return [NodeType.EXCMD]
def is_valid(self, node, lint_context):
""" Whether the specified node is valid.
This policy prohibit following commands:
- normal without !
- substitute
"""
command = node['str']
is_command_not_prohibited = PROHIBITED_COMMAND_PATTERN.search(command) is None
return is_command_not_prohibited
| import re
from vint.ast.node_type import NodeType
from vint.linting.level import Level
from vint.linting.policy.abstract_policy import AbstractPolicy
from vint.linting.policy.reference.googlevimscriptstyleguide import get_reference_source
from vint.linting.policy_loader import register_policy
PROHIBITED_COMMAND_PATTERN = re.compile(r'norm(al)?\s|'
r's(u(bstitute)?)?/')
@register_policy
class ProhibitCommandRelyOnUser(AbstractPolicy):
def __init__(self):
super(ProhibitCommandRelyOnUser, self).__init__()
self.description = 'Prefer single quoted strings'
self.reference = get_reference_source('FRAGILE')
self.level = Level.WARNING
def listen_node_types(self):
return [NodeType.EXCMD]
def is_valid(self, node, lint_context):
""" Whether the specified node is valid.
This policy prohibit following commands:
- normal without !
- substitute
"""
command = node['str']
is_command_not_prohibited = PROHIBITED_COMMAND_PATTERN.search(command) is None
return is_command_not_prohibited
| mit | Python |
b55277497559fad19f790ba8821f02ff2ce20c91 | add a minimal smoke test of multi-run | ericdill/bluesky,ericdill/bluesky | bluesky/tests/test_multi_runs.py | bluesky/tests/test_multi_runs.py | from bluesky import preprocessors as bpp
from bluesky import plans as bp
from bluesky import plan_stubs as bps
from bluesky.preprocessors import define_run_wrapper as drw
from ophyd.sim import motor, det
from bluesky.tests.utils import DocCollector
def test_multirun_smoke(RE, hw):
dc = DocCollector()
RE.subscribe(dc.insert)
def interlaced_plan(dets, motor):
to_read = (motor, *dets)
run_ids = list("abc")
for rid in run_ids:
yield from drw(bps.open_run(md={rid: rid}), run_id=rid)
for j in range(5):
for i, rid in enumerate(run_ids):
yield from bps.mov(motor, j + 0.1 * i)
yield from drw(bps.trigger_and_read(to_read), run_id=rid)
for rid in run_ids:
yield from drw(bps.close_run(), run_id=rid)
RE(interlaced_plan([hw.det], hw.motor))
assert len(dc.start) == 3
for start in dc.start:
desc, = dc.descriptor[start["uid"]]
assert len(dc.event[desc["uid"]]) == 5
for stop in dc.stop.values():
for start in dc.start:
assert start["time"] < stop["time"]
| bsd-3-clause | Python |
|
f31b11b2cf1f6924c4373fbfaf4b911102272876 | add base serializer | ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend | cla_backend/apps/complaints/serializers.py | cla_backend/apps/complaints/serializers.py | # -*- coding: utf-8 -*-
from rest_framework import serializers
from .models import Category, Complaint
class CategorySerializerBase(serializers.ModelSerializer):
class Meta:
model = Category
fields = ('id', 'name')
class ComplaintSerializerBase(serializers.ModelSerializer):
category = CategorySerializerBase()
class Meta:
model = Complaint
| mit | Python |
|
9d348cba1c800a4de9a0078ded1e03540256f8a6 | Add backwards-compatible registration.urls, but have it warn pending deprecation. | awakeup/django-registration,hacklabr/django-registration,austinhappel/django-registration,jnns/django-registration,tdruez/django-registration,Troyhy/django-registration,sandipagr/django-registration,spurfly/django-registration,hacklabr/django-registration,euanlau/django-registration,artursmet/django-registration,myimages/django-registration,futurecolors/django-registration,kennydude/djregs,ubernostrum/django-registration,dirtycoder/django-registration,liberation/django-registration,Troyhy/django-registration,spurfly/django-registration,akvo/django-registration,mypebble/djregs,gone/django-registration,euanlau/django-registration,artursmet/django-registration,liberation/django-registration,danielsamuels/django-registration,akvo/django-registration,gone/django-registration,sandipagr/django-registration,futurecolors/django-registration,austinhappel/django-registration | registration/urls.py | registration/urls.py | import warnings
warnings.warn("Using include('registration.urls') is deprecated; use include('registration.backends.default.urls') instead",
PendingDeprecationWarning)
from registration.backends.default.urls import *
| bsd-3-clause | Python |
|
d028db776b92c4d968434a64b2c5d7e02867b32e | Create db_init.py | leokhachatorians/Password-Manager | db_init.py | db_init.py | from sqlalchemy import create_engine, Column, Integer, String, Sequence, update
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///passwords.db')
Base = declarative_base()
Session = sessionmaker(bind=engine)
session = Session()
class Locker(Base):
__tablename__ = 'locker'
id = Column(Integer, Sequence('website_id_seq'), primary_key=True)
url = Column(String(60))
user = Column(String(60))
password = Column(String(60))
def __repr__(self):
return "<Website(url={}, user={}, password={}>".format(url,user,password)
Base.metadata.create_all(engine)
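# Usage sketch (illustrative values):
#   entry = Locker(url='example.com', user='alice', password='hunter2')
#   session.add(entry)
#   session.commit()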
| mit | Python |
|
b40512e834e88f24c20885cddb220188fce11339 | Add verbose names to UserProfile fields. | ugoertz/django-familio,ugoertz/django-familio,ugoertz/django-familio,ugoertz/django-familio | accounts/migrations/0004_auto_20150227_2347.py | accounts/migrations/0004_auto_20150227_2347.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_auto_20150227_2158'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='email_on_comment_answer',
field=models.BooleanField(default=False, verbose_name=b'Email-Benachrichtigung bei Antwort auf meine Kommentare'),
preserve_default=True,
),
migrations.AlterField(
model_name='userprofile',
name='email_on_message',
field=models.BooleanField(default=False, verbose_name=b'Email-Benachrichtigung bei Nachrichten'),
preserve_default=True,
),
]
| bsd-3-clause | Python |
|
18baab37c3f1924b104f4ef86224c1b197ef1dad | add problem 054 | smrmkt/project_euler | problem_054.py | problem_054.py | #!/usr/bin/env python
#-*-coding:utf-8-*-
'''
'''
import timeit
class Poker:
def __init__(self, cards):
self.numbers = {}
self.suits = {}
for card in cards:
n = self._to_number(card[0])
s = card[1]
self.numbers[n] = self.numbers.get(n, 0)+1
self.suits[s] = self.suits.get(s, 0)+1
def hand(self):
n_max, n_min, n_len = max(self.numbers), min(self.numbers), len(self.numbers)
sames = max(self.numbers.values())
s_len = len(self.suits)
n_diff = n_max-n_min
if n_len == 5:
if n_diff > 4:
if s_len == 1: return 5 # flush
else: return 0 # high card
elif s_len > 1: return 4 # straight
elif n_min == 10: return 9 # royal straight flush
else: return 8 # straight flush
elif n_len == 4: return 1 # one pair
elif n_len == 3:
if sames == 3: return 3 # three cards
else: return 2 # two pair
elif n_len == 2:
if sames == 4: return 7 # four cards
else: return 6 # full house
def rank(self):
s = ''
for k,v in sorted(self.numbers.items(), key=lambda (k, v): (v, k), reverse=True):
s += "{0:0>2}".format(str(k))*v
return s
def _to_number(self, s):
s = str(s).replace('T', '10').replace('J', '11')\
.replace('Q', '12').replace('K', '13').replace('A', '14')
return int(s)
def calc():
wins = [0]*3
for line in open('data/problem_054.txt', 'r').readlines():
cards = line.split(' ')
p1 = Poker([card.rstrip() for card in cards[:5]])
p2 = Poker([card.rstrip() for card in cards[5:]])
if p1.hand() > p2.hand(): wins[0] += 1
elif p1.hand() < p2.hand(): wins[2] += 1
else:
if p1.rank() > p2.rank(): wins[0] += 1
elif p1.rank() < p2.rank(): wins[2] += 1
else: wins[1] += 1
return wins
if __name__ == '__main__':
print calc()
    # print timeit.Timer('problem_054.calc()', 'import problem_054').timeit(1)
| mit | Python |
|
e21d6d88f49dbdeb2dfb96e68f174ba587eaa27a | Add pre-deploy version match | urschrei/pyzotero | pre-deploy.py | pre-deploy.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pre-deploy.py
Created by Stephan Hügel on 2017-06-06
A simple check to ensure that the tag version and the library version coincide
Intended to be called before a Wheel is written using "upload"
"""
import os
import sys
import subprocess
import re
import io
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file,
re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def check():
git_version = subprocess.check_output(
["git", "describe", "--abbrev=0", "--tags"]
).strip()
library_version = unicode("v" + find_version("pyzotero/zotero.py")).strip()
return library_version == git_version
if __name__ == '__main__':
    if check():
        sys.exit(0)
    else:
        # versions disagree: abort the deploy with a non-zero status
        sys.exit(1)
| mit | Python |