commit (stringlengths 40-40) | subject (stringlengths 4-1.73k) | repos (stringlengths 5-127k) | old_file (stringlengths 2-751) | new_file (stringlengths 2-751) | new_contents (stringlengths 1-8.98k) | old_contents (stringlengths 0-6.59k) | license (stringclasses 13 values) | lang (stringclasses 23 values) |
---|---|---|---|---|---|---|---|---|
c0f690fe1d43edc4fc5cc4b3aeb40594c1abd674 | Create pollard_rho_algorithm.py | IEEE-NITK/Daedalus,IEEE-NITK/Daedalus,chinmaydd/NITK_IEEE_SaS,IEEE-NITK/Daedalus | daedalus/attacks/pollard_rho_algorithm.py | daedalus/attacks/pollard_rho_algorithm.py | #pollard rho algorithm of integer factorization
def gcd(a,b):
    if a == 0:
return b
return gcd(b%a,a)
def pollard_rho(number,x,y):
d = 1
    while d == 1:
x = (x**2+1)%number
for i in range(0,2,1):
y = (y**2+1)%number
if x>y:
z = x-y
else:
z=y-x
d = gcd(z,number)
return d
x=2
y=2
number = 84923983
factor = pollard_rho(number,x,y)
while factor == number or factor == 1:
    x = x+1
    y = y+1
    factor = pollard_rho(number,x,y)
factor2 = int(number/factor)
print(factor,factor2)
| mit | Python |
|
c5dfcffdf743e2c26b8dba6e3be8aee7d7aaa608 | Test `write_*` and `join_*` on bytes | jwodder/linesep | test/test_join_bytes.py | test/test_join_bytes.py | import re
import linesep
try:
from StringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
# Based on <https://pytest.org/latest/example/parametrize.html#a-quick-port-of-testscenarios>
def pytest_generate_tests(metafunc):
idlist = []
argvalues = []
for scenario in metafunc.module.scenarios:
idlist.append(scenario[0])
argvalues.append([scenario[1][argname] for argname in metafunc.fixturenames])
metafunc.parametrize(metafunc.fixturenames, argvalues, ids=idlist, scope="module")
scenarios = [
('empty', {
"entries": [],
"sep": b'\n',
"preceded": b'',
"terminated": b'',
"separated": b'',
}),
('empty_str', {
"entries": [b''],
"sep": b'\n',
"preceded": b'\n',
"terminated": b'\n',
"separated": b'',
}),
]
def test_join_preceded(entries, sep, preceded):
assert linesep.join_preceded(entries, sep) == preceded
def test_join_terminated(entries, sep, terminated):
assert linesep.join_terminated(entries, sep) == terminated
def test_join_separated(entries, sep, separated):
assert linesep.join_separated(entries, sep) == separated
def test_write_preceded(entries, sep, preceded):
fp = BytesIO()
linesep.write_preceded(fp, entries, sep)
assert fp.getvalue() == preceded
def test_write_terminated(entries, sep, terminated):
fp = BytesIO()
linesep.write_terminated(fp, entries, sep)
assert fp.getvalue() == terminated
def test_write_separated(entries, sep, separated):
fp = BytesIO()
linesep.write_separated(fp, entries, sep)
assert fp.getvalue() == separated
| mit | Python |
|
a30cd68e77242df4efadc75c4390dd8a3ce68612 | Add data migration for Audit's empty status | AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core | src/ggrc/migrations/versions/20170103101308_42b22b9ca859__fix_audit_empty_status.py | src/ggrc/migrations/versions/20170103101308_42b22b9ca859__fix_audit_empty_status.py | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Fix audit empty status
Create Date: 2016-12-22 13:53:24.497701
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '42b22b9ca859'
down_revision = '4fcaef05479f'
VALID_STATES = (
u'Planned', u'In Progress', u'Manager Review',
u'Ready for External Review', u'Completed'
)
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute("UPDATE audits SET status='Planned' WHERE status=0")
op.alter_column('audits', 'status', nullable=True, type_=sa.String(250),
existing_type=sa.Enum(*VALID_STATES))
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.alter_column('audits', 'status', nullable=False,
type_=sa.Enum(*VALID_STATES), existing_type=sa.String)
| apache-2.0 | Python |
|
52eb461f1679f134aed25c221cfcc63abd8d3768 | add test | PyBossa/pybossa,geotagx/pybossa,Scifabric/pybossa,PyBossa/pybossa,Scifabric/pybossa,geotagx/pybossa | test/test_importers/test_youtube_importer.py | test/test_importers/test_youtube_importer.py | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2016 SciFabric LTD.
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from mock import patch, Mock
from pybossa.importers.youtubeapi import BulkTaskYoutubeImport
def create_importer_with_form_data(**form_data):
with patch('pybossa.importers.youtubeapi.build'):
form_data['youtube_api_server_key'] = 'apikey'
importer = BulkTaskYoutubeImport(**form_data)
importer.client.api = Mock()
return importer
class TestBulkYoutubeImport(object):
form_data = {
        'playlist_url': 'https://www.youtube.com/playlist?list=playlistid',
'youtube_api_server_key': 'apikey'
}
def test_count_tasks_returns_0_if_no_files_to_import(self):
form_data = {
'playlist_url': '',
'youtube_api_server_key': 'apikey'
}
number_of_tasks = BulkTaskYoutubeImport(**form_data).count_tasks()
assert number_of_tasks == 0, number_of_tasks
| agpl-3.0 | Python |
|
1e9a64fe6324d8b4ac96daafa7427e9f55e6dd38 | add Geom.decompose tests | chandler14362/panda3d,chandler14362/panda3d,chandler14362/panda3d,chandler14362/panda3d,chandler14362/panda3d,chandler14362/panda3d,chandler14362/panda3d,chandler14362/panda3d,chandler14362/panda3d,chandler14362/panda3d | tests/gobj/test_geom.py | tests/gobj/test_geom.py | from panda3d import core
empty_format = core.GeomVertexFormat.get_empty()
def test_geom_decompose_in_place():
vertex_data = core.GeomVertexData("", empty_format, core.GeomEnums.UH_static)
prim = core.GeomTristrips(core.GeomEnums.UH_static)
prim.add_vertex(0)
prim.add_vertex(1)
prim.add_vertex(2)
prim.add_vertex(3)
prim.close_primitive()
geom = core.Geom(vertex_data)
geom.add_primitive(prim)
geom.decompose_in_place()
prim = geom.get_primitive(0)
assert tuple(prim.get_vertex_list()) == (0, 1, 2, 2, 1, 3)
def test_geom_decompose():
vertex_data = core.GeomVertexData("", empty_format, core.GeomEnums.UH_static)
prim = core.GeomTristrips(core.GeomEnums.UH_static)
prim.add_vertex(0)
prim.add_vertex(1)
prim.add_vertex(2)
prim.add_vertex(3)
prim.close_primitive()
geom = core.Geom(vertex_data)
geom.add_primitive(prim)
new_geom = geom.decompose()
new_prim = new_geom.get_primitive(0)
assert tuple(new_prim.get_vertex_list()) == (0, 1, 2, 2, 1, 3)
# Old primitive should still be unchanged
assert prim == geom.get_primitive(0)
| bsd-3-clause | Python |
|
66b5a1089ed0ce2e615f889f35b5e39db91950ae | Fix serving uploaded files during development. | SoLoHiC/mezzanine,industrydive/mezzanine,jjz/mezzanine,Kniyl/mezzanine,stephenmcd/mezzanine,adrian-the-git/mezzanine,biomassives/mezzanine,emile2016/mezzanine,dekomote/mezzanine-modeltranslation-backport,Cicero-Zhao/mezzanine,nikolas/mezzanine,industrydive/mezzanine,fusionbox/mezzanine,saintbird/mezzanine,douglaskastle/mezzanine,PegasusWang/mezzanine,damnfine/mezzanine,dustinrb/mezzanine,douglaskastle/mezzanine,gradel/mezzanine,viaregio/mezzanine,vladir/mezzanine,ryneeverett/mezzanine,webounty/mezzanine,nikolas/mezzanine,theclanks/mezzanine,batpad/mezzanine,Skytorn86/mezzanine,cccs-web/mezzanine,agepoly/mezzanine,ryneeverett/mezzanine,AlexHill/mezzanine,readevalprint/mezzanine,wyzex/mezzanine,tuxinhang1989/mezzanine,frankier/mezzanine,jjz/mezzanine,Cajoline/mezzanine,Kniyl/mezzanine,geodesign/mezzanine,dustinrb/mezzanine,wyzex/mezzanine,biomassives/mezzanine,frankier/mezzanine,dsanders11/mezzanine,sjdines/mezzanine,wyzex/mezzanine,molokov/mezzanine,PegasusWang/mezzanine,molokov/mezzanine,frankchin/mezzanine,nikolas/mezzanine,gradel/mezzanine,adrian-the-git/mezzanine,sjuxax/mezzanine,stephenmcd/mezzanine,jerivas/mezzanine,damnfine/mezzanine,wbtuomela/mezzanine,christianwgd/mezzanine,sjdines/mezzanine,vladir/mezzanine,dovydas/mezzanine,viaregio/mezzanine,dustinrb/mezzanine,frankier/mezzanine,readevalprint/mezzanine,viaregio/mezzanine,dekomote/mezzanine-modeltranslation-backport,jjz/mezzanine,jerivas/mezzanine,mush42/mezzanine,spookylukey/mezzanine,Skytorn86/mezzanine,joshcartme/mezzanine,promil23/mezzanine,dsanders11/mezzanine,spookylukey/mezzanine,mush42/mezzanine,jerivas/mezzanine,spookylukey/mezzanine,fusionbox/mezzanine,ZeroXn/mezzanine,joshcartme/mezzanine,geodesign/mezzanine,wbtuomela/mezzanine,SoLoHiC/mezzanine,sjuxax/mezzanine,tuxinhang1989/mezzanine,eino-makitalo/mezzanine,industrydive/mezzanine,douglaskastle/mezzanine,saintbird/mezzanine,dovydas/mezzanine,stephenmcd/mezzanine,geodesign/mezzanine,PegasusWang/mezzanine,agepoly/mezzanine,sjdines/mezzanine,molokov/mezzanine,christianwgd/mezzanine,theclanks/mezzanine,adrian-the-git/mezzanine,dekomote/mezzanine-modeltranslation-backport,theclanks/mezzanine,webounty/mezzanine,promil23/mezzanine,ZeroXn/mezzanine,Cajoline/mezzanine,emile2016/mezzanine,eino-makitalo/mezzanine,emile2016/mezzanine,damnfine/mezzanine,readevalprint/mezzanine,eino-makitalo/mezzanine,AlexHill/mezzanine,ryneeverett/mezzanine,Kniyl/mezzanine,mush42/mezzanine,webounty/mezzanine,frankchin/mezzanine,wbtuomela/mezzanine,cccs-web/mezzanine,Cicero-Zhao/mezzanine,Cajoline/mezzanine,agepoly/mezzanine,batpad/mezzanine,biomassives/mezzanine,SoLoHiC/mezzanine,gradel/mezzanine,joshcartme/mezzanine,tuxinhang1989/mezzanine,sjuxax/mezzanine,frankchin/mezzanine,dovydas/mezzanine,christianwgd/mezzanine,vladir/mezzanine,ZeroXn/mezzanine,promil23/mezzanine,dsanders11/mezzanine,saintbird/mezzanine,Skytorn86/mezzanine | mezzanine/core/management/commands/runserver.py | mezzanine/core/management/commands/runserver.py |
import os
from django.conf import settings
from django.contrib.staticfiles.management.commands import runserver
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.views.static import serve
class MezzStaticFilesHandler(StaticFilesHandler):
def get_response(self, request):
response = super(MezzStaticFilesHandler, self).get_response(request)
handled = (settings.STATIC_URL, settings.MEDIA_URL)
if response.status_code == 404 and request.path.startswith(handled):
path = self.file_path(request.path).replace(os.sep, "/")
return serve(request, path, document_root=settings.STATIC_ROOT)
class Command(runserver.Command):
"""
Overrides runserver so that we can serve uploaded files
during development, and not require every single developer on
every single one of their projects to have to set up multiple
web server aliases for serving static content.
See https://code.djangoproject.com/ticket/15199
For ease, we also serve any static files that have been stored
under the project's ``STATIC_ROOT``.
"""
def get_handler(self, *args, **options):
handler = super(Command, self).get_handler(*args, **options)
if settings.DEBUG or options["insecure_serving"]:
handler = MezzStaticFilesHandler(handler)
return handler
|
import os
from django.conf import settings
from django.contrib.staticfiles.management.commands import runserver
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.http import Http404
from django.views.static import serve
class MezzStaticFilesHandler(StaticFilesHandler):
def get_response(self, request):
try:
return super(MezzStaticFilesHandler, self).get_response(request)
except Http404:
handled = (settings.STATIC_URL, settings.MEDIA_URL)
if request.path.startswith(handled):
path = self.file_path(request.path).replace(os.sep, "/")
return serve(request, path, document_root=settings.STATIC_ROOT)
raise
class Command(runserver.Command):
"""
Overrides runserver so that we can serve uploaded files
during development, and not require every single developer on
every single one of their projects to have to set up multiple
web server aliases for serving static content.
See https://code.djangoproject.com/ticket/15199
For ease, we also serve any static files that have been stored
under the project's ``STATIC_ROOT``.
"""
def get_handler(self, *args, **options):
handler = super(Command, self).get_handler(*args, **options)
if settings.DEBUG or options["insecure_serving"]:
handler = MezzStaticFilesHandler(handler)
return handler
| bsd-2-clause | Python |
93a7f4cb914de537e477a6c6bd45e0aa28ce2e4f | update model fields | openego/oeplatform,openego/oeplatform,openego/oeplatform,openego/oeplatform | modelview/migrations/0053_auto_20200408_1442.py | modelview/migrations/0053_auto_20200408_1442.py | # Generated by Django 3.0 on 2020-04-08 12:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modelview', '0052_auto_20200408_1308'),
]
operations = [
migrations.AddField(
model_name='energyframework',
name='data_postprocessing',
field=models.BooleanField(default=False, help_text='Which output format(s) can the framework apply? Please list!', verbose_name='data postprocessing'),
),
migrations.AlterField(
model_name='energyframework',
name='agricultural_demand',
field=models.BooleanField(default=False, help_text='Which agricultural demands are already modelled with the framework?', verbose_name='Agricultural demand'),
),
migrations.AlterField(
model_name='energyframework',
name='gm_singleNode',
field=models.BooleanField(default=False, verbose_name='Single-node model'),
),
]
| agpl-3.0 | Python |
|
bc52778a5ed9ee44f40400cc2693f86318434527 | Add missing file | MiltosD/CEF-ELRC,zeehio/META-SHARE,zeehio/META-SHARE,MiltosD/CEFELRC,MiltosD/CEFELRC,MiltosD/CEF-ELRC,JuliBakagianni/META-SHARE,zeehio/META-SHARE,MiltosD/CEFELRC,JuliBakagianni/META-SHARE,zeehio/META-SHARE,JuliBakagianni/CEF-ELRC,MiltosD/CEFELRC,MiltosD/CEF-ELRC,zeehio/META-SHARE,JuliBakagianni/CEF-ELRC,zeehio/META-SHARE,JuliBakagianni/META-SHARE,MiltosD/CEF-ELRC,MiltosD/CEFELRC,JuliBakagianni/CEF-ELRC,JuliBakagianni/CEF-ELRC,MiltosD/CEF-ELRC,JuliBakagianni/META-SHARE,JuliBakagianni/META-SHARE,MiltosD/CEFELRC,JuliBakagianni/META-SHARE,zeehio/META-SHARE,JuliBakagianni/META-SHARE,MiltosD/CEFELRC,JuliBakagianni/CEF-ELRC,MiltosD/CEF-ELRC,JuliBakagianni/CEF-ELRC,JuliBakagianni/CEF-ELRC,MiltosD/CEF-ELRC | metashare/repository/editor/lang.py | metashare/repository/editor/lang.py |
from xml.etree.ElementTree import XML
import os
import logging
from metashare.settings import LOG_LEVEL, LOG_HANDLER
import pycountry
# Setup logging support.
logging.basicConfig(level=LOG_LEVEL)
LOGGER = logging.getLogger('metashare.xml_utils')
LOGGER.addHandler(LOG_HANDLER)
def read_langs(filename):
if not os.path.isfile(filename):
LOGGER.error('read_langs: {0} not found'.format(filename))
return None
file_hnd = os.open(filename, os.O_RDONLY)
data = os.read(file_hnd, 10000)
print data
xml_langs = XML(data)
return xml_langs
def read_languages():
langs = pycountry.languages
lang_list = []
for index in range(len(langs.objects)):
lang = langs.objects[index]
if hasattr(lang, 'alpha2'):
lang_item = (index, lang.alpha2, lang.name)
lang_list.append(lang_item)
else:
#lang_item = (index, '', lang.name)
pass
return lang_list
def read_lang_alpha2():
langs = pycountry.languages
lang_list = []
for index in range(len(langs.objects)):
lang = langs.objects[index]
if hasattr(lang, 'alpha2'):
lang_item = (lang.alpha2)
lang_list.append(lang_item)
return lang_list
def get_lang_list(xml_tree):
lang_el_list = xml_tree.findall('lang')
lang_list = []
for el in lang_el_list:
lang_id = el.find('id').text
lang_name = el.find('name').text
lang_list.append((lang_id, lang_name))
return lang_list
| bsd-3-clause | Python |
|
e580995de78c3658951b119577a0f7c335352e13 | Create feature_class_info_to_csv.py | jamaps/arcpy_scripts | feature_class_info_to_csv.py | feature_class_info_to_csv.py | import arcpy
import os
import time
import csv
begin_time = time.clock()
arcpy.env.workspace = ws = r"\\192-86\DFSRoot\Data\allenj\Desktop\gdb\test.gdb"
mrcsv = r"\\192-86\DFSRoot\Data\allenj\Desktop\gdb\write.csv"
ls = [1,2,3]
writer = csv.writer(open(mrcsv, 'a'))
writer.writerow(["Feature","Feature_Count","Extents"])
c = 0
for fds in arcpy.ListDatasets('','feature') + ['']:
for fc in arcpy.ListFeatureClasses('','',fds):
print fc
x = fc
y = arcpy.GetCount_management(fc)
z = "meow"
row = [(x),(y),(z)]
writer.writerow(row)
c = c + 1
print "Feature Class Count:"
print c
print "--------------"
end_time = time.clock()
print "Elapsed Time:"
print (end_time - begin_time)
print "Seconds"
print "--------------"
print "Goodbye"
| mit | Python |
|
ae477223f296de9ee6b81a15d56d7140a5bf26ac | Create __init__.py | gauravssnl/PyPastebin-Symbian | requests/packages/urllib3/contrib/packages/ssl_match_hostname/__init__.py | requests/packages/urllib3/contrib/packages/ssl_match_hostname/__init__.py | apache-2.0 | Python |
||
2ef9fce02be94f8c4e9b5c52ca04a05cce1b5ede | Allow to start server as a module | LogicalDash/LiSE,LogicalDash/LiSE | LiSE/LiSE/server/__main__.py | LiSE/LiSE/server/__main__.py | import cherrypy
from argparse import ArgumentParser
from . import LiSEHandleWebService
parser = ArgumentParser()
parser.add_argument('world', action='store')
parser.add_argument('-c', '--code', action='store')
args = parser.parse_args()
conf = {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.sessions.on': True,
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'application/json')],
'tools.encode.on': True,
'tools.encode.encoding': 'utf-8'
}
}
cherrypy.quickstart(LiSEHandleWebService(args.world, args.code), '/', conf)
| agpl-3.0 | Python |
|
a0124a990b4afe0cd5fd3971bae1e43f417bc1b2 | Add management command to find domains impacted by 502 bug | puttarajubr/commcare-hq,SEL-Columbia/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,SEL-Columbia/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,SEL-Columbia/commcare-hq | corehq/apps/domain/management/commands/find_secure_submission_image_domains.py | corehq/apps/domain/management/commands/find_secure_submission_image_domains.py | from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
import csv
class Command(BaseCommand):
help = 'Find domains with secure submissions and image questions'
def handle(self, *args, **options):
with open('domain_results.csv', 'wb+') as csvfile:
csv_writer = csv.writer(
csvfile,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL
)
csv_writer.writerow(['domain', 'app', 'domain_creator'])
for domain in Domain.get_all(include_docs=True):
if domain.secure_submissions:
for app in domain.full_applications(include_builds=False):
for module in app.modules:
for form in module.forms:
for question in form.get_questions(app.langs):
if question['type'] == 'Image':
csv_writer.writerow([
domain.name,
app.name,
domain.creating_user
])
| bsd-3-clause | Python |
|
361a075efed0ca4a9877f7268b2e91725ef8be65 | Add encoder.py | danielbreves/auto_encoder | encoder.py | encoder.py | """
Source: https://trac.ffmpeg.org/wiki/Encode/H.264
"""
import os
import sys
import subprocess
FFMPEG_PATH = '/usr/local/bin/ffmpeg'
VIDEO_CODEC = 'h264'
VIDEO_ENCODER = 'h264_omx'
AUDIO_CODEC = 'aac'
AUDIO_ENCODER = 'aac'
BITRATE = '2500k'
SRC_DIR = os.path.expanduser('~/Desktop')
DEST_DIR = os.path.expanduser('~/Desktop/Media')
INPUT_EXTS = ['.mkv']
OUTPUT_EXT = '.mp4'
def stream_codec(stream, filename):
"""return the codec name for a stream"""
return subprocess.check_output([
'ffprobe',
'-v',
'error',
'-select_streams',
stream,
'-show_entries',
'stream=codec_name',
'-of',
'default=nokey=1:noprint_wrappers=1',
filename
    ]).decode().strip()  # check_output returns bytes; compare as text
def walk_src_media(callback):
"""get a sorted list of files that have a valid input extension"""
for root, _dirs, files in os.walk(os.path.expanduser(SRC_DIR)):
for filename in files:
if os.path.splitext(filename)[1] in INPUT_EXTS:
callback(root, filename)
def encode(root, filename, opts):
"""encode file using ffmpeg"""
input_filename = os.path.join(root, filename)
path_to_create = os.path.dirname(os.path.relpath(input_filename, SRC_DIR))
path_to_create = os.path.join(DEST_DIR, path_to_create)
output_filename = os.path.join(path_to_create, os.path.splitext(filename)[0] + OUTPUT_EXT)
if os.path.isfile(output_filename):
return
command = [FFMPEG_PATH, '-i', os.path.expanduser(input_filename)]
v_encoder = 'copy' if stream_codec('v:0', input_filename) == VIDEO_CODEC else VIDEO_ENCODER
command += ['-c:v', v_encoder]
a_encoder = 'copy' if stream_codec('a:0', input_filename) == AUDIO_CODEC else AUDIO_ENCODER
command += ['-c:a', a_encoder]
command += ['-b:v', BITRATE]
if '--debug' in opts:
command += ['-to', '15']
command += [os.path.expanduser(output_filename)]
if '--dry' in opts:
print(' '.join(command), '\n')
else:
os.makedirs(path_to_create, exist_ok=True)
subprocess.run(command)
def process(args):
"""encode media from the source directory into the destination directory"""
walk_src_media(lambda root, filename: encode(root, filename, args))
if __name__ == "__main__":
process(sys.argv[1:])
| mit | Python |
|
00c7e9a020b60b9bfbc2c8c8e1b3e40869f9a73e | Add unit tests for agent membership | yamt/networking-midonet,yamt/networking-midonet | midonet/neutron/tests/unit/test_extension_agent_membership.py | midonet/neutron/tests/unit/test_extension_agent_membership.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2015 Midokura SARL.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import uuid
import webob.exc
from midonet.neutron.extensions import agent_membership as ext_am
from midonet.neutron.tests.unit import test_midonet_plugin as test_mn
from neutron.openstack.common import uuidutils
from neutron.tests.unit import test_extensions as test_ex
FAKE_AGENT_ID = uuidutils.generate_uuid()
FAKE_IP = '10.0.0.3'
class AgentMembershipExtensionManager(object):
def get_resources(self):
return ext_am.Agent_membership.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class AgentMembershipTestCase(test_mn.MidonetPluginV2TestCase):
def setUp(self, plugin=None, ext_mgr=None):
ext_mgr = AgentMembershipExtensionManager()
super(AgentMembershipTestCase, self).setUp()
self.ext_api = test_ex.setup_extensions_middleware(ext_mgr)
def _create_agent_membership(self, agent_id, ip_address):
data = {'agent_membership': {'id': agent_id,
'tenant_id': str(uuid.uuid4()),
'ip_address': ip_address}}
am_req = self.new_create_request('agent_memberships', data, self.fmt)
return am_req.get_response(self.ext_api)
def _make_agent_membership(self, agent_id, ip_address):
res = self._create_agent_membership(agent_id, ip_address)
if res.status_int >= webob.exc.HTTPBadRequest.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(self.fmt, res)
@contextlib.contextmanager
def agent_membership(self, agent_id=FAKE_AGENT_ID, ip_address=FAKE_IP):
am = self._make_agent_membership(agent_id, ip_address)
yield am
def test_create_agent_membership(self):
expected = {'id': FAKE_AGENT_ID, 'ip_address': FAKE_IP}
with self.agent_membership() as am:
for k, v in expected.iteritems():
self.assertEqual(am['agent_membership'][k], v)
def test_delete_agent_membership(self):
with self.agent_membership() as am:
req = self.new_delete_request('agent_memberships',
am['agent_membership']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_show_agent_membership(self):
expected = {'id': FAKE_AGENT_ID, 'ip_address': FAKE_IP}
with self.agent_membership() as am:
req = self.new_show_request('agent_memberships',
am['agent_membership']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
for k, v in expected.iteritems():
self.assertEqual(res['agent_membership'][k], v)
def test_list_agent_memberships(self):
with self.agent_membership():
with self.agent_membership(uuidutils.generate_uuid(), '10.0.0.4'):
req = self.new_list_request('agent_memberships')
res = self.deserialize(
self.fmt, req.get_response(self.ext_api))
self.assertEqual(len(res['agent_memberships']), 2)
| apache-2.0 | Python |
|
3344c49bf36a4bd74fb9db079297b98a2e0ee46f | Implement cht.sh release script | chubin/cheat.sh,chubin/cheat.sh,chubin/cheat.sh,chubin/cheat.sh | bin/release.py | bin/release.py | #!/usr/bin/env python
from __future__ import print_function
from datetime import datetime
import os
from os import path
import re
import shutil
import subprocess
from subprocess import Popen
import sys
SHARE_DIR = path.join(path.dirname(__file__), "../share/")
def run(args):
return Popen(args, stdout=sys.stdout, stderr=sys.stderr).wait()
status = subprocess.check_output(["git", "status", "--porcelain"])
if len(status) > 0:
print("Unclean working tree. Commit or stash changes first.", file=sys.stderr)
sys.exit(1)
timestamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S +0000")
cht_curr = path.join(SHARE_DIR, "cht.sh.txt")
cht_new = path.join(SHARE_DIR, "cht.sh.txt.new")
re_version = re.compile(r"^__CHTSH_VERSION=(.*)$")
re_timestamp = re.compile(r"^__CHTSH_DATETIME=.*$")
with open(cht_curr, "rt") as fin:
with open(cht_new, "wt") as fout:
for line in fin:
match = re_version.match(line)
if match:
version = int(match.group(1)) + 1
fout.write("__CHTSH_VERSION=%s\n" % version)
continue
match = re_timestamp.match(line)
if match:
fout.write('__CHTSH_DATETIME="%s"\n' % timestamp)
continue
fout.write(line)
shutil.copymode(cht_curr, cht_new)
os.remove(cht_curr)
os.rename(cht_new, cht_curr)
message = "cht: v%s" % version
run(["git", "add", cht_curr])
run(["git", "commit", "-m", message])
run(["git", "tag", "cht@%s" % version, "-m", message])
| mit | Python |
|
278cd37ada508701896c2669a215365785f5a261 | Add eval dispatch (copied from compyle) | nickdrozd/ecio-lisp,nickdrozd/ecio-lisp | evalExp.py | evalExp.py | from keywords import *
from reg import *
from parse import parse
def evalExp():
expr = parse(fetch(EXPR)) # make dedicated fetch_expr()?
# expr = transformMacros(expr)
evalFunc = getEvalFunc(expr)
# evalFunc()
# reassign next step
def getEvalFunc(expr):
if isVar(expr):
return compVar
if isNum(expr):
return compNum
# else
tag, *_ = expr
keyword_groups = {
define_keys : evalDef,
ass_keys : evalAss,
lambda_keys : evalLambda,
if_keys : evalIf,
begin_keys : evalBegin,
quote_keys : evalQuote
}
for group in keyword_groups:
if tag in group:
return keyword_groups[group]
# default
return evalApp
def isNum(exp):
try:
return type(int(exp)) == int
except:
return False
def isVar(exp):
return type(exp) == str
| mit | Python |
|
5de57ff00037d6f9a04307e60685f47f368cb29f | add example script to test calling the ffi | leethargo/scipcffi | example.py | example.py | import scipcffi.ffi as s
scip_ptr = s.ffi.new('SCIP**')
rc = s.lib.SCIPcreate(scip_ptr)
assert rc == s.lib.SCIP_OKAY
scip = scip_ptr[0]
| mit | Python |
|
3a2a311c3c3f8a6bc2f027bfa247d912122e512e | Add test for gaussian | pfnet/chainer,ronekko/chainer,cemoody/chainer,wkentaro/chainer,chainer/chainer,aonotas/chainer,chainer/chainer,truongdq/chainer,keisuke-umezawa/chainer,ktnyt/chainer,laysakura/chainer,minhpqn/chainer,ktnyt/chainer,tscohen/chainer,okuta/chainer,chainer/chainer,t-abe/chainer,muupan/chainer,niboshi/chainer,ytoyama/yans_chainer_hackathon,benob/chainer,jnishi/chainer,jnishi/chainer,okuta/chainer,hvy/chainer,wkentaro/chainer,tigerneil/chainer,AlpacaDB/chainer,chainer/chainer,AlpacaDB/chainer,cupy/cupy,keisuke-umezawa/chainer,kiyukuta/chainer,tkerola/chainer,okuta/chainer,niboshi/chainer,hvy/chainer,1986ks/chainer,benob/chainer,hidenori-t/chainer,keisuke-umezawa/chainer,jnishi/chainer,kikusu/chainer,cupy/cupy,ktnyt/chainer,yanweifu/chainer,kashif/chainer,cupy/cupy,masia02/chainer,niboshi/chainer,t-abe/chainer,jnishi/chainer,sinhrks/chainer,wkentaro/chainer,hvy/chainer,wkentaro/chainer,kikusu/chainer,cupy/cupy,okuta/chainer,delta2323/chainer,truongdq/chainer,hvy/chainer,sinhrks/chainer,woodshop/complex-chainer,ktnyt/chainer,niboshi/chainer,ikasumi/chainer,Kaisuke5/chainer,keisuke-umezawa/chainer,ysekky/chainer,sou81821/chainer,muupan/chainer,kuwa32/chainer,anaruse/chainer,umitanuki/chainer,wavelets/chainer,rezoo/chainer,woodshop/chainer | tests/functions_tests/test_gaussian.py | tests/functions_tests/test_gaussian.py | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import condition
if cuda.available:
cuda.init()
class TestGaussian(unittest.TestCase):
def setUp(self):
self.m = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
self.v = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
def check_backward(self, m_data, v_data, y_grad):
m = chainer.Variable(m_data)
v = chainer.Variable(v_data)
y = functions.gaussian(m, v)
self.assertEqual(y.data.dtype, numpy.float32)
y.grad = y_grad
y.backward()
func = y.creator
f = lambda: func.forward((m.data, v.data))
gm, gv = gradient_check.numerical_grad(f, (m.data, v.data), (y.grad,))
gradient_check.assert_allclose(gm, m.grad)
gradient_check.assert_allclose(gv, v.grad)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.m, self.v, self.gy)
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.m),
cuda.to_gpu(self.v),
cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| mit | Python |
|
945c2c620634c2c816aa446d91773adb75cb87e3 | Add airmass tool | joshwalawender/KeckUtilities | airmass.py | airmass.py | #!/usr/bin/env python
import argparse
import numpy as np
from astropy import units as u
##-------------------------------------------------------------------------
## Parse Command Line Arguments
##-------------------------------------------------------------------------
## create a parser object for understanding command-line arguments
p = argparse.ArgumentParser(description=
'''Convert an elevation above the horizon to an airmass using the Pickering
(2002) formula:
1 / sin(h + 244/(165 + 47*h^1.1))
and estimate the extinction.
''')
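# Worked example (approximate, for illustration): EL = 30.0 degrees gives
# airmass = 1/sin(30 + 244/(165 + 47*30**1.1)) ~= 1.99, so with the default
# 0.13 mag/airmass the estimated extinction is ~= 0.26 mag.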
## add arguments
p.add_argument('elevation', type=float,
help="Elevation (in degrees) above the horizon")
## add options
p.add_argument("--extinction", dest="extinction", type=float,
default=0.13, help="Extinction in magnitudes per airmass.")
args = p.parse_args()
##-------------------------------------------------------------------------
## Main Program
##-------------------------------------------------------------------------
def main():
h = args.elevation * u.degree # elevation of target above horizon
magnitudes_per_airmass = args.extinction * u.mag
# Pickering 2002 Airmass
value = h.value + 244/(165.0 + 47.0*h.value**1.1)
airmass = 1.0 / np.sin(value*u.degree)
print(f'for EL = {h:.1f}')
print(f'airmass = {airmass:.2f}')
extinction = airmass * magnitudes_per_airmass
print(f'extinction = {extinction:.2f}')
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
|
12f2198a53d474bb69a6b9118fca0638dcce8aac | add data migration | masschallenge/django-accelerator,masschallenge/django-accelerator | accelerator/migrations/0088_remove_community_participation_read_more_prompts.py | accelerator/migrations/0088_remove_community_participation_read_more_prompts.py | # Generated by Django 2.2.24 on 2022-03-07 12:10
import re
from django.db import migrations
def remove_community_participation_read_more_prompts(apps, schema_editor):
"""
Target read more prompts:
For more information, read about Judging at Mass Challenge.
Read more about Mentoring at Mass Challenge.
Read more about being an Entrepreneur at Mass Challenge.
Read more about Office Hours at Mass Challenge.
Read more about Speaking at Mass Challenge.
"""
CommunityParticipation = apps.get_model(
'accelerator', 'CommunityParticipation')
for participation in CommunityParticipation.objects.all():
# remove prompts starting with "Read more about"
participation.description = re.sub(
r' Read more about[a-zA-Z ]*.$', '', participation.description)
# remove prompts starting with "For more information"
participation.description = re.sub(
r' For more information[a-zA-Z, ]*.$', '', participation.description)
# replace non-ascii char "’" with "'"
participation.description = participation.description.replace('\u2019', "'")
participation.save()
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0087_update_startup_profile'),
]
operations = [
migrations.RunPython(remove_community_participation_read_more_prompts,
migrations.RunPython.noop)
]
| mit | Python |
|
b166cd8cc95ceb56f8d03cacb8903b0936e69210 | Create solution.py | lilsweetcaligula/Algorithms,lilsweetcaligula/Algorithms,lilsweetcaligula/Algorithms | data_structures/linked_list/problems/find_pattern_in_linked_list/py/solution.py | data_structures/linked_list/problems/find_pattern_in_linked_list/py/solution.py | import LinkedList
# Linked List Node inside the LinkedList module is declared as:
#
# class Node:
# def __init__(self, val, nxt=None):
# self.val = val
# self.nxt = nxt
#
def FindPatternInLinkedList(head: LinkedList.Node, pattern: LinkedList.Node) -> int:
if head == None or pattern == None:
return -1
index = 0
tslow = head
pnode = pattern
while tslow != None:
if tslow.val == pattern.val:
tfast = tslow
pnode = pattern
while tfast != None and pnode != None:
if tfast.val == pnode.val:
tfast = tfast.nxt
pnode = pnode.nxt
else:
break
if pnode == None:
return index
tslow = tslow.nxt
index += 1
return -1
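# Example (illustrative, using the Node signature declared above): the list
# 1 -> 2 -> 3 -> 4 contains the pattern 2 -> 3 starting at index 1.
#
#     head = LinkedList.Node(1, LinkedList.Node(2, LinkedList.Node(3, LinkedList.Node(4))))
#     pattern = LinkedList.Node(2, LinkedList.Node(3))
#     FindPatternInLinkedList(head, pattern)  # -> 1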
| mit | Python |
|
9bffe981c018213b87d015a20603c092567bbdf4 | Initialize multiple class setup; add remaining APIs | kshvmdn/cobalt-uoft-python | cobaltuoft/cobalt.py | cobaltuoft/cobalt.py | from .endpoints import Endpoints
from .helpers import get, scrape_filters
class Cobalt:
def __init__(self, api_key=None):
self.host = 'http://cobalt.qas.im/api/1.0'
self.headers = {
'Referer': 'Cobalt-UofT-Python'
}
if not api_key or not self._is_valid_key(api_key):
raise ValueError('Expected valid API key.')
self.headers['Authorization'] = api_key
self.filter_map = scrape_filters()
def _get(self, url, params=None):
return get(url=url, params=params, headers=self.headers)
def _is_valid_key(self, key):
payload = {'key': key}
r = self._get(self.host, params=payload)
return r.reason == 'Not Found' and r.status_code == 404
def _run(self, api, endpoint=None, params=None):
res = Endpoints.run(api=api,
endpoint=endpoint,
params=params,
map=self.filter_map[api],
get=self._get)
return res.json()
def athletics(self, endpoint=None, params=None):
return self._run(api='athletics', endpoint=endpoint, params=params)
def buildings(self, endpoint=None, params=None):
return self._run(api='buildings', endpoint=endpoint, params=params)
def courses(self, endpoint=None, params=None):
return self._run(api='courses', endpoint=endpoint, params=params)
def food(self, endpoint=None, params=None):
return self._run(api='food', endpoint=endpoint, params=params)
def textbooks(self, endpoint=None, params=None):
return self._run(api='textbooks', endpoint=endpoint, params=params)
| mit | Python |
|
54864841267c4d2cb53ce581c05d8ba9c15eef0c | Add lexer | balloon-lang/pygments-balloon | balloon.py | balloon.py | from pygments.lexer import *
from pygments.token import *
class CustomLexer(RegexLexer):
name = 'Balloon'
aliases = ['balloon']
    filenames = ['*.bl']
tokens = {
'root': [
include('keywords'),
(r'[]{}(),:;[]', Punctuation),
(r'#.*?$', Comment),
(r'[+-]?[0-9]+\.[0-9]+', Number.Float),
(r'[+-]?[0-9]+', Number.Integer),
(r'<=|>=|==|[+*<>=%\-\/]', Operator),
(r'(and|or|not)\b', Operator.Word),
(r'".*"', String),
(r'(var|fn)\b', Keyword.Declaration),
(r'[a-zA-Z_][a-zA-Z0-9_]*[!?]?', Name),
(r'\s+', Text)
],
'keywords': [
(words((
'if', 'else', 'loop', 'break', 'continue', 'return',
'Number', 'Bool', 'String', 'Function', 'Tuple',
'any', 'void', 'true', 'false'), suffix=r'\b'),
Keyword),
],
}
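# Example usage (an illustrative sketch; nothing in the lexer depends on it):
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     code = 'var x = 40 + 2 # the answer\n'
#     print(highlight(code, CustomLexer(), TerminalFormatter()))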
| mpl-2.0 | Python |
|
d0306518dcc395a051460115d7ef9488f26426cc | Add paper shortening tool: input text, output shorter text | smanilov/playground,smanilov/playground,smanilov/playground,smanilov/playground,smanilov/playground,smanilov/playground | shorten-pdf/shorten.py | shorten-pdf/shorten.py | #!/usr/bin/python
import sys
LONG_PARAGRAPH_THRESH = 400
LONG_START_LEN = 197
LONG_END_LEN = 197
if len(sys.argv) < 2:
print 'Give me a text file as an argument.'
sys.exit(0)
f = open(sys.argv[1]) # open file
t = f.read() # read text
ps = t.split('\n\n') # get paragraphs
ps_ = [] # shortened paragraphs go here
for p in ps:
if len(p) < LONG_PARAGRAPH_THRESH:
ps_.append(p)
continue
ss = p.split('. ') # get sentences
ss_ = [] # short paragraph sentences go here
totlen = 0 # total length of accepted sentences
for s in ss:
if totlen + len(s) > LONG_START_LEN:
ss_.append(s[:LONG_START_LEN - totlen] + "..")
break;
ss_.append(s)
totlen += len(s)
index = len(ss_) # index to insert end sentences
totlen = 0
ss.reverse()
for s in ss:
if totlen + len(s) > LONG_END_LEN:
ss_.insert(index, "..." + s[len(s) - (LONG_END_LEN - totlen):])
break;
ss_.insert(index, s)
totlen += len(s)
p_ = '. '.join(ss_)
ps_.append(p_)
t_ = '\n\n'.join(ps_)
print t_
| mit | Python |
|
0267ada9eed8c9759c4fe5ec5b4cd184bc2d5de1 | Create ode.py | imwiththou/PathwayNet | ode.py | ode.py | import sys
def rk4(func, x, y, step, xmax):
"""
    Integrates y' = f(x, y) using the classical fourth-order Runge-Kutta method.
@param func: a differential equation
@type func: list
@param x: initial value of x-axis, which is usually starting time
@type x: float
@param y: initial value for y-axis
@type y: float
@param step: step size on the x-axis (also known as step in calculus)
@type step: float
@param xmax: maximum value of x-axis, which is usually ending time
@type xmax: float
"""
yield [x, y]
while x < xmax:
f1 = func(x, y)
f2 = func(x+0.5*step, y+0.5*step*f1)
f3 = func(x+0.5*step, y+0.5*step*f2)
f4 = func(x+step, y+step*f3)
x = x + step
y = y + step*(f1+2.0*f2+2.0*f3+f4)/6.0
yield [x, y]
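# Example usage (illustrative): y' = -2*y with y(0) = 1, integrated from
# x = 0 to x = 1 in steps of 0.01; every yielded item is an [x, y] pair.
#
#     for x, y in rk4(lambda x, y: -2.0 * y, 0.0, 1.0, 0.01, 1.0):
#         print(x, y)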
def boundary_checker(y, boundary, type):
for k in boundary.keys():
if y[int(k)] < boundary[k][0] and type == 'lower':
y[int(k)] = boundary[k][1]
if y[int(k)] > boundary[k][0] and type == 'higher':
y[int(k)] = boundary[k][1]
return y
def multirk4(funcs, x0, y0, step, xmax,
lower_bound=None, upper_bound=None):
"""
    Integrates a system of ODEs, y' = f(x, y), using the classical
    fourth-order Runge-Kutta method.
@param funcs: system of differential equations
@type funcs: list
@param x0: initial value of x-axis, which is usually starting time
@type x0: float
@param y0: initial values for variables
@type y0: list
@param step: step size on the x-axis (also known as step in calculus)
@type step: float
@param xmax: maximum value of x-axis, which is usually ending time
@type xmax: float
"""
n = len(funcs)
yield [x0] + y0
f1, f2, f3, f4 = [0]*n, [0]*n, [0]*n, [0]*n
max = 1e100
while x0 < xmax:
y1 = [0]*n
for i in range(n):
try: f1[i] = funcs[i](x0, y0)
except TypeError: pass
except ZeroDivisionError: f1[i] = max
except OverflowError: f1[i] = max
for j in range(n):
y1[j] = y0[j] + (0.5*step*f1[j])
for i in range(n):
try: f2[i] = funcs[i]((x0+(0.5*step)), y1)
except TypeError: pass
except ZeroDivisionError: f2[i] = max
except OverflowError: f2[i] = max
for j in range(n):
y1[j] = y0[j] + (0.5*step*f2[j])
for i in range(n):
try: f3[i] = funcs[i]((x0+(0.5*step)), y1)
except TypeError: pass
except ZeroDivisionError: f3[i] = max
except OverflowError: f3[i] = max
for j in range(n):
y1[j] = y0[j] + (step*f3[j])
for i in range(n):
try: f4[i] = funcs[i]((x0+step), y1)
except TypeError: pass
except ZeroDivisionError: f4[i] = max
except OverflowError: f4[i] = max
x0 = x0 + step
for i in range(n):
y1[i] = y0[i] + (step * \
(f1[i] + (2.0*f2[i]) + (2.0*f3[i]) + f4[i]) / 6.0)
if lower_bound:
y1 = boundary_checker(y1, lower_bound, 'lower')
if upper_bound:
y1 = boundary_checker(y1, upper_bound, 'upper')
y0 = y1
yield [x0] + y1
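if __name__ == "__main__":
    # Illustrative demo (an assumed standalone run, not required elsewhere):
    # a two-equation system; each function receives the time and the full
    # state list, matching the funcs[i](x0, y0) calls above.
    def f_0(t, y):
        return y[1]        # x' = v
    def f_1(t, y):
        return -y[0]       # v' = -x (simple harmonic oscillator)
    trajectory = list(multirk4([f_0, f_1], 0.0, [1.0, 0.0], 0.01, 6.28))
    print(trajectory[-1])  # ends roughly near [6.28, 1.0, 0.0]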
| mit | Python |
|
316a82c5465a13770404b6a302348f192618cd27 | Add an interface for eagerly evaluating command graph elements | soulchainer/qtile,zordsdavini/qtile,ramnes/qtile,qtile/qtile,tych0/qtile,soulchainer/qtile,qtile/qtile,zordsdavini/qtile,ramnes/qtile,tych0/qtile | libqtile/command_interface.py | libqtile/command_interface.py | # Copyright (c) 2019, Sean Vig. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from abc import abstractmethod, ABCMeta
from typing import Any, Dict, Tuple
from libqtile.command_graph import CommandGraphCall
class EagerCommandInterface(metaclass=ABCMeta):
"""
Defines an interface which can be used to eagerly evaluate a given call on
a command graph. The implementations of this may use an IPC call to access
the running qtile instance remotely, or directly access the qtile instance
from within the same process.
"""
@abstractmethod
def execute(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> Any:
"""Execute the given call, returning the result of the execution
Perform the given command graph call, calling the function with the
given arguments and keyword arguments.
Parameters
----------
call: CommandGraphCall
The call on the command graph that is to be performed.
args:
The arguments to pass into the command graph call.
kwargs:
The keyword arguments to pass into the command graph call.
"""
pass # pragma: no cover
| mit | Python |
|
dff1f9176d7ce77a242263bfc9a0760cd31f0585 | Add a prototype for cached regex.compile() | Charcoal-SE/SmokeDetector,Charcoal-SE/SmokeDetector | regex_proxy.py | regex_proxy.py | from regex import *
from regex import compile as raw_compile
_cache = {}
# Wrap regex.compile up so we have a global cache
def compile(s, *args, **args):
global _cache
try:
return _cache[s]
except KeyError:
r = raw_compile(s, *args, **kwargs)
_cache[s] = r
return r
| apache-2.0 | Python |
|
231e19ed29314bc0d9aad3cd1d69b757364fce7d | Create pms.py | BollMose/daynote | pms.py | pms.py | import serial
# we disabled the serial login terminal with raspi-config,
# we disabled bluetooth in /boot/config.txt first,
# and currently the UART device is /dev/ttyAMA0,
# but we still cannot read data from the device
# failure devices
#dev = "ttyS0"
# work devices
#dev = "ttyAMA0"
#dev = "serial0"
dev = "ttyUSB0"
ser = serial.Serial(port="/dev/"+dev,
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS, timeout=2)
while True:
data = ser.read()
print str(data), len(data)
ser.close()
| apache-2.0 | Python |
|
dd1e3a665298a616d9b78f0c019288a9d6d883b8 | Add unit tests for the OfficeAdminExtraGeoLocation model | StartupsPoleEmploi/labonneboite,StartupsPoleEmploi/labonneboite,StartupsPoleEmploi/labonneboite,StartupsPoleEmploi/labonneboite | labonneboite/tests/app/test_models.py | labonneboite/tests/app/test_models.py | # coding: utf8
import unittest
from labonneboite.common.models import OfficeAdminExtraGeoLocation
class OfficeAdminExtraGeoLocationTest(unittest.TestCase):
"""
Tests for the OfficeAdminExtraGeoLocation model.
"""
def test_codes_as_list(self):
codes = u" 57070\n\n\n\n\n\n 75010 \n 54 "
codes_as_list = OfficeAdminExtraGeoLocation.codes_as_list(codes)
self.assertItemsEqual(codes_as_list, [u'54', u'57070', u'75010'])
codes = u"75\r57\n13"
codes_as_list = OfficeAdminExtraGeoLocation.codes_as_list(codes)
self.assertItemsEqual(codes_as_list, [u'13', u'57', u'75'])
def test_codes_as_geolocations(self):
codes = u"75\n57070"
codes_as_geolocations = OfficeAdminExtraGeoLocation.codes_as_geolocations(codes)
expected = [
# Found for the departement 75.
('48.8264581543', '2.32690527897'),
('48.8280603003', '2.3544809727'),
('48.8365381105', '2.42075934432'),
('48.8421891171', '2.29652252417'),
('48.8449537128', '2.37608588424'),
('48.846262612', '2.34839040879'),
('48.8501003498', '2.33402139523'),
('48.8543439464', '2.31294138206'),
('48.8553815318', '2.35541102422'),
('48.8566390262', '2.25972331102'),
('48.8566390262', '2.25972331102'),
('48.8590284068', '2.37705679761'),
('48.8622892805', '2.36158587519'),
('48.8628435865', '2.33807010768'),
('48.8643142257', '2.39961435812'),
('48.8684296759', '2.34149433888'),
('48.8729556556', '2.31369616661'),
('48.8758285242', '2.33869789273'),
('48.8761941084', '2.36107097577'),
('48.8878020912', '2.30862255671'),
('48.8928608126', '2.3479701879'),
('49.157869706', '6.2212499254'),
# Found for 57070.
('48.8840228115', '2.38234715656'),
]
self.assertItemsEqual(expected, codes_as_geolocations)
def test_codes_as_json_geolocations(self):
codes = u"75010"
codes_as_json_geolocations = OfficeAdminExtraGeoLocation.codes_as_json_geolocations(codes)
expected = '[["48.8761941084", "2.36107097577"]]'
self.assertEqual(expected, codes_as_json_geolocations)
| agpl-3.0 | Python |
|
7b1b343c552ee6f124ccceee05f1a6732657c9e1 | Add initial startup program (pox.py) | andiwundsam/_of_normalize,xAKLx/pox,MurphyMc/pox,denovogroup/pox,PrincetonUniversity/pox,PrincetonUniversity/pox,waltznetworks/pox,chenyuntc/pox,chenyuntc/pox,VamsikrishnaNallabothu/pox,pthien92/sdn,kpengboy/pox-exercise,MurphyMc/pox,waltznetworks/pox,PrincetonUniversity/pox,MurphyMc/pox,noxrepo/pox,carlye566/IoT-POX,diogommartins/pox,kavitshah8/SDNDeveloper,pthien92/sdn,diogommartins/pox,adusia/pox,waltznetworks/pox,kulawczukmarcin/mypox,noxrepo/pox,VamsikrishnaNallabothu/pox,chenyuntc/pox,kpengboy/pox-exercise,carlye566/IoT-POX,xAKLx/pox,noxrepo/pox,kulawczukmarcin/mypox,MurphyMc/pox,denovogroup/pox,diogommartins/pox,MurphyMc/pox,denovogroup/pox,pthien92/sdn,waltznetworks/pox,adusia/pox,waltznetworks/pox,adusia/pox,denovogroup/pox,andiwundsam/_of_normalize,adusia/pox,noxrepo/pox,kavitshah8/SDNDeveloper,jacobq/csci5221-viro-project,xAKLx/pox,carlye566/IoT-POX,VamsikrishnaNallabothu/pox,diogommartins/pox,xAKLx/pox,PrincetonUniversity/pox,jacobq/csci5221-viro-project,kpengboy/pox-exercise,kavitshah8/SDNDeveloper,jacobq/csci5221-viro-project,carlye566/IoT-POX,xAKLx/pox,kulawczukmarcin/mypox,PrincetonUniversity/pox,pthien92/sdn,jacobq/csci5221-viro-project,chenyuntc/pox,kpengboy/pox-exercise,andiwundsam/_of_normalize,VamsikrishnaNallabothu/pox,denovogroup/pox,kulawczukmarcin/mypox,pthien92/sdn,chenyuntc/pox,kpengboy/pox-exercise,andiwundsam/_of_normalize,kulawczukmarcin/mypox,VamsikrishnaNallabothu/pox,diogommartins/pox,carlye566/IoT-POX,adusia/pox,kavitshah8/SDNDeveloper,jacobq/csci5221-viro-project | pox.py | pox.py | #!/usr/bin/python
from pox.core import core
import pox.openflow.openflow
import pox.topology.topology
import pox.openflow.of_01
import pox.dumb_l3_switch.dumb_l3_switch
# Set default log level
import logging
logging.basicConfig(level=logging.DEBUG)
# Turn on extra info for event exceptions
import pox.lib.revent.revent as revent
revent.showEventExceptions = True
def startup ():
core.register("topology", pox.topology.topology.Topology())
core.register("openflow", pox.openflow.openflow.OpenFlowHub())
core.register("switch", pox.dumb_l3_switch.dumb_l3_switch.dumb_l3_switch())
pox.openflow.of_01.start()
if __name__ == '__main__':
try:
startup()
core.goUp()
except:
import traceback
traceback.print_exc()
import code
code.interact('Ready.')
pox.core.core.quit()
| apache-2.0 | Python |
|
35887b39b0151432423cca7832f1c9bc4ab7d836 | Create OutputNeuronGroup.py | ricardodeazambuja/BrianConnectUDP | examples/OutputNeuronGroup.py | examples/OutputNeuronGroup.py | '''
Example of a spike receptor (only receives spikes)
In this example spikes are received and processed creating a raster plot at the end of the simulation.
'''
from brian import *
import numpy
from brian_multiprocess_udp import BrianConnectUDP
# The main function with the NeuronGroup(s) and Synapse(s) must be named "main_NeuronGroup".
# It will receive two objects: input_Neuron_Group and the simulation_clock. The input_Neuron_Group
# will supply the input spikes to the network. The size of the spike train received equals NumOfNeuronsInput.
# The size of the output spike train equals NumOfNeuronsOutput and must be the same size of the NeuronGroup who is
# going to interface with the rest of the system to send spikes.
# The function must return all the NeuronGroup objects and all the Synapse objects this way:
# ([list of all NeuronGroups],[list of all Synapses])
# and the FIRST (index 0) NeuronGroup of the list MUST be the one where the OUTPUT spikes will be taken by the simulation.
#
# Here is also possible to use "dummy" NeuronGroups only to receive and/or send spikes.
def main_NeuronGroup(input_Neuron_Group, simulation_clock):
print "main_NeuronGroup!" #DEBUG!
simclock = simulation_clock
Nr=NeuronGroup(45, model='v:1', reset=0, threshold=0.5, clock=simclock)
Nr.v=0
# SYNAPSES BETWEEN REAL NEURON NETWORK AND THE INPUT
Syn_iNG_Nr=Synapses(input_Neuron_Group, Nr, model='w:1', pre='v+=w', clock=simclock)
Syn_iNG_Nr[:,:]='i==j'
print "Total Number of Synapses:", len(Syn_iNG_Nr) #DEBUG!
Syn_iNG_Nr.w=1
MExt=SpikeMonitor(Nr) # Spikes sent by UDP
Mdummy=SpikeMonitor(input_Neuron_Group) # Spikes received by UDP
return ([Nr],[Syn_iNG_Nr],[MExt,Mdummy])
def post_simulation_function(input_NG, simulation_NG, simulation_SYN, simulation_MN):
"""
input_NG: the neuron group that receives the input spikes
simulation_NG: the neuron groups list passed to the system by the user function (main_NeuronGroup)
simulation_SYN: the synapses list passed to the system by the user function (main_NeuronGroup)
simulation_MN: the monitors list passed to the system by the user function (main_NeuronGroup)
This way it is possible to plot, save or do whatever you want with these objects after the end of the simulation!
"""
pass
figure()
raster_plot(simulation_MN[1])
title("Spikes Received by UDP")
show(block=True)
# savefig('output.pdf')
if __name__=="__main__":
# my_simulation = BrainConnectUDP(main_NeuronGroup, NumOfNeuronsInput=45, post_simulation_function=post_simulation_function,
# UDP_IPI="192.168.1.123", UDP_PORTI=20202, simclock_dt=5, inputclock_dt=5, TotalSimulationTime=5000, sim_repetitions=0)
my_simulation = BrianConnectUDP(main_NeuronGroup, NumOfNeuronsInput=45, post_simulation_function=post_simulation_function,
UDP_IPI="127.0.0.1", UDP_PORTI=10101, simclock_dt=5, inputclock_dt=5, TotalSimulationTime=5000, sim_repetitions=0)
| cc0-1.0 | Python |
|
3834af9b3a6381ac7a2334c7bd2ae6d562e0f20b | Create HR_pythonIsLeap.py | bluewitch/Code-Blue-Python | HR_pythonIsLeap.py | HR_pythonIsLeap.py | def is_leap(year):
leap = False
# Write your logic here
# thought process
#if year%4==0:
# return True
#elif year%100==0:
# return False
#elif year%400==0:
# return True
# Optimized, Python 3
return ((year%4==0)and(year%100!=0)or(year%400==0))
| mit | Python |
|
b5cc6ead2e17ef54612b3072c7991166955bee77 | Add user commands | Vultour/dropbox-static-cli | dropbox-cli.py | dropbox-cli.py | #!/usr/bin/env python
import os
import logging
import dropbox
import argparse
APP_NAME = "dropbox-static-cli"
DEFAULT_KEY_PATH = "{}/.dropbox-static-cli-key".format(os.environ["HOME"])
L = None
def parse_arguments():
parser = argparse.ArgumentParser(
prog="dropbox-static-cli",
description="A command line tool for interfacing with Dropbox without the need for local sync storage",
epilog="Note: Put your API key in {} to avoid having to pass in --api-key with every command!".format(DEFAULT_KEY_PATH)
)
parser.add_argument("-v", "--verbose", action="count", default=0, help="Verbose output")
parser.add_argument("-k", "--api-key", default=DEFAULT_KEY_PATH, help="Dropbox API key")
parser.set_defaults(func=exec_default)
subparsers = parser.add_subparsers(title="Available subcommands")
parser_list = subparsers.add_parser("list", help="List items in Dropbox")
parser_list.add_argument("DROPBOX_PATH")
parser_list.add_argument("-m", "--more", action="count", help="Display more pages (if available)")
parser_list.set_defaults(func=exec_list)
parser_get = subparsers.add_parser("get", help="Download items from Dropbox")
parser_get.add_argument("-o", "--output", required=True, help="Save path for the downloaded file")
parser_get.add_argument("DROPBOX_PATH", help="Path inside your Dropbox")
parser_get.set_defaults(func=exec_get)
parser_put = subparsers.add_parser("put", help="Upload items to Dropbox")
parser_put.add_argument("-f", "--file", required=True, help="File to upload")
parser_put.add_argument("DROPBOX_PATH", help="Path inside your Dropbox")
parser_put.set_defaults(func=exec_put)
parser_info = subparsers.add_parser("info", help="Dropbox account information")
parser_info_sub = parser_info.add_subparsers(title="Available subcommands")
parser_info_sub.add_parser("user", help="User information").set_defaults(func=exec_info_user)
parser_info_sub.add_parser("quota", help="Quota information").set_defaults(func=exec_info_quota)
args = parser.parse_args()
return global_init(args)
def global_init(args):
global L
log_level = logging.WARNING
if (args.verbose == 1): log_level = logging.INFO
if (args.verbose > 1) : log_level = logging.DEBUG
init_logger(log_level)
dbx = init_dropbox(parse_key(args.api_key))
return args.func(args, dbx)
def init_logger(log_level):
global L
L = logging.getLogger(APP_NAME)
L.setLevel(log_level)
ch = logging.StreamHandler()
ch.setLevel(log_level)
ch.setFormatter(logging.Formatter("%(asctime)s [%(levelname)-5s]: %(message)s"))
L.addHandler(ch)
L.debug("Logger initialized")
def parse_key(key):
global L
if (os.path.isfile(key)):
L.info("Using supplied key as a file - '{}'".format(key))
s = "";
with open(key) as f:
s = f.read().strip()
return s
L.info("Supplied key is not a valid file, using as a raw Dropbox API key - '{}'".format(key))
return key
def init_dropbox(key):
global L
L.info("Initializing Dropbox instance with key '{}'".format(key))
dbx = dropbox.Dropbox(key)
return dbx
def exec_default(args, dbx):
print "Executing no command"
print args
def exec_list(args, dbx):
print "Executing LIST command"
print args
def exec_get(args, dbx):
print "Executing GET command"
print args
def exec_put(args, dbx):
print "Executing PUT command"
print args
def exec_info_user(args, dbx):
global L
L.info("Executing INFO-USER command")
user = dbx.users_get_current_account()
print """\
User ID : {}
Account type : {}
Display Name : {}
Familiar Name: {}
First Name : {}
Last Name : {}
E-Mail : {}
Verified : {}
Disabled : {}
Referral link: {}\
""".format(
user.account_id,
user.account_type._tag,
user.name.display_name,
user.name.familiar_name,
user.name.given_name,
user.name.surname,
user.email,
user.email_verified,
user.disabled,
user.referral_link
)
def exec_info_quota(args, dbx):
L.info("Executing INFO-QUOTA command")
usage = dbx.users_get_space_usage()
if (usage.allocation.is_individual()):
print "Allocated: {:.2f}MB".format(usage.allocation.get_individual().allocated / 1024.0 / 1024.0)
print "Used : {:.2f}MB".format(usage.used / 1024.0 / 1024.0)
else:
L.error("Team accounts are not supported")
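# Entry point: parse the command line and translate Dropbox SDK errors into log messages.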
def main():
try:
parse_arguments()
except dropbox.exceptions.AuthError as e:
L.error("Authentication error")
except dropbox.exceptions.BadInputError as e:
L.error("Invalid input: {}".format(e.message))
if (__name__ == "__main__"):
main()
| apache-2.0 | Python |
|
d160d73740c73e2cab8325179e7f0a9ee4ae8c50 | add disk_usage.py example script | mindw/psutil,mrjefftang/psutil,mindw/psutil,tomprince/psutil,msarahan/psutil,giampaolo/psutil,landryb/psutil,qbit/psutil,cloudbase/psutil,tomprince/psutil,Q-Leap-Networks/psutil,msarahan/psutil,tomprince/psutil,packages/psutil,mindw/psutil,Q-Leap-Networks/psutil,landryb/psutil,landryb/psutil,0-wiz-0/psutil,jorik041/psutil,0-wiz-0/psutil,jamesblunt/psutil,mindw/psutil,landryb/psutil,mindw/psutil,jorik041/psutil,packages/psutil,jorik041/psutil,qbit/psutil,jamesblunt/psutil,msarahan/psutil,cloudbase/psutil,tomprince/psutil,cloudbase/psutil,jamesblunt/psutil,giampaolo/psutil,packages/psutil,qbit/psutil,tomprince/psutil,mrjefftang/psutil,mrjefftang/psutil,landryb/psutil | examples/disk_usage.py | examples/disk_usage.py | #!/usr/bin/env python
"""
List all mounted disk partitions a-la "df" command.
"""
import sys
import psutil
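# Convert a byte count into a human-readable string (e.g. 10.5M).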
def convert_bytes(n):
    if n == 0:
        return "0B"
    symbols = ('k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    prefix = {}
    for i, s in enumerate(symbols):
        prefix[s] = 1 << (i+1)*10
    for s in reversed(symbols):
        if n >= prefix[s]:
            value = float(n) / prefix[s]
            return '%.1f%s' % (value, s)
    # Fall back to plain bytes for values smaller than 1 kilobyte.
    return "%dB" % n
def main():
print "Device Total Used Free Use % Type Mount"
for part in psutil.disk_partitions(0):
usage = psutil.disk_usage(part.mountpoint)
print "%-9s %8s %8s %8s %5s%% %8s %s" % (part.device,
convert_bytes(usage.total),
convert_bytes(usage.used),
convert_bytes(usage.free),
int(usage.percent),
part.fstype,
part.mountpoint)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | Python |
|
7475b73072f0037fc53bcae59e331c4d5a997e86 | Add auto-fill test cases | verleihtool/verleihtool,verleihtool/verleihtool,verleihtool/verleihtool,verleihtool/verleihtool | depot/tests/test_checkout.py | depot/tests/test_checkout.py | from django.contrib.auth.models import User
from depot.models import Depot, Organization
from verleihtool.test import ClientTestCase
class AutoFillTestCase(ClientTestCase):
"""
    Test cases asserting the auto-fill functionality of the checkout form
:author: Stefan Su
"""
def setUp(self):
super(AutoFillTestCase, self).setUp()
organization = Organization.objects.create()
self.depot = Depot.objects.create(
name='My Depot',
organization=organization
)
def test_logged_in_autofill_username(self):
response = self.as_user.get('/depots/%d/' % self.depot.id)
self.assertInHTML(
'<input type="text" class="form-control" id="id_username" name="name" value="user">',
response.content.decode()
)
def test_not_logged_in_no_autofill(self):
response = self.as_guest.get('/depots/%d/' % self.depot.id)
self.assertInHTML(
            '<input type="text" class="form-control" id="id_username" name="name" value="">',
response.content.decode()
)
| agpl-3.0 | Python |
|
0caa9035e06e6596a295ed2ed0a6238a2b09f353 | add PCA and TSNE representation | Totoketchup/das,Totoketchup/das | utils/postprocessing/representation.py | utils/postprocessing/representation.py | import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
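# Dimensionality-reduction helpers: each returns the data projected down to n_components dimensions.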
def PCA_representation(data, n_components):
pca = PCA(n_components=n_components)
return pca.fit_transform(data)
def TSNE_representation(data, n_components):
model = TSNE(n_components=n_components, random_state=0)
return model.fit_transform(data)
def plot_PCA(data, n_components, name='PCA Representation'):
    # Assumed plotting behaviour: scatter the first two projected dimensions.
    pca = PCA_representation(data, n_components)
    plt.scatter(pca[:, 0], pca[:, 1])
    plt.title(name)
def plot_TSNE(data, n_components, name='TSNE Representation'):
    tsne = TSNE_representation(data, n_components)
    plt.scatter(tsne[:, 0], tsne[:, 1])
    plt.title(name)
| mit | Python |
|
3ee47b0adbc379d77f01df51927399ecf3fb24e6 | Add docstring and comment. | lmjohns3/theanets,devdoer/theanets,chrinide/theanets | examples/mnist-autoencoder.py | examples/mnist-autoencoder.py | #!/usr/bin/env python
'''Single-layer autoencoder example using MNIST digit data.
This example shows one way to train a single-layer autoencoder model using the
handwritten MNIST digits.
This example also shows the use of climate command-line arguments.
'''
import climate
import matplotlib.pyplot as plt
import theanets
from utils import load_mnist, plot_layers, plot_images
g = climate.add_group('MNIST Example')
g.add_argument('--features', type=int, default=8, metavar='N',
help='train a model using N^2 hidden-layer features')
def main(args):
# load up the MNIST digit dataset.
train, valid, _ = load_mnist()
e = theanets.Experiment(
theanets.Autoencoder,
layers=(784, args.features ** 2, 784))
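    # Train with input noise and a small L2 weight penalty; stop once improvement falls below 10%.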
e.train(train, valid,
input_noise=0.1,
weight_l2=0.0001,
algorithm='rmsprop',
momentum=0.9,
min_improvement=0.1)
plot_layers([e.network.find('hid1', 'w'), e.network.find('out', 'w')])
plt.tight_layout()
plt.show()
v = valid[:100]
plot_images(v, 121, 'Sample data')
plot_images(e.network.predict(v), 122, 'Reconstructed data')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
climate.call(main)
| #!/usr/bin/env python
import climate
import matplotlib.pyplot as plt
import theanets
from utils import load_mnist, plot_layers, plot_images
g = climate.add_group('MNIST Example')
g.add_argument('--features', type=int, default=8, metavar='N',
help='train a model using N^2 hidden-layer features')
def main(args):
train, valid, _ = load_mnist()
e = theanets.Experiment(
theanets.Autoencoder,
layers=(784, args.features ** 2, 784))
e.train(train, valid,
input_noise=0.1,
weight_l2=0.0001,
algorithm='rmsprop',
momentum=0.9,
min_improvement=0.1)
plot_layers([e.network.find('hid1', 'w'), e.network.find('out', 'w')])
plt.tight_layout()
plt.show()
v = valid[:100]
plot_images(v, 121, 'Sample data')
plot_images(e.network.predict(v), 122, 'Reconstructed data')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
climate.call(main)
| mit | Python |
09112412a4814e3727def2547765546bf44c1e7d | Test joint refinement of 300 cspad images using Brewster 2018 methods. | dials/dials,dials/dials,dials/dials,dials/dials,dials/dials | test/algorithms/refinement/test_cspad_refinement.py | test/algorithms/refinement/test_cspad_refinement.py | # Test multiple stills refinement.
from __future__ import absolute_import, division, print_function
import os
from dxtbx.model.experiment_list import ExperimentListFactory
import procrunner
def test1(dials_regression, run_in_tmpdir):
"""
Refinement test of 300 CSPAD images, testing auto_reduction, parameter
fixing, constraints, SparseLevMar, and sauter_poon outlier rejection. See
README in the regression folder for more details.
"""
from scitbx import matrix
data_dir = os.path.join(dials_regression, "refinement_test_data", "cspad_refinement")
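    # Run dials.refine on the 300-image CSPAD data with the PHIL options shipped alongside the test data.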
result = procrunner.run_process([
"dials.refine",
os.path.join(data_dir, "cspad_refined_experiments_step6_level2_300.json"),
os.path.join(data_dir, "cspad_reflections_step7_300.pickle"),
os.path.join(data_dir, "refine.phil"),
])
assert result['exitcode'] == 0
assert result['stderr'] == ''
# load results
reg_exp = ExperimentListFactory.from_json_file(
os.path.join(data_dir, "regression_experiments.json"),
check_format=False)
ref_exp = ExperimentListFactory.from_json_file("refined_experiments.json",
check_format=False)
# compare results
tol = 1e-5
for b1, b2 in zip(reg_exp.beams(), ref_exp.beams()):
assert b1.is_similar_to(b2, wavelength_tolerance=tol,
direction_tolerance=tol,
polarization_normal_tolerance=tol,
polarization_fraction_tolerance=tol)
s0_1 = matrix.col(b1.get_unit_s0())
s0_2 = matrix.col(b2.get_unit_s0())
assert s0_1.accute_angle(s0_2, deg=True) < 0.0057 # ~0.1 mrad
for c1, c2 in zip(reg_exp.crystals(), ref_exp.crystals()):
assert c1.is_similar_to(c2)
for d1, d2 in zip(reg_exp.detectors(), ref_exp.detectors()):
assert d1.is_similar_to(d2,
fast_axis_tolerance=1e-4, slow_axis_tolerance=1e-4, origin_tolerance=1e-2)
| bsd-3-clause | Python |
|
6bac7268df94d73555c0b594c89b4d5ed0bf53ed | Create NN.py | ghost9023/DeepLearningPythonStudy | DeepLearning/DeepLearning/04_Deep_LeeWJ/NN.py | DeepLearning/DeepLearning/04_Deep_LeeWJ/NN.py | """
The MNIST dataset files are large, so after the first run downloads them
they can be cached with pickle and restored from disk, which keeps start-up fast.
"""
import sys, os
sys.path.append(os.pardir)  # make files in the parent directory importable
import numpy as np
from mnist import load_mnist
import pickle
# The three parameters of load_mnist:
# 1. flatten --> shape of each input image: False = 1x28x28 arrays, True = flat 784-element arrays
#    (data stored as 1-D arrays can be turned back into an image with .reshape)
# 2. normalize --> whether to scale pixel values into the 0~1 range
# 3. one_hot_label --> encode labels so that only the correct class is 1 and the rest are 0
# with open('sample_weight.pkl', 'rb') as f:
# network= pickle.load(f)
# print(network)
#
# (x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False)
# softmax function
def softmax(a):
    c = np.max(a)
    exp_a = np.exp(a-c) # subtract the max to avoid overflow
sum_exp_a = np.sum(exp_a)
y = exp_a / sum_exp_a
return y
# sigmoid function
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def get_data():
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, flatten=True,
one_hot_label=False)
return x_test, t_test
# Initialise the network: load the pre-trained weights and biases from the pickle file
def init_network():
with open("sample_weight.pkl", 'rb') as f:
network = pickle.load(f)
return network
# Forward pass: sigmoid activations in the hidden layers, softmax at the output layer
def predict(network, x):
W1, W2, W3 = network['W1'], network['W2'], network['W3']
b1, b2, b3 = network['b1'], network['b2'], network['b3']
a1 = np.dot(x, W1) + b1
z1 = sigmoid(a1)
a2 = np.dot(z1, W2) + b2
z2 = sigmoid(a2)
a3 = np.dot(z2, W3) + b3
y = softmax(a3)
return y
x, t = get_data()
network = init_network()
accuracy_cnt = 0
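# Classify every test image and count how many argmax predictions match the true labels.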
for i in range(len(x)):
y = predict(network, x[i])
    p = np.argmax(y)  # index of the element with the highest probability
if p == t[i]:
accuracy_cnt += 1
print("Accuracy:" + str(float(accuracy_cnt) / len(x)))
| mit | Python |
|
99b0aeb3257b8125a30340c06cc1bf834e914461 | add bar_contact.py that was missing | bremond/siconos,siconos/siconos,radarsat1/siconos,bremond/siconos,fperignon/siconos,bremond/siconos,radarsat1/siconos,siconos/siconos,radarsat1/siconos,bremond/siconos,bremond/siconos,siconos/siconos,fperignon/siconos,fperignon/siconos,fperignon/siconos,siconos/siconos,radarsat1/siconos,radarsat1/siconos,fperignon/siconos | examples/Mechanics/ContactDetection/BulletIO/bar_contact.py | examples/Mechanics/ContactDetection/BulletIO/bar_contact.py | import os,sys
import numpy
import math
import pickle
import random
from siconos.mechanics.contact_detection.tools import Contactor
from siconos.io.mechanics_io import Hdf5
#sys.path.append('../..')
#from mechanics_io import Hdf5
import siconos.numerics as Numerics
import siconos.kernel as Kernel
# WARNING : in 3D by default z-axis is upward
# this is very important to direct PLANx objects
dim = 3
unscaled_bar_length=1.5
aspect_ratio=100.0
unscaled_bar_height=unscaled_bar_length/aspect_ratio
unscaled_bar_width=unscaled_bar_length/aspect_ratio
unscaled_volume = unscaled_bar_length*unscaled_bar_height*unscaled_bar_width
unscaled_density=1000
unscaled_mass=unscaled_volume*unscaled_density
print('unscaled_mass',unscaled_mass)
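# Rescale lengths so the bar has unit length; density is scaled by 1/scale**3 so the total mass is unchanged.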
scale=1.0/unscaled_bar_length*1.0
density = unscaled_density/(scale**3)
bar_height = unscaled_bar_height*scale
bar_length = unscaled_bar_length*scale
bar_width = unscaled_bar_width*scale
body_collection={}
body_collection['plan_id']= {}
id_plan=0
# scale =1
# mass :3.375000e-01
# Inertia :
# 3.600000e-04, 0.000000e+00, 0.000000e+00,
# 0.000000e+00, 1.195312e-01, 0.000000e+00,
# 0.000000e+00, 0.000000e+00, 1.195312e-01,
#create some bodies
# Creation of the hdf5 file for input/output
with Hdf5() as io:
volume = bar_height * bar_length * bar_width
mass = volume*density
print('mass', mass)
print('scale', scale)
# raw_input()
# Definition of a cube as a convex shape
io.addConvexShape('Bar', [ (-bar_length, bar_width, -bar_height),
(-bar_length, -bar_width, -bar_height),
(-bar_length, -bar_width, bar_height),
(-bar_length, bar_width, bar_height),
( bar_length , bar_width, bar_height),
( bar_length, bar_width, -bar_height),
( bar_length, -bar_width, -bar_height),
( bar_length ,-bar_width, bar_height)])
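    # The orientation is a unit quaternion (w, x, y, z): here a rotation of `angle` about the y-axis.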
angle= math.pi/8.0
trans=[0,0,4.0*scale]
ori = [math.cos(angle/2.0),0.0,math.sin(angle/2.0),0]
axis = numpy.zeros(3)
angle_test = Kernel.getAxisAngle(trans+ori, axis)
print angle_test,axis
print('ori initial', ori)
io.addObject('bar', [Contactor('Bar')],
translation=trans,
orientation = ori,
velocity=[0, 0, 0, 0, 0.0, 0],
mass=mass)
# Definition of the ground shape
io.addPrimitiveShape('Ground', 'Box', (5*scale, 5*scale, 0.1*scale))
angleground= -math.pi/4.0
axis = [1.0, 0.0, 0.0]
origround = [math.cos(angleground/2.0),
axis[0] * math.sin(angleground/2.0),
axis[1] * math.sin(angleground/2.0),
axis[2] * math.sin(angleground/2.0)]
io.addObject('ground', [Contactor('Ground')],
translation=[0, 0, 0.0],
orientation = origround)
# Definition of a non smooth law. As no group ids are specified it
# is between contactors of group id 0.
io.addNewtonImpactFrictionNSL('contact', mu=0.3)
print body_collection
f = open('body_collection.dict', 'w')
pickle.dump(body_collection,f)
f.close()
step=2000
hstep=0.001
gravity_scale=1.0/scale
import scipy.constants as constants
def apply_forces(body):
g = constants.g / gravity_scale
weight = [0, 0, - body.scalarMass() * g]
body.setFExtPtr(weight)
# Run the simulation from the inputs previously defined and add
# results to the hdf5 file. The visualisation of the output may be done
# with the vview command.
with Hdf5(mode='r+', collision_margin=0.01) as io:
# By default earth gravity is applied and the units are those
# of the International System of Units.
# Because of fixed collision margins used in the collision detection,
# sizes of small objects may need to be expressed in cm or mm.
io.run(with_timer=False,
time_stepping=None,
space_filter=None,
body_class=None,
shape_class=None,
face_class=None,
edge_class=None,
gravity_scale=gravity_scale,
t0=0,
T=step*hstep,
h=hstep,
multipoints_iterations=True,
theta=1.0,
Newton_max_iter=10,
set_external_forces=apply_forces,
solver=Numerics.SICONOS_FRICTION_3D_NSGS,
itermax=1000,
tolerance=1e-12,
numerics_verbose=False,
violation_verbose=True,
output_frequency=10)
| apache-2.0 | Python |
|
fa521b4358a06d1667864a09cd7195d3a6db764d | Add lc206_reverse_linked_list.py | bowen0701/algorithms_data_structures | lc206_reverse_linked_list.py | lc206_reverse_linked_list.py | """206. Reverse Linked List
Easy
Reverse a singly linked list.
Example:
Input: 1->2->3->4->5->NULL
Output: 5->4->3->2->1->NULL
Follow up:
A linked list can be reversed either iteratively or recursively. Could you implement both?
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
    def reverseList(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        # Iterative solution: re-point each node at its predecessor.
        prev = None
        while head:
            nxt = head.next
            head.next = prev
            prev = head
            head = nxt
        return prev
def main():
# print Solution().reverseList(head)
pass
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
|
2dd6049c1fa9340d14f4b73f843f7ed4408e84f5 | Prepare release script init | AnalogJ/lexicon,AnalogJ/lexicon | utils/create_release.py | utils/create_release.py | #!/usr/bin/env python3
import os
import datetime
import subprocess
from distutils.version import StrictVersion
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def main():
# git_clean = subprocess.check_output(
# "git status --porcelain", shell=True, universal_newlines=True,
# ).strip()
# if git_clean:
# raise RuntimeError("Error, git workspace is not clean: \n{0}".format(git_clean))
with open(os.path.join(PROJECT_ROOT, "VERSION")) as file_h:
current_version = file_h.read().strip()
print("Current version is: {0}".format(current_version))
print("Please insert new version:")
new_version = str(input())
if StrictVersion(new_version) <= StrictVersion(current_version):
raise RuntimeError(
"Error new version is below current version: {0} < {1}".format(
new_version, current_version
)
)
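    # Stamp the changelog: the unreleased 'master - CURRENT' section gets the new version and today's date.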
try:
with open(os.path.join(PROJECT_ROOT, "CHANGELOG.md")) as file_h:
changelog = file_h.read()
today = datetime.datetime.today()
changelog = changelog.replace(
"## master - CURRENT\n",
"""\
## master - CURRENT
## {0} - {1}
""".format(
new_version, today.strftime("%d/%m/%Y")
),
)
with open(os.path.join(PROJECT_ROOT, "CHANGELOG.md"), "w") as file_h:
file_h.write(changelog)
with open(os.path.join(PROJECT_ROOT, "VERSION"), "w") as file_h:
file_h.write(new_version)
subprocess.check_call(
'git commit -a -m "Version {0}"'.format(new_version), shell=True
)
# subprocess.check_call("git tag v{0}".format(new_version), shell=True)
# subprocess.check_call("git push --tags", shell=True)
# subprocess.check_call("git push", shell=True)
except subprocess.CalledProcessError as e:
print("Error detected, cleaning state.")
# subprocess.call("git tag -d v{0}".format(new_version), shell=True)
# subprocess.check_call("git reset --hard", shell=True)
raise e
if __name__ == "__main__":
main()
| mit | Python |
|
2850713d0add5cb1ae084898bdd6929c0f5bfb3e | add simulated annealing stat script | OPU-Surveillance-System/monitoring,OPU-Surveillance-System/monitoring,OPU-Surveillance-System/monitoring | master/scripts/planner/solvers/hyperparameter_optimization/test_stat_sa.py | master/scripts/planner/solvers/hyperparameter_optimization/test_stat_sa.py | import GPy
import GPyOpt
import numpy as np
from sys import path
import pickle
import time
from tqdm import tqdm
path.append("..")
path.append("../..")
path.append("../../..")
from solver import SimulatedAnnealingSolver, RandomSolver
import map_converter as m
fs = open("../../../webserver/data/serialization/mapper.pickle", "rb")
mapper = pickle.load(fs)
fs.close()
nb_drone = 1
state = [(1059, 842), (505, 1214), (400, 1122), (502, 339), (866, 512), (1073, 82), (669, 1202), (32, 1122), (45, 52), (209, 993), (118, 653), (487, 896), (748, 638), (271, 1067), (1576, 567), (683, 316), (1483, 1156), (1448, 634), (303, 1220), (759, 823), (1614, 991), (1387, 174), (1618, 227), (367, 39), (35, 902), (967, 690), (944, 327), (912, 1029), (184, 1205), (779, 1026), (694, 123), (1502, 395)]
rplan = RandomSolver(state, mapper, nb_drone)
saplan = SimulatedAnnealingSolver(rplan.state, mapper, nb_drone)
hist = []
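# Run the annealer 100 times from fresh random plans to estimate the spread of final energies.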
for i in tqdm(range(100)):
rplan.solve()
saplan.state = list(rplan.state)
saplan.copy_strategy = "slice"
saplan.steps = 10000000
tmax = 987.57443341
tmin = 1
saplan.Tmax = tmax
saplan.Tmin = tmin
saplan.updates = 0
itinerary, energy = saplan.solve()
hist.append(energy)
hist = np.array(hist)
print("Mean:", np.mean(hist), "Var:", np.var(hist), "Std:", np.std(hist))
print(hist)
| mit | Python |
|
1d4693b6f5b6f8b3912aae1216665272a36b1411 | Add missing group.py | flexo/evolutron,flexo/evolutron | group.py | group.py | from pygame.sprite import Group as pygame_Group
class Group(pygame_Group):
def draw(self, onto, *args, **kw):
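        # Let each sprite redraw itself first, then blit the whole group onto the target surface as usual.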
for sprite in self:
sprite.draw(*args, **kw)
super(Group, self).draw(onto)
| mit | Python |
|
b680141b9ec5468a5a0890edf25045a6af8b46c2 | Add run.py | kkstu/DNStack,kkstu/DNStack,kkstu/DNStack | run.py | run.py | #!/usr/bin/python
# -*- coding:utf8 -*-
# Powered By KK Studio
from app.DNStack import DNStack
if __name__ == "__main__":
app = DNStack()
app.run()
| mit | Python |
|
d3e786b554bfafeb4f0c16635b80f9911acc4bba | add stacked auto encoder file. | nel215/py-sae | sae.py | sae.py | #coding: utf-8
import requests
import random, numpy
from aa import AutoEncoder
class StackedAutoEncoder:
def __init__(self, visible, hiddens):
# TODO: fine-tuning layer
num_of_nodes= [visible] + hiddens
self.auto_encoders = []
for i in xrange(len(num_of_nodes)-1):
self.auto_encoders.append(AutoEncoder(num_of_nodes[i], num_of_nodes[i+1]))
self.training_layer = 0
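    # Greedy layer-wise training: encode samples through the already-trained layers, then train only the current top layer.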
def train(self, samples, alpha=0.05):
for i in xrange(self.training_layer):
samples = map(self.auto_encoders[i].encode, samples)
self.auto_encoders[self.training_layer].train(samples,alpha)
def error(self, samples, alpha=0.05):
for i in xrange(self.training_layer):
samples = map(self.auto_encoders[i].encode, samples)
return self.auto_encoders[self.training_layer].error(samples)
def output(self, sample):
for i in xrange(self.training_layer):
sample = self.auto_encoders[i].encode(sample)
top = self.auto_encoders[self.training_layer]
return top.decode(top.encode(sample))
    def fix_training_layer(self):
        self.training_layer += 1
if __name__=='__main__':
resp = requests.get('https://archive.ics.uci.edu/ml/machine-learning-databases/spect/SPECT.train')
samples = map(lambda row: row.split(','), resp.text.split('\n'))
titles = samples[0]
samples = samples[1:]
samples = filter(lambda arr: len(arr) > 1, samples)
samples = map(lambda arr: numpy.matrix([map(float, arr)]), samples)
samples = map(lambda mat: mat.transpose(), samples)
V = samples[0].shape[0]
H = 2*V
sae = StackedAutoEncoder(V, [V+2,V])
for i in xrange(1000):
j = int(random.random()*len(samples))
#print samples[j:j+10]
sae.train(samples[j:j+10])
if i<100 or i%1000 == 0:
print sae.error(samples)
    sae.fix_training_layer()
for i in xrange(1000):
j = int(random.random()*len(samples))
#print samples[j:j+10]
sae.train(samples[j:j+10])
if i<100 or i%1000 == 0:
print sae.error(samples)
for sample in samples:
print sae.output(sample)
| mit | Python |
|
13486556a15cdb2dbfe3f390f973942d93338995 | Create TryRecord.py | Larz60p/Python-Record-Structure | TryRecord.py | TryRecord.py | """
Example usage of Record class
The MIT License (MIT)
Copyright (c) <2016> <Larry McCaig (aka: Larz60+ aka: Larz60p)>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import Record
class TryRecord:
def __init__(self, filename=None):
if filename:
self.rec = Record.Record(filename)
def try_record(self):
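        # Walk the loaded record named-tuple: print each entry, then drill into every keyed sub-record's columns.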
stkrec = self.rec.record
print('\nrecords:')
for record in stkrec:
print(record)
keys = stkrec._asdict().keys()
print('\nKeys:')
for key in keys:
print('\nkey: {}'.format(key))
thisrec = getattr(stkrec, key)
print('filename: {}'.format(thisrec.filename))
print('number of columns: {}'.format(len(thisrec.columns)))
print('column 0 column name: {}'.format(thisrec.columns[0].db_column_name))
if __name__ == '__main__':
tr = TryRecord('StockData.json')
tr.try_record()
| mit | Python |
|
8d94bbc272b0b39ea3a561671faf696a4851c1a1 | Create app.py | Fillll/reddit2telegram,Fillll/reddit2telegram | reddit2telegram/channels/MoreTankieChapo/app.py | reddit2telegram/channels/MoreTankieChapo/app.py | #encoding:utf-8
subreddit = 'MoreTankieChapo'
t_channel = '@MoreTankieChapo'
def send_post(submission, r2t):
return r2t.send_simple(submission)
| mit | Python |
|
e5ae14b4438fc7ae15156615206453097b8f759b | add wave test | mzlogin/snippets,mzlogin/snippets,mzlogin/snippets,mzlogin/snippets,mzlogin/snippets,mzlogin/snippets,mzlogin/snippets,mzlogin/snippets | Python/WaveTest.py | Python/WaveTest.py | import requests
def text2code(text):
'''
convert a string to wave code
'''
ret = None
get_wave_params = {'type' : 'text', 'content' : text}
response = requests.post('http://rest.sinaapp.com/api/post', data=get_wave_params)
if response.status_code == 200:
try:
data = response.json()
ret = data['code']
except: # json() may cause ValueError
pass
return ret
def code2text(code):
'''
convert a wave code to string
'''
ret = None
get_text_params = {'code' : code}
response = requests.get('http://rest.sinaapp.com/api/get', params=get_text_params)
if (response.status_code == 200):
try:
data = response.json()
ret = data['content']
except:
pass
return ret
def main():
text = 'Flame-Team'
code = text2code(text)
if code is not None:
print text + ' to code is ' + code
text_restore = code2text(code)
if text_restore is not None:
print code + ' to text is ' + text_restore
if __name__ == '__main__':
main()
| mit | Python |
|
561f595337106c60c55212dd87d90ed3002de07f | disable pretty json (reduces size by 30%) | waiyin21/test123,Mickey32111/pogom,dalim/pogom,favll/pogom,PokeHunterProject/pogom-linux,DenL/pogom-webhook,Mickey32111/pogom,Mickey32111/pogom,waiyin21/test123,DenL/pogom-webhook,favll/pogom,CaptorOfSin/pogom,falau/pogom,dalim/pogom,DenL/pogom-webhook,falau/pogom,dalim/pogom,CaptorOfSin/pogom,falau/pogom,PokeHunterProject/pogom-linux,PokeHunterProject/pogom-linux,favll/pogom,CaptorOfSin/pogom,waiyin21/test123 | runserver.py | runserver.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from threading import Thread
from pogom import config
from pogom.app import Pogom
from pogom.search import search_loop, set_cover, set_location
from pogom.utils import get_args, insert_mock_data
from pogom.models import create_tables, SearchConfig
from pogom.pgoapi.utilities import get_pos_by_name
log = logging.getLogger(__name__)
def start_locator_thread(args):
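    # Run the scan loop in a daemon thread so it exits together with the main Flask process.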
search_thread = Thread(target=search_loop, args=(args,))
search_thread.daemon = True
search_thread.name = 'search_thread'
search_thread.start()
if __name__ == '__main__':
args = get_args()
logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(module)11s] [%(levelname)7s] %(message)s')
if not args.debug:
logging.getLogger("peewee").setLevel(logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.WARNING)
logging.getLogger("pogom.pgoapi.rpc_api").setLevel(logging.CRITICAL)
logging.getLogger("pogom.models").setLevel(logging.WARNING)
logging.getLogger("werkzeug").setLevel(logging.WARNING)
elif args.debug == "info":
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.INFO)
logging.getLogger("pogom.models").setLevel(logging.INFO)
logging.getLogger("werkzeug").setLevel(logging.INFO)
elif args.debug == "debug":
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.DEBUG)
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.DEBUG)
logging.getLogger("pogom.models").setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.INFO)
create_tables()
set_location(args.location, args.radius)
set_cover()
if not args.mock:
start_locator_thread(args)
else:
insert_mock_data(config, 6)
app = Pogom(__name__)
config['ROOT_PATH'] = app.root_path
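    # Serve compact JSON; pretty-printing inflates responses by roughly 30%.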
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
app.run(threaded=True, debug=args.debug, host=args.host, port=args.port)
| #!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from threading import Thread
from pogom import config
from pogom.app import Pogom
from pogom.search import search_loop, set_cover, set_location
from pogom.utils import get_args, insert_mock_data
from pogom.models import create_tables, SearchConfig
from pogom.pgoapi.utilities import get_pos_by_name
log = logging.getLogger(__name__)
def start_locator_thread(args):
search_thread = Thread(target=search_loop, args=(args,))
search_thread.daemon = True
search_thread.name = 'search_thread'
search_thread.start()
if __name__ == '__main__':
args = get_args()
logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(module)11s] [%(levelname)7s] %(message)s')
if not args.debug:
logging.getLogger("peewee").setLevel(logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.WARNING)
logging.getLogger("pogom.pgoapi.rpc_api").setLevel(logging.CRITICAL)
logging.getLogger("pogom.models").setLevel(logging.WARNING)
logging.getLogger("werkzeug").setLevel(logging.WARNING)
elif args.debug == "info":
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.INFO)
logging.getLogger("pogom.models").setLevel(logging.INFO)
logging.getLogger("werkzeug").setLevel(logging.INFO)
elif args.debug == "debug":
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.DEBUG)
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.DEBUG)
logging.getLogger("pogom.models").setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.INFO)
create_tables()
set_location(args.location, args.radius)
set_cover()
if not args.mock:
start_locator_thread(args)
else:
insert_mock_data(config, 6)
app = Pogom(__name__)
config['ROOT_PATH'] = app.root_path
app.run(threaded=True, debug=args.debug, host=args.host, port=args.port)
| mit | Python |
fb1498abaca07e3594d2f24edc1596fb03225dea | Add new package: dnsmasq (#16253) | LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack | var/spack/repos/builtin/packages/dnsmasq/package.py | var/spack/repos/builtin/packages/dnsmasq/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Dnsmasq(MakefilePackage):
"""A lightweight, caching DNS proxy with integrated DHCP server."""
homepage = "http://www.thekelleys.org.uk/dnsmasq/doc.html"
url = "http://www.thekelleys.org.uk/dnsmasq/dnsmasq-2.70.tar.gz"
version('2.81', sha256='3c28c68c6c2967c3a96e9b432c0c046a5df17a426d3a43cffe9e693cf05804d0')
version('2.80', sha256='9e4a58f816ce0033ce383c549b7d4058ad9b823968d352d2b76614f83ea39adc')
version('2.79', sha256='77512dd6f31ffd96718e8dcbbf54f02c083f051d4cca709bd32540aea269f789')
version('2.78', sha256='c92e5d78aa6353354d02aabf74590d08980bb1385d8a00b80ef9bc80430aa1dc')
version('2.77', sha256='ae97a68c4e64f07633f31249eb03190d673bdb444a05796a3a2d3f521bfe9d38')
version('2.76', sha256='777c4762d2fee3738a0380401f2d087b47faa41db2317c60660d69ad10a76c32')
version('2.75', sha256='f8252c0a0ba162c2cd45f81140c7c17cc40a5fca2b869d1a420835b74acad294')
version('2.74', sha256='27b95a8b933d7eb88e93a4c405b808d09268246d4e108606e423ac518aede78f')
version('2.73', sha256='9f350f74ae2c7990b1c7c6c8591d274c37b674aa987f54dfee7ca856fae0d02d')
version('2.72', sha256='635f1b47417d17cf32e45cfcfd0213ac39fd09918479a25373ba9b2ce4adc05d')
version('2.71', sha256='7d8c64f66a396442e01b639df3ea6b4e02ba88cbe206c80be8de68b6841634c4')
version('2.70', sha256='8eb7bf53688d6aaede5c90cfd2afcce04803a4efbddfbeecc6297180749e98af')
def install(self, spec, prefix):
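        # No 'make install' step here: just copy the freshly built dnsmasq binary into the prefix.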
mkdirp(prefix.bin)
install('./src/dnsmasq', prefix.bin)
| lgpl-2.1 | Python |
|
7b71bbd87234c8cbe8c7fa189c0617b4ca191989 | Add tweak_billing_log command | PressLabs/silver,PressLabs/silver,PressLabs/silver | silver/management/commands/tweak_billing_log.py | silver/management/commands/tweak_billing_log.py | import datetime as dt
from datetime import datetime
from optparse import make_option
from django.core.management.base import BaseCommand
from django.utils import timezone
from silver.models import Subscription, BillingLog
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--date',
action='store',
dest='date'),
)
def handle(self, *args, **options):
if options['date']:
date = datetime.strptime(options['date'], '%Y-%m-%d')
        else:
            now = timezone.now().date()
            # First day of the previous month; go through its last day so January correctly rolls back to December.
            date = (dt.date(now.year, now.month, 1) - dt.timedelta(days=1)).replace(day=1)
for subscription in Subscription.objects.all():
self.stdout.write('Tweaking for subscription %d' % subscription.id)
BillingLog.objects.create(subscription=subscription,
billing_date=date)
| apache-2.0 | Python |
|
1b023e8471dad22bfb6b8de0d30c0796c30e2a40 | Copy hello.py from add_snippet branch | hnakamur/cygroonga | hello.py | hello.py | import cygroonga as grn
import datetime
with grn.Groonga():
with grn.Context() as ctx:
db = ctx.open_or_create_database("test.db")
table1 = ctx.open_or_create_table("table1",
grn.OBJ_TABLE_HASH_KEY | grn.OBJ_PERSISTENT,
ctx.at(grn.DB_SHORT_TEXT))
print("table1 path: %s" % table1.path())
print("table1 name: %s" % table1.name())
table1.open_or_create_column("column1",
grn.OBJ_PERSISTENT | grn.OBJ_COLUMN_SCALAR,
ctx.at(grn.DB_TEXT))
table1.open_or_create_column("created_at",
grn.OBJ_PERSISTENT | grn.OBJ_COLUMN_SCALAR,
ctx.at(grn.DB_TIME))
id, added = table1.add_record("foo")
print("id=%d, added=%s" % (id, added))
table1.column("column1").set_string(id, "foo1")
table1.column("created_at").set_time(id, datetime.datetime.now())
print("record count=%d" % table1.record_count())
id = table1.get_record("foo")
print("id=%d" % id)
print("column1 value=%s" % table1.column("column1").get_string(id))
print("created_at value=%s" % table1.column("created_at").get_time(id))
index_table1 = ctx.open_or_create_table("table1_index",
grn.OBJ_TABLE_PAT_KEY | grn.OBJ_KEY_NORMALIZE |
grn.OBJ_PERSISTENT,
ctx.at(grn.DB_SHORT_TEXT))
index_table1.set_default_tokenizer("TokenBigram")
index_table1.open_or_create_index_column("table1_index",
grn.OBJ_PERSISTENT | grn.OBJ_COLUMN_INDEX |
grn.OBJ_WITH_POSITION | grn.OBJ_WITH_SECTION,
"table1", ["_key"])
q = table1.create_query()
print("after create_query")
q.parse("_key:@foo", None, grn.OP_MATCH, grn.OP_AND,
grn.EXPR_SYNTAX_QUERY | grn.EXPR_ALLOW_PRAGMA | grn.EXPR_ALLOW_COLUMN)
print("after parse")
records = table1.select(q)
print("matched record count=%d" % records.record_count())
with records.open_table_cursor() as c:
while True:
record_id = c.next()
if not record_id:
break
print("record_id=%d" % record_id)
#db.remove()
| apache-2.0 | Python |
|
53b6b1f4b7f58b1a7d748f67e220bd4da147df0e | Create hello.py | BobbyJacobs/cs3240-demo | hello.py | hello.py | def main():
print("Hello!")
| mit | Python |
|
708fe9f6765717e1f1dabce1f9ac9ed56a7cc769 | Add a new pacakage: HiC-Pro. (#7858) | matthiasdiener/spack,tmerrick1/spack,krafczyk/spack,matthiasdiener/spack,krafczyk/spack,matthiasdiener/spack,LLNL/spack,EmreAtes/spack,tmerrick1/spack,mfherbst/spack,iulian787/spack,iulian787/spack,EmreAtes/spack,LLNL/spack,iulian787/spack,EmreAtes/spack,mfherbst/spack,tmerrick1/spack,mfherbst/spack,mfherbst/spack,tmerrick1/spack,LLNL/spack,iulian787/spack,EmreAtes/spack,iulian787/spack,krafczyk/spack,tmerrick1/spack,krafczyk/spack,krafczyk/spack,LLNL/spack,matthiasdiener/spack,LLNL/spack,mfherbst/spack,EmreAtes/spack,matthiasdiener/spack | var/spack/repos/builtin/packages/hic-pro/package.py | var/spack/repos/builtin/packages/hic-pro/package.py | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class HicPro(MakefilePackage):
"""HiC-Pro is a package designed to process Hi-C data,
from raw fastq files (paired-end Illumina data)
to the normalized contact maps"""
homepage = "https://github.com/nservant/HiC-Pro"
url = "https://github.com/nservant/HiC-Pro/archive/v2.10.0.tar.gz"
version('2.10.0', '6ae2213dcc984b722d1a1f65fcbb21a2')
depends_on('bowtie2')
depends_on('samtools')
depends_on('[email protected]:2.8')
depends_on('r')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-pysam', type=('build', 'run'))
depends_on('py-bx-python', type=('build', 'run'))
depends_on('r-rcolorbrewer', type=('build', 'run'))
depends_on('r-ggplot2', type=('build', 'run'))
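    # Rewrite the bundled config-install.txt so it points at Spack's own dependencies before the Makefile-driven install.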
def edit(self, spec, prefix):
config = FileFilter('config-install.txt')
config.filter('PREFIX =.*', 'PREFIX = {0}'.format(prefix))
        config.filter('BOWTIE2_PATH =.*',
                      'BOWTIE2_PATH = {0}'.format(spec['bowtie2'].prefix))
        config.filter('SAMTOOLS_PATH =.*',
                      'SAMTOOLS_PATH = {0}'.format(spec['samtools'].prefix))
        config.filter('R_PATH =.*',
                      'R_PATH = {0}'.format(spec['r'].prefix))
        config.filter('PYTHON_PATH =.*',
                      'PYTHON_PATH = {0}'.format(spec['python'].prefix))
    def build(self, spec, prefix):
make('-f', './scripts/install/Makefile',
'CONFIG_SYS=./config-install.txt')
make('mapbuilder')
make('readstrimming')
make('iced')
    def install(self, spec, prefix):
# Patch INSTALLPATH in config-system.txt
config = FileFilter('config-system.txt')
config.filter('/HiC-Pro_2.10.0', '')
# Install
install('config-hicpro.txt', prefix)
install('config-install.txt', prefix)
install('config-system.txt', prefix)
install_tree('bin', prefix.bin)
install_tree('annotation', prefix.annotation)
install_tree('doc', prefix.doc)
install_tree('scripts', prefix.scripts)
install_tree('test-op', join_path(prefix, 'test-op'))
| lgpl-2.1 | Python |
|
033032260a43a416857b7057bd4fc212422abc51 | Add a simple command line skew-T plotter | julienchastang/unidata-python-workshop,julienchastang/unidata-python-workshop,Unidata/unidata-python-workshop | notebooks/Command_Line_Tools/skewt.py | notebooks/Command_Line_Tools/skewt.py | # skewt.py - A simple Skew-T plotting tool
import argparse
from datetime import datetime
import matplotlib.pyplot as plt
import metpy.calc as mpcalc
from metpy.io.upperair import get_upper_air_data
from metpy.plots import Hodograph, SkewT
from metpy.units import units
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
def get_sounding_data(date, station):
ds = get_upper_air_data(date, station)
p = ds.variables['pressure'][:]
T = ds.variables['temperature'][:]
Td = ds.variables['dewpoint'][:]
u = ds.variables['u_wind'][:]
v = ds.variables['v_wind'][:]
windspeed = ds.variables['speed'][:]
return p, T, Td, u, v, windspeed
def plot_sounding(date, station):
p, T, Td, u, v, windspeed = get_sounding_data(date, station)
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
lfc_pressure, lfc_temperature = mpcalc.lfc(p, T, Td)
parcel_path = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(8, 8))
skew = SkewT(fig)
# Plot the data
skew.plot(p, T, color='tab:red')
skew.plot(p, Td, color='tab:green')
# Plot thermodynamic parameters and parcel path
skew.plot(p, parcel_path, color='black')
if lcl_pressure:
skew.ax.axhline(lcl_pressure, color='black')
if lfc_pressure:
skew.ax.axhline(lfc_pressure, color='0.7')
# Add the relevant special lines
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Shade areas representing CAPE and CIN
skew.shade_cin(p, T, parcel_path)
skew.shade_cape(p, T, parcel_path)
# Add wind barbs
skew.plot_barbs(p, u, v)
# Add an axes to the plot
ax_hod = inset_axes(skew.ax, '30%', '30%', loc=1, borderpad=3)
# Plot the hodograph
h = Hodograph(ax_hod, component_range=100.)
# Grid the hodograph
h.add_grid(increment=20)
# Plot the data on the hodograph
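    # Only use levels up to 100 mb (i.e. pressures of at least 100 mb) on the hodograph.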
mask = (p >= 100 * units.mbar)
h.plot_colormapped(u[mask], v[mask], windspeed[mask]) # Plot a line colored by wind speed
# Set some sensible axis limits
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
return fig, skew
if __name__ == '__main__':
# Parse out the command line arguments
parser = argparse.ArgumentParser(description='''Make an advanced SkewT
plot of upper air observations.''')
parser.add_argument('--date', required=True,
help='Date of the sounding YYYYMMDD')
parser.add_argument('--hour', required=True,
help='Time of the sounding in hours')
parser.add_argument('--station', default='OUN',
help='Station three letter identifier')
parser.add_argument('--savefig', action='store_true',
help='Save out figure instead of displaying it')
parser.add_argument('--imgformat', default='png',
help='Format to save the resulting image as.')
args = parser.parse_args()
# Parse out the date time stamp
date = datetime.strptime('{0}{1}'.format(args.date, args.hour), '%Y%m%d%H')
# Make the sounding figure
fig, skew = plot_sounding(date, args.station)
    # Save or show figure
if args.savefig:
plt.savefig('{0}_{1}.{2}'.format(args.station,
datetime.strftime(date, '%Y%m%d_%HZ'),
args.imgformat))
else:
plt.show()
| mit | Python |
|
724e86e31b6584012af5afe458e0823b9a2ca7ab | Create a class named "CreateSpark", which is to solove the problem of "Cannot run multiple SparkContexts at once; existing SparkContext(app=spam-msg-classifier, master=local[8]) created by __init__" | ysh329/spam-msg-classifier | myclass/class_create_spark.py | myclass/class_create_spark.py | # -*- coding: utf-8 -*-
# !/usr/bin/python
################################### PART0 DESCRIPTION #################################
# Filename: class_create_spark.py
# Description:
#
# Author: Shuai Yuan
# E-mail: [email protected]
# Create: 2015-11-17 20:43:09
# Last:
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import logging
import time
from pyspark import SparkContext, SparkConf
################################### PART2 CLASS && FUNCTION ###########################
class CreateSpark(object):
def __init__(self, pyspark_app_name):
self.start = time.clock()
logging.basicConfig(level = logging.INFO,
format = '%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s',
datefmt = '%y-%m-%d %H:%M:%S',
filename = './main.log',
filemode = 'a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.info("START CLASS {class_name}.".format(class_name = CreateSpark.__name__))
# Configure Spark
try:
conf = SparkConf().setAppName(pyspark_app_name).setMaster("local[8]")
self.sc = SparkContext(conf = conf)
logging.info("Start pyspark successfully.")
except Exception as e:
logging.error("Fail in starting pyspark.")
logging.error(e)
def return_spark_context(self):
return self.sc
def __del__(self):
# Close SparkContext
try:
self.sc.stop()
logging.info("close SparkContext successfully.")
except Exception as e:
logging.error(e)
logging.info("END CLASS {class_name}.".format(class_name = CreateSpark.__name__))
        self.end = time.clock()
        logging.info("The class {class_name} run time is : {delta_time} seconds".format(class_name = CreateSpark.__name__, delta_time = self.end - self.start))
################################### PART3 CLASS TEST ##################################
"""
# initialization parameter
pyspark_app_name = "spam-msg-classifier"
SparkCreator = CreateSpark(pyspark_app_name = pyspark_app_name)
pyspark_sc = SparkCreator.return_spark_context()
logging.info("sc.version:{0}".format(pyspark_sc.version))
""" | apache-2.0 | Python |
|
2bd453c4a7402f24cd43b49e73d0b95e371e6654 | add package Feature/sentieon (#9557) | LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack | var/spack/repos/builtin/packages/sentieon-genomics/package.py | var/spack/repos/builtin/packages/sentieon-genomics/package.py | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os.path
from spack import *
class SentieonGenomics(Package):
"""Sentieon provides complete solutions for secondary DNA analysis.
Our software improves upon BWA, GATK, Mutect, and Mutect2 based pipelines.
The Sentieon tools are deployable on any CPU-based computing system.
Please set the path to the sentieon license server with:
export SENTIEON_LICENSE=[FQDN]:[PORT]
Note: A manual download is required.
Spack will search your current directory for the download file.
Alternatively, add this file to a mirror so that Spack can find it.
For instructions on how to set up a mirror, see
http://spack.readthedocs.io/en/latest/mirrors.html"""
homepage = "https://www.sentieon.com/"
url = "file://{0}/sentieon-genomics-201808.01.tar.gz".format(os.getcwd())
version('201808.01', sha256='6d77bcd5a35539549b28eccae07b19a3b353d027720536e68f46dcf4b980d5f7')
# Licensing.
license_require = True
license_vars = ['SENTIEON_LICENSE']
def install(self, spec, prefix):
install_tree('bin', prefix.bin)
install_tree('doc', prefix.doc)
install_tree('etc', prefix.etc)
install_tree('lib', prefix.lib)
install_tree('libexec', prefix.libexec)
install_tree('share', prefix.share)
| lgpl-2.1 | Python |
|
8c1cd72d11836ad913af5c3614137358ddf3efee | add mgmt cmd to set related user | greglinch/sourcelist,greglinch/sourcelist | sources/management/commands/set_related_user.py | sources/management/commands/set_related_user.py | from django.core.management.base import BaseCommand, CommandError
from django.core.mail import send_mail
from django.contrib.auth.models import User
# from sources.models import Person
import random
def set_related_user(email_address, person_id):
obj = Person.objects.get(id=person_id)
try:
user_existing = User.objects.get(email=obj.email_address)
except:
user_existing = False
if user_existing:
obj.related_user = user_existing
else:
username = '{}{}'.format(obj.first_name, obj.last_name).lower().replace('-','')
choices = 'abcdefghijklmnopqrstuvwxyz0123456789'
middle_choices = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
password = \
''.join([random.SystemRandom().choice(choices) for i in range(1)]) + \
''.join([random.SystemRandom().choice(middle_choices) for i in range(23)]) + \
''.join([random.SystemRandom().choice(choices) for i in range(1)])
user_new = User.objects.create_user(username, password=password)
user_new.email = obj.email_address
user_new.first_name = obj.first_name
user_new.last_name = obj.last_name
user_new.save()
class Command(BaseCommand):
help = 'Set the related user for a Person.'
def add_arguments(self, parser):
## required
parser.add_argument('email',
help='Specify the user emamil.'
)
## optional
# parser.add_argument('-t' '--test',
# action='store_true',
# # type=str,
# dest='test',
# default=False,
# help="Specific whether it's a test or not"
# )
def handle(self, *args, **options):
## unpack args
email_address = options['email']
## call the function
email_add_user(email_address)
| mit | Python |
|
208077afd9b1ba741df6bccafdd5f008e7b75e38 | Add nftables test | YinThong/intel-iot-refkit,YinThong/intel-iot-refkit,ipuustin/intel-iot-refkit,mythi/intel-iot-refkit,klihub/intel-iot-refkit,mythi/intel-iot-refkit,intel/intel-iot-refkit,jairglez/intel-iot-refkit,YinThong/intel-iot-refkit,ipuustin/intel-iot-refkit,intel/intel-iot-refkit,klihub/intel-iot-refkit,YinThong/intel-iot-refkit,ipuustin/intel-iot-refkit,YinThong/intel-iot-refkit,jairglez/intel-iot-refkit,jairglez/intel-iot-refkit,mythi/intel-iot-refkit,klihub/intel-iot-refkit,ipuustin/intel-iot-refkit,mythi/intel-iot-refkit,klihub/intel-iot-refkit,ipuustin/intel-iot-refkit,klihub/intel-iot-refkit,YinThong/intel-iot-refkit,intel/intel-iot-refkit,klihub/intel-iot-refkit,jairglez/intel-iot-refkit,ipuustin/intel-iot-refkit,intel/intel-iot-refkit,ipuustin/intel-iot-refkit,jairglez/intel-iot-refkit,mythi/intel-iot-refkit,jairglez/intel-iot-refkit,intel/intel-iot-refkit,mythi/intel-iot-refkit,klihub/intel-iot-refkit,intel/intel-iot-refkit,YinThong/intel-iot-refkit,jairglez/intel-iot-refkit,intel/intel-iot-refkit,mythi/intel-iot-refkit | meta-iotqa/lib/oeqa/runtime/sanity/nftables.py | meta-iotqa/lib/oeqa/runtime/sanity/nftables.py | import os
import subprocess
from time import sleep
from oeqa.oetest import oeRuntimeTest
class NftablesTest(oeRuntimeTest):
def check_ssh_connection(self):
'''Check SSH connection to DUT port 2222'''
process = subprocess.Popen(("ssh -o UserKnownHostsFile=/dev/null " \
"-o ConnectTimeout=3 " \
"-o StrictHostKeyChecking=no root@" + \
self.target.ip +" -p 2222 ls").split(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, err = process.communicate()
output = output.decode("utf-8")
returncode = process.returncode
return returncode, output
def add_test_table(self):
self.target.run("nft add table ip test")
self.target.run("nft add chain ip test input {type filter hook input priority 0\;}")
self.target.run("nft add chain ip test donothing")
self.target.run("nft add chain ip test prerouting {type nat hook prerouting priority 0 \;}")
self.target.run("nft add chain ip test postrouting {type nat hook postrouting priority 100 \;}")
def delete_test_table(self):
self.target.run("nft delete table ip test")
def test_reject(self):
'''Test rejecting SSH with nftables'''
self.add_test_table()
self.target.run("nft add rule ip test input tcp dport 2222 reject")
self.target.run("nft add rule ip test input goto donothing")
returncode, output = self.check_ssh_connection()
self.delete_test_table()
self.assertIn("Connection refused", output, msg="Error message: %s" % output)
def test_drop(self):
'''Test dropping SSH with nftables'''
self.add_test_table()
self.target.run("nft add rule ip test input tcp dport 2222 drop")
self.target.run("nft add rule ip test input goto donothing")
returncode, output = self.check_ssh_connection()
self.delete_test_table()
self.assertIn("Connection timed out", output, msg="Error message: %s" % output)
def test_redirect(self):
'''Test redirecting port'''
# Check that SSH can't connect to port 2222
returncode, output = self.check_ssh_connection()
self.assertNotEqual(returncode, 0, msg="Error message: %s" % output)
self.add_test_table()
self.target.run("nft add rule ip test prerouting tcp dport 2222 redirect to 22")
# Check that SSH can connect to port 2222
returncode, output = self.check_ssh_connection()
self.assertEqual(returncode, 0, msg="Error message: %s" % output)
self.delete_test_table()
# Check that SSH can't connect to port 2222
returncode, output = self.check_ssh_connection()
self.assertNotEqual(returncode, 0, msg="Error message: %s" % output)
| mit | Python |
|
3be145af359df5bcf928da1b984af8635ea33c27 | add model for parcels, temp until i figure out psql migrations in flask | codeforamerica/westsac-urban-land-locator,codeforamerica/westsac-urban-land-locator,codeforamerica/westsac-urban-land-locator,codeforamerica/westsac-urban-land-locator | farmsList/farmsList/public/models.py | farmsList/farmsList/public/models.py | # -*- coding: utf-8 -*-
from farmsList.database import (
Column,
db,
Model,
ReferenceCol,
relationship,
SurrogatePK,
)
class Parcel(SurrogatePK, Model):
__tablename__ = 'parcels'
name = Column(db.String(80), unique=True, nullable=False)
def __init__(self, name, **kwargs):
db.Model.__init__(self, name=name, **kwargs)
def __repr__(self):
return '<Role({name})>'.format(name=self.name)
| bsd-3-clause | Python |
|
a5d63ec0f8f192aaeae8b9a7f1cf423d18de25dc | Add test runner to handle issue with import path | ganemone/ontheside,ganemone/ontheside,ganemone/ontheside | server/test.py | server/test.py | import pytest
pytest.main('-x tests/')
| mit | Python |
|
8632b60718fa353797ffc53281e57a37caf9452f | Add config command for setting the address of rf sensors. | geekylou/sensor_net,geekylou/sensor_net,geekylou/sensor_net,geekylou/sensor_net,geekylou/sensor_net | set_address.py | set_address.py | import zmq
import time
import sys
print sys.argv[1:]
# ZeroMQ Context
context = zmq.Context()
sock_live = context.socket(zmq.PUB)
sock_live.connect("tcp://"+sys.argv[1])
time.sleep(1)
# Send multipart only allows send byte arrays, so we convert everything to strings before sending
# [TODO] add .encode('UTF-8') when we switch to python3.
sock_live.send_multipart(["set-address",'pair',sys.argv[2],"0"])
sock_live.close()
| bsd-3-clause | Python |
|
f5f6bc0999d5b6f065adb81982ce3a322e1ab987 | add regression test for fit_spectrum() Python 3.x issue | jjhelmus/nmrglue,kaustubhmote/nmrglue,jjhelmus/nmrglue,kaustubhmote/nmrglue | nmrglue/analysis/tests/test_analysis_linesh.py | nmrglue/analysis/tests/test_analysis_linesh.py | import numpy as np
import nmrglue as ng
from nmrglue.analysis.linesh import fit_spectrum
def test_fit_spectrum():
_bb = np.random.uniform(0, 77, size=65536)
lineshapes = ['g']
params = [[(13797.0, 2.2495075273313034)],
[(38979.0, 5.8705185693227664)],
[(39066.0, 5.7125954296137103)],
[(39153.0, 5.7791485451283791)],
[(41649.0, 4.260242375400459)],
[(49007.0, 4.2683625950679964)],
[(54774.0, 3.2907139764685569)]]
amps = [35083.008667, 32493.824402, 32716.156556, 33310.711914, 82682.928405,
82876.544313, 85355.658142]
bounds = [[[(None, None), (0, None)]], [[(None, None), (0, None)]],
[[(None, None), (0, None)]], [[(None, None), (0, None)]],
[[(None, None), (0, None)]], [[(None, None), (0, None)]],
[[(None, None), (0, None)]]]
ampbounds = [None, None, None, None, None, None, None]
centers = [(13797.0,), (38979.0,), (39066.0,), (39153.0,), (41649.0,),
(49007.0,), (54774.0,)]
rIDs = [1, 2, 3, 4, 5, 6, 7]
box_width = (5,)
error_flag = False
verb = False
params_best, amp_best, iers = ng.linesh.fit_spectrum(
_bb, lineshapes, params, amps, bounds, ampbounds, centers,
rIDs, box_width, error_flag, verb=False)
| bsd-3-clause | Python |
|
c5a2167a63516c23390263408fcd2c9a4f654fc8 | Add tests for the parse method of the spider | J-CPelletier/webcomix,J-CPelletier/webcomix,J-CPelletier/WebComicToCBZ | webcomix/tests/test_comic_spider.py | webcomix/tests/test_comic_spider.py | from webcomix.comic_spider import ComicSpider
def test_parse_yields_good_page(mocker):
mock_response = mocker.patch('scrapy.http.Response')
mock_response.urljoin.return_value = "http://xkcd.com/3/"
mock_response.url = "http://xkcd.com/2/"
mock_selector = mocker.patch('scrapy.selector.SelectorList')
mock_response.xpath.return_value = mock_selector
mock_selector.extract_first.side_effect = [
'//imgs.xkcd.com/comics/tree_cropped_(1).jpg', 'xkcd.com/3/'
]
spider = ComicSpider()
result = spider.parse(mock_response)
results = list(result)
assert len(results) == 2
assert results[0].get(
'url') == "http://imgs.xkcd.com/comics/tree_cropped_(1).jpg"
assert results[1].url == "http://xkcd.com/3/"
def test_parse_yields_bad_page(mocker):
mock_response = mocker.patch('scrapy.http.Response')
mock_response.urljoin.return_value = "http://xkcd.com/3/"
mock_response.url = "http://xkcd.com/2/"
mock_selector = mocker.patch('scrapy.selector.SelectorList')
mock_response.xpath.return_value = mock_selector
mock_selector.extract_first.side_effect = [None, 'xkcd.com/3/']
spider = ComicSpider()
result = spider.parse(mock_response)
results = list(result)
assert len(results) == 1
assert results[0].url == "http://xkcd.com/3/"
| mit | Python |
|
fd5da951feee92c055853c63b698b44397ead6be | Add save function for use across the application | brayoh/bucket-list-api | app/db_instance.py | app/db_instance.py | from app import db
def save(data):
try:
print(data)
db.session.add(data)
db.session.commit()
except Exception as e:
raise e
| mit | Python |
|
585fec12673ab0207f5b641a9ba0df4a510667ac | Add harvester for mblwhoilibrary | erinspace/scrapi,felliott/scrapi,mehanig/scrapi,jeffreyliu3230/scrapi,felliott/scrapi,mehanig/scrapi,CenterForOpenScience/scrapi,alexgarciac/scrapi,fabianvf/scrapi,CenterForOpenScience/scrapi,erinspace/scrapi,fabianvf/scrapi | scrapi/harvesters/mblwhoilibrary.py | scrapi/harvesters/mblwhoilibrary.py | '''
Harvester for the WHOAS at MBLWHOI Library for the SHARE project
Example API call: http://darchive.mblwhoilibrary.org/oai/request?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import helpers
from scrapi.base import OAIHarvester
class MblwhoilibraryHarvester(OAIHarvester):
short_name = 'mblwhoilibrary'
long_name = 'WHOAS at MBLWHOI Library'
url = 'http://darchive.mblwhoilibrary.org/oai/request'
@property
def schema(self):
return helpers.updated_schema(self._schema, {
"uris": {
"objectUris": ('//dc:relation/node()', helpers.oai_extract_dois)
}
})
base_url = 'http://darchive.mblwhoilibrary.org/oai/request'
property_list = ['date', 'relation', 'identifier', 'type', 'format', 'setSpec']
timezone_granularity = True
| apache-2.0 | Python |
|
f5cc9c86b1cfbb2cda1b4c1c4c8656a6ca7a2a7f | Create graphingWIP.py | InvisibleTree/highAltBalloon | src/graphingWIP.py | src/graphingWIP.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib.dates as md
import dateutil.parser
# create empty dynamic arrays
temp_x = []
x = []
y = []
f = open("temp.log", "r") # open log folder
for line in f: # load x and y values
temp_line = line.split('=')
temp_x.append(temp_line[0][:-1]) # trim spaces
y.append(float(temp_line[1][1:-2])) # trim C
f.close()
x = [dateutil.parser.parse(s) for s in temp_x]
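# format the x-axis ticks as full date/time stamps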
ax = plt.gca()
xfmt = md.DateFormatter('%d/%m/%Y %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
plt.plot(x, y)
plt.title('Temperature against time')
plt.xlabel('Date and Time (DD/MM/YYYY HH:MM:SS)')
plt.ylabel('Temperature C')
plt.show()
| mit | Python |
|
534fcff9f812df4cef273ca7853df12647b25d06 | Add preliminary metrics file and import some from sklearn | mwojcikowski/opendrugdiscovery | metrics.py | metrics.py | from sklearn.metrics import roc_curve as roc, roc_auc_score as auc
def enrichment_factor():
pass
def log_auc():
pass
| bsd-3-clause | Python |
|
3c45de2506d6fd86ad96ee9f2e1b5b773aad82d9 | split out common functionality | ArrantSquid/squiddly-dotfiles,johnpneumann/dotfiles | fabfile/common.py | fabfile/common.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Common pieces that work on all Nix OS'
.. module:: common
:platform: Linux, MacOS
.. moduleauthor:: John P. Neumann
.. note::
None
"""
# Built In
import os
import sys
# Third Party
from fabric.api import local, cd, task, execute
# Custom
@task
def setup_devdirs():
"""Creates all of our directories."""
home = os.path.expanduser('~')
dirpaths = ['go', 'src', 'bin', 'envs', 'repos']
execute(setup_vimdirs)
for pth in dirpaths:
_create_dir(os.path.join(home, pth))
@task
def setup_vimdirs():
"""Sets up vim directories."""
home = os.path.expanduser('~')
_create_dir(os.path.join(home, '.vim_swap'))
_create_dir(os.path.join(home, '.vim_undo'))
@task
def powerline_fonts(repo_dir):
"""Download and install the powerline fonts.
:param repo_dir: The base directory to check the repo out to.
:type repo_dir: str
:returns: None
"""
execute(setup_devdirs)
with cd(repo_dir):
if not os.path.exists(os.path.join(repo_dir, 'powerline-fonts')):
local('git clone [email protected]:powerline/fonts.git powerline-fonts')
with cd('powerline-fonts'):
local('./install.sh')
@task
def dotfiles(repo_dir):
"""Download dotfiles and create our symlinks.
:param repo_dir: The base directory to check the repo out to.
:type repo_dir: str
:returns: None
"""
execute(setup_devdirs)
dotfiles_dir = os.path.join(
repo_dir, 'dotfiles'
)
if os.path.exists(dotfiles_dir):
sys.exit('dotfiles repo already exists')
with cd(repo_dir):
local('git clone [email protected]:johnpneumann/dotfiles.git')
@task
def dotfiles_symlinks(repo_dir):
"""Creates all of our dotfile symlinks.
:param repo_dir: The base directory to check the repo out to.
:type repo_dir: str
:returns: None
"""
linkage = {
'.bash_aliases': 'bash_aliases_prev',
'.bash_profile': 'bash_profile_prev',
'.bashrc': 'bashrc_prev', '.profile': 'profile_prev',
'.vimrc': 'vimrc_prev', '.vim': 'vim_prev',
'iterm2_prefs': 'iterm2_prefs_prev',
'public_scripts': 'public_scripts_prev'
}
home_dir = os.path.expanduser('~')
for key, value in linkage.items():
dest = os.path.join(home_dir, key)
backup = os.path.join(home_dir, value)
source = os.path.join(repo_dir, key)
_create_symlinks(
source=source, destination=dest, backup=backup
)
@task
def vundle_install():
"""Install vundle and all of the plugins."""
local('git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim')
local('vim +PluginInstall +qall')
def _create_symlinks(source, destination, backup):
"""Creates symlinks and backs up directories.
:param source: The source file.
:type source: str
:param destination: The destination for the symlink.
:type destination: str
:param backup: The destination to backup the file to if it exists.
:type backup: str
:returns: None
"""
if os.path.exists(source):
if os.path.exists(destination):
local('mv {dst} {bckp}'.format(dst=destination, bckp=backup))
local('ln -s {src} {dst}'.format(src=source, dst=destination))
def _create_dir(path):
"""Creates a directory.
:param path: The path to the directory to create.
:type path: str
:returns: None
"""
if os.path.exists(path):
sys.stdout.write('{path} exists\n'.format(path=path))
return
local('mkdir -p {pth}'.format(pth=path))
| mit | Python |
|
40e9825ee0a2ccf7c3e92d4fd6599c1976a240a3 | Add deprecated public `graphql` module | carpedm20/fbchat | fbchat/graphql.py | fbchat/graphql.py | # -*- coding: UTF-8 -*-
"""This file is here to maintain backwards compatability."""
from __future__ import unicode_literals
from .models import *
from .utils import *
from ._graphql import (
FLAGS,
WHITESPACE,
ConcatJSONDecoder,
graphql_color_to_enum,
get_customization_info,
graphql_to_sticker,
graphql_to_attachment,
graphql_to_extensible_attachment,
graphql_to_subattachment,
graphql_to_live_location,
graphql_to_poll,
graphql_to_poll_option,
graphql_to_plan,
graphql_to_quick_reply,
graphql_to_message,
graphql_to_user,
graphql_to_thread,
graphql_to_group,
graphql_to_page,
graphql_queries_to_json,
graphql_response_to_json,
GraphQL,
)
| bsd-3-clause | Python |
|
f98aa5f336cd81ad55bc46122821df3ad314a4cb | Add py-dockerpy-creds (#19198) | iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/py-dockerpy-creds/package.py | var/spack/repos/builtin/packages/py-dockerpy-creds/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyDockerpyCreds(PythonPackage):
"""Python bindings for the docker credentials store API """
homepage = "https://github.com/shin-/dockerpy-creds"
url = "https://github.com/shin-/dockerpy-creds/archive/0.4.0.tar.gz"
version('0.4.0', sha256='c76c2863c6e9a31b8f70ee5b8b0e5ac6860bfd422d930c04a387599e4272b4b9')
version('0.3.0', sha256='3660a5e9fc7c2816ab967e4bdb4802f211e35011357ae612a601d6944721e153')
version('0.2.3', sha256='7278a7e3c904ccea4bcc777b991a39cac9d4702bfd7d76b95ff6179500d886c4')
version('0.2.2', sha256='bb26b8a8882b9d115a43169663cd9557d132a68147d9a1c77cb4a3ffc9897398')
version('0.2.1', sha256='7882efd95f44b5df166b4e34c054b486dc7287932a49cd491edf406763695351')
version('0.2.0', sha256='f2838348e1175079e3062bf0769b9fa5070c29f4d94435674b9f8a76144f4e5b')
version('0.1.0', sha256='f7ab290cb536e7ef1c774d4eb5df86237e579a9c7a87805da39ff07bd14e0aff')
depends_on('[email protected]:2.8,3.4:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-six', type=('build', 'run'))
| lgpl-2.1 | Python |
|
07a1612250a9c3b2de1ffe53fb916a8cff153c3f | add count of collisions | hchiam/cognateLanguage | findCollisions.py | findCollisions.py | from collections import Counter
def countCollisions(entries):
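    # entries that occur more than once are collisions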
collisions = [k for k,v in Counter(entries).items() if v>1]
num_collisions = len(collisions)
print(num_collisions,'word collisions:\n',collisions)
return num_collisions
def countCollisionsInFile(filename):
entries = []
with open(filename,'r') as f:
for line in f:
# get just the words
entries.append(line.split(',')[1].replace(' \'',''))
return countCollisions(entries)
def countCollisionsInList(entries):
return countCollisions(entries)
| mit | Python |
|
4a0fa1028f22944f30e39c65806f0d123e18420f | Create input.py | aniketyevankar/Twitter-Sentiment-Analysis | input.py | input.py | ckey=""
csecret=""
atoken=""
asecret=""
query='' #Add keyword for which you want to start the miner
| mit | Python |
|
98499f07c6dcccba3605e9ab9c8eaef9463b0634 | Add some validators | johnbachman/indra,sorgerlab/belpy,bgyori/indra,johnbachman/belpy,sorgerlab/indra,sorgerlab/indra,johnbachman/belpy,sorgerlab/belpy,sorgerlab/indra,bgyori/indra,bgyori/indra,johnbachman/belpy,sorgerlab/belpy,johnbachman/indra,johnbachman/indra | indra/tools/stmt_validator.py | indra/tools/stmt_validator.py | class StatementValidator:
    def __init__(self):
        pass
class DbRefsEntryValidator:
@staticmethod
def validate(entry):
raise NotImplementedError()
class ChebiPrefix(DbRefsEntryValidator):
@staticmethod
def validate(entry):
return not entry or entry.startswith('CHEBI')
class UniProtIDNotList(DbRefsEntryValidator):
@staticmethod
def validate(entry):
if not isinstance(entry, str):
return False
if ',' in entry:
return False
return True | bsd-2-clause | Python |
|
ba1f04337d0653d4808427b5d07ed8673526b315 | add mygpo.wsgi | gpodder/mygpo,gpodder/mygpo,gpodder/mygpo,gpodder/mygpo | mygpo.wsgi | mygpo.wsgi | #!/usr/bin/python
# -*- coding: utf-8 -*-
# my.gpodder.org FastCGI handler for lighttpd (default setup)
#
# This file is part of my.gpodder.org.
#
# my.gpodder.org is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# my.gpodder.org is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with my.gpodder.org. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import _strptime
# Add this directory as custom Python path
mygpo_root = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, mygpo_root)
sys.path.insert(0, os.path.join(mygpo_root, 'lib'))
# Set the DJANGO_SETTINGS_MODULE environment variable
os.environ['DJANGO_SETTINGS_MODULE'] = 'mygpo.settings'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
| agpl-3.0 | Python |
|
1086259090a396b2a2ed40788d1cb8c8ff7c95f3 | fix the fixme | fingeronthebutton/RIDE,caio2k/RIDE,HelioGuilherme66/RIDE,robotframework/RIDE,fingeronthebutton/RIDE,robotframework/RIDE,robotframework/RIDE,caio2k/RIDE,fingeronthebutton/RIDE,robotframework/RIDE,caio2k/RIDE,HelioGuilherme66/RIDE,HelioGuilherme66/RIDE,HelioGuilherme66/RIDE | src/robotide/plugins/connector.py | src/robotide/plugins/connector.py | # Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robotide.context import LOG, SETTINGS
from robotide import utils
def PluginFactory(application, plugin_class):
try:
plugin = plugin_class(application)
except Exception, err:
return BrokenPlugin(str(err), plugin_class)
else:
return PluginConnector(application, plugin)
class _PluginConnector(object):
def __init__(self, name, doc='', error=None):
self.name = name
self.doc = doc
self.error = error
self.active = False
self.metadata = {}
self.config_panel = lambda self: None
class PluginConnector(_PluginConnector):
def __init__(self, application, plugin):
_PluginConnector.__init__(self, plugin.name, plugin.doc)
self._plugin = plugin
self._settings = SETTINGS['Plugins'].add_section(plugin.name)
self.config_panel = plugin.config_panel
self.metadata = plugin.metadata
if self._settings.get('_active', plugin.initially_active):
self.activate()
def activate(self):
self._plugin.activate()
self.active = True
self._settings.set('_active', True)
def deactivate(self):
self._plugin.deactivate()
self.active = False
self._settings.set('_active', False)
class BrokenPlugin(_PluginConnector):
def __init__(self, error, plugin_class):
name = utils.name_from_class(plugin_class, 'Plugin')
doc = 'This plugin is disabled because it failed to load properly.\n' \
+ 'Error: ' + error
_PluginConnector.__init__(self, name, doc=doc, error=error)
LOG.error("Taking %s plugin into use failed:\n%s" % (name, error))
| # Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robotide.context import LOG, SETTINGS
from robotide import utils
def PluginFactory(application, plugin_class):
try:
plugin = plugin_class(application)
except Exception, err:
return BrokenPlugin(str(err), plugin_class)
else:
return PluginConnector(application, plugin)
class _PluginConnector(object):
def __init__(self, name, doc='', error=None):
self.name = name
self.doc = doc
self.error = error
self.active = False
self.metadata = {}
self.config_panel = lambda self: None
class PluginConnector(_PluginConnector):
def __init__(self, application, plugin):
_PluginConnector.__init__(self, plugin.name, plugin.doc)
self._plugin = plugin
# FIXME: breaks in case the section does not exist
self._settings = SETTINGS['Plugins'][plugin.name]
self.config_panel = plugin.config_panel
self.metadata = plugin.metadata
if self._settings.get('_active', plugin.initially_active):
self.activate()
def activate(self):
self._plugin.activate()
self.active = True
self._settings.set('_active', True)
def deactivate(self):
self._plugin.deactivate()
self.active = False
self._settings.set('_active', False)
class BrokenPlugin(_PluginConnector):
def __init__(self, error, plugin_class):
name = utils.name_from_class(plugin_class, 'Plugin')
doc = 'This plugin is disabled because it failed to load properly.\n' \
+ 'Error: ' + error
_PluginConnector.__init__(self, name, doc=doc, error=error)
LOG.error("Taking %s plugin into use failed:\n%s" % (name, error))
| apache-2.0 | Python |
0d22f1ab7f4c83af280edb799f863fa0f46ea326 | Create generic views for index/login | MarkGalloway/RIS,MarkGalloway/RIS | app/views.py | app/views.py | from flask import render_template, flash, redirect
from app import app
from .forms.login import LoginForm
@app.route('/')
@app.route('/index')
def index():
user = {'nickname': 'Mark'} # fake user
return render_template("index.html",
title='Home',
user=user)
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
# Debug Print
flash('Login requested for Username="%s", remember_me=%s' %
(form.username.data, str(form.remember_me.data)))
return redirect('/index')
return render_template('login.html',
title='Sign In',
form=form)
| apache-2.0 | Python |
|
45939892a21bbf11ddcd1400d26cf2e94fa8ebac | add nox tests. | abilian/abilian-core,abilian/abilian-core,abilian/abilian-core,abilian/abilian-core,abilian/abilian-core | noxfile.py | noxfile.py | import nox
PYTHON_VERSIONS = ["3.6", "3.7", "3.8"]
PACKAGE = "abilian"
@nox.session(python="python3.6")
def lint(session):
# session.env["LC_ALL"] = "en_US.UTF-8"
session.install("poetry", "psycopg2-binary")
session.run("poetry", "install", "-q")
session.run("yarn", external="True")
session.run("make", "lint-ci")
@nox.session(python=PYTHON_VERSIONS)
def pytest(session):
# session.env["LC_ALL"] = "en_US.UTF-8"
session.install("psycopg2-binary")
cmd = "echo ; echo SQLALCHEMY_DATABASE_URI = $SQLALCHEMY_DATABASE_URI ; echo"
session.run("sh", "-c", cmd, external=True)
session.run("poetry", "install", "-q", external="True")
session.run("yarn", external="True")
session.run("pip", "check")
session.run("pytest", "-q")
# TODO later
# @nox.session(python="3.8")
# def typeguard(session):
# # session.env["LC_ALL"] = "en_US.UTF-8"
# session.install("psycopg2-binary")
# session.run("poetry", "install", "-q", external="True")
# session.run("yarn", external="True")
# session.run("pytest", f"--typeguard-packages={PACKAGE}")
| lgpl-2.1 | Python |
|
5addf2c2992cfdedf06da58861dae93347e02fb9 | Support for nox test runner (alternative to tox), provides a workaround for #80. | heuer/segno | noxfile.py | noxfile.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2020 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""\
Nox test runner configuration.
"""
import os
from functools import partial
import shutil
import nox
@nox.session(python="3")
def docs(session):
"""\
Build the documentation.
"""
session.install('-Ur', 'requirements.rtd')
output_dir = os.path.abspath(os.path.join(session.create_tmp(), 'output'))
doctrees, html, man = map(partial(os.path.join, output_dir), ['doctrees', 'html', 'man'])
shutil.rmtree(output_dir, ignore_errors=True)
session.install('.')
session.cd('docs')
session.run('sphinx-build', '-W', '-b', 'html', '-d', doctrees, '.', html)
session.run('sphinx-build', '-W', '-b', 'man', '-d', doctrees, '.', man)
@nox.session(python='3')
def coverage(session):
"""\
Run coverage.
"""
session.install('coverage', '-Ur', 'requirements.testing.txt')
session.install('.')
session.run('coverage', 'erase')
session.run('coverage', 'run', './tests/alltests.py')
session.run('coverage', 'report', '--include=segno*')
session.run('coverage', 'html', '--include=segno*')
@nox.session(python=['2.7', '3.7', 'pypy', 'pypy3'])
def test(session):
"""\
Run test suite.
"""
if session.python == 'pypy':
# See <https://github.com/heuer/segno/issues/80>
session.run('pip', 'uninstall', '-y', 'pip')
session.run('easy_install', 'pip==20.1')
session.install('-Ur', 'requirements.testing.txt')
session.install('.')
session.run('py.test')
| bsd-3-clause | Python |
|
510b90d42dbccd0aa1e3ff48ee8dbe7230b65185 | Add script to compute some stats about data from energy consumption measures | SOMCA/hot-pepper,SOMCA/hot-pepper,SOMCA/hot-pepper,SOMCA/hot-pepper,SOMCA/hot-pepper | get_stats_from.py | get_stats_from.py | import argparse
import csv
from glob import glob
import re
import statistics
import sys
def get_stats_from(files_names, files_content):
for i in range(len(files_content)):
file_name = files_names[i]
file_content = files_content[i]
print("FILE : {0}".format(files_names[i]))
print("\t*MEAN : {0}".format(statistics.mean(file_content)))
print("\t*MEDIAN : {0}".format(statistics.median(file_content)))
try:
print("\t*MOST TYPICAL VALUE : {0}".format(statistics.mode(file_content)))
except:
print("2 most typical values!")
print("\t*STANDARD DEVIATION : {0}".format(statistics.stdev(file_content)))
print("\t*VARIANCE : {0}".format(statistics.variance(file_content)))
def get_global_stats(files_content):
data = []
for sublist in files_content:
data = data + sublist
print("*GLOBAL MEAN : {0}".format(statistics.mean(data)))
print("*GLOBAL MEDIAN : {0}".format(statistics.median(data)))
try:
print("*GLOBAL MOST TYPICAL VALUE : {0}".format(statistics.mode(data)))
except:
print("2 most typical values!")
print("*GLOBAL STANDARD DEVIATION : {0}".format(statistics.stdev(data)))
print("*GLOBAL VARIANCE : {0}".format(statistics.variance(data)))
def main():
parser = argparse.ArgumentParser(description='Get stats from Powertool output')
parser.add_argument('-p', '--path', type=str, default=None, required=True,
help="specify path to your directories")
parser.add_argument('-o', '--output', action="store_true",
help="save the output in the analysed directory")
args = parser.parse_args()
directories = glob(args.path+"*")
if len(directories) == 0:
sys.exit(1)
csv_files = []
for directory in directories:
current_files = [x for x in glob(directory + "/*") if ".csv" in x]
csv_files = csv_files + current_files
files_content = []
for csv_file in csv_files:
with open(csv_file, "r") as csv_content:
csv_reader = csv.reader(csv_content)
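            # keep only rows whose first column parses as a decimal number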
files_content.append([float(row[0]) for row in csv_reader if not (re.match("^\d+?\.\d+?$", row[0]) is None)])
get_stats_from(directories, files_content)
get_global_stats(files_content)
if __name__ == '__main__':
main()
| agpl-3.0 | Python |
|
32dcc681a82ef2246d0fad441481d6e68f79ddd6 | Add Python benchmark | stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib | lib/node_modules/@stdlib/math/base/special/ln/benchmark/python/benchmark.py | lib/node_modules/@stdlib/math/base/special/ln/benchmark/python/benchmark.py | #!/usr/bin/env python
"""Benchmark ln."""
from __future__ import print_function
import timeit
NAME = "ln"
REPEATS = 3
ITERATIONS = 1000000
def print_version():
"""Print the TAP version."""
print("TAP version 13")
def print_summary(total, passing):
"""Print the benchmark summary.
# Arguments
* `total`: total number of tests
* `passing`: number of passing tests
"""
print("#")
print("1.." + str(total)) # TAP plan
print("# total " + str(total))
print("# pass " + str(passing))
print("#")
print("# ok")
def print_results(elapsed):
"""Print benchmark results.
# Arguments
* `elapsed`: elapsed time (in seconds)
# Examples
``` python
python> print_results(0.131009101868)
```
"""
rate = ITERATIONS / elapsed
print(" ---")
print(" iterations: " + str(ITERATIONS))
print(" elapsed: " + str(elapsed))
print(" rate: " + str(rate))
print(" ...")
def benchmark():
"""Run the benchmark and print benchmark results."""
setup = "from math import log; from random import random;"
stmt = "y = log(10000.0*random() - 0.0)"
t = timeit.Timer(stmt, setup=setup)
print_version()
for i in xrange(REPEATS):
print("# python::" + NAME)
elapsed = t.timeit(number=ITERATIONS)
print_results(elapsed)
print("ok " + str(i+1) + " benchmark finished")
print_summary(REPEATS, REPEATS)
def main():
"""Run the benchmark."""
benchmark()
if __name__ == "__main__":
main()
| apache-2.0 | Python |
|
20b450c4cd0ff9c57d894fa263056ff4cd2dbf07 | Add a vim version of merge business hours | ealter/vim_turing_machine,ealter/vim_turing_machine | vim_turing_machine/machines/merge_business_hours/vim_merge_business_hours.py | vim_turing_machine/machines/merge_business_hours/vim_merge_business_hours.py | from vim_turing_machine.machines.merge_business_hours.merge_business_hours import merge_business_hours_transitions
import sys
from vim_turing_machine.vim_machine import VimTuringMachine
if __name__ == '__main__':
merge_business_hours = VimTuringMachine(merge_business_hours_transitions(), debug=True)
merge_business_hours.run(initial_tape=sys.argv[1], max_steps=50)
| mit | Python |
|
9ac5bfb17346f364414f17e3e16ba15ab812f5a0 | tidy up | lsaffre/timtools | src/lino/tools/mail.py | src/lino/tools/mail.py | ## Copyright Luc Saffre 2003-2004.
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
used by :
scripts/openmail.py
tests/etc/1.py
"""
import sys,os
import urllib
import email
import webbrowser
def mailto_url(to=None,subject=None,body=None,cc=None):
"""
encodes the content as a mailto link as described on
http://www.faqs.org/rfcs/rfc2368.html
Examples partly taken from
http://selfhtml.teamone.de/html/verweise/email.htm
"""
#url = "mailto:" + urllib.quote(to.strip())
url = "mailto:" + urllib.quote(to.strip(),"@,")
sep = "?"
if cc:
url+= sep + "cc=" + urllib.quote(cc,"@,")
sep = "&"
if subject:
url+= sep + "subject=" + urllib.quote(subject,"")
sep = "&"
if body:
# Also note that line breaks in the body of a message MUST be
# encoded with "%0D%0A". (RFC 2368)
body="\r\n".join(body.splitlines())
url+= sep + "body=" + urllib.quote(body,"")
sep = "&"
# if not confirm("okay"): return
return url
## def readmail2(filename):
## "reads a real RFC2822 file"
## msg = email.message_from_file(open(filename))
## if msg.is_multipart():
## raise "%s contains a multipart message : not supported" % filename
## return msg
def readmail(filename):
"""reads a "simplified pseudo-RFC2822" file
"""
from email.Message import Message
msg = Message()
text = open(filename).read()
text = text.decode("cp850")
text = text.encode("iso-8859-1","replace")
headersDone = False
subject = None
to = None
body = ""
for line in text.splitlines():
if headersDone:
body += line + "\n"
else:
if len(line) == 0:
headersDone = True
else:
(name,value) = line.split(':')
msg[name] = value.strip()
## if name.lower() == 'subject':
## subject = value.strip()
## elif name.lower() == 'to':
## to = value.strip()
## else:
## raise "%s : invalid header field in line %s" % (
## name,repr(line))
msg.set_payload(body)
return msg
def openmail(msg):
url = mailto_url(msg.get('to'),msg.get("subject"),msg.get_payload())
webbrowser.open(url,new=1)
| bsd-2-clause | Python |
|
137a7c6e98e0ba8bd916d4ba696b0f0f4e2cdc56 | Create uptime.py | mc3k/graph-stats,mc3k/graph-stats | plot-uptime/uptime.py | plot-uptime/uptime.py | mit | Python |
||
e90c48ba46d7971386e01b3def9edbb2df5d74e8 | Create mummy.py | airtonix/django-mummy-command | management/commands/mummy.py | management/commands/mummy.py | """
1. Install model-mommy
`pip install model-mommy`
2. Use the command
`./manage mummy someotherapp.HilariousModelName:9000 yetanotherapp.OmgTheseModelNamesLawl:1`
"""
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from model_mommy import mommy
class Command(BaseCommand):
args = '<modelpath modelpath:count ...>'
help = 'Generate model instances using model-mommy'
def handle(self, *args, **options):
for modelpath in args:
count = 1
if ":" in modelpath:
                modelpath, count = modelpath.split(":")
                count = int(count)
self.stdout.write("Processing: {}".format(modelpath))
mommy.make(modelpath, _quantity=count)
| mit | Python |
|
3b7de4dbe3611863620cb528092779d25efde025 | remove dj 3.2 warnings | Christophe31/django-data-exports,Christophe31/django-data-exports | data_exports/apps.py | data_exports/apps.py | #!/usr/bin/env python
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class CsvExportConfig(AppConfig):
name = 'data_exports'
default_auto_field = "django.db.models.AutoField"
| bsd-3-clause | Python |
|
85c732e395e3db4ec63a0d8580d895363d82e4a0 | Add the salt.output module | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/output.py | salt/output.py | """
A simple way of setting the output format for data from modules
"""
import pprint
# Conditionally import the json and yaml modules
try:
import json
JSON = True
except ImportError:
JSON = False
try:
import yaml
YAML = True
except ImportError:
YAML = False
__all__ = ('get_outputter',)
class Outputter(object):
"""
Class for outputting data to the screen.
"""
supports = None
@classmethod
def check(klass, name):
# Don't advertise Outputter classes for optional modules
if hasattr(klass, "enabled") and not klass.enabled:
return False
return klass.supports == name
def __call__(self, data, **kwargs):
print "Calling Outputter.__call__()"
pprint.pprint(data)
class TxtOutputter(Outputter):
"""
Plain text output. Primarily for returning output from
shell commands in the exact same way they would output
on the shell when ran directly.
"""
supports = "txt"
def __call__(self, data, **kwargs):
if hasattr(data, "keys"):
for key in data.keys():
value = data[key]
for line in value.split('\n'):
print "{0}: {1}".format(key, line)
else:
# For non-dictionary data, run pprint
super(TxtOutputter, self).__call__(data)
class JSONOutputter(Outputter):
"""JSON output. Chokes on non-serializable objects."""
supports = "json"
enabled = JSON
def __call__(self, data, **kwargs):
try:
# A good kwarg might be: indent=4
print json.dumps(data, **kwargs)
except TypeError:
super(JSONOutputter, self).__call__(data)
class YamlOutputter(Outputter):
"""Yaml output. All of the cool kids are doing it."""
supports = "yaml"
enabled = YAML
def __call__(self, data, **kwargs):
print yaml.dump(data, **kwargs)
class RawOutputter(Outputter):
"""Raw output. This calls repr() on the returned data."""
supports = "raw"
def __call__(self, data, **kwargs):
print data
def get_outputter(name=None):
"""
Factory function for returning the right output class.
Usage:
printout = get_outputter("txt")
printout(ret)
"""
# Return an actual instance of the correct output class
for i in Outputter.__subclasses__():
if i.check(name):
return i()
return Outputter()
| apache-2.0 | Python |
|
380acd0e40ad2924f1434d4ae7f7e0a8a163139f | add script for building the cluster catalog | legacysurvey/legacypipe,legacysurvey/legacypipe | bin/build-cluster-catalog.py | bin/build-cluster-catalog.py | #!/usr/bin/env python
"""Build and write out the NGC-star-clusters.fits catalog.
"""
import os
import numpy as np
import numpy.ma as ma
from astropy.io import ascii
from astropy.table import Table
from astrometry.util.starutil_numpy import hmsstring2ra, dmsstring2dec
from astrometry.libkd.spherematch import match_radec
from pkg_resources import resource_filename
#import desimodel.io
#import desimodel.footprint
#tiles = desimodel.io.load_tiles(onlydesi=True)
if not os.path.isfile('/tmp/NGC.csv'):
os.system('wget -P /tmp https://raw.githubusercontent.com/mattiaverga/OpenNGC/master/NGC.csv')
names = ('name', 'type', 'ra_hms', 'dec_dms', 'const', 'majax', 'minax',
'pa', 'bmag', 'vmag', 'jmag', 'hmag', 'kmag', 'sbrightn', 'hubble',
'cstarumag', 'cstarbmag', 'cstarvmag', 'messier', 'ngc', 'ic',
'cstarnames', 'identifiers', 'commonnames', 'nednotes', 'ongcnotes')
NGC = ascii.read('/tmp/NGC.csv', delimiter=';', names=names)
NGC = NGC[(NGC['ra_hms'] != 'N/A')]
ra, dec = [], []
for _ra, _dec in zip(ma.getdata(NGC['ra_hms']), ma.getdata(NGC['dec_dms'])):
ra.append(hmsstring2ra(_ra.replace('h', ':').replace('m', ':').replace('s','')))
dec.append(dmsstring2dec(_dec.replace('d', ':').replace('m', ':').replace('s','')))
NGC['ra'] = ra
NGC['dec'] = dec
objtype = np.char.strip(ma.getdata(NGC['type']))
# Keep all globular clusters and planetary nebulae
keeptype = ('PN', 'GCl')
keep = np.zeros(len(NGC), dtype=bool)
for otype in keeptype:
ww = [otype == tt for tt in objtype]
keep = np.logical_or(keep, ww)
print(np.sum(keep))
clusters = NGC[keep]
# Fill missing major axes with a nominal 0.4 arcmin (roughly works
# for NGC7009, which is the only missing PN in the footprint).
ma.set_fill_value(clusters['majax'], 0.4)
clusters['majax'] = ma.filled(clusters['majax'].data)
# Increase the radius of IC4593
# https://github.com/legacysurvey/legacypipe/issues/347
clusters[clusters['name'] == 'IC4593']['majax'] = 0.5
#indesi = desimodel.footprint.is_point_in_desi(tiles, ma.getdata(clusters['ra']),
# ma.getdata(clusters['dec']))
#print(np.sum(indesi))
#bb = clusters[indesi]
#bb[np.argsort(bb['majax'])[::-1]]['name', 'ra', 'dec', 'majax', 'type']
# Build the output catalog: select a subset of the columns and rename
# majax-->radius (arcmin-->degree)
out = Table()
out['name'] = clusters['name']
out['alt_name'] = ['' if mm == 0 else 'M{}'.format(str(mm))
for mm in ma.getdata(clusters['messier'])]
out['type'] = clusters['type']
out['ra'] = clusters['ra']
out['dec'] = clusters['dec']
out['radius_orig'] = (clusters['majax'] / 60).astype('f4') # [degrees]
out['radius'] = out['radius_orig']
# Read the ancillary globular cluster catalog and update the radii in the NGC.
#https://heasarc.gsfc.nasa.gov/db-perl/W3Browse/w3table.pl?tablehead=name%3Dglobclust&Action=More+Options
if False:
gcfile = resource_filename('legacypipe', 'data/globular_clusters.fits')
gcs = Table.read(gcfile)
I, J, _ = match_radec(clusters['ra'], clusters['dec'], gcs['RA'], gcs['DEC'], 10./3600., nearest=True)
out['radius'][I] = (gcs['HALF_LIGHT_RADIUS'][J] / 60).astype('f4') # [degrees]
if False: # debugging
bb = out[['M' in nn for nn in out['alt_name']]]
bb[np.argsort(bb['radius'])]
bb['radius'] *= 60
bb['radius_orig'] *= 60
print(bb)
clusterfile = resource_filename('legacypipe', 'data/NGC-star-clusters.fits')
print('Writing {}'.format(clusterfile))
out.write(clusterfile, overwrite=True)
# Code to help visually check all the globular clusters.
if False:
checktype = ('GCl', 'PN')
check = np.zeros(len(NGC), dtype=bool)
for otype in checktype:
ww = [otype == tt for tt in objtype]
check = np.logical_or(check, ww)
check_clusters = NGC[check] # 845 of them
# Write out a catalog, load it into the viewer and look at each of them.
check_clusters[['ra', 'dec', 'name']].write('/tmp/check.fits', overwrite=True) # 25 of them
| bsd-3-clause | Python |
|
9b0c335fc956c2d2156d169e3636d862ebfbadc0 | add a scraping script | showa-yojyo/bin,showa-yojyo/bin | hadairopink.py | hadairopink.py | #!/usr/bin/env python
"""
No description.
"""
import sys
from scrapy import cmdline, Request
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
TARGET_DOMAIN = 'hadairopink.com'
XPATH_IMAGE_SRC = '//div[@class="kizi"]//a/img[contains(@src, "/wp-content/uploads/")]/@src'
XPATH_PAGINATION = '/html/body//div[@class="pagination"]/a[@data-wpel-link="internal"]'
XPATH_ENTRY = '/html/body//h3[@class="entry-title-ac"]/a'
class Crawler(CrawlSpider):
"""No descrition"""
name = TARGET_DOMAIN
allowed_domains = [TARGET_DOMAIN]
custom_settings = {
'DOWNLOAD_DELAY': 1,
}
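    # follow entry links to scrape their images and pagination links to keep crawling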
rules = (
Rule(LinkExtractor(restrict_xpaths=XPATH_ENTRY), callback='parse_entry'),
Rule(LinkExtractor(restrict_xpaths=XPATH_PAGINATION)),
)
def start_requests(self):
"""No descrition"""
url = self.tag
yield Request(url, dont_filter=True)
def parse_entry(self, response):
"""No descrition"""
if images := response.xpath(XPATH_IMAGE_SRC).getall():
yield {
'title': response.xpath('//title/text()').get(),
'url': response.url,
'images': images}
if __name__ == '__main__':
#cmdline.execute(f"scrapy runspider {sys.argv[0]} -a tag={sys.argv[1]} -O images.csv".split())
command_line = ["scrapy", "runspider"]
command_line.extend(sys.argv)
cmdline.execute(command_line)
| mit | Python |
|
285cddc3ed75f70e077738a206c50a57671245ea | add hello world script by pyThon | m60dx/raspiweb | hello_flask.py | hello_flask.py | # -*- coding: utf-8 -*-
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_flask():
return 'Hello Flask!'
if __name__ == '__main__':
app.run() | mit | Python |
|
7478a6605b4d722e2eec9031457fb33ee99857f5 | add geo tools from dingo | openego/eDisGo,openego/eDisGo | edisgo/tools/geo.py | edisgo/tools/geo.py | from geopy.distance import vincenty
import os
if not 'READTHEDOCS' in os.environ:
from shapely.geometry import LineString
from shapely.ops import transform
import logging
logger = logging.getLogger('edisgo')
def calc_geo_branches_in_polygon(mv_grid, polygon, mode, proj):
# TODO: DOCSTRING
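    # project the polygon and each branch to the same CRS, then test intersection/containment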
branches = []
polygon_shp = transform(proj, polygon)
for branch in mv_grid.graph_edges():
nodes = branch['adj_nodes']
branch_shp = transform(proj, LineString([nodes[0].geo_data, nodes[1].geo_data]))
# check if branches intersect with polygon if mode = 'intersects'
if mode == 'intersects':
if polygon_shp.intersects(branch_shp):
branches.append(branch)
# check if polygon contains branches if mode = 'contains'
elif mode == 'contains':
if polygon_shp.contains(branch_shp):
branches.append(branch)
# error
else:
raise ValueError('Mode is invalid!')
return branches
def calc_geo_branches_in_buffer(node, mv_grid, radius, radius_inc, proj):
""" Determines branches in nodes' associated graph that are at least partly within buffer of `radius` from `node`.
If there are no nodes, the buffer is successively extended by `radius_inc` until nodes are found.
Args:
node: origin node (e.g. LVStationDing0 object) with associated shapely object (attribute `geo_data`) in any CRS
(e.g. WGS84)
radius: buffer radius in m
radius_inc: radius increment in m
proj: pyproj projection object: nodes' CRS to equidistant CRS (e.g. WGS84 -> ETRS)
Returns:
list of branches (NetworkX branch objects)
"""
branches = []
while not branches:
node_shp = transform(proj, node.geo_data)
buffer_zone_shp = node_shp.buffer(radius)
for branch in mv_grid.graph_edges():
nodes = branch['adj_nodes']
branch_shp = transform(proj, LineString([nodes[0].geo_data, nodes[1].geo_data]))
if buffer_zone_shp.intersects(branch_shp):
branches.append(branch)
radius += radius_inc
return branches
def calc_geo_dist_vincenty(node_source, node_target):
""" Calculates the geodesic distance between `node_source` and `node_target` incorporating the detour factor in
config_calc.cfg.
Args:
node_source: source node (Ding0 object), member of _graph
node_target: target node (Ding0 object), member of _graph
Returns:
Distance in m
"""
branch_detour_factor = cfg_ding0.get('assumptions', 'branch_detour_factor')
# notice: vincenty takes (lat,lon)
branch_length = branch_detour_factor * vincenty((node_source.geo_data.y, node_source.geo_data.x),
(node_target.geo_data.y, node_target.geo_data.x)).m
# ========= BUG: LINE LENGTH=0 WHEN CONNECTING GENERATORS ===========
# When importing generators, the geom_new field is used as position. If it is empty, EnergyMap's geom
# is used and so there are a couple of generators at the same position => length of interconnecting
# line is 0. See issue #76
if branch_length == 0:
branch_length = 1
logger.warning('Geo distance is zero, check objects\' positions. '
'Distance is set to 1m')
# ===================================================================
return branch_length
| agpl-3.0 | Python |
|
27ea547fbd7c936bd017b64b31ecf09ed991c6c0 | Add index to fixed_ips.address | Juniper/nova,thomasem/nova,varunarya10/nova_test_latest,phenoxim/nova,sridevikoushik31/openstack,maheshp/novatest,mahak/nova,CEG-FYP-OpenStack/scheduler,Brocade-OpenSource/OpenStack-DNRM-Nova,redhat-openstack/nova,eharney/nova,saleemjaveds/https-github.com-openstack-nova,yrobla/nova,gspilio/nova,yrobla/nova,usc-isi/nova,BeyondTheClouds/nova,zaina/nova,mandeepdhami/nova,zhimin711/nova,MountainWei/nova,cloudbase/nova,Stavitsky/nova,bgxavier/nova,badock/nova,tealover/nova,fajoy/nova,dstroppa/openstack-smartos-nova-grizzly,Yusuke1987/openstack_template,openstack/nova,psiwczak/openstack,klmitch/nova,dstroppa/openstack-smartos-nova-grizzly,blueboxgroup/nova,kimjaejoong/nova,petrutlucian94/nova_dev,shootstar/novatest,eonpatapon/nova,j-carpentier/nova,thomasem/nova,felixma/nova,akash1808/nova,scripnichenko/nova,zzicewind/nova,vmturbo/nova,citrix-openstack-build/nova,vmturbo/nova,yatinkumbhare/openstack-nova,NeCTAR-RC/nova,BeyondTheClouds/nova,leilihh/nova,devendermishrajio/nova_test_latest,zzicewind/nova,mahak/nova,takeshineshiro/nova,alvarolopez/nova,rahulunair/nova,saleemjaveds/https-github.com-openstack-nova,cloudbau/nova,mikalstill/nova,cernops/nova,fajoy/nova,eayunstack/nova,cernops/nova,paulmathews/nova,noironetworks/nova,vmturbo/nova,BeyondTheClouds/nova,tangfeixiong/nova,NoBodyCam/TftpPxeBootBareMetal,cloudbase/nova,Metaswitch/calico-nova,redhat-openstack/nova,CloudServer/nova,badock/nova,whitepages/nova,dims/nova,iuliat/nova,cloudbau/nova,mandeepdhami/nova,SUSE-Cloud/nova,sridevikoushik31/nova,gooddata/openstack-nova,orbitfp7/nova,phenoxim/nova,usc-isi/extra-specs,bclau/nova,bigswitch/nova,shahar-stratoscale/nova,tanglei528/nova,sridevikoushik31/nova,CEG-FYP-OpenStack/scheduler,openstack/nova,berrange/nova,klmitch/nova,yosshy/nova,rickerc/nova_audit,dawnpower/nova,isyippee/nova,aristanetworks/arista-ovs-nova,alaski/nova,viggates/nova,ewindisch/nova,belmiromoreira/nova,NewpTone/stacklab-nova,sacharya/nova,devoid/nova,rajalokan/nova,aristanetworks/arista-ovs-nova,adelina-t/nova,houshengbo/nova_vmware_compute_driver,akash1808/nova,cyx1231st/nova,mikalstill/nova,spring-week-topos/nova-week,rajalokan/nova,paulmathews/nova,houshengbo/nova_vmware_compute_driver,savi-dev/nova,gooddata/openstack-nova,dawnpower/nova,watonyweng/nova,apporc/nova,JianyuWang/nova,rajalokan/nova,zaina/nova,gspilio/nova,tangfeixiong/nova,JianyuWang/nova,petrutlucian94/nova,nikesh-mahalka/nova,hanlind/nova,Tehsmash/nova,JioCloud/nova,scripnichenko/nova,joker946/nova,qwefi/nova,mmnelemane/nova,rahulunair/nova,apporc/nova,CiscoSystems/nova,DirectXMan12/nova-hacking,paulmathews/nova,bgxavier/nova,Yusuke1987/openstack_template,gooddata/openstack-nova,TwinkleChawla/nova,barnsnake351/nova,jianghuaw/nova,cloudbase/nova-virtualbox,Francis-Liu/animated-broccoli,maoy/zknova,joker946/nova,luogangyi/bcec-nova,sridevikoushik31/nova,josephsuh/extra-specs,mmnelemane/nova,mikalstill/nova,j-carpentier/nova,noironetworks/nova,fnordahl/nova,tudorvio/nova,savi-dev/nova,edulramirez/nova,projectcalico/calico-nova,tudorvio/nova,Triv90/Nova,akash1808/nova_test_latest,cyx1231st/nova,zhimin711/nova,raildo/nova,Juniper/nova,josephsuh/extra-specs,silenceli/nova,NewpTone/stacklab-nova,TieWei/nova,affo/nova,isyippee/nova,ntt-sic/nova,OpenAcademy-OpenStack/nova-scheduler,rahulunair/nova,Yuriy-Leonov/nova,tealover/nova,double12gzh/nova,fajoy/nova,dims/nova,klmitch/nova,leilihh/novaha,Tehsmash/nova,vladikr/nova_drafts,bclau/nova,tianweizhang/nova,mgagne/nova,Triv90/Nova,JioCloud/n
ova_test_latest,projectcalico/calico-nova,psiwczak/openstack,blueboxgroup/nova,plumgrid/plumgrid-nova,sebrandon1/nova,DirectXMan12/nova-hacking,cloudbase/nova,affo/nova,leilihh/nova,barnsnake351/nova,dstroppa/openstack-smartos-nova-grizzly,tianweizhang/nova,watonyweng/nova,maheshp/novatest,usc-isi/extra-specs,rajalokan/nova,vladikr/nova_drafts,shahar-stratoscale/nova,takeshineshiro/nova,nikesh-mahalka/nova,openstack/nova,sridevikoushik31/nova,jianghuaw/nova,mahak/nova,houshengbo/nova_vmware_compute_driver,josephsuh/extra-specs,maoy/zknova,virtualopensystems/nova,NoBodyCam/TftpPxeBootBareMetal,jianghuaw/nova,double12gzh/nova,hanlind/nova,jeffrey4l/nova,tanglei528/nova,CCI-MOC/nova,OpenAcademy-OpenStack/nova-scheduler,rrader/nova-docker-plugin,savi-dev/nova,ruslanloman/nova,virtualopensystems/nova,vmturbo/nova,JioCloud/nova,cloudbase/nova-virtualbox,angdraug/nova,devendermishrajio/nova,eonpatapon/nova,sridevikoushik31/openstack,Juniper/nova,berrange/nova,jeffrey4l/nova,eayunstack/nova,whitepages/nova,viggates/nova,cernops/nova,alexandrucoman/vbox-nova-driver,leilihh/novaha,TieWei/nova,jianghuaw/nova,felixma/nova,plumgrid/plumgrid-nova,hanlind/nova,CiscoSystems/nova,CloudServer/nova,maelnor/nova,psiwczak/openstack,usc-isi/nova,shail2810/nova,Brocade-OpenSource/OpenStack-DNRM-Nova,gooddata/openstack-nova,LoHChina/nova,raildo/nova,sebrandon1/nova,klmitch/nova,aristanetworks/arista-ovs-nova,kimjaejoong/nova,usc-isi/nova,Triv90/Nova,iuliat/nova,varunarya10/nova_test_latest,devendermishrajio/nova_test_latest,orbitfp7/nova,NewpTone/stacklab-nova,silenceli/nova,imsplitbit/nova,sacharya/nova,alexandrucoman/vbox-nova-driver,alvarolopez/nova,eharney/nova,MountainWei/nova,Juniper/nova,adelina-t/nova,mgagne/nova,ruslanloman/nova,devoid/nova,ewindisch/nova,yatinkumbhare/openstack-nova,maheshp/novatest,belmiromoreira/nova,Francis-Liu/animated-broccoli,JioCloud/nova_test_latest,NeCTAR-RC/nova,petrutlucian94/nova,citrix-openstack-build/nova,Metaswitch/calico-nova,maelnor/nova,devendermishrajio/nova,Yuriy-Leonov/nova,maoy/zknova,DirectXMan12/nova-hacking,ntt-sic/nova,rrader/nova-docker-plugin,SUSE-Cloud/nova,shail2810/nova,yrobla/nova,qwefi/nova,usc-isi/extra-specs,rickerc/nova_audit,edulramirez/nova,akash1808/nova_test_latest,gspilio/nova,alaski/nova,TwinkleChawla/nova,CCI-MOC/nova,NoBodyCam/TftpPxeBootBareMetal,petrutlucian94/nova_dev,angdraug/nova,luogangyi/bcec-nova,bigswitch/nova,Stavitsky/nova,fnordahl/nova,LoHChina/nova,shootstar/novatest,ted-gould/nova,spring-week-topos/nova-week,imsplitbit/nova,yosshy/nova,sebrandon1/nova,sridevikoushik31/openstack,ted-gould/nova | nova/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py | nova/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py | # Copyright 2012 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('fixed_ips', meta, autoload=True)
index = Index('address', instances.c.address)
index.create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('fixed_ips', meta, autoload=True)
index = Index('address', instances.c.address)
index.drop(migrate_engine)
| apache-2.0 | Python |
|
65258cf8d11e8e5c7cce3e07d9a389e5617948dd | Add boilerplate code | Tigge/advent-of-code-2016 | aoc.py | aoc.py | import argparse
import importlib
import sys
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Advent of Code 2016")
parser.add_argument("--day", type=int, dest="days", nargs="+", default=range(1, 25))
parser.add_argument("--stdin", dest='stdin', action='store_true', default=False)
args = parser.parse_args()
print("Advent of Code 2016")
print("===================")
print()
for day in args.days:
try:
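            # each day's solution lives in day_<n>.py and must expose a Problem class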
problem_module = importlib.import_module("day_{}".format(day))
input_file = open("day_{}.txt".format(day)) if not args.stdin else sys.stdin
problem = problem_module.Problem(input_file)
print("Day", day)
print("------")
if hasattr(problem, 'step1') and callable(getattr(problem, 'step1')):
print("Step 1:", problem.step1())
if hasattr(problem, 'step2') and callable(getattr(problem, 'step2')):
print("Step 2:", problem.step2())
print()
except ImportError as e:
print("Day", day, "is not implemented yet")
| mit | Python |
|
6db7902e5f78d28b9a00eb801c12d15c91949453 | Add gruneisen script for making figure | atztogo/phonondb,atztogo/phonondb,atztogo/phonondb | phonondb/phonopy/gruneisen.py | phonondb/phonopy/gruneisen.py | import numpy as np
from cogue.crystal.utility import klength2mesh
class ModeGruneisen:
def __init__(self,
phonon_orig,
phonon_plus,
phonon_minus,
distance=100):
self._phonopy_gruneisen = phonopy_gruneisen
self._phonon = phonon
self._lattice = np.array(phonon.get_unitcell().get_cell().T,
dtype='double')
self._mesh = None
self._gruneisen = None
def run(self):
self._set_mesh(distance=distance)
if self._run_mesh_sampling():
self._run_gruneisen()
return True
return False
def get_lattice(self):
return self._lattice
def get_mesh(self):
return self._mesh
def get_mesh_gruneisen(self):
return self._gruneisen
def plot(self, plt, max_index=101):
temps, fe, entropy, cv = self._thermal_properties
fig = plt.figure()
fig.subplots_adjust(left=0.20, right=0.92, bottom=0.18)
plt.tick_params(axis='both', which='major', labelsize=10.5)
ax = fig.add_subplot(111)
plt.plot(temps[:max_index], fe[:max_index], 'r-')
plt.plot(temps[:max_index], entropy[:max_index], 'b-')
plt.plot(temps[:max_index], cv[:max_index], 'g-')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
aspect = (xlim[1] - xlim[0]) / (ylim[1] - ylim[0])
# ax.set_aspect(aspect * 0.7)
plt.legend(('Free energy [kJ/mol]', 'Entropy [J/K/mol]',
r'C$_\mathrm{V}$ [J/K/mol]'),
loc='best',
prop={'size':8.5},
frameon=False)
plt.xlabel("Temperatures (K)")
plt.ylabel("Thermal properties (*/unitcell)")
def save_figure(self, plt):
plt.savefig("gruneisen.png")
def _set_mesh(self, distance=100):
self._mesh = klength2mesh(distance, self._lattice)
def _run_mesh_sampling(self):
return self._phonopy_gruneisen.set_mesh(self._mesh)
def _run_gruneisen(self):
self._thermal_properties = self._phonon.get_thermal_properties()
if __name__ == '__main__':
import sys
import yaml
from phonopy import Phonopy
from phonopy.gruneisen.mesh import Mesh as GruneisenMesh
from phonopy.interface.phonopy_yaml import phonopyYaml
from phonopy.file_IO import parse_FORCE_SETS
from cogue.crystal.utility import get_angles, get_lattice_parameters
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams.update({'figure.figsize': (4.5, 3)})
import matplotlib.pyplot as plt
phonons = []
for dirname in ('orig', 'plus', 'minus'):
if len(sys.argv) > 1:
cell = phonopyYaml("%s/" % dirname + sys.argv[1]).get_atoms()
else:
cell = phonopyYaml("%s/POSCAR-unitcell.yaml" % dirname).get_atoms()
phonon_info = yaml.load(open("%s/%s.yaml" % (dirname, dirname)))
phonon = Phonopy(cell,
phonon_info['supercell_matrix'],
is_auto_displacements=False)
force_sets = parse_FORCE_SETS()
phonon.set_displacement_dataset(force_sets)
phonon.produce_force_constants()
phonons.append(phonon)
distance = 100
gruneisen = ModeGruneisen(phonons[0], phonons[1], phonons[2], distance=distance)
if gruneisen.run():
gruneisen.plot(plt)
lattice = gruneisen.get_lattice()
print "a, b, c =", get_lattice_parameters(lattice)
print "alpha, beta, gamma =", get_angles(lattice)
print "mesh (x=%f) =" % distance, gruneisen.get_mesh()
gruneisen.save_figure(plt)
else:
print "Mode Gruneisen parameter calculation failed."
| bsd-3-clause | Python |
|
813eb3b6bdc01906e39f11f93b4a326fc2fb1ee5 | Add kitchen-sink base test | lantiga/pytorch2c,lantiga/pytorch2c,lantiga/pytorch2c | test/base.py | test/base.py | import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import os
import uuid
import torch2c
def base_test():
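    # build two small models and combine their outputs so several ops appear in one graph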
fc1 = nn.Linear(10,20)
fc1.weight.data.normal_(0.0,1.0)
fc1.bias.data.normal_(0.0,1.0)
fc2 = nn.Linear(20,2)
fc2.weight.data.normal_(0.0,1.0)
fc2.bias.data.normal_(0.0,1.0)
model_0 = lambda x: F.log_softmax(fc2(F.relu(fc1(x))))
fc3 = nn.Linear(10,2)
fc3.weight.data.normal_(0.0,1.0)
fc3.bias.data.normal_(0.0,1.0)
model_1 = lambda x: F.softmax(F.relu(fc3(x)))
data = Variable(torch.rand(10,10))
out = model_0(data) + model_1(data) + 1
out_path = 'out'
if not os.path.isdir(out_path):
os.mkdir(out_path)
uid = str(uuid.uuid4())
torch2c.compile(out,'base',os.path.join(out_path,uid),compile_test=True)
if __name__=='__main__':
base_test()
| mit | Python |
|
407f7fcf8f481c57df59789b7f845928428f1bf9 | Add example script. | killarny/telegram-bot | telegrambot/example.py | telegrambot/example.py | from telegrambot import TelegramBot, main
from telegrambot.commands import GetCommand
class DemoTelegramBot(TelegramBot, GetCommand):
pass
if __name__ == '__main__':
main(bot_class=DemoTelegramBot) | mit | Python |
|
9fdd671d9c0b91dc789ebf3b24226edb3e6a072a | Add new migration to load metrics fixtures | sleepers-anonymous/zscore,sleepers-anonymous/zscore,sleepers-anonymous/zscore,sleepers-anonymous/zscore | sleep/migrations/0002_load_metrics.py | sleep/migrations/0002_load_metrics.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.core.management import call_command
def load_metrics(apps, schema_editor):
call_command('loaddata', 'metrics.json')
class Migration(migrations.Migration):
dependencies = [
('sleep', '0001_initial'),
]
    operations = [
        migrations.RunPython(load_metrics),
    ]
| mit | Python |
|
7ecc619104177c72b69337a35c7604491f2b06ec | Create amman.py | ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube | ideascube/conf/amman.py | ideascube/conf/amman.py | # -*- coding: utf-8 -*-
"""Amman box in Jordan"""
from .base import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"مخيم الأزرق"
COUNTRIES_FIRST = ['SY', 'JO']
TIME_ZONE = 'Asia/Amman'
LANGUAGE_CODE = 'ar'
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'refugee_id', 'birth_year',
'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['refugee_id', 'short_name', 'full_name', 'latin_name', 'birth_year', 'gender']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the camp'), ['camp_entry_date', 'camp_activities', 'current_occupation', 'camp_address']), # noqa
(_('Origin'), ['country', 'city', 'country_of_origin_occupation', 'school_level', 'is_sent_to_school']), # noqa
(_('Language skills'), ['ar_level', 'en_level']),
(_('National residents'), ['id_card_number']),
)
ENTRY_ACTIVITY_CHOICES = [
('16 Days of Activism', _('16 Days of Activism')),
("AMANI Campaign", _("AMANI Campaign")),
("Anger Management Training", _("Anger Management Training")),
("Basic Computer Training", _("Basic Computer Training")),
("Beauty Training", _("Beauty Training")),
("Book Club", _("Book Club")),
("Conflict Resolution Training", _("Conflict Resolution Training")),
("Coping Skills and Mechanisms Training", _("Coping Skills and Mechanisms Training")), # noqa
("EDRAAK", _("EDRAAK")),
("Emotional intelligence Training", _("Emotional intelligence Training")),
("Handicrafts", _("Handicrafts")),
("How to be a Psychosocial Counselor Training", _("How to be a Psychosocial Counselor Training")), # noqa
("I am Woman", _("I am Woman")),
("International Children Day", _("International Children Day")),
("International Refugee Day", _("International Refugee Day")),
("Marathon", _("Marathon")),
("Mother's day celebration", _("Mother's day celebration")),
("Parenting Skills Training", _("Parenting Skills Training")),
("Peer Support Group", _("Peer Support Group")),
("Psychosocial ART Interventions Training", _("Psychosocial ART Interventions Training")), # noqa
("Puppets and Theatre", _("Puppets and Theatre")),
("Sewing and stitching", _("Sewing and stitching")),
("SIMSIM Club", _("SIMSIM Club")),
("Social Work Training", _("Social Work Training")),
("Stress Management Training", _("Stress Management Training")),
("Training of Trainers", _("Training of Trainers")),
("World Mental Health Day", _("World Mental Health Day")),
]
| agpl-3.0 | Python |
|
ef4d7e4fb43b5db29576f95625fd612c259731be | Create ServoSync.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/Mats/ServoSync.py | home/Mats/ServoSync.py | port = "COM99"
arduino = Runtime.start("arduino","Arduino")
vard = Runtime.start("va","VirtualArduino")
vard.connect(port)
arduino.connect(port)
servo1 = Runtime.start("servo1","Servo")
servo2 = Runtime.start("servo2","Servo")
servo1.attach("arduino",1)
servo2.attach("arduino",2)
servo1.sync(servo2)
| apache-2.0 | Python |