commit
stringlengths 40
40
| subject
stringlengths 4
1.73k
| repos
stringlengths 5
127k
| old_file
stringlengths 2
751
| new_file
stringlengths 2
751
| new_contents
stringlengths 1
8.98k
| old_contents
stringlengths 0
6.59k
| license
stringclasses 13
values | lang
stringclasses 23
values |
---|---|---|---|---|---|---|---|---|
a03da2611de32a38ab5b505f85136a3a9c5345f3 | add ycm config | chxuan/easyrpc,chxuan/easyrpc,chxuan/easyrpc,chxuan/easyrpc | .ycm_extra_conf.py | .ycm_extra_conf.py | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++14',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
# c/c++ include path
'-isystem',
'/usr/include/c++/4.8',
'-isystem',
'/usr/include/c++/4.8.5',
'-isystem',
'/usr/include/c++/4.9.3',
'-isystem',
'/usr/include/c++/5',
'-isystem',
'/usr/include/c++/6',
'-isystem',
'/usr/include/c++/7',
'-isystem',
'/usr/include/c++/8',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
# 3rdparty
'-isystem',
'/usr/local/3rdparty/include',
# project
'-isystem',
'./',
#'-isystem',
#'../BoostParts',
#'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
#'/System/Library/Frameworks/Python.framework/Headers',
#'-isystem',
#'../llvm/include',
#'-isystem',
#'../llvm/tools/clang/include',
#'-I',
#'.',
#'-I',
#'./ClangCompleter',
#'-isystem',
#'./tests/gmock/gtest',
#'-isystem',
#'./tests/gmock/gtest/include',
#'-isystem',
#'./tests/gmock',
#'-isystem',
#'./tests/gmock/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if not database:
return {
'flags': flags,
'include_paths_relative_to_dir': DirectoryOfThisScript()
}
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object.
final_flags = list( compilation_info.compiler_flags_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
return {
'flags': final_flags,
'include_paths_relative_to_dir': compilation_info.compiler_working_dir_
}
| mit | Python |
|
7ce57e27265d4ea7639aaf6f806b9312d17c5c5a | Create HR_pythonSwapCase.py | bluewitch/Code-Blue-Python | HR_pythonSwapCase.py | HR_pythonSwapCase.py | #pythonSwapCase.py
def swap_case(s):
return s.swapcase()
| mit | Python |
|
104ce4eb41a8d1d8307618f619dbf5336af1056d | Add CVE plugin. | sk89q/Plumeria,sk89q/Plumeria,sk89q/Plumeria | plumeria/plugins/cve.py | plumeria/plugins/cve.py | import re
import urllib.parse
from plumeria.command import commands, CommandError
from plumeria.util import http
from plumeria.util.ratelimit import rate_limit
CVE_PATTERN = re.compile("^(CVE-\\d{4,5}-\d+)$", re.IGNORECASE)
@commands.register("cve", category="Search")
@rate_limit()
async def cve(message):
"""
Look up information about a CVE.
Example::
/cve CVE-2010-3213
Response::
CVE-2010-3213 - Cross-site request forgery (CSRF) vulner[...]
Auth: NONE / Complexity: MEDIUM / Vector: NETWORK
https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-3213
• (462) Cross-Domain Search Timing
• (467) Cross Site Identification
• (62) Cross Site Request Forgery (aka Session Riding)
• (111) JSON Hijacking (aka JavaScript Hijacking)
"""
q = message.content.strip()
if not q:
raise CommandError("Search term required!")
m = CVE_PATTERN.search(q)
if not m:
raise CommandError("No CVE found in the given input")
r = await http.get("https://cve.circl.lu/api/cve/{}".format(m.group(1).upper()))
data = r.json()
if len(data.keys()):
capecs = "\n".join(
map(lambda e: "\u2022 ({id}) {name}".format(id=e['id'], name=e['name']), data.get("capec", [])))
return "**{cve}** [{cvss}] - {summary}\n*Auth: {auth} / Complexity: {complexity} / Vector: {vector}*\n<{url}>\n{capecs}".format(
cve=data['id'],
cvss=data['cvss'],
summary=data['summary'],
auth=data['access']['authentication'],
complexity=data['access']['complexity'],
vector=data['access']['vector'],
capecs=capecs,
url="https://cve.mitre.org/cgi-bin/cvename.cgi?name={}".format(urllib.parse.quote(data['id'])))
else:
raise CommandError("no results found")
| mit | Python |
|
ac7c5f51e270e48d3be9363a7c65b4b2f019c90c | Add tests for xkcd bot. | shubhamdhama/zulip,shubhamdhama/zulip,verma-varsha/zulip,punchagan/zulip,vabs22/zulip,shubhamdhama/zulip,vaidap/zulip,brockwhittaker/zulip,rishig/zulip,punchagan/zulip,synicalsyntax/zulip,hackerkid/zulip,brainwane/zulip,jrowan/zulip,amanharitsh123/zulip,punchagan/zulip,brainwane/zulip,rishig/zulip,synicalsyntax/zulip,rishig/zulip,rishig/zulip,showell/zulip,showell/zulip,eeshangarg/zulip,shubhamdhama/zulip,eeshangarg/zulip,dhcrzf/zulip,jrowan/zulip,Galexrt/zulip,andersk/zulip,brainwane/zulip,timabbott/zulip,amanharitsh123/zulip,showell/zulip,kou/zulip,hackerkid/zulip,andersk/zulip,kou/zulip,rht/zulip,tommyip/zulip,brockwhittaker/zulip,hackerkid/zulip,punchagan/zulip,Galexrt/zulip,rht/zulip,showell/zulip,rht/zulip,kou/zulip,shubhamdhama/zulip,eeshangarg/zulip,hackerkid/zulip,rishig/zulip,dhcrzf/zulip,andersk/zulip,timabbott/zulip,jrowan/zulip,rishig/zulip,synicalsyntax/zulip,tommyip/zulip,dhcrzf/zulip,mahim97/zulip,vaidap/zulip,punchagan/zulip,jackrzhang/zulip,amanharitsh123/zulip,eeshangarg/zulip,tommyip/zulip,timabbott/zulip,jrowan/zulip,rishig/zulip,verma-varsha/zulip,Galexrt/zulip,zulip/zulip,hackerkid/zulip,rht/zulip,vaidap/zulip,amanharitsh123/zulip,eeshangarg/zulip,vabs22/zulip,zulip/zulip,showell/zulip,jackrzhang/zulip,Galexrt/zulip,vabs22/zulip,jackrzhang/zulip,timabbott/zulip,synicalsyntax/zulip,jackrzhang/zulip,verma-varsha/zulip,showell/zulip,brainwane/zulip,rht/zulip,dhcrzf/zulip,synicalsyntax/zulip,mahim97/zulip,andersk/zulip,synicalsyntax/zulip,kou/zulip,jackrzhang/zulip,zulip/zulip,vabs22/zulip,rht/zulip,Galexrt/zulip,shubhamdhama/zulip,amanharitsh123/zulip,eeshangarg/zulip,zulip/zulip,jrowan/zulip,brainwane/zulip,verma-varsha/zulip,mahim97/zulip,timabbott/zulip,mahim97/zulip,verma-varsha/zulip,hackerkid/zulip,verma-varsha/zulip,punchagan/zulip,zulip/zulip,timabbott/zulip,eeshangarg/zulip,synicalsyntax/zulip,dhcrzf/zulip,kou/zulip,brainwane/zulip,dhcrzf/zulip,tommyip/zulip,dhcrzf/zuli
p,mahim97/zulip,tommyip/zulip,rht/zulip,brockwhittaker/zulip,hackerkid/zulip,vaidap/zulip,andersk/zulip,brockwhittaker/zulip,vabs22/zulip,shubhamdhama/zulip,mahim97/zulip,brainwane/zulip,brockwhittaker/zulip,andersk/zulip,vaidap/zulip,timabbott/zulip,Galexrt/zulip,showell/zulip,jrowan/zulip,andersk/zulip,zulip/zulip,kou/zulip,amanharitsh123/zulip,jackrzhang/zulip,tommyip/zulip,kou/zulip,tommyip/zulip,brockwhittaker/zulip,vaidap/zulip,Galexrt/zulip,jackrzhang/zulip,punchagan/zulip,zulip/zulip,vabs22/zulip | contrib_bots/bots/xkcd/test_xkcd.py | contrib_bots/bots/xkcd/test_xkcd.py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import mock
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestXkcdBot(BotTestCase):
bot_name = "xkcd"
@mock.patch('logging.exception')
def test_bot(self, mock_logging_exception):
help_txt = "xkcd bot supports these commands:"
err_txt = "xkcd bot only supports these commands:"
commands = '''
* `@xkcd help` to show this help message.
* `@xkcd latest` to fetch the latest comic strip from xkcd.
* `@xkcd random` to fetch a random comic strip from xkcd.
* `@xkcd <comic id>` to fetch a comic strip based on `<comic id>` e.g `@xkcd 1234`.'''
invalid_id_txt = "Sorry, there is likely no xkcd comic strip with id: #"
expected = {
"": err_txt+commands,
"help": help_txt+commands,
"x": err_txt+commands,
"0": invalid_id_txt + "0",
"1": ("#1: **Barrel - Part 1**\n[Don't we all.]"
"(https://imgs.xkcd.com/comics/barrel_cropped_(1).jpg)"),
"1800": ("#1800: **Chess Notation**\n"
"[I've decided to score all my conversations "
"using chess win-loss notation. (??)]"
"(https://imgs.xkcd.com/comics/chess_notation.png)"),
"999999999": invalid_id_txt + "999999999",
}
for m, r in expected.items():
self.assert_bot_output(
{'content': m, 'type': "private", 'sender_email': "foo"}, r)
self.assert_bot_output(
{'content': m, 'type': "stream", 'sender_email': "foo"}, r)
| apache-2.0 | Python |
|
552e2381b25c9d3591e7b4bf4a4c5796744b15ba | Add demo configuration | makinacorpus/Geotrek,mabhub/Geotrek,mabhub/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,Anaethelion/Geotrek,camillemonchicourt/Geotrek,mabhub/Geotrek,camillemonchicourt/Geotrek,Anaethelion/Geotrek,camillemonchicourt/Geotrek,johan--/Geotrek,GeotrekCE/Geotrek-admin,johan--/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,makinacorpus/Geotrek,Anaethelion/Geotrek,Anaethelion/Geotrek,johan--/Geotrek,johan--/Geotrek,GeotrekCE/Geotrek-admin,mabhub/Geotrek | .salt/files/demo.py | .salt/files/demo.py | from .prod import *
LEAFLET_CONFIG['TILES'] = [
(gettext_noop('Scan'), 'http://{s}.livembtiles.makina-corpus.net/makina/OSMTopo/{z}/{x}/{y}.png', 'OSM Topo'),
(gettext_noop('Ortho'), 'https://{s}.tiles.mapbox.com/v3/makina-corpus.i3p1001l/{z}/{x}/{y}.png', '© MapBox Satellite'),
]
LEAFLET_CONFIG['SRID'] = 3857
ALTIMETRIC_PROFILE_COLOR = '#F77E00'
MAPENTITY_CONFIG['MAP_BACKGROUND_FOGGED'] = False
| bsd-2-clause | Python |
|
4b4bfd8d1bfb5e6db7ac5d24be526f188ceb6e68 | add payout exceptions | onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle | bluebottle/payouts_dorado/exceptions.py | bluebottle/payouts_dorado/exceptions.py | class PayoutException(Exception):
def __init__(self, message, error_list=None):
self.message = message
self.error_list = error_list
def __str__(self):
return str(self.message)
def __unicode__(self):
return unicode(self.message)
| bsd-3-clause | Python |
|
5908d941fc113ee02b7d5962f0209a528ab9ecb1 | Add cross-site css module | jaeyung1001/phishing_site_detection,mjkim610/phishing-detection | core/modules/uses_stylesheet_naver.py | core/modules/uses_stylesheet_naver.py | from bs4 import BeautifulSoup
"""
Sites that are in the Naver domain are already checked by is_masquerading. So no need to check url again
"""
def uses_stylesheet_naver(resp):
print('uses_stylesheet_naver')
answer = "U"
current_page = BeautifulSoup(resp.text, 'lxml')
stylesheets = current_page.find_all('link', rel="stylesheet")
for stylesheet in stylesheets:
if "naver.com" in stylesheet['href']:
return "P"
return answer
| bsd-2-clause | Python |
|
d217ee9c830a6cccb70155ceff44746b4e5215d6 | Add missing csv migration | mociepka/saleor,mociepka/saleor,mociepka/saleor | saleor/csv/migrations/0004_auto_20200604_0633.py | saleor/csv/migrations/0004_auto_20200604_0633.py | # Generated by Django 3.0.6 on 2020-06-04 11:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("csv", "0003_auto_20200520_0247"),
]
operations = [
migrations.AlterField(
model_name="exportevent",
name="type",
field=models.CharField(
choices=[
("export_pending", "Data export was started."),
("export_success", "Data export was completed successfully."),
("export_failed", "Data export failed."),
("export_deleted", "Export file was started."),
(
"exported_file_sent",
"Email with link to download file was sent to the customer.",
),
(
"Export_failed_info_sent",
"Email with info that export failed was sent to the customer.",
),
],
max_length=255,
),
),
]
| bsd-3-clause | Python |
|
2b86b727cd701464969de5679d30f9bea38a08f3 | Create TheDescent.py | Flavoured/CodinGamePuzzles,Flavoured/CodinGamePuzzles | Easy/TheDescent/TheDescent.py | Easy/TheDescent/TheDescent.py | import sys
import math
while True:
tallest_index = -1
tallest_height = -1
for i in range(8):
mountain_h = int(input()) # represents the height of one mountain.
if(tallest_height != -1):
if(mountain_h > tallest_height):
tallest_index = i
tallest_height = mountain_h
else:
tallest_index = i
tallest_height = mountain_h
print(tallest_index)
| unlicense | Python |
|
2a01ffdbac602873615925e6f99fab801a324fef | Create minesweeper.py | vzabazarnikh/minesweeper | minesweeper.py | minesweeper.py | import tkinter
import random
def pol_param():
size = 50
a = 8
b = 8
n = 10
return [size, a, b, n]
def perevod(a, v, d):
f = a*v + d
return f
def sozd_bomb(a, b, n):
m = []
for x in range(n):
k = (random.randrange(a * b))
while k in m:
k = (random.randrange(a * b))
m.append (k)
return m
def prov(m, f):
if f in m:
return 1
else:
return 0
def sh_kl(m, f, a):
if f in m:
return(9)
c = prov (m, (f + 1)) + prov (m, (f - 1)) + prov (m, (f + a)) + prov (m, (f - a)) + prov (m, (f + a + 1)) + prov (m, (f + 1 - a)) + prov (m, (f - 1 - a)) + prov (m, (f - 1 + a))
return c
def sh_znach(m, a, b):
pole = []
for i in range(b):
pole.append([0; 0; 0] * a)
for x in range(a):
for y in range(b):
pole[x][y][0] = sh_kl(m, perevod(a, x, y), a)
pole.append(0)
return pole
def right_click(a, b, pole, k):
if pole[-1] == 0:
if pole[b][a][1] == 0:
if pole[b][a][2] == 0:
pole[b][a][2] = 1
else:
pole[b][a][2] = 0
paint(pole)
return (pole)
def left_click(a, b, pole, k):
if pole[-1] == 0:
if pole[b][a][2] == 0:
pole[b][a][1] = 0
paint(pole)
if pole[b][a][0] == 9:
pole[-1] = 1
return (pole)
def LClick(x, y, size, pole, k):
x, y = event.x, event.y
b = y//size
a = x//size
left_click(a, b, pole)
def RClick(event, size, pole, k):
x, y = event.x, event.y
b = y//size
a = x//size
right_click(a, b, pole)
size = dan[0]
dan = pol_param()
m = sozd_bomb(dan[1], dan[2], dan[3])
canvas = tkinter.Canvas()
canvas.pack()
pole = sh_znach(m, dan[1], dan[2])
canvas.bind("<Button-2>", lambda evt: RClick(evt, size, pole))
canvas.bind("<Button-1>", lambda evt: LClick(evt, size, pole))
canvas.mainloop()
print(m)
print(pole)
| mit | Python |
|
280aa4c8db7b5580b73ab6980f10d21a6ef2d761 | Add an audio output using the pygame mixer. This abuses pygame to a fair extent, but works reasonably with large-ish buffer sizes. | sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia | Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Audio/PyGameOutput.py | Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Audio/PyGameOutput.py | import numpy
import Numeric
import pygame
import Axon
import time
class PyGameOutput(Axon.ThreadedComponent.threadedcomponent):
bufferSize = 1024
sampleRate = 44100
def __init__(self, **argd):
super(PyGameOutput, self).__init__(**argd)
pygame.mixer.init(self.sampleRate, -16, 1, self.bufferSize)
def main(self):
while 1:
if not pygame.mixer.get_init():
pygame.mixer.init(self.sampleRate, -16, 1, self.bufferSize)
else:
if self.dataReady("inbox"):
numpyArray = self.recv("inbox")
# Scale to 16 bit int
numpyArray *= 2**15-1
numpyArray = numpyArray.astype("int16")
numericArray = Numeric.asarray(numpyArray)
sound = pygame.sndarray.make_sound(numericArray)
sound.play()
if not self.anyReady():
self.pause()
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Apps.Jam.Audio.SineSynth import SineOsc
Pipeline(SineOsc(), PyGameOutput()).run()
| apache-2.0 | Python |
|
4433cadaa39dd84b922329c84a7e791d81cac7c6 | Add a very simple test that *must* always pass. * Useful for testing the newstyle API | Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,kdmurray91/ooni-probe,juga0/ooni-probe,Karthikeyan-kkk/ooni-probe,0xPoly/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,lordappsec/ooni-probe,hackerberry/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,kdmurray91/ooni-probe,juga0/ooni-probe,hackerberry/ooni-probe,Karthikeyan-kkk/ooni-probe,juga0/ooni-probe | nettests/simpletest.py | nettests/simpletest.py | from ooni import nettest
class SimpleTest(nettest.TestCase):
inputs = range(1,100)
optParameters = [['asset', 'a', None, 'Asset file'],
['controlserver', 'c', 'google.com', 'Specify the control server'],
['resume', 'r', 0, 'Resume at this index'],
['other', 'o', None, 'Other arguments']]
def test_foo(self, *arg, **kw):
print "Running %s with %s" % ("test_foo", self.input)
self.report['test_foo'] = 'Antani'
self.report['shared'] = "sblinda"
self.assertEqual(1,1)
def test_f4oo(self, *arg, **kw):
print "Running %s with %s" % ("test_f4oo", self.input)
self.report['test_f4oo'] = 'Antani'
self.report['shared'] = "sblinda2"
self.assertEqual(1,1)
| bsd-2-clause | Python |
|
124c4f30455d0892608622ddd09a0e7d83c3e8da | Create xmltodict_implementation.py | minesh1291/Learning-Python | Useful-Libs/xmltodict_implementation.py | Useful-Libs/xmltodict_implementation.py | import xmltodict
with open('path/to/file.xml') as fd:
doc = xmltodict.parse(fd.read())
doc['mydocument']['@has'] # == u'an attribute'
doc['mydocument']['and']['many'] # == [u'elements', u'more elements']
doc['mydocument']['plus']['@a'] # == u'complex'
doc['mydocument']['plus']['#text'] # == u'element as well'
| apache-2.0 | Python |
|
3971a15aea15e097fac00f680068b505cc9047b8 | Add new descend functionality. | JohnReid/pyseqan,JohnReid/pyseqan,JohnReid/pyseqan,JohnReid/pyseqan | python/seqan/descend.py | python/seqan/descend.py | #
# Copyright John Reid 2014
#
"""
Code to descend indexes with top-down and top-down-history iterators.
"""
def depthpredicate(maxdepth):
"""Create a predicate that only descends the tree to a maximum depth.
"""
def predicate(it):
return it.repLength < maxdepth
return predicate
def suffixpredicate(suffix):
"""Create a predicate that only descends the part of the tree
that matches the suffix.
"""
def predicate(it):
minlen = min(it.repLength, len(suffix))
return suffix[:minlen] == it.representative[:minlen]
return predicate
class Descender(object):
"""Mix-in class to descend an index. Base class must implement
the visit_node(parent, child) method. A predicate can be supplied to
filter parts of the tree from the descent. See depthpredicate() and
suffixpredicate().
"""
def descend(self, it):
"""Descend the index."""
if self.visitvertex(it):
if it.goDown():
while True:
self.descend(it)
if not it.goRight():
break
it.goUp()
class ParallelDescender(object):
"""Descends two indexes (primary and secondary) in parallel. Each vertex in
the primary is visited and the corresponding vertices (or closest vertices)
in the secondary are simultaneously visited. The two iterators are called
synchronised if and only if the representative of the primary iterator
matches that start of the representative of the secondary iterator.
"""
def descend(self, primit, secit, stillsynced=True):
self.visitvertex(primit, secit, stillsynced)
assert stillsynced == (
primit.repLength <= secit.repLength
and primit.representative == secit.representative[:primit.repLength])
if primit.goDown():
while True:
# We have moved the primary iterator,
# we should check if we are still synchronised
if stillsynced:
parentstart = primit.repLength - primit.parentEdgeLength
end = min(primit.repLength, secit.repLength)
newstillsynced = (
primit.representative[parentstart:end]
== secit.representative[parentstart:end])
else:
newstillsynced = False
# Count the number of vertexes we descend
numgodowns = 0
# Only move the secondary iterator if we are still
# synchronised with primary iterator
if newstillsynced:
# Move secondary iterator to same (or similar) position
# as primary iterator
while secit.repLength < primit.repLength:
# Try and descend
if not secit.goDown(
primit.representative[secit.repLength]):
#logger.debug('Could not goDown()')
newstillsynced = False
break
numgodowns += 1
# Check we descended successfully
start = secit.repLength - secit.parentEdgeLength + 1
end = min(primit.repLength, secit.repLength)
if secit.representative[start:end] != primit.representative[start:end]:
# We did not manage to match primary's parent edge
#logger.debug('Parent edge mismatch')
newstillsynced = False
break
# recurse
self.descend(primit, secit, newstillsynced)
# Move secondary iterator back up to original position
for i in xrange(numgodowns):
secit.goUp()
# Go to next vertex in primary index
if not primit.goRight():
break
primit.goUp()
class CallbackParallelDescender(ParallelDescender):
"""Class that descends two indexes in a top-down manner,
calling a callback at each vertex."""
def __init__(self, callback):
ParallelDescender.__init__(self)
self.visitvertex = callback
| mit | Python |
|
1e6956fb793e12b720b521feb4c0eeabaf490cea | add cache.py to cleanup cache | owenwater/alfred-forvo | cache.py | cache.py | #!/usr/bin/python
import os
import time
from workflow import Workflow
AGE = 3600 * 24
LOG = None
class Cache(object):
def __init__(self):
global LOG
self.wf = Workflow()
LOG = self.wf.logger
self.cachedir = self.wf.cachedir
self.wf.cached_data_age = self.cached_data_age
def cached_data_age(self, name):
cache_path = self.wf.cachefile(name)
if not os.path.exists(cache_path):
return 0
return time.time() - os.stat(cache_path).st_mtime
def clean(self):
for file in os.listdir(self.wf.cachedir):
if file.endswith(".log"):
continue
if not self.wf.cached_data_fresh(file, AGE):
LOG.debug("deleting cache file: " + file)
os.remove(self.wf.cachefile(file))
if __name__=="__main__":
cache = Cache()
cache.clean()
| mit | Python |
|
25e09e4dbbc6dbc87c3b1cc2833021a9ae022a0e | Create compact/struct.py for python2/3 compatibility | hgrecco/pyvisa,MatthieuDartiailh/pyvisa,pyvisa/pyvisa | pyvisa/compat/struct.py | pyvisa/compat/struct.py | # -*- coding: utf-8 -*-
"""
pyvisa.compat.struct
~~~~~~~~~~~~~~~~~~~~~~~~~
Python 2/3 compatibility for struct module
:copyright: 2015, PSF
:license: PSF License
"""
from __future__ import division, unicode_literals, print_function, absolute_import
import sys
import struct
# we always want the exception to be able to catch it
error = struct.error
# compatibility for unicode literals was introduced in 2.7.8
# if we're above that there is nothing to do except aliasing
if sys.hexversion >= 0x02070800:
pack = struct.pack
pack_into = struct.pack_into
unpack = struct.unpack
unpack_from = struct.unpack_from
calcsize = struct.calcsize
else:
def pack(fmt, *args):
return struct.pack(str(fmt), *args)
def pack_into(fmt, *args, **argk):
return struct.pack_into(str(fmt), *args, **argk)
def unpack(fmt, string):
return struct.unpack(str(fmt), string)
def unpack_from(fmt, *args, **kwargs):
return struct.unpack_from(str(fmt), *args, **kwargs)
def calcsize(fmt):
return struct.calcsize(str(fmt))
| mit | Python |
|
ef8bc0ddffa142e8580606377bff1d2737365711 | add various utilities in dog.util | slice/dogbot,sliceofcode/dogbot,slice/dogbot,slice/dogbot,sliceofcode/dogbot | dog/util.py | dog/util.py | import discord
def make_profile_embed(member):
embed = discord.Embed()
embed.set_author(name=f'{member.name}#{member.discriminator}',
icon_url=member.avatar_url)
return embed
def american_datetime(datetime):
return datetime.strftime('%m/%d/%Y %I:%M:%S %p')
def pretty_timedelta(delta):
big = ''
if delta.days >= 7 and delta.days < 21:
weeks = round(delta.days / 7, 2)
plural = 's' if weeks == 0 or weeks > 1 else ''
big = f'{weeks} week{plural}'
# assume that a month is 31 days long, i am not trying
# to be aware
if delta.days >= 21 and delta.days < 365:
days = round(delta.days / 31, 2)
plural = 's' if days == 0 or days > 1 else ''
big = f'{days} month{plural}'
if delta.days >= 365:
years = round(delta.days / 365)
plural = 's' if years == 0 or years > 1 else ''
big = f'{years} year{plural}'
m, s = divmod(delta.seconds, 60)
h, m = divmod(m, 60)
return '{}, {:02d}h{:02d}m{:02d}s'.format(big, h, m, s)
| mit | Python |
|
976eda9cbbce4a4fad759c4197b630209dd5a2bd | add programmer wrapper script | authmillenon/RIOT,kaspar030/RIOT,kYc0o/RIOT,jasonatran/RIOT,OTAkeys/RIOT,ant9000/RIOT,OTAkeys/RIOT,OlegHahm/RIOT,RIOT-OS/RIOT,miri64/RIOT,ant9000/RIOT,authmillenon/RIOT,RIOT-OS/RIOT,authmillenon/RIOT,kYc0o/RIOT,kaspar030/RIOT,kaspar030/RIOT,kaspar030/RIOT,RIOT-OS/RIOT,authmillenon/RIOT,OlegHahm/RIOT,jasonatran/RIOT,kYc0o/RIOT,kYc0o/RIOT,ant9000/RIOT,jasonatran/RIOT,ant9000/RIOT,OlegHahm/RIOT,kaspar030/RIOT,miri64/RIOT,kYc0o/RIOT,OlegHahm/RIOT,OTAkeys/RIOT,jasonatran/RIOT,ant9000/RIOT,jasonatran/RIOT,authmillenon/RIOT,OlegHahm/RIOT,miri64/RIOT,OTAkeys/RIOT,authmillenon/RIOT,RIOT-OS/RIOT,miri64/RIOT,OTAkeys/RIOT,miri64/RIOT,RIOT-OS/RIOT | dist/tools/programmer/programmer.py | dist/tools/programmer/programmer.py | #!/usr/bin/env python3
# Copyright (C) 2021 Inria
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
import time
import shlex
import subprocess
import argparse
from contextlib import contextmanager
SUCCESS = "\033[32;1m✓\033[0m"
FAILED = "\033[31;1m×\033[0m"
SPIN = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
class Programmer:
@contextmanager
def spawn_process(self):
"""Yield a subprocess running in background."""
kwargs = {} if self.verbose else {
"stdout": subprocess.PIPE,
"stderr": subprocess.STDOUT
}
yield subprocess.Popen(shlex.split(self.cmd), **kwargs)
def spin(self, process):
"""Print a spinning icon while programmer process is running."""
while process.poll() is None:
for index in range(len(SPIN)):
sys.stdout.write(
"\r \033[36;1m{}\033[0m {} in progress "
"(programmer: '{}')"
.format(SPIN[index], self.action, self.programmer)
)
sys.stdout.flush()
time.sleep(0.1)
def print_status(self, process, elapsed):
"""Print status of background programmer process."""
print(
"\r \u001b[2K{} {} {} (programmer: '{}' - duration: {:0.2f}s)"
.format(
FAILED if process.returncode != 0 else SUCCESS,
self.action,
"failed!" if process.returncode != 0 else "done!",
self.programmer,
elapsed
)
)
# Print content of stdout (which also contain stderr) when the
# subprocess failed
if process.returncode != 0:
print(process.stdout.read().decode())
else:
print(
"(for full programmer output add PROGRAMMER_QUIET=0 or "
"QUIET=0 to the make command line)"
)
def run(self):
"""Run the programmer in a background process."""
if not self.cmd.strip():
# Do nothing if programmer command is empty
return 0
if self.verbose:
print(self.cmd)
start = time.time()
with self.spawn_process() as proc:
try:
if self.verbose:
proc.communicate()
else:
self.spin(proc)
except KeyboardInterrupt:
proc.terminate()
proc.kill()
elapsed = time.time() - start
if not self.verbose:
# When using the spinning icon, print the programmer status
self.print_status(proc, elapsed)
return proc.returncode
def main(parser):
"""Main function."""
programmer = Programmer()
parser.parse_args(namespace=programmer)
# Return with same return code as subprocess
sys.exit(programmer.run())
def parser():
"""Return an argument parser."""
parser = argparse.ArgumentParser()
parser.add_argument("--action", help="Programmer action")
parser.add_argument("--cmd", help="Programmer command")
parser.add_argument("--programmer", help="Programmer")
parser.add_argument(
"--verbose", action='store_true', default=False, help="Verbose output"
)
return parser
if __name__ == "__main__":
main(parser())
| lgpl-2.1 | Python |
|
d9c197840282c6bdedf5e001a1092aa707ae139c | update email field length | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/data_analytics/migrations/0008_auto_20161114_1903.py | corehq/apps/data_analytics/migrations/0008_auto_20161114_1903.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-14 19:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data_analytics', '0007_auto_20160819_1423'),
]
operations = [
migrations.AlterField(
model_name='maltrow',
name='email',
field=models.EmailField(max_length=254),
),
]
| bsd-3-clause | Python |
|
0c2f730ad2e4db7f53b2a867711e00048428d82d | add rest api | knodir/son-emu,knodir/son-emu,knodir/son-emu,knodir/son-emu | src/emuvim/examples/simple_topology_restapi.py | src/emuvim/examples/simple_topology_restapi.py | """
This is an example topology for the distributed cloud emulator (dcemulator).
(c) 2015 by Manuel Peuster <[email protected]>
This is an example that shows how a user of the emulation tool can
define network topologies with multiple emulated cloud data centers.
The definition is done with a Python API which looks very similar to the
Mininet API (in fact it is a wrapper for it).
We only specify the topology *between* data centers not within a single
data center (data center internal setups or placements are not of interest,
we want to experiment with VNF chains deployed across multiple PoPs).
The original Mininet API has to be completely hidden and not be used by this
script.
"""
import logging
from mininet.log import setLogLevel
from emuvim.dcemulator.net import DCNetwork
from emuvim.api.rest.compute import RestApiEndpoint
#from emuvim.api.zerorpc.compute import ZeroRpcApiEndpoint
from emuvim.api.zerorpc.network import ZeroRpcApiEndpointDCNetwork
logging.basicConfig(level=logging.INFO)
def create_topology1():
"""
1. Create a data center network object (DCNetwork)
"""
net = DCNetwork()
"""
1b. add a monitoring agent to the DCNetwork
"""
mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
mon_api.connectDCNetwork(net)
mon_api.start()
"""
2. Add (logical) data centers to the topology
(each data center is one "bigswitch" in our simplified
first prototype)
"""
dc1 = net.addDatacenter("datacenter1")
dc2 = net.addDatacenter("datacenter2")
dc3 = net.addDatacenter("long_data_center_name3")
dc4 = net.addDatacenter(
"datacenter4",
metadata={"mydata": "we can also add arbitrary metadata to each DC"})
"""
3. You can add additional SDN switches for data center
interconnections to the network.
"""
s1 = net.addSwitch("s1")
"""
4. Add links between your data centers and additional switches
to define you topology.
These links can use Mininet's features to limit bw, add delay or jitter.
"""
net.addLink(dc1, dc2)
net.addLink("datacenter1", s1)
net.addLink(s1, dc3)
net.addLink(s1, "datacenter4")
"""
5. We want to access and control our data centers from the outside,
e.g., we want to connect an orchestrator to start/stop compute
resources aka. VNFs (represented by Docker containers in the emulated)
So we need to instantiate API endpoints (e.g. a zerorpc or REST
interface). Depending on the endpoint implementations, we can connect
one or more data centers to it, which can then be controlled through
this API, e.g., start/stop/list compute instances.
"""
# create a new instance of a endpoint implementation
api1 = RestApiEndpoint("127.0.0.1", 5000)
# connect data centers to this endpoint
api1.connectDatacenter(dc1)
api1.connectDatacenter(dc2)
api1.connectDatacenter(dc3)
api1.connectDatacenter(dc4)
# run API endpoint server (in another thread, don't block)
api1.start()
"""
6. Finally we are done and can start our network (the emulator).
We can also enter the Mininet CLI to interactively interact
with our compute resources (just like in default Mininet).
But we can also implement fully automated experiments that
can be executed again and again.
"""
net.start()
net.CLI()
# when the user types exit in the CLI, we stop the emulator
net.stop()
def main():
setLogLevel('info') # set Mininet loglevel
create_topology1()
if __name__ == '__main__':
main()
| apache-2.0 | Python |
|
8ccf3d937d25ec93d1ce22d60735ffbcaf776fe3 | Add a script for plotting distance to target. | lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment | analysis/plot-target-distance.py | analysis/plot-target-distance.py | import climate
import itertools
import lmj.plot
import numpy as np
import source as experiment
import plots
@climate.annotate(
root='plot data from this experiment subjects',
pattern=('plot data from files matching this pattern', 'option'),
markers=('plot data for these mocap markers', 'option'),
target_num=('plot data for this target', 'option', None, int),
approach_sec=('plot variance for N sec prior to target acquisition', 'option', None, float),
)
def main(root, pattern='*/*block*/*circuit*.csv.gz', markers='r-fing-index l-fing-index r-heel r-knee', target_num=5, approach_sec=2):
with plots.plot() as ax:
for i, trial in enumerate(experiment.Experiment(root).trials_matching(pattern)):
for t in range(11):
s = trial.movement_to(t).distance_to_target().interpolate().reset_index(drop=True)
ax.plot(s.index, s.values, color=lmj.plot.COLOR11[t])
if __name__ == '__main__':
climate.call(main)
| mit | Python |
|
d8c18d9244ca09e942af57d74a407498c25d05ce | Add Linear Discriminant Analaysis. | prasanna08/MachineLearning | LDA.py | LDA.py | import numpy as np
from scipy import linalg as LA
class LDA(object):
def __init__(self, data_inputs, data_labels):
self.data_inputs = np.array(data_inputs)
self.data_labels = data_labels
self.test_cases = self.data_inputs.shape[0]
self.labels = np.unique(data_labels)
self.Sw = np.zeros((self.data_inputs.shape[1], self.data_inputs.shape[1]))
self.Sb = self.Sw.copy()
def analyse(self):
C = np.cov(self.data_inputs.T)
for label in self.labels:
indices = np.where(self.data_labels == label)
points = self.data_inputs[indices[0]]
classcov = np.cov(points.T)
self.Sw += (np.float(points.shape[0])/self.test_cases) * classcov
self.Sb = C - self.Sw
evals, evecs = LA.eig(self.Sw, self.Sb)
indices = np.argsort(evals)
indices = indices[::-1]
evals = evals[indices]
evecs = evecs[indices]
self.eigen_vals = evals
self.eigen_vecs = evecs
def reduce_dim(self, red_n, data_inputs=None):
w = self.eigen_vecs[:,:red_n]
if data_inputs is None:
data_inputs = self.data_inputs
return np.dot(data_inputs, w)
def expand_dim(self, red_data):
red_n = red_data.shape[1]
return np.transpose(np.dot(self.eigen_vecs[:,:red_n], red_data.T))
| mit | Python |
|
3cc1bceaca2fe74d3d9f9fa846f976ba99cc7dee | Create RDF.py | Shirui816/FTinMS | RDF.py | RDF.py | from sys import argv
import pandas as pd
import numpy as np
from functions import crdf
import time
import accelerate as acc
import matplotlib
from matplotlib import pyplot as plt
fn = argv[1]
print('Box origin must be at the center!')
pos = pd.read_csv(fn, delim_whitespace=True, squeeze=1, header=None).values
import time
import numpy as np
Ndim = 500 # Finess of delta function
V = box[0]*box[1]*box[2]
rho_bins = Ndim**3/V # Number density of bins
rho = pos.shape[0]/V
s = time.time()
p, e = np.histogramdd(pos, bins=(Ndim, Ndim, Ndim), range=((-box[0]/2, box[0]/2), (-box[1]/2, box[1]/2),(-box[2]/2, box[2]/2)))
print('Binning particles: %s' % (time.time()-s))
p = np.fft.fftshift(p) # POS is of center-origin, here move origin to cornor.
s = time.time()
fp = acc.mkl.fftpack.fftn(p) # Accelerate package
print('FFT time: %s' % (time.time()-s))
FP = fp*fp.conj()
s = time.time()
RDF = np.fft.ifftn(FP).real # IFFT{<rho(K)rho(-K)>}, 1/N\sum_i......(see numpy.fft, so rho_bins is needed)
print('IFFT time: %s' % (time.time()-s))
RDF[0,0,0] -= pos.shape[0]
RDF = np.fft.fftshift(RDF)
rbin = 0.2 # >= box / Ndiv
rx = e[0][:Ndim] + 0.5*(e[0][-1]-e[0][-2])
ry = e[1][:Ndim] + 0.5*(e[1][-1]-e[1][-2])
rz = e[2][:Ndim] + 0.5*(e[2][-1]-e[2][-2])
from numba import jit
@jit # normalize g(R) to g(r)
def norm_r(RDF, rbin, rx, ry, rz):
rdf = np.zeros(int(box.max()/2*3**0.5/rbin)+1, dtype=np.float)
cter = np.zeros(rdf.shape, dtype=np.float)
for i in range(Ndim):
for j in range(Ndim):
for k in range(Ndim):
rr = rx[i]**2+ry[j]**2+rz[k]**2
r = int(rr**0.5/rbin)
rdf[r] += RDF[i,j,k]
cter[r] += 1
return np.nan_to_num(rdf/cter)
rdf = norm_r(RDF, rbin, rx,ry,rz)
rdf /= pos.shape[0] * rho # NA*NB/V for gAB(r)
rdf *= rho_bins # NORMED BY BIN DENSITY
rr = np.arange(rdf.shape[0])*rbin
o = open('rdf.txt', 'w')
for i, y in enumerate(rdf):
o.write('%.8f %.8f\n' % ((i+0.5) * rbin, y))
o.close()
| bsd-3-clause | Python |
|
10dbbe5b10abf954ab912fc3a2cdfe1532bf71cf | test file added | clemense/cortex-py | cortex-py/test/test_cortex.py | cortex-py/test/test_cortex.py | import time
import cortex
class MyDataHandler:
def __init__(self):
self.alldata = []
def MyErrorHandler(self, iLevel, msg):
print("ERROR: ")
print(iLevel, msg.contents)
return 0
def MyDataHandler(self, Frame):
print("got called")
try:
print("Received multi-cast frame no %d\n"%(Frame.contents.iFrame))
print "Bodies: ", Frame.contents.nBodies
print "BodyData: ", Frame.contents.BodyData[0].szName
print "Number of Markers of Body[0]: ", Frame.contents.BodyData[0].nMarkers
for i in range(Frame.contents.BodyData[0].nMarkers):
print "MarkerX ", Frame.contents.BodyData[0].Markers[i][0]
print "MarkerY ", Frame.contents.BodyData[0].Markers[i][1]
print "MarkerZ ", Frame.contents.BodyData[0].Markers[i][2]
print "BodyMarker[2].x: ", Frame.contents.BodyData[0].Markers[3][0]
print "Unidentified markers: ", Frame.contents.nUnidentifiedMarkers
print "Delay: ", Frame.contents.fDelay
print "", Frame.contents.UnidentifiedMarkers[0][0]
self.alldata.append(Frame.contents.UnidentifiedMarkers[0][0])
except:
print("Frame empty")
return 0
if __name__ == "__main__":
my_obj = MyDataHandler()
Cortex_SetErrorMsgHandlerFunc(my_obj.MyErrorHandler)
Cortex_SetDataHandlerFunc(my_obj.MyDataHandler)
if Cortex_Initialize() != 0:
print("ERROR: unable to initialize")
Cortex_Exit()
exit(0)
pBodyDefs = Cortex_GetBodyDefs()
if pBodyDefs == None:
print("Failed to get body defs")
else:
print("Got body defs")
print("bodydefs: ", pBodyDefs.contents.nBodyDefs)
print "Marker names: "
print "", pBodyDefs.contents.BodyDefs[0].szName
for i in range(pBodyDefs.contents.BodyDefs[0].nMarkers):
print "Marker: ", pBodyDefs.contents.BodyDefs[0].szMarkerNames[i]
Cortex_FreeBodyDefs(pBodyDefs)
pBodyDefs = None
pResponse = c_void_p
nBytes = c_int
retval = Cortex_Request("GetContextFrameRate", pResponse, nBytes)
if retval != 0:
print("ERROR, GetContextFrameRate")
#contextFrameRate = cast(pResponse, POINTER(c_float))
#print("ContextFrameRate = %3.1f Hz", contextFrameRate)
print("*** Starting live mode ***")
retval = Cortex_Request("LiveMode", pResponse, nBytes)
time.sleep(1.0)
retval = Cortex_Request("Pause", pResponse, nBytes)
print("*** Paused live mode ***")
print("****** Cortex_Exit ******")
retval = Cortex_Exit();
print my_obj.alldata
| mit | Python |
|
8b257c2a4b8f949f81965b7ffaa80d18c48974a4 | add app framework | aaronsamuel137/whats-up,aaronsamuel137/whats-up | app.py | app.py | import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
application = tornado.web.Application([
(r"/", MainHandler),
])
if __name__ == "__main__":
application.listen(8888)
tornado.ioloop.IOLoop.instance().start()
| mit | Python |
|
e258b608c40b2abca30fbc85601e05c48558fff9 | add weird migration | Fisiu/calendar-oswiecim,hackerspace-silesia/calendar-oswiecim,Fisiu/calendar-oswiecim,firemark/calendar-oswiecim,Fisiu/calendar-oswiecim,hackerspace-silesia/calendar-oswiecim,firemark/calendar-oswiecim,firemark/calendar-oswiecim,hackerspace-silesia/calendar-oswiecim | webapp/calendars/migrations/0023_auto_20160109_1307.py | webapp/calendars/migrations/0023_auto_20160109_1307.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('calendars', '0022_auto_20151121_1628'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name_plural': 'Categories'},
),
migrations.AlterField(
model_name='category',
name='color',
field=models.CharField(max_length=16, default='primary', choices=[('primary', 'Niebieski'), ('success', 'Zielony'), ('info', 'Jasno niebieski'), ('warning', 'Żółty'), ('danger', 'Czerwony')]),
),
]
| agpl-3.0 | Python |
|
7b15ad790631926030f8b0b6c32f214f2c8001b1 | Create __init__.py | cellnopt/cellnopt,cellnopt/cellnopt | cno/boolean/__init__.py | cno/boolean/__init__.py | bsd-2-clause | Python |
||
edf6e9ceacab9aa2d8795340089182ead07c30b3 | Add ipopt v3.12.4 package. | lgarren/spack,krafczyk/spack,matthiasdiener/spack,TheTimmy/spack,LLNL/spack,EmreAtes/spack,matthiasdiener/spack,krafczyk/spack,tmerrick1/spack,TheTimmy/spack,skosukhin/spack,lgarren/spack,lgarren/spack,LLNL/spack,EmreAtes/spack,EmreAtes/spack,tmerrick1/spack,mfherbst/spack,LLNL/spack,tmerrick1/spack,tmerrick1/spack,iulian787/spack,matthiasdiener/spack,skosukhin/spack,TheTimmy/spack,skosukhin/spack,EmreAtes/spack,iulian787/spack,krafczyk/spack,iulian787/spack,skosukhin/spack,TheTimmy/spack,mfherbst/spack,krafczyk/spack,skosukhin/spack,LLNL/spack,matthiasdiener/spack,matthiasdiener/spack,krafczyk/spack,mfherbst/spack,LLNL/spack,lgarren/spack,lgarren/spack,TheTimmy/spack,iulian787/spack,mfherbst/spack,EmreAtes/spack,tmerrick1/spack,mfherbst/spack,iulian787/spack | var/spack/repos/builtin/packages/ipopt/package.py | var/spack/repos/builtin/packages/ipopt/package.py | from spack import *
class Ipopt(Package):
"""Ipopt (Interior Point OPTimizer, pronounced eye-pea-Opt) is a
software package for large-scale nonlinear optimization."""
homepage = "https://projects.coin-or.org/Ipopt"
url = "http://www.coin-or.org/download/source/Ipopt/Ipopt-3.12.4.tgz"
version('3.12.4', '12a8ecaff8dd90025ddea6c65b49cb03')
version('3.12.3', 'c560cbfa9cbf62acf8b485823c255a1b')
version('3.12.2', 'ec1e855257d7de09e122c446506fb00d')
version('3.12.1', 'ceaf895ce80c77778f2cab68ba9f17f3')
version('3.12.0', 'f7dfc3aa106a6711a85214de7595e827')
depends_on("blas")
depends_on("lapack")
depends_on("pkg-config")
depends_on("mumps+double~mpi")
def install(self, spec, prefix):
# Dependency directories
blas_dir = spec['blas'].prefix
lapack_dir = spec['lapack'].prefix
mumps_dir = spec['mumps'].prefix
# Add directory with fake MPI headers in sequential MUMPS
# install to header search path
mumps_flags = "-ldmumps -lmumps_common -lpord -lmpiseq"
mumps_libcmd = "-L%s " % mumps_dir.lib + mumps_flags
# By convention, spack links blas & lapack libs to libblas & liblapack
blas_lib = "-L%s" % blas_dir.lib + " -lblas"
lapack_lib = "-L%s" % lapack_dir.lib + " -llapack"
configure_args = [
"--prefix=%s" % prefix,
"--with-mumps-incdir=%s" % mumps_dir.include,
"--with-mumps-lib=%s" % mumps_libcmd,
"--enable-shared",
"--with-blas-incdir=%s" % blas_dir.include,
"--with-blas-lib=%s" % blas_lib,
"--with-lapack-incdir=%s" % lapack_dir.include,
"--with-lapack-lib=%s" % lapack_lib
]
configure(*configure_args)
# IPOPT does not build correctly in parallel on OS X
make(parallel=False)
make("test", parallel=False)
make("install", parallel=False)
| lgpl-2.1 | Python |
|
b69476c28ed3e67d77c93f8c1fc75fde0cb33f2a | Add WikiaSearch module | Didero/DideRobot | commands/WikiaSearch.py | commands/WikiaSearch.py | import requests
from CommandTemplate import CommandTemplate
from IrcMessage import IrcMessage
import Constants
class Command(CommandTemplate):
triggers = ['wikiasearch']
helptext = "Searches a wiki on Wikia.com. Usage: '{commandPrefix}wikiasearch [wiki-name] [search]'. Wiki names aren't case-sensitive, but searches are, sorry"
def execute(self, message):
"""
:type message: IrcMessage
"""
#First check if enough parameters were passed
if message.messagePartsLength == 0:
return message.reply("Please tell me which Wikia wiki you want me to search, there's a BILLION of 'em", "say")
elif message.messagePartsLength == 1:
return message.reply("What do you want me to search for on the {} Wikia wiki?".format(message.messageParts[0]), "say")
searchterm = " ".join(message.messageParts[1:])
articleSearchResult = self.retrieveArticleAbstract(message.messageParts[0], searchterm)
message.reply(articleSearchResult[1], "say")
@staticmethod
def retrieveArticleAbstract(wikiName, articleName):
#Retrieve the page, if we can
try:
r = requests.get("http://{}.wikia.com/api/v1/Articles/Details".format(wikiName), params={"titles": articleName.replace(" ", "_"), "abstract": "200"}, timeout=10.0)
except requests.exceptions.Timeout:
return (False, "Apparently Wikia got caught up reading that article, because it didn't get back to me. Maybe try again later")
#If the wiki doesn't exist, we get redirected to a different page
if r.url == "http://community.wikia.com/wiki/Community_Central:Not_a_valid_community?from={}.wikia.com".format(wikiName.lower()):
return (False, "Apparently the wiki '{}' doesn't exist on Wikia. You invented a new fandom!".format(wikiName))
#Request succeeded, wiki exists
apireply = r.json()
#If the requested page doesn't exist, the return is empty
if len(apireply['items']) == 0:
return (False, "Apparently the page '{}' doesn't exist. Seems you know more about {} than the fandom. Or maybe you made a typo?".format(articleName, wikiName))
articleId = apireply['items'].keys()[0]
articleInfo = apireply['items'][articleId]
print "[WikiaSearch] article info:", articleInfo
#Apparently the page exists. It could still be a redirect page though
if articleInfo['abstract'].startswith("REDIRECT "):
redirectArticleName = articleInfo['abstract'].split(' ', 1)[1]
return Command.retrieveArticleAbstract(wikiName, redirectArticleName)
#From here it's a success. We need the URL to append
url = "{}{}".format(apireply['basepath'], articleInfo['url'])
#Check if it isn't a disambiguation page
if articleInfo['abstract'].startswith("{} may refer to:".format(articleInfo['title'])):
return (True, "Apparently '{}' can mean multiple things. Who knew? Here's the list of what it can mean: {}".format(articleName, url))
#Seems we got an article start! Return that
return (True, articleInfo['abstract'] + Constants.GREY_SEPARATOR + url)
| mit | Python |
|
533aeb6cdc045f7d4cbfc4bc20dd89da4179ab35 | Add application class to hold all the resources pertaining to an end-user application including RPC servers, HTTP servers etc. | supersaiyanmode/HomePiServer,supersaiyanmode/HomePiServer,supersaiyanmode/HomePiServer | app/core/services/application.py | app/core/services/application.py | from threading import RLock
from uuid import uuid4
from app.core.messaging import Sender
class Application(object):
APPLICATION_INFO_QUEUE = "/_system/applications"
def __init__(self):
self.unique_id = str(uuid4())
self.rpc_servers = {}
self.app_servers = {}
self.info_lock = RLock()
def register_rpc_server(self, rpc_server):
with self.info_lock:
names = {x.name for x in self.rpc_servers.values()}
if rpc_server.name in names:
raise ValueError("Name already exists: " + rpc_server.name)
self.rpc_servers[rpc_server.queue_name] = rpc_server
self.push_update()
def register_application_server(self, server):
with self.info_lock:
self.app_servers[server.id] = server
self.push_update()
def push_update(self):
sender = Sender(self.APPLICATION_INFO_QUEUE)
sender.start()
sender.send(self.info_message, headers={"KEY": self.unique_id})
sender.close()
@property
def info_message(self):
with self.info_lock:
return {
"apps": {x.unique_id: x.info_message for x in self.app_servers},
"rpc": {x.unique_id: x.info_message for x in self.rpc_servers}
}
| mit | Python |
|
1df5619347b8f3e2a9fd49c95455e8b3aba07cf9 | Add example of desired new quick server usage | MuhammadAlkarouri/hug,giserh/hug,timothycrosley/hug,timothycrosley/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug,giserh/hug,timothycrosley/hug | examples/quick_server.py | examples/quick_server.py | import hug
@hug.get()
def quick():
return "Serving!"
if __name__ == '__main__':
__hug__.serve()
| mit | Python |
|
ce1a080c01a5f792d128278fbb035f50e106e959 | set up general logging and twitter stream log | meyersj/geotweet,meyersj/geotweet,meyersj/geotweet | geotweet/log.py | geotweet/log.py | import logging
from logging.handlers import TimedRotatingFileHandler
import os
LOG_NAME = 'geotweet'
LOG_FILE = os.getenv('GEOTWEET_LOG', '/tmp/geotweet.log')
LOG_LEVEL = logging.DEBUG
TWITTER_LOG_NAME = 'twitter-stream'
def get_logger():
logger = logging.getLogger(LOG_NAME)
logger.setLevel(LOG_LEVEL)
fh = logging.FileHandler(LOG_FILE)
logformat = '%(levelname)s %(asctime)s: %(message)s'
formatter = logging.Formatter(logformat)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
def get_rotating_logger(logfile, interval, when="M"):
logger = logging.getLogger(TWITTER_LOG_NAME)
logger.setLevel(LOG_LEVEL)
handler = TimedRotatingFileHandler(logfile, when=when, interval=interval)
logger.addHandler(handler)
return logger
logger = get_logger()
| mit | Python |
|
3b8d2cc0279e4da1ab758251f00fd065d951df53 | Add base for `help` command | 6180/foxybot | foxybot/commands/help.py | foxybot/commands/help.py | """Command to retrieve help for other commands and topics"""
from command import AbstractCommand, bot_command
from bot_help import HelpManager
@bot_command
class Help(AbstractCommand):
_aliases = ('help', 'h')
async def execute(self, shards, client, msg):
try:
args, extra = self._parser.parse_known_args(msg.content.split()[1:])
except SystemExit as ex:
await client.send_message(msg.channel, 'Something very very bad happened')
return
# await client.send_message(msg.channel, (args, extra))
await client.send_message(msg.channel, "Hello, World!")
@property
def name(self):
return self._name
@property
def aliases(self):
return self._aliases
| bsd-2-clause | Python |
|
453df6abe7741fe0f24c03754b26c197fa282656 | Create ValidateBST_002_iter.py | Chasego/codi,Chasego/codi,Chasego/codirit,cc13ny/Allin,Chasego/codi,cc13ny/algo,Chasego/codi,cc13ny/Allin,Chasego/cod,Chasego/cod,Chasego/codirit,cc13ny/algo,cc13ny/Allin,cc13ny/Allin,cc13ny/algo,Chasego/codi,cc13ny/algo,Chasego/codirit,Chasego/cod,cc13ny/Allin,Chasego/cod,cc13ny/algo,Chasego/cod,Chasego/codirit,Chasego/codirit | leetcode/098-Validate-Binary-Search-Tree/ValidateBST_002_iter.py | leetcode/098-Validate-Binary-Search-Tree/ValidateBST_002_iter.py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def isValidBST(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
stack = [root]
pre = None
while stack != [] and stack[0]:
p = stack.pop()
while p:
stack.append(p)
p = p.left
p = stack.pop()
if pre and pre.val >= p.val:
return False
pre = p
stack.append(p.right)
return True
| mit | Python |
|
0e5bbc4df461c17ff7d1297ee4236afaa9e52a96 | Create solution.py | lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges | leetcode/easy/remove_duplicates_from_sorted_array/py/solution.py | leetcode/easy/remove_duplicates_from_sorted_array/py/solution.py | class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# Without this check, the function
# will return slow + 1 when called
# with an empty array. This would
# be an error.
if len(nums) == 0:
return 0
slow = 0
for fast in range(len(nums)):
if nums[slow] != nums[fast]:
slow += 1
nums[slow] = nums[fast]
return slow + 1
| mit | Python |
|
e8fa15603b275a690d96e37ab9dc560e68dedbb1 | Add tests | RomanKharin/lrmq | test/test_02.py | test/test_02.py | import unittest
import os
import sys
import lrmq
import timeout_decorator
import tempfile
import pickle
import struct
import asyncio
TEST_TIMEOUT = 5 # it can fail in slow environment
def read_log(fn):
logs = []
with open(fn, "rb") as f:
while True:
slen = f.read(4)
if not slen:
break
slen = struct.unpack(">L", slen)[0]
data = pickle.loads(f.read(slen))
logs.append(data)
assert len(logs) > 0
return logs
class TestRPC(unittest.TestCase):
def setUp(self):
# reinitialize loop
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
# prepare test folder
self.logdir = tempfile.TemporaryDirectory()
def tearDown(self):
self.logdir.cleanup()
@timeout_decorator.timeout(TEST_TIMEOUT)
def test_single_master(self):
logname = os.path.join(self.logdir.name, "single_master")
code = lrmq.main({
"debuglogger": logname + ".pkl",
"loglevel": "DEBUG",
"log": logname + "_hub.log",
"agents": [{
"type": "stdio",
"cmd": "test/msc1.py",
"id": "test02_master",
"name": "test02_master",
"log": logname + "_master.log",
"loglevel": "DEBUG",
"args": ["master"]}
]
})
assert code == 0
for log in read_log(logname + ".pkl"):
log_id = None
if "log_id" in log:
print(log)
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
011fd9f5414d9f824a2c120084b98a1dc34cba0f | Add github_stargazers.py | marius92mc/github-stargazers,marius92mc/github-stargazers | src/github_stargazers.py | src/github_stargazers.py | import typing
import os
from bs4 import BeautifulSoup
import click
from halo import Halo
import requests
class UsernameRepositoryError(Exception):
def __init__(self) -> None:
super().__init__("Argument should be of form username/repository.")
class GitHub:
"""Creates a GitHub instance for listing the stargazers of a given repository
and checking if a user's full name is in the list of stargazers.
The constructor requires a string of the following form: `username/repository`,
both representing the GitHub meaning of them.
"""
__GITHUB_URL: str = "https://github.com"
__STARGAZERS_URL_SUFFIX: str = "/stargazers"
__PAGE_SUFFIX: str = "?page="
__OK_STATUS_CODE: int = 200
__TOO_MANY_REQUESTS_STATUS_CODE: int = 429
__MARK_END_OF_STARGAZERS: typing.List[str] = ['This repository has no more stargazers.']
def __init__(self, username_and_repository: str) -> None:
self.__username, self.__repository = self.__extract_user_and_repo(username_and_repository)
self.__repository_url = self.__get_repository_url()
self.__stargazers_base_url = self.__repository_url + self.__STARGAZERS_URL_SUFFIX
@classmethod
def __extract_user_and_repo(cls, username_and_repository: str) -> typing.Optional[typing.Tuple[str, str]]:
components: typing.List[str] = username_and_repository.split("/")
if len(components) != 2:
raise UsernameRepositoryError()
return components[0], components[1]
def __get_repository_url(self):
return os.path.join(self.__GITHUB_URL, self.__username, self.__repository)
def __get_soup(self, url: str) -> typing.Optional[BeautifulSoup]:
response = requests.get(url)
status_code: int = requests.get(url).status_code
if status_code == self.__OK_STATUS_CODE:
return BeautifulSoup(response.text, "html.parser")
if status_code == self.__TOO_MANY_REQUESTS_STATUS_CODE:
Halo().fail("Too many requests.")
print("{} HTTP".format(status_code))
return None
def __extract_stargazers_from_url(self, url: str) -> typing.Optional[typing.List[str]]:
spinner = Halo(text="Loading... " + url, spinner="dots")
spinner.start()
soup = self.__get_soup(url)
if not soup:
return None
h3_components = soup.find_all('h3')
users: typing.List[str] = []
for component in h3_components:
users.append(component.get_text())
spinner.stop()
if users == self.__MARK_END_OF_STARGAZERS:
return []
return users
def __get_url_page_template(self, page_number: int) -> str:
return self.__stargazers_base_url + self.__PAGE_SUFFIX + str(page_number)
def get_all_stargazers(self) -> typing.List[str]:
page_number: int = 1
all_stargazers: typing.List[str] = []
while True:
current_url: str = self.__get_url_page_template(page_number)
current_stargazers: typing.List[str] = self.__extract_stargazers_from_url(current_url)
if not current_stargazers:
break
all_stargazers += current_stargazers
page_number += 1
return sorted(all_stargazers)
def is_stargazer(self, user: str) -> bool:
page_number: int = 1
while True:
current_url: str = self.__get_url_page_template(page_number)
current_stargazers: typing.List[str] = self.__extract_stargazers_from_url(current_url)
if not current_stargazers:
break
if user in current_stargazers:
return True
page_number += 1
return False
@click.command()
@click.argument('username_and_repository')
@click.option('--user', default=None, help='User name to see if it is a stargazer')
def process_command(username_and_repository, user):
github = GitHub(username_and_repository)
if not user:
stargazers: typing.List[str] = github.get_all_stargazers()
print("Stargazers: ")
for stargazer in stargazers:
print(stargazer)
return
if github.is_stargazer(user):
Halo().succeed("Stargazer")
else:
Halo().fail("Not a Stargazer")
def main():
process_command() # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
main()
| mit | Python |
|
74550ef0c76a941c473c8d024ccc0a0403631c49 | Add basic structure for "/glossary" routes test | zsloan/genenetwork2,zsloan/genenetwork2,zsloan/genenetwork2,pjotrp/genenetwork2,genenetwork/genenetwork2,zsloan/genenetwork2,genenetwork/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2 | wqflask/tests/integration/test_markdown_routes.py | wqflask/tests/integration/test_markdown_routes.py | "Integration tests for markdown routes"
import unittest
from bs4 import BeautifulSoup
from wqflask import app
class TestGenMenu(unittest.TestCase):
"""Tests for glossary"""
def setUp(self):
self.app = app.test_client()
def tearDown(self):
pass
def test_glossary_page(self):
"""Test that the glossary page is rendered properly"""
response = self.app.get('/glossary', follow_redirects=True)
pass
| agpl-3.0 | Python |
|
5e1c48f9d00266290a8739f88085f050b1baa805 | Add test_backend.py in preparation for migrating backend to rigor's database layer | blindsightcorp/rigor-webapp,blindsightcorp/rigor-webapp,blindsightcorp/rigor-webapp | test_backend.py | test_backend.py | #!/usr/bin/env python
import types
import pprint
import backend
import config
from utils import *
DBNAME = config.CROWD_DB
debugMain('dbQueryDict')
sql = 'SELECT COUNT(*) FROM image;'
conn = backend.getDbConnection(DBNAME)
gen = backend.dbQueryDict(conn, sql)
assert isinstance(gen, types.GeneratorType)
rows = list(gen)
assert len(rows) == 1
assert isinstance(rows[0], dict)
assert 'count' in rows[0]
debugMain('getDatabaseNames')
names = backend.getDatabaseNames()
assert DBNAME in names
debugDetail(names)
debugMain('getTags')
tags = backend.getTags(DBNAME)
assert len(tags) > 0
assert isinstance(tags[0], basestring)
assert sorted(tags)[0] == 'align=center'
debugMain('getImage by id')
ID = 1
imgDict = backend.getImage(DBNAME, id=ID)
assert isinstance(imgDict, dict)
assert 'id' in imgDict
assert imgDict['id'] == ID
assert 'tags' in imgDict
assert len(imgDict['tags']) > 0
assert isinstance(imgDict['tags'][0], basestring)
debugMain('searchImages')
queryDict = dict(
database_name = DBNAME,
has_tags = ['align=left'],
page = 1,
max_count = 4,
)
count, results = backend.searchImages(queryDict)
assert count > 1
assert isinstance(results, list)
assert isinstance(results[0], dict)
assert 'tags' in results[0]
debugMain('getImage by locator')
LOCATOR = '4075c8de-fb2e-41e8-831b-ea4bdcb5a6a3'
imgDict = backend.getImage(DBNAME, locator=LOCATOR)
assert isinstance(imgDict, dict)
assert 'locator' in imgDict
assert imgDict['locator'] == LOCATOR
assert 'tags' in imgDict
assert len(imgDict['tags']) > 0
assert isinstance(imgDict['tags'][0], basestring)
debugMain('getImageAnnotations')
ID = 1
annotations = backend.getImageAnnotations(DBNAME, ID)
assert isinstance(annotations, list)
assert isinstance(annotations[0], dict)
assert 'domain' in annotations[0]
print green('===== success =====')
| bsd-2-clause | Python |
|
986ff101ce224494a5cdb047a1aefd99c8a6d840 | Add an aioredis example | channelcat/sanic,yunstanford/sanic,ashleysommer/sanic,Tim-Erwin/sanic,lixxu/sanic,ashleysommer/sanic,ai0/sanic,yunstanford/sanic,r0fls/sanic,lixxu/sanic,Tim-Erwin/sanic,ai0/sanic,lixxu/sanic,jrocketfingers/sanic,jrocketfingers/sanic,yunstanford/sanic,channelcat/sanic,lixxu/sanic,yunstanford/sanic,r0fls/sanic,channelcat/sanic,channelcat/sanic,ashleysommer/sanic | examples/sanic_aioredis_example.py | examples/sanic_aioredis_example.py | """ To run this example you need additional aioredis package
"""
from sanic import Sanic, response
import aioredis
app = Sanic(__name__)
@app.route("/")
async def handle(request):
async with request.app.redis_pool.get() as redis:
await redis.set('test-my-key', 'value')
val = await redis.get('test-my-key')
return response.text(val.decode('utf-8'))
@app.listener('before_server_start')
async def before_server_start(app, loop):
app.redis_pool = await aioredis.create_pool(
('localhost', 6379),
minsize=5,
maxsize=10,
loop=loop
)
@app.listener('after_server_stop')
async def after_server_stop(app, loop):
app.redis_pool.close()
await app.redis_pool.wait_closed()
if __name__ == '__main__':
app.run(host="0.0.0.0", port=8000)
| mit | Python |
|
e355a926155355ccc5d8b545534f331bdb683f02 | Add management | wailashi/podcastsync,wailashi/podcastsync | podcastsync.py | podcastsync.py | import click
from getpass import getpass
from gposerver import create_app, db, User, Device, EpisodeAction
app = create_app()
@app.shell_context_processor
def make_shell_context():
return dict(app=app, db=db, User=User, Device=Device, EpisodeAction=EpisodeAction)
@app.cli.command()
def adduser():
"""Add new user."""
username = input("Username: ")
password = getpass("Password: ")
u = User(username, password)
db.session.add(u)
db.session.commit()
@app.cli.command()
def init():
"""Initialise database."""
db.create_all()
| mit | Python |
|
1786ebacb85b2ddce816fb21b80285d991761695 | Implement classes to be used by the deserializer | hackebrot/poyo | poyo/_nodes.py | poyo/_nodes.py | # -*- coding: utf-8 -*-
class TreeElement(object):
    """Marker base class for all nodes of the parsed YAML tree.

    Its ``__init__`` terminates the cooperative ``super().__init__`` chain
    used by the mixins below.
    """

    def __init__(self, **kwargs):
        # Intentionally swallow any remaining keyword arguments (e.g. the
        # 'parent' kwarg forwarded through the mixin chain).
        pass


class ContainerMixin(object):
    """Mixin that can hold TreeElement instances.

    Containers can be called to return a dict representation of their
    subtree (child name -> child value).
    """

    def __init__(self, **kwargs):
        self._children = []
        super(ContainerMixin, self).__init__(**kwargs)

    def __iter__(self):
        for c in self._children:
            yield c

    def __call__(self):
        """Return the subtree as a dict mapping child names to child values."""
        return {c.name: c() for c in self}

    def add_child(self, child):
        """Append *child* to this container and set its parent back-reference.

        Raises:
            TypeError: If *child* is not a ChildMixin instance.
        """
        if not isinstance(child, ChildMixin):
            # Bug fix: the message previously claimed "Requires instance of
            # TreeElement" although the check is against ChildMixin.
            raise TypeError(
                'Requires instance of ChildMixin. '
                'Got {}'.format(type(child))
            )
        child.parent = self
        self._children.append(child)


class ChildMixin(object):
    """Mixin that can be attached to a Container object via a 'parent' kwarg."""

    def __init__(self, **kwargs):
        parent = kwargs['parent']
        if not isinstance(parent, ContainerMixin):
            # Kept as ValueError (not TypeError) for backward compatibility
            # with existing callers.
            raise ValueError(
                'Parent of ChildMixin instance needs to be a Container.'
            )
        parent.add_child(self)
        super(ChildMixin, self).__init__(**kwargs)


class Root(ContainerMixin, TreeElement):
    """Pure Container class to represent the root of a YAML config."""

    def __init__(self, **kwargs):
        super(Root, self).__init__(**kwargs)
        # Sentinel level so every real section (level >= 0) nests below it.
        self.level = -1


class Section(ContainerMixin, ChildMixin, TreeElement):
    """Class that can act as a Child, but also as a Container."""

    def __init__(self, name, level, **kwargs):
        super(Section, self).__init__(**kwargs)
        self.name = name
        self.level = level


class Simple(ChildMixin, TreeElement):
    """Leaf node for simple key/value pairs in a config.

    Calling a Simple returns its stored value.
    """

    def __init__(self, name, level, value, **kwargs):
        super(Simple, self).__init__(**kwargs)
        self.name = name
        self.level = level
        self.value = value

    def __call__(self):
        return self.value
| mit | Python |
|
9f276fba97318431d85c08fc0718b30bf39ed1bf | Create add-one-row-to-tree.py | kamyu104/LeetCode,yiwen-luo/LeetCode,jaredkoontz/leetcode,kamyu104/LeetCode,jaredkoontz/leetcode,kamyu104/LeetCode,yiwen-luo/LeetCode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode | Python/add-one-row-to-tree.py | Python/add-one-row-to-tree.py | # Time: O(n)
# Space: O(h)
# Given the root of a binary tree, then value v and depth d,
# you need to add a row of nodes with value v at the given depth d. The root node is at depth 1.
#
# The adding rule is: given a positive integer depth d,
# for each NOT null tree nodes N in depth d-1, create two tree nodes
# with value v as N's left subtree root and right subtree root.
# And N's original left subtree should be the left subtree of the new left subtree root,
# its original right subtree should be the right subtree of the new right subtree root.
# If depth d is 1 that means there is no depth d-1 at all,
# then create a tree node with value v as the new root of the whole original tree,
# and the original tree is the new root's left subtree.
#
# Example 1:
# Input:
# A binary tree as following:
# 4
# / \
# 2 6
# / \ /
# 3 1 5
#
# v = 1
#
# d = 2
#
# Output:
# 4
# / \
# 1 1
# / \
# 2 6
# / \ /
# 3 1 5
#
# Example 2:
# Input:
# A binary tree as following:
# 4
# /
# 2
# / \
# 3 1
#
# v = 1
#
# d = 3
#
# Output:
# 4
# /
# 2
# / \
# 1 1
# / \
# 3 1
# Note:
# 1. The given d is in range [1, maximum depth of the given tree + 1].
# 2. The given binary tree has at least one tree node.
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def addOneRow(self, root, v, d):
        """
        :type root: TreeNode
        :type v: int
        :type d: int
        :rtype: TreeNode

        Inserts a row of nodes with value v at depth d (the root is depth 1).

        Internal encoding: d == 1 means "create a new node and attach the
        old subtree as its LEFT child", and d == 0 is a private sentinel
        meaning "attach it as the RIGHT child".  External callers only ever
        pass d >= 1 (per the problem constraints).
        """
        if d in (0, 1):
            # Base case: splice a new node in, hanging the existing subtree
            # on the side encoded by d (1 -> left, 0 -> right).
            node = TreeNode(v)
            if d == 1:
                node.left = root
            else:
                node.right = root
            return node

        if root and d >= 2:
            # More than one level above the target: plain recursion with d-1.
            # Exactly one level above (d == 2): recurse with the sentinels
            # 1 and 0 so each child knows which side the old subtree goes on.
            root.left = self.addOneRow(root.left, v, d-1 if d > 2 else 1)
            root.right = self.addOneRow(root.right, v, d-1 if d > 2 else 0)
        return root
| mit | Python |
|
2e7e83a0c3b789a0d0ba89134b64a0f6b723c3af | add forgotten path-building test | INCF/pybids | bids/layout/tests/test_path_building.py | bids/layout/tests/test_path_building.py | import pytest
from bids.layout import BIDSLayout
from os.path import join, abspath, sep
from bids.tests import get_test_data_path
@pytest.fixture(scope='module')
def layout():
data_dir = join(get_test_data_path(), '7t_trt')
return BIDSLayout(data_dir)
def test_bold_construction(layout):
ents = dict(subject='01', run=1, task='rest', suffix='bold')
assert layout.build_path(ents) == "sub-01/func/sub-01_task-rest_run-1_bold.nii.gz"
ents['acquisition'] = 'random'
assert layout.build_path(ents) == "sub-01/func/sub-01_task-rest_acq-random_run-1_bold.nii.gz" | mit | Python |
|
b447711c4396c36bc845184961d28660735c6f3d | Create window.py | mpjoseca/ate | src/new/window.py | src/new/window.py | # window draws
# editor window
class EditorWindow(Fl_Double_Window) :
search = ""
def __init__(self, w, h, label) :
Fl_Double_Window.__init__(self, w, h, label)
# set/update title
def set_title(win):
global filename, title
if len(filename) == 0:
title = "Untitled"
else:
title = os.path.basename(filename)
if changed:
title = title+" (modified)"
win.label(title)
| isc | Python |
|
7ef6c8c3ea0e2481a424bcca91496ce14c0aec4a | add basic file verifier, just checks dimensions, need to add header and vlr checks. | blazbratanic/laspy,silyko/laspy,blazbratanic/laspy,silyko/laspy | misc/file_verify.py | misc/file_verify.py | #!/usr/bin/env python
import sys
sys.path.append("../")
from laspy import file as File
inFile1 = File.File(sys.argv[1],mode= "r")
inFile2 = File.File(sys.argv[2],mode= "r")
spec = inFile1.reader.point_format.lookup.keys()
def f(x):
return(list(inFile1.reader.get_dimension(x)) == list(inFile2.reader.get_dimension(x)))
passed = 0
failed = 0
for dim in spec:
if f(dim):
passed += 1
print("Dimension: " + dim + " is identical.")
else:
failed += 1
print("Dimension: " + dim + " is not identical")
print(str(passed) + " identical dimensions out of " + str(passed + failed))
inFile1.close()
inFile2.close()
| bsd-2-clause | Python |
|
895571ec359e7571f8581f3635ae1c452ed911a5 | add a nova command | rajpushkar83/cloudmesh,rajpushkar83/cloudmesh,rajpushkar83/cloudmesh,rajpushkar83/cloudmesh,rajpushkar83/cloudmesh,rajpushkar83/cloudmesh,rajpushkar83/cloudmesh | cloudmesh_cmd3/plugins/cm_shell_nova.py | cloudmesh_cmd3/plugins/cm_shell_nova.py | from cmd3.shell import command
from cloudmesh_common.logger import LOGGER
import os
from cloudmesh_common.tables import row_table
log = LOGGER(__file__)
class cm_shell_nova:
    """cmd3 shell plugin that wraps the OpenStack ``nova`` command line client."""

    def activate_cm_shell_nova(self):
        # cmd3 activation hook: files the "nova" command under the "cloud"
        # help topic.
        self.register_command_topic('cloud','nova')
        pass

    @command
    def do_nova(self, args, arguments):
        """
        Usage:
               nova login
               nova info
               nova help
               nova ARGUMENTS

        A simple wrapper for the openstack nova command

        Arguments:

          ARGUMENTS      The arguments passed to nova
          help           Prints the nova manual
          login          reads the information from the current cloud
                         and updates the environment variables if
                         the cloud is an openstack cloud
          info           the environment values for OS

        Options:

           -v       verbose mode

        """
        # NOTE: the docstring above is parsed docopt-style by the @command
        # decorator to build `arguments`; do not reword it casually.
        # log.info(arguments)
        if arguments["help"]:
            # Delegate straight to the nova CLI's own help output.
            os.system("nova help")
            return
        elif arguments["info"]:
            #
            # prints the current os env variables for nova
            #
            d = {}
            for attribute in ['OS_USER_ID',
                              'OS_USERNAME',
                              'OS_TENANT_NAME',
                              'OS_AUTH_URL',
                              'OS_CACERT',
                              'OS_PASSWORD',
                              'OS_REGION']:
                try:
                    d[attribute] = os.environ[attribute]
                except:
                    # Unset variables are shown as None rather than aborting.
                    d[attribute] = None
            print row_table(d, order=None, labels=["Variable", "Value"])
            return
        elif arguments["login"]:
            print "Not yet implemented"
            #
            # TODO: implemet
            #
            # cloud = get current default
            # if cloud type is openstack:
            #     credentials = get credentials
            #     set the credentials in the current os system env variables
            #
        else:
            # Anything else is passed through to the nova CLI verbatim.
            os.system("nova {0}".format(arguments["ARGUMENTS"]))
            return
| apache-2.0 | Python |
|
2bf2a0849c1524f3ac56533d9f36eb907213f819 | Add WebAPI plugin | alama/PSO2Proxy,flyergo/PSO2Proxy,cyberkitsune/PSO2Proxy,alama/PSO2Proxy,alama/PSO2Proxy,cyberkitsune/PSO2Proxy,cyberkitsune/PSO2Proxy,flyergo/PSO2Proxy | proxy/plugins/WebAPI.py | proxy/plugins/WebAPI.py | from ..data import clients, blocks, players
from twisted.web.server import Site
from twisted.web.resource import Resource
import json, time
upStart = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
class WebAPI(Resource):
    """Twisted web resource exposing proxy statistics as a JSON document."""

    def render_GET(self, request):
        # Snapshot of the current counters plus the process start time
        # recorded at module import (upStart).
        currData = {'count' : len(clients.connectedClients), 'blocksCached' : len(blocks.blockList), 'playersCached' : len(players.playerList), 'upSince' : upStart}
        return json.dumps(currData)
|
19348f5d8e2832fbf378578d38516df66dc849b6 | Implement IRCv3.1 StartTLS | Heufneutje/PyHeufyBot,Heufneutje/PyHeufyBot | heufybot/modules/ircv3/starttls.py | heufybot/modules/ircv3/starttls.py | from twisted.internet.interfaces import ISSLTransport
from twisted.plugin import IPlugin
from heufybot.moduleinterface import BotModule, IBotModule
from zope.interface import implements
try:
from twisted.internet import ssl
except ImportError:
ssl = None
class IRCv3StartTLS(BotModule):
    """Implements the IRCv3.1 "tls" capability (STARTTLS): upgrades a
    plaintext server connection to TLS after capability negotiation."""
    implements(IPlugin, IBotModule)

    name = "StartTLS"

    def actions(self):
        # (action name, priority, handler) triples.  Numerics 670/691 are
        # the STARTTLS success/failure replies defined by the IRCv3 tls
        # extension.
        return [ ("listcaps", 1, self.addToCapList),
                 ("caps-acknowledged", 1, self.requestNegotiation),
                 ("pre-handlenumeric-670", 1, self.startNegotiation),
                 ("pre-handlenumeric-691", 1, self.negotiationFailed) ]

    def addToCapList(self, server, caps):
        # Only advertise "tls" when the connection is not already secure and
        # twisted's ssl support imported successfully (ssl is None otherwise).
        if not self.bot.servers[server].secureConnection and ssl is not None:
            caps.append("tls")

    def requestNegotiation(self, server, caps):
        # Once the server acknowledges the "tls" capability, request the
        # TLS handshake.
        if "tls" in caps:
            self.bot.log.info("[{server}] Trying to initiate StartTLS...", server=server)
            self.bot.servers[server].sendMessage("STARTTLS")

    def startNegotiation(self, server, prefix, params):
        # 670 reply: the server is ready; switch the transport to TLS now.
        self.bot.log.info("[{server}] Server replied: \"{reply}\"", server=server, reply=params[1])
        self.bot.log.info("[{server}] Proceeding with TLS handshake...", server=server)
        self.bot.servers[server].transport.startTLS(ssl.CertificateOptions())
        if ISSLTransport.providedBy(self.bot.servers[server].transport):
            self.bot.servers[server].secureConnection = True
            self.bot.log.info("[{server}] TLS handshake successful. Connection is now secure.", server=server)
        # NOTE(review): returning True presumably suppresses further handling
        # of this numeric (this is a "pre-handlenumeric" hook) -- confirm
        # against the heufybot module interface.
        return True

    def negotiationFailed(self, server, prefix, params):
        # 691 reply: the server refused or failed StartTLS; the connection
        # stays plaintext.
        self.bot.log.warn("[{server}] StartTLS failed, reason: \"{reply}\".", server=server, reply=params[1])
        return True
startTLS = IRCv3StartTLS()
| mit | Python |
|
63e14ae4485bcca682b952e5ab7f125f58c3d960 | Add pwnypack ipython extension. | edibledinos/pwnypack,edibledinos/pwnypack | pwnypack/ipython_ext.py | pwnypack/ipython_ext.py | import functools
import shlex
import pwny
import pwnypack.main
__all__ = []
def call_main_func(func_name, ipython, line):
    # Bridge an IPython line magic to pwnypack's CLI: the magic's argument
    # string is shell-split and passed as argv to the named subcommand.
    pwnypack.main.main([func_name] + shlex.split(line))


def load_ipython_extension(ipython):
    # Expose every public pwny name in the interactive namespace and register
    # one line magic per pwnypack CLI subcommand.
    ipython.push(vars(pwny))
    for f_name in pwnypack.main.MAIN_FUNCTIONS:
        ipython.define_magic(f_name, functools.partial(call_main_func, f_name))


def unload_ipython_extension(ipython):
    # Best-effort cleanup: drop the names pushed in load_ipython_extension.
    # (The registered magics are left in place.)
    ipython.drop_by_id(vars(pwny))
| mit | Python |
|
7fbfca47b2b435a0aa4df8d39699831f752f351d | Add initial code for scraping standings data | jldbc/pybaseball | pybaseball/standings.py | pybaseball/standings.py | from bs4 import BeautifulSoup
import requests
import datetime
def get_soup(date):
    """Fetch the Baseball-Reference MLB standings page for *date*'s year
    and return it as a BeautifulSoup document.

    NOTE(review): only the year of *date* is used, so any date in the same
    season yields the same page.
    """
    #year, month, day = [today.strftime("%Y"), today.strftime("%m"), today.strftime("%d")]
    #url = "http://www.baseball-reference.com/boxes?year={}&month={}&day={}".format(year, month, day)
    year = date.strftime("%Y")
    url = 'http://www.baseball-reference.com/leagues/MLB/{}-standings.shtml'.format(year)
    s=requests.get(url).content
    # NOTE(review): no parser is passed to BeautifulSoup, so bs4 picks the
    # "best available" one -- results can differ between machines; consider
    # passing an explicit parser such as 'html.parser'.
    return BeautifulSoup(s)
def get_tables(soup):
    """Parse every <table> in *soup* into a list-of-rows dataset.

    Returns a list with one dataset per table; each dataset is a list whose
    first entry is the column headings and whose remaining entries are data
    rows, each prefixed with the team name taken from the row's first link.
    """
    tables = soup.find_all('table')
    datasets = []
    for table in tables:
        data = []
        # Headings come from the <th> cells of the table's first row.
        headings = [th.get_text() for th in table.find("tr").find_all("th")]
        data.append(headings)
        table_body = table.find('tbody')
        rows = table_body.find_all('tr')
        for row in rows:
            #data.append(row.find_all('a')[0]['title']) # team name
            cols = row.find_all('td')
            cols = [ele.text.strip() for ele in cols]
            # Prepend the team name (the title of the first <a> in the row).
            cols.insert(0,row.find_all('a')[0]['title'])
            # NOTE(review): empty cells are dropped here, so data rows can end
            # up shorter than the headings row -- confirm this is intended.
            data.append([ele for ele in cols if ele])
        datasets.append(data)
    return datasets
def standings(date=None):
    """Return the MLB standings tables for the season containing *date*.

    Parameters:
        date (datetime.datetime, optional): defaults to today.  Only the
        year is used (see get_soup).

    Returns:
        list: one parsed dataset per standings table (see get_tables).
    """
    # get most recent standings if date not specified
    if(date is None):
        date = datetime.datetime.today()
    # retrieve html from baseball reference
    soup = get_soup(date)
    tables = get_tables(soup)
    return tables
| mit | Python |
|
d187c51ccd9dc1676b6f16eddecee6dce752d668 | Make class test-class name more specific | RaoUmer/distarray,enthought/distarray,RaoUmer/distarray,enthought/distarray | distarray/tests/test_client.py | distarray/tests/test_client.py | import unittest
from IPython.parallel import Client
from distarray.client import DistArrayContext
class TestDistArrayContext(unittest.TestCase):
def setUp(self):
self.client = Client()
self.dv = self.client[:]
def test_create_DAC(self):
'''Can we create a plain vanilla context?'''
dac = DistArrayContext(self.dv)
self.assertIs(dac.view, self.dv)
def test_create_DAC_with_targets(self):
'''Can we create a context with a subset of engines?'''
dac = DistArrayContext(self.dv, targets=[0, 1])
self.assertIs(dac.view, self.dv)
if __name__ == '__main__':
unittest.main(verbosity=2)
| import unittest
from IPython.parallel import Client
from distarray.client import DistArrayContext
class TestClient(unittest.TestCase):
def setUp(self):
self.client = Client()
self.dv = self.client[:]
def testCreateDAC(self):
'''Can we create a plain vanilla context?'''
dac = DistArrayContext(self.dv)
self.assertIs(dac.view, self.dv)
def testCreateDACwithTargets(self):
'''Can we create a context with a subset of engines?'''
dac = DistArrayContext(self.dv, targets=[0, 1])
self.assertIs(dac.view, self.dv)
if __name__ == '__main__':
unittest.main(verbosity=2)
| bsd-3-clause | Python |
b5d8b29a34a4675ad5de33511bfca486f648a134 | Create _source.py | Manazius/blacksmith-bot,Manazius/blacksmith-bot | static/_source.py | static/_source.py | # coding: utf-8
# BlackSmith general configuration file
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Jabber server to connect
SERVER = 'example.com'
# Connecting Port
PORT = 5222
# Jabber server`s connecting Host
HOST = 'example.com'
# Using TLS (True - to enable, False - to disable)
SECURE = True
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# User`s account
USERNAME = 'username'
# Jabber ID`s Password
PASSWORD = 'password'
# Resourse (please don`t touch it)
RESOURCE = u'simpleApps' # You can write unicode symbols here
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Default chatroom nick
DEFAULT_NICK = u'BlackSmith-m.1' # You can write unicode symbols here
# Groupchat message size limit
CHAT_MSG_LIMIT = 1024
# Private/Roster message size limit
PRIV_MSG_LIMIT = 2024
# Incoming message size limit
INC_MSG_LIMIT = 8960
# Working without rights of moder (True - to enable, False - to disable)
MSERVE = False
# Jabber account of bot`s owner
BOSS = '[email protected]'
# Memory usage limit (size in kilobytes, 0 - not limited)
MEMORY_LIMIT = 49152
# Admin password, used as a key to command "login"
BOSS_PASS = ''
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
| apache-2.0 | Python |
|
647f0c1409dcd22d69a79d21571d2c03f794a2a8 | Test iter and yield | zzz0072/Python_Exercises,zzz0072/Python_Exercises | 99_misc/iterator.py | 99_misc/iterator.py | #/usr/bin/env python
# Test yield generator
def my_double(arr):
for i in arr:
yield i * 2
for i in my_double(range(1, 10)):
print("{0} ".format(i)),
print("\n"),
# Text iteration
i = iter(my_double(range(10, 21)))
print i
for j in range (1, 10):
print("{0} ".format(i.next())),
| bsd-2-clause | Python |
|
28944376472130d53a05f7473e7213c917207cd4 | Add model representing a listing | rlucioni/craigbot,rlucioni/craigbot,rlucioni/apartments | apartments/models.py | apartments/models.py | from sqlalchemy import create_engine, Column, DateTime, Float, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Listing(Base):
__tablename__ = 'listings'
id = Column(Integer, primary_key=True)
craigslist_id = Column(Integer, unique=True)
name = Column(String)
price = Column(Float)
link = Column(String, unique=True)
created = Column(DateTime)
area = Column(String)
geotag = Column(String)
lat = Column(Float)
lon = Column(Float)
location = Column(String)
nearest_stop = Column(String)
def __repr__(self):
return f'<Listing(name={self.name}, price={self.price}, craigslist_id={self.craigslist_id})>'
engine = create_engine('sqlite:///apartments.db')
Base.metadata.create_all(engine)
| mit | Python |
|
38cbc73f70a9ca896a29d7fa2e000388bbf40d88 | Add script to generate data from an experiment | NLeSC/cptm,NLeSC/cptm | DilipadTopicModelling/experiment_get_results.py | DilipadTopicModelling/experiment_get_results.py | import logging
import os
import pandas as pd
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
# select experiment to get parameters from
nTopics = 100
start = 80
end = 199
alpha = 50.0/nTopics
beta = 0.02
nIter = 200
# load corpus
data_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/'
corpus = CPTCorpus.load('{}corpus.json'.format(data_dir),
topicDict='{}/topicDict.dict'.format(data_dir),
opinionDict='{}/opinionDict.dict'.format(data_dir))
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'.format(nTopics)
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter, alpha=alpha,
beta=beta, beta_o=beta, out_dir=out_dir)
sampler._initialize()
sampler.estimate_parameters(start=start, end=end)
pd.DataFrame(sampler.theta).to_csv(os.path.join(out_dir, 'theta_{}.csv'.
format(nTopics)))
topics = sampler.topics_to_df(phi=sampler.topics, words=corpus.topic_words())
topics.to_csv(os.path.join(out_dir, 'topics_{}.csv'.format(nTopics)))
for i, p in enumerate(sampler.corpus.perspectives):
opinions = sampler.topics_to_df(phi=sampler.opinions[i],
words=corpus.opinion_words())
opinions.to_csv(os.path.join(out_dir,
'opinions_{}_{}.csv'.format(p.name, nTopics)))
| apache-2.0 | Python |
|
656d94c0375f6a96cc3a9d4b3227d8f19afe3dea | Add lemon drop elevator model | WesleyAC/toybox,WesleyAC/toybox,WesleyAC/toybox,WesleyAC/toybox,WesleyAC/toybox | control/systems/main.py | control/systems/main.py | import numpy as np
Kt = 1.41/89.0
Kv = 5840.0/3.0
G = 10.0
J = 4.0*(2.54**2.0)/2.0 # 4 kg on a 1 inch pully
R = 12.0/89.0
A = np.asarray([[0, 1],
[0, -(Kt*Kv)/((G**2)*J*R)]])
B = np.asarray([[0],
[Kt/(G*J*R)]])
| mit | Python |
|
2ca0d97649529dfc66486dc1d3e7fa1e37d8ee91 | add integration test for api analytics | cmu-delphi/delphi-epidata,cmu-delphi/delphi-epidata,cmu-delphi/delphi-epidata,cmu-delphi/delphi-epidata,cmu-delphi/delphi-epidata,cmu-delphi/delphi-epidata | integrations/test_api_analytics.py | integrations/test_api_analytics.py | """Integration tests for internal analytics."""
# standard library
import unittest
# third party
import mysql.connector
import requests
# use the local instance of the Epidata API
BASE_URL = 'http://delphi_web_epidata/epidata/api.php'
class ApiAnalyticsTests(unittest.TestCase):
"""Tests internal analytics not specific to any particular endpoint."""
def setUp(self):
"""Perform per-test setup."""
# connect to the `epidata` database and clear the `api_analytics` table
cnx = mysql.connector.connect(
user='user',
password='pass',
host='delphi_database_epidata',
database='epidata')
cur = cnx.cursor()
cur.execute('truncate table api_analytics')
cnx.commit()
cur.close()
# make connection and cursor available to test cases
self.cnx = cnx
self.cur = cnx.cursor()
def tearDown(self):
"""Perform per-test teardown."""
self.cur.close()
self.cnx.close()
def test_analytics_update(self):
"""Update internal analytics for requests to the API."""
make_request = lambda src: requests.get(BASE_URL, params={'source': src})
# make some requests
for _ in range(1):
make_request('source1')
for _ in range(5):
make_request('source2')
for _ in range(19):
make_request('source3')
# verify that analytics are available
self.cur.execute('''
select source, count(1)
from api_analytics
group by source
order by source
''')
values = [row for row in self.cur]
self.assertEqual(values, [
('source1', 1),
('source2', 5),
('source3', 19),
])
| mit | Python |
|
8cc622db293816fc96bb7df0139b57a2b5a2eaef | add scanning of live IP addresses with ping sweep, multi threading | rudolfvavra/network,rudolfvavra/network,rudolfvavra/network,rudolfvavra/network,rudolfvavra/network,rudolfvavra/network,rudolfvavra/network,rudolfvavra/network,rudolfvavra/network,rudolfvavra/network,rudolfvavra/network | Scan_IpAdds_ping.py | Scan_IpAdds_ping.py | import os, platform, collections
import socket, subprocess,sys
import threading
from datetime import datetime
class myThread (threading.Thread):
    """Worker thread that ping-sweeps one slice of the last-octet range."""

    def __init__(self,startLastOctet,endLastOctet):
        threading.Thread.__init__(self)
        # Inclusive start / exclusive end of this worker's slice.
        self.startLastOctet = startLastOctet
        self.endLastOctet = endLastOctet

    def run(self):
        # Delegate to the module-level ping sweep for this slice.
        runThread(self.startLastOctet,self.endLastOctet)
def getNetwork():
    """Interactively ask for a /24 network and the range of last octets.

    Returns a 5-tuple:
        firstThreeOctet -- e.g. "192.168.11." (note the trailing dot)
        startLastOctet  -- first last-octet value to scan (inclusive)
        endLastOctet    -- one past the last value (exclusive, range-style)
        dic             -- empty OrderedDict that will collect live IPs
        pingCmd         -- OS-appropriate "send one ping" command prefix
    """
    net = raw_input("Enter the Network Address:\t\t ")
    octets = net.split('.')
    # Keep explicit indexing so a malformed address still fails loudly.
    firstThreeOctet = octets[0] + '.' + octets[1] + '.' + octets[2] + '.'
    startLastOctet = int(raw_input("Enter the beginning of last Octet:\t "))
    # Typo fixed in the prompt: "end od" -> "end of".
    endLastOctet = int(raw_input("Enter the end of last Octet:\t\t "))
    # Make the end exclusive so callers can feed it straight into xrange().
    endLastOctet = endLastOctet + 1
    dic = collections.OrderedDict()
    # Windows ping uses -n for the packet count; everything else uses -c
    # (the Linux and fallback branches were identical, so they are merged).
    if platform.system() == "Windows":
        pingCmd = "ping -n 1 "
    else:
        pingCmd = "ping -c 1 "
    return firstThreeOctet, startLastOctet, endLastOctet, dic, pingCmd
def runThread(startLastOctet,endLastOctet):
    """Ping every host in [startLastOctet, endLastOctet) and record the
    live ones.

    Relies on module globals set up in __main__: ``firstThreeOctet``,
    ``pingCmd`` and the shared result dict ``dic`` (keyed by last octet).
    """
    #print "Scanning in Progess"
    for ip in xrange(startLastOctet,endLastOctet):
        addr = firstThreeOctet+str(ip)
        pingAddress = pingCmd+addr
        # Blocking shell call: one ping per address, output read line by line.
        response = os.popen(pingAddress)
        for line in response.readlines():
            #if(line.count("TTL")):
            #    break
            # NOTE(review): Windows ping prints "TTL" in upper case (see the
            # commented-out check above), so the lower-case test below
            # presumably misses live hosts on Windows -- confirm and match
            # case-insensitively if so.
            if (line.count("ttl")):
                #print addr, "--> Live"
                dic[ip]= addr
                break
if __name__ == '__main__':
subprocess.call('clear',shell=True)
print "-" * 75
print "This program search for life IPs in last octet, with multiple threads "
print "\tFor example: 192.168.11.xxx - 192.168.11.yyy"
print "-" * 75
firstThreeOctet, startLastOctet, endLastOctet, dic, pingCmd = getNetwork()
t1= datetime.now()
total_ip =endLastOctet-startLastOctet
tn =3 # number of ip handled by one thread
total_thread = total_ip/tn
total_thread=total_thread+1
threads= []
try:
for i in xrange(total_thread):
en = startLastOctet+tn
if(en >endLastOctet):
en =endLastOctet
thread = myThread(startLastOctet,en)
thread.start()
threads.append(thread)
startLastOctet =en
except:
print "Error: unable to start thread"
print "\t Number of Threads active:", threading.activeCount()
for t in threads:
t.join()
print "\tExiting Main Thread"
sortedIPs = collections.OrderedDict(sorted(dic.items()))
for key in sortedIPs:
print "IP address: {} \t --> Live".format(sortedIPs[key])
t2= datetime.now()
total =t2-t1
print "Scanning complete in " , total
| mit | Python |
|
1489e896952f5a3ea498618f615c5fd133a297c7 | Add test cases for pricing API with DiscountModules | suutari/shoop,shawnadelic/shuup,shoopio/shoop,suutari-ai/shoop,hrayr-artunyan/shuup,shawnadelic/shuup,suutari-ai/shoop,suutari/shoop,akx/shoop,akx/shoop,shoopio/shoop,akx/shoop,shawnadelic/shuup,shoopio/shoop,suutari/shoop,suutari-ai/shoop,hrayr-artunyan/shuup,hrayr-artunyan/shuup | shoop_tests/core/test_pricing_discounts.py | shoop_tests/core/test_pricing_discounts.py | # This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import decimal
import pytest
from django.conf import settings
from shoop.apps.provides import override_provides
from shoop.core.pricing import (
DiscountModule, get_price_info, get_price_infos, get_pricing_steps,
get_pricing_steps_for_products
)
from shoop.testing.factories import create_product, get_default_shop
from shoop.testing.utils import apply_request_middleware
provide_overrider = override_provides(
"discount_module", [__name__ + ':Minus25DiscountModule'])
def setup_module(module):
global original_pricing_module
global original_discount_modules
original_pricing_module = settings.SHOOP_PRICING_MODULE
original_discount_modules = settings.SHOOP_DISCOUNT_MODULES
settings.SHOOP_PRICING_MODULE = "default_pricing"
settings.SHOOP_DISCOUNT_MODULES = ["minus25"]
provide_overrider.__enter__()
def teardown_module(module):
global original_pricing_module
global original_discount_modules
provide_overrider.__exit__(None, None, None)
settings.SHOOP_PRICING_MODULE = original_pricing_module
settings.SHOOP_DISCOUNT_MODULES = original_discount_modules
class Minus25DiscountModule(DiscountModule):
identifier = "minus25"
def discount_price(self, context, product, price_info):
price_info.price *= (1 - decimal.Decimal('0.25'))
return price_info
def initialize_test(rf):
shop = get_default_shop()
request = rf.get("/")
request.shop = shop
apply_request_middleware(request)
product1 = create_product("test-product1", shop=shop, default_price=120)
product2 = create_product("test-product2", shop=shop, default_price=180)
return (request, [product1, product2], shop.create_price)
@pytest.mark.django_db
def test_get_price_info(rf):
(request, products, price) = initialize_test(rf)
pi = get_price_info(request, products[0])
assert pi.price == price(90)
assert pi.base_price == price(120)
assert pi.quantity == 1
@pytest.mark.django_db
def test_get_price_info_with_quantity(rf):
(request, products, price) = initialize_test(rf)
pi = get_price_info(request, products[0], 20)
assert pi.price == price(1800)
assert pi.base_price == price(2400)
assert pi.quantity == 20
@pytest.mark.django_db
def test_product_get_price_info(rf):
(request, products, price) = initialize_test(rf)
pi = products[0].get_price_info(request)
assert pi.price == price(90)
assert pi.base_price == price(120)
@pytest.mark.django_db
def test_get_price_infos(rf):
(request, products, price) = initialize_test(rf)
pis = get_price_infos(request, products)
assert set(pis.keys()) == set(x.id for x in products)
pi1 = pis[products[0].id]
pi2 = pis[products[1].id]
assert pi1.price == price(90)
assert pi1.base_price == price(120)
assert pi2.price == price(135)
assert pi2.base_price == price(180)
@pytest.mark.django_db
def test_get_pricing_steps(rf):
(request, products, price) = initialize_test(rf)
pis = get_pricing_steps(request, products[0])
assert len(pis) == 1
assert pis[0].quantity == 1
assert pis[0].price == price(90)
assert pis[0].base_price == price(120)
@pytest.mark.django_db
def test_get_pricing_steps_for_products(rf):
(request, products, price) = initialize_test(rf)
pis = get_pricing_steps_for_products(request, products)
assert set(pis.keys()) == set(x.id for x in products)
assert len(pis[products[0].id]) == 1
assert len(pis[products[1].id]) == 1
assert pis[products[0].id][0].quantity == 1
assert pis[products[0].id][0].price == price(90)
assert pis[products[0].id][0].base_price == price(120)
assert pis[products[1].id][0].quantity == 1
assert pis[products[1].id][0].price == price(135)
assert pis[products[1].id][0].base_price == price(180)
| agpl-3.0 | Python |
|
e670de6ecb7be3da56acf2976148574165cb69aa | Add missing test module | h5py/h5py,h5py/h5py,h5py/h5py | h5py/tests/test_utils.py | h5py/tests/test_utils.py | #+
#
# This file is part of h5py, a low-level Python interface to the HDF5 library.
#
# Copyright (C) 2008 Andrew Collette
# http://h5py.alfven.org
# License: BSD (See LICENSE.txt for full license)
#
# $Date$
#
#-
import sys
import numpy
from common import HDF5TestCase, api_18
from h5py import *
from h5py import utils
from h5py.h5 import H5Error
class TestUtils(HDF5TestCase):
def test_check_read(self):
""" Check if it's possible to read from the NumPy array """
carr = numpy.ones((10,10), order='C')
farr = numpy.ones((10,10), order='F')
oarr = numpy.ones((10,10), order='C')
oarr.strides = (0,1)
utils.check_numpy_read(carr)
self.assertRaises(TypeError, utils.check_numpy_read, farr)
self.assertRaises(TypeError, utils.check_numpy_read, oarr)
s_space = h5s.create_simple((5,5))
m_space = h5s.create_simple((10,10))
l_space = h5s.create_simple((12,12))
utils.check_numpy_read(carr, m_space.id)
utils.check_numpy_read(carr, l_space.id)
self.assertRaises(TypeError, utils.check_numpy_read, carr, s_space.id)
# This should not matter for read
carr.flags['WRITEABLE'] = False
utils.check_numpy_read(carr)
def test_check_write(self):
""" Check if it's possible to write to the NumPy array """
carr = numpy.ones((10,10), order='C')
farr = numpy.ones((10,10), order='F')
oarr = numpy.ones((10,10), order='C')
oarr.strides = (0,1)
utils.check_numpy_write(carr)
self.assertRaises(TypeError, utils.check_numpy_write, farr)
self.assertRaises(TypeError, utils.check_numpy_write, oarr)
s_space = h5s.create_simple((5,5))
m_space = h5s.create_simple((10,10))
l_space = h5s.create_simple((12,12))
utils.check_numpy_write(carr, s_space.id)
utils.check_numpy_write(carr, m_space.id)
self.assertRaises(TypeError, utils.check_numpy_write, carr, l_space.id)
# This should matter now
carr.flags['WRITEABLE'] = False
self.assertRaises(TypeError, utils.check_numpy_write, carr)
def test_emalloc(self):
utils._test_emalloc(1024)
utils._test_emalloc(0)
self.assertRaises(MemoryError, utils._test_emalloc, sys.maxint)
| bsd-3-clause | Python |
|
3fd4244dbfd33bbf2fa369d81756e82b1cf1c467 | Clear out unaligned NLCD19 GWLF-E results | WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed | src/mmw/apps/modeling/migrations/0041_clear_nlcd2019_gwlfe_results.py | src/mmw/apps/modeling/migrations/0041_clear_nlcd2019_gwlfe_results.py | # Generated by Django 3.2.13 on 2022-10-17 13:47
from django.db import migrations
def clear_nlcd2019_gwlfe_results(apps, schema_editor):
"""
Clear the results for all scenarios belonging to GWLF-E projects made after
the release of 1.33.0, which had incorrectly aligned NLCD19 2019 on
2022-01-17:
https://github.com/WikiWatershed/model-my-watershed/releases/tag/1.33.0
These results will be recalculated with the correclty aligned NLCD19 values
when these projects are accessed again.
"""
Project = apps.get_model('modeling', 'Project')
Scenario = apps.get_model('modeling', 'Scenario')
Project.objects.filter(
model_package='gwlfe',
created_at__gte='2022-01-17',
).update(
gis_data=None,
mapshed_job_uuid=None,
subbasin_mapshed_job_uuid=None,
)
Scenario.objects.filter(
project__model_package='gwlfe',
project__created_at__gte='2022-01-17',
).update(
results='[]',
modification_hash='',
)
class Migration(migrations.Migration):
dependencies = [
('modeling', '0040_clear_nlcd2019_tr55_results'),
]
operations = [
migrations.RunPython(clear_nlcd2019_gwlfe_results),
]
| apache-2.0 | Python |
|
34046e290842108212d71f6cf2445d7015bf2423 | Create text.py | fnielsen/dasem,fnielsen/dasem | dasem/text.py | dasem/text.py | """text."""
from nltk import sent_tokenize, word_tokenize
def sentence_tokenize(text):
    """Split a Danish text into sentences.

    Uses NLTK's Punkt sentence model trained on Danish.

    Parameters
    ----------
    text : str
        The text to be tokenized.

    Returns
    -------
    sentences : list of str
        Sentences as list of strings.

    Examples
    --------
    >>> text = 'Hvad!? Hvor har du f.eks. siddet?'
    >>> sentences = sentence_tokenize(text)
    >>> sentences
    ['Hvad!?', 'Hvor har du f.eks. siddet?']

    """
    return sent_tokenize(text, language='danish')
def word_tokenize(sentence):
    """Tokenize a Danish sentence into words.

    Parameters
    ----------
    sentence : str
        The sentence to be tokenized.

    Returns
    -------
    words : list of str
        Words as list of strings.

    """
    # This module-level function shadows the ``word_tokenize`` name imported
    # from nltk at the top of the module, so calling ``word_tokenize`` here
    # would recurse infinitely.  Import under an alias to reach nltk's
    # implementation.
    from nltk import word_tokenize as nltk_word_tokenize
    return nltk_word_tokenize(sentence, language='danish')
| apache-2.0 | Python |
|
477de06a99fc4998ec15442e5fae9b919be53392 | Initialize P2_scheduledComicDownloader | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/AutomateTheBoringStuffWithPython/Chapter15/PracticeProjects/P2_scheduledComicDownloader.py | books/AutomateTheBoringStuffWithPython/Chapter15/PracticeProjects/P2_scheduledComicDownloader.py | # Write a program that checks the websites of several web comics and automatically
# downloads the images if the comic was updated since the program’s last visit.
#
# Your operating system’s scheduler (Scheduled Tasks on Windows, launchd on OS X,
# and cron on Linux) can run your Python program once a day.
#
# The Python program itself can download the comic and then copy it to your desktop
# so that it is easy to find. This will free you from having to check the website
# yourself to see whether it has updated.
| mit | Python |
|
960f32fb9f1f34caf0d851a370786f39864c15b2 | add conoha/dns.py | yuuki0xff/conoha-cli | conoha/dns.py | conoha/dns.py |
from .api import API, CustomList
from . import error
__all__ = 'Domain DomainList Record RecordList'.split()
class DNSAPI(API):
def __init__(self, token, baseURIPrefix=None):
super().__init__(token, baseURIPrefix)
self._serviceType = 'dns'
def _getHeaders(self, h):
headers={
'Content-Type': 'application/json'
}
if h:
headers.update(h)
return super()._getHeaders(headers)
class Domain:
"""ドメイン"""
def __init__(self, data):
self.domainId = data['id']
self.name = data['name']
self.email = data['email']
self.serial = data['serial']
self.gslb = data.get('gslb')
self.ttl = data['ttl']
self.description = data['description']
self.created_at = data['created_at']
self.updated_at = data['updated_at']
class DomainList(DNSAPI, CustomList):
"""ドメインの一覧"""
def __init__(self, token):
super().__init__(token)
CustomList.__init__(self)
path = 'domains'
res = self._GET(path)
self.extend(Domain(i) for i in res['domains'])
def _getitem(self, key, domain):
return key in [domain.domainId, domain.name]
def _validateDomain(self, nameOrDomainid):
domain = self.getDomain(nameOrDomainid)
if not domain:
raise error.NotFound('domain', nameOrDomainid)
def toDomainid(self, nameOrDomainid):
domain = self.getDomain(nameOrDomainid)
if domain:
return domain.domainId
def toName(self, nameOrDomainid):
domain = self.getDomain(nameOrDomainid)
if domain:
return domain.name
def getDomain(self, nameOrDomainid):
if nameOrDomainid:
for domain in self:
if (domain.domainId == nameOrDomainid) or (domain.name == nameOrDomainid):
return domain
def add(self, name, email, ttl=None, description=None, gslb=None):
data = {
'name': name,
'email': email,
'ttl': ttl,
'description': description,
'gslb': gslb
}
res = self._POST('domains', {k: v for k, v in data.items() if v is not None})
domain = Domain(res)
self.append(domain)
return domain
def update(self, nameOrDomainid, email=None, ttl=None, description=None, gslb=None):
self._validateDomain(nameOrDomainid)
domain = self.getDomain(nameOrDomainid)
data = {
'email': email,
'ttl': ttl,
'description': description,
'gslb': gslb
}
path = 'domains/{}'.format(domain.domainId)
res = self._PUT(path, {k: v for k, v in data.items() if v is not None})
self.remove(domain)
domain = Domain(res)
self.append(domain)
def delete(self, nameOrDomainid):
self._validateDomain(nameOrDomainid)
domainId = self.toDomainid(nameOrDomainid)
path = 'domains/{}'.format(domainId)
self._DELETE(path, isDeserialize=False)
class Record:
"""レコード"""
def __init__(self, data):
self.recordId = data['id']
self.name = data['name']
self.domainId = data['domain_id']
self.type = data['type']
self.data = data['data']
self.ttl = data['ttl']
self.description = data['description']
self.priority = data['priority']
self.gslb_check = data.get('gslb_check')
self.gslb_region = data.get('gslb_region')
self.gslb_weight = data.get('gslb_weight')
self.created_at = data['created_at']
self.updated_at = data['updated_at']
class RecordList(DNSAPI, CustomList):
    """List of DNS records for one domain, backed by the ConoHa DNS API."""

    def __init__(self, token, domainId):
        super().__init__(token)
        CustomList.__init__(self)
        path = 'domains/{}/records'.format(domainId)
        res = self._GET(path)
        self.domainId = domainId
        self.extend(Record(i) for i in res['records'])

    def _getitem(self, key, record):
        # Records are addressable by their id only.
        return key in [record.recordId]

    def _validateRecord(self, recordId):
        """Raise NotFound if no record with *recordId* exists.

        BUG FIX: the original rebound ``recordId`` to the (None) lookup
        result before raising, so the NotFound error reported ``None``
        instead of the id that was actually missing.
        """
        if not self.getRecord(recordId):
            raise error.NotFound('record', recordId)

    def getRecord(self, recordId):
        """Return the record with *recordId*, or None."""
        for record in self:
            if record.recordId == recordId:
                return record

    def add(self, **kwargs):
        """Create a record; None-valued kwargs are omitted from the request."""
        path = 'domains/{}/records'.format(self.domainId)
        res = self._POST(path, {k: v for k, v in kwargs.items() if v is not None})
        record = Record(res)
        self.append(record)
        return record

    def update(self, recordId, **kwargs):
        """Update a record in place; the stale entry is replaced in the list."""
        self._validateRecord(recordId)
        record = self.getRecord(recordId)
        path = 'domains/{}/records/{}'.format(self.domainId, recordId)
        res = self._PUT(path, {k: v for k, v in kwargs.items() if v is not None})
        self.remove(record)
        record = Record(res)
        self.append(record)
        return record

    def delete(self, recordId):
        """Delete a record via the API and drop it from this list."""
        self._validateRecord(recordId)
        record = self.getRecord(recordId)
        path = 'domains/{}/records/{}'.format(record.domainId, record.recordId)
        self._DELETE(path, isDeserialize=False)
        self.remove(record)
| mit | Python |
|
5dad4f0e2d9732d7ff4a0feebff332f005cabf01 | Remove foreign keys from deprecated `progress-edx-platform-extensions` (#1874) | edx-solutions/edx-platform,edx-solutions/edx-platform,edx-solutions/edx-platform,edx-solutions/edx-platform | common/djangoapps/database_fixups/migrations/0002_remove_foreign_keys_from_progress_extensions.py | common/djangoapps/database_fixups/migrations/0002_remove_foreign_keys_from_progress_extensions.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
"""
The `progress-edx-platform-extensions` has been deprecated in favor of `edx-completion`.
The requirement was removed in the commit linked as (1) below. However its migration (2) had not been reverted.
That migration used `auth_user.id` as the foreign key in its models (3), but Django does not resolve this constraint
between existing tables anymore, because the model has been removed.
Therefore we need to drop the tables related to deprecated application in order to be able to remove users properly.
Because of some performance concerns, deletion is implemented in (4).
This migration drops only foreign keys from deprecated tables.
If ran twice (for any reason), it will raise a custom error for better visibility that these keys do not exist.
(1) https://github.com/edx-solutions/edx-platform/commit/59bf3efe71533de53b60bd979517e889d18a96bb
(2) https://github.com/edx-solutions/progress-edx-platform-extensions/blob/master/progress/migrations/0001_initial.py
(3) https://github.com/edx-solutions/progress-edx-platform-extensions/blob/master/progress/models.py
(4) https://github.com/edx-solutions/edx-platform/pull/1862
"""
class Migration(migrations.Migration):
dependencies = [
('database_fixups', '0001_initial'),
]
operations = [
migrations.RunSQL("""
-- Drop a procedure if it already exists - safety check.
DROP PROCEDURE IF EXISTS drop_foreign_key_from_table;
-- We are dropping constraints from 3 tables, so we create a temporary procedure to avoid code repetition.
CREATE PROCEDURE drop_foreign_key_from_table(given_table VARCHAR(64))
BEGIN
-- Find the ID of the foreign key (there is only one per table, otherwise it would fail).
SET @foreign_key = (
SELECT CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS
WHERE TABLE_NAME = given_table AND CONSTRAINT_TYPE = 'FOREIGN KEY'
);
IF @foreign_key IS NOT NULL THEN
-- Prepare query (MySQL does not allow embedding queries in a standard way here).
SET @statement = CONCAT('ALTER TABLE ', given_table, ' DROP FOREIGN KEY ', @foreign_key);
PREPARE stmt FROM @statement;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
ELSE
-- Raise custom error for having clearer logs in case of a failure.
SET @error_message = CONCAT('Cannot find foreign key in ', given_table, ' table.');
SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = @error_message;
END IF;
END;
-- Call temporary procedure on relevant tables.
CALL drop_foreign_key_from_table('progress_coursemodulecompletion');
CALL drop_foreign_key_from_table('progress_studentprogress');
CALL drop_foreign_key_from_table('progress_studentprogresshistory');
-- Clean up.
DROP PROCEDURE IF EXISTS drop_foreign_key_from_table;
""")
]
| agpl-3.0 | Python |
|
9eb35140a1790625c32773af6b8a2d76699e86c6 | Move MapEntityForm to mapentity (ref #129) | Anaethelion/django-mapentity,makinacorpus/django-mapentity,makinacorpus/django-mapentity,Anaethelion/django-mapentity,makinacorpus/django-mapentity,Anaethelion/django-mapentity | mapentity/forms.py | mapentity/forms.py | from django.utils.translation import ugettext_lazy as _
import floppyforms as forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, Div, Button
from crispy_forms.bootstrap import FormActions
class MapEntityForm(forms.ModelForm):
pk = forms.Field(required=False, widget=forms.Field.hidden_widget)
model = forms.Field(required=False, widget=forms.Field.hidden_widget)
helper = FormHelper()
helper.form_class = 'form-horizontal'
modelfields = tuple()
geomfields = tuple()
actions = FormActions(
Button('cancel', _('Cancel'), ),
Submit('save_changes', _('Save changes'), css_class="btn-primary offset1"),
css_class="form-actions span11",
)
def __init__(self, *args, **kwargs):
super(MapEntityForm, self).__init__(*args, **kwargs)
# Generic behaviour
if self.instance.pk:
self.helper.form_action = self.instance.get_update_url()
else:
self.helper.form_action = self.instance.get_add_url()
self.fields['pk'].initial = self.instance.pk
self.fields['model'].initial = self.instance._meta.module_name
# Hide label for geom :
for geomfield in self.geomfields:
self.fields[geomfield].label = False
# Get fields from subclasses
fields = ('pk','model') + self.modelfields
leftpanel = Div(
*fields,
css_class="span3"
)
rightpanel = Div(
*self.geomfields,
css_class="span8"
)
# Main form layout
self.helper.layout = Layout(
leftpanel,
rightpanel,
self.actions
)
| bsd-3-clause | Python |
|
2a5012f0b74fa025bbc909fd8bfb10aec272d148 | Create pawn-brotherhood.py | aureooms/checkio | home/pawn-brotherhood.py | home/pawn-brotherhood.py | def safe_pawns ( pawns ) :
n = 0
for file , rank in pawns :
if rank < "2" : continue
if file > "a" :
first = chr( ord(file) - 1) + str( int(rank) - 1 )
if first in pawns :
n += 1
continue
if file < "h" :
second = chr( ord(file) + 1) + str( int(rank) - 1 )
if second in pawns :
n += 1
continue
return n
if __name__ == '__main__':
#These "asserts" using only for self-checking and not necessary for auto-testing
assert safe_pawns({"b4", "d4", "f4", "c3", "e3", "g5", "d2"}) == 6
assert safe_pawns({"b4", "c4", "d4", "e4", "f4", "g4", "e5"}) == 1
| agpl-3.0 | Python |
|
ef026ce3b4bf7fc50499ce5ecb688c02bbc77544 | Add outline for orbital maneuver class | RazerM/orbital | orbital/maneuver.py | orbital/maneuver.py | class Maneuver:
def __init__(self):
pass
@classmethod
def raise_apocenter_by(cls, delta, orbit):
pass
@classmethod
def change_apocenter_to(cls, apocenter, orbit):
pass
@classmethod
def lower_apocenter_by(cls, delta, orbit):
pass
@classmethod
def raise_pericenter_by(cls, delta, orbit):
pass
@classmethod
def change_pericenter_to(cls, pericenter, orbit):
pass
@classmethod
def lower_pericenter_by(cls, delta, orbit):
pass
@classmethod
def hohmann_transfer(cls):
# how to specify new orbit?
# - new semimajor axix/radius/altitude
pass
def bielliptic_transfer(cls):
pass
| mit | Python |
|
c191959db6b1a14d527ec41f910682fd017421ee | fix for handling spaces in sys.executable and in sut_path (issue 166) | fingeronthebutton/robotframework,suvarnaraju/robotframework,kyle1986/robortframe,dkentw/robotframework,yonglehou/robotframework,ChrisHirsch/robotframework,ChrisHirsch/robotframework,stasiek/robotframework,rwarren14/robotframework,eric-stanley/robotframework,jorik041/robotframework,fingeronthebutton/robotframework,kurtdawg24/robotframework,HelioGuilherme66/robotframework,SivagnanamCiena/robotframework,JackNokia/robotframework,rwarren14/robotframework,snyderr/robotframework,un33k/robotframework,jaloren/robotframework,jorik041/robotframework,edbrannin/robotframework,jaloren/robotframework,ChrisHirsch/robotframework,wojciechtanski/robotframework,wojciechtanski/robotframework,SivagnanamCiena/robotframework,snyderr/robotframework,JackNokia/robotframework,moto-timo/robotframework,fingeronthebutton/robotframework,jorik041/robotframework,synsun/robotframework,ashishdeshpande/robotframework,moto-timo/robotframework,snyderr/robotframework,jaloren/robotframework,xiaokeng/robotframework,dkentw/robotframework,jorik041/robotframework,JackNokia/robotframework,joongh/robotframework,xiaokeng/robotframework,robotframework/robotframework,alexandrul-ci/robotframework,SivagnanamCiena/robotframework,un33k/robotframework,userzimmermann/robotframework,jaloren/robotframework,joongh/robotframework,edbrannin/robotframework,snyderr/robotframework,robotframework/robotframework,yahman72/robotframework,kurtdawg24/robotframework,Colorfulstan/robotframework,kyle1986/robortframe,ashishdeshpande/robotframework,nmrao/robotframework,snyderr/robotframework,un33k/robotframework,yonglehou/robotframework,suvarnaraju/robotframework,stasiek/robotframework,xiaokeng/robotframework,ChrisHirsch/robotframework,alexandrul-ci/robotframework,fingeronthebutton/robotframework,kyle1986/robortframe,Colorfulstan/robotframework,JackNokia/robotframework,un33k/robotframework,kurtdawg24/robotframework,edbrannin/r
obotframework,SivagnanamCiena/robotframework,kyle1986/robortframe,userzimmermann/robotframework,xiaokeng/robotframework,dkentw/robotframework,yahman72/robotframework,nmrao/robotframework,wojciechtanski/robotframework,alexandrul-ci/robotframework,eric-stanley/robotframework,kyle1986/robortframe,stasiek/robotframework,suvarnaraju/robotframework,dkentw/robotframework,JackNokia/robotframework,xiaokeng/robotframework,yahman72/robotframework,eric-stanley/robotframework,userzimmermann/robotframework,ChrisHirsch/robotframework,jaloren/robotframework,jorik041/robotframework,yahman72/robotframework,edbrannin/robotframework,ashishdeshpande/robotframework,SivagnanamCiena/robotframework,stasiek/robotframework,suvarnaraju/robotframework,kurtdawg24/robotframework,ashishdeshpande/robotframework,alexandrul-ci/robotframework,synsun/robotframework,ashishdeshpande/robotframework,yonglehou/robotframework,Colorfulstan/robotframework,stasiek/robotframework,robotframework/robotframework,yahman72/robotframework,joongh/robotframework,suvarnaraju/robotframework,joongh/robotframework,un33k/robotframework,rwarren14/robotframework,userzimmermann/robotframework,synsun/robotframework,wojciechtanski/robotframework,dkentw/robotframework,joongh/robotframework,synsun/robotframework,edbrannin/robotframework,moto-timo/robotframework,rwarren14/robotframework,fingeronthebutton/robotframework,HelioGuilherme66/robotframework,eric-stanley/robotframework,alexandrul-ci/robotframework,nmrao/robotframework,rwarren14/robotframework,userzimmermann/robotframework,synsun/robotframework,yonglehou/robotframework,moto-timo/robotframework,Colorfulstan/robotframework,wojciechtanski/robotframework,yonglehou/robotframework,moto-timo/robotframework,Colorfulstan/robotframework,HelioGuilherme66/robotframework,nmrao/robotframework,kurtdawg24/robotframework,nmrao/robotframework | doc/quickstart/testlibs/LoginLibrary.py | doc/quickstart/testlibs/LoginLibrary.py | import os
import sys
class LoginLibrary:
def __init__(self):
sut_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'..', 'sut', 'login.py')
self._command_prefix = '"%s" "%s" ' % (sys.executable, sut_path)
self._status = ''
def create_user(self, username, password):
self._run_command('create', username, password)
def change_password(self, username, old_pwd, new_pwd):
self._run_command('change-password', username, old_pwd, new_pwd)
def attempt_to_login_with_credentials(self, username, password):
self._run_command('login', username, password)
def status_should_be(self, expected_status):
if expected_status != self._status:
raise AssertionError("Expected status to be '%s' but was '%s'"
% (expected_status, self._status))
def _run_command(self, command, *args):
command = '%s %s %s' % (self._command_prefix, command, ' '.join(args))
process = os.popen(command)
self._status = process.read().strip()
process.close()
| import os
import sys
class LoginLibrary:
def __init__(self):
sut_path = os.path.join(os.path.dirname(__file__),
'..', 'sut', 'login.py')
self._command_prefix = '%s %s ' % (sys.executable, sut_path)
self._status = ''
def create_user(self, username, password):
self._run_command('create', username, password)
def change_password(self, username, old_pwd, new_pwd):
self._run_command('change-password', username, old_pwd, new_pwd)
def attempt_to_login_with_credentials(self, username, password):
self._run_command('login', username, password)
def status_should_be(self, expected_status):
if expected_status != self._status:
raise AssertionError("Expected status to be '%s' but was '%s'"
% (expected_status, self._status))
def _run_command(self, command, *args):
command = '%s %s %s' % (self._command_prefix, command, ' '.join(args))
process = os.popen(command)
self._status = process.read().strip()
process.close()
| apache-2.0 | Python |
ede05f2196dc7e96df01176f20b39772ac26e1ae | add python/logviewer.py | chenshuo/recipes,chenshuo/recipes,chenshuo/recipes,chenshuo/recipes,chenshuo/recipes,chenshuo/recipes | python/logviewer.py | python/logviewer.py | #!/usr/bin/python3
import io, os, re, sys
from http import HTTPStatus, server
FILE = None
INDEX = """<!DOCTYPE html>
<meta charset="utf-8">
<title>Log Viewer</title>
<script>
var logBox = null;
var lastOffset = 0;
function initialize() {
logBox = document.getElementById('log');
lastOffset = 0;
update();
}
function update() {
fetch('/get?offset=' + lastOffset).then(function(response) {
if (response.ok) {
return response.text();
}
}).then(function(text) {
lastOffset += text.length;
logBox.value += text; // FIXME: escape
logBox.scrollTop = logBox.scrollHeight; // Scroll to bottom
setTimeout(update, 3000);
});
}
</script>
<body onLoad="initialize();">
<textarea id="log" wrap="off" cols="120" rows="50" readonly="readonly">
</textarea>
"""
# INDEX = None # Dev mode
class HTTPRequestHandler(server.BaseHTTPRequestHandler):
    """Serves the viewer page at '/' and raw log chunks at '/get'."""

    def do_GET(self):
        if self.path == '/':
            # The single-page viewer; it polls /get via fetch().
            self.send_OK("text/html", INDEX.encode())
        elif self.path.startswith('/get?'):
            # TODO: convert query string to a dict
            # 'offset' is the byte position to start reading from (default 0).
            m = re.search('offset=(\\d+)', self.path)
            offset = int(m.group(1)) if m else 0
            # 'length' caps the number of bytes returned; -1 reads to EOF.
            m = re.search('length=(\\d+)', self.path)
            length = int(m.group(1)) if m else -1
            # FILE is the module-global log file opened in binary mode.
            FILE.seek(offset)
            body = FILE.read(length)
            self.send_OK("text/plain", body)
        else:
            self.send_error(HTTPStatus.NOT_FOUND, "File not found")

    def send_OK(self, content_type, body):
        # Send a 200 response; body must already be bytes.
        self.send_response(HTTPStatus.OK)
        self.send_header("Content-Type", content_type)
        self.send_header('Content-Length', int(len(body)))
        self.end_headers()
        self.wfile.write(body)
def main(argv):
    """Serve the log file named in argv[1] over HTTP.

    When INDEX is unset (dev mode) the page template is loaded from the
    .html file sitting next to this script.
    """
    global FILE, INDEX
    # Binary mode so byte offsets from /get map directly onto the file.
    FILE = open(argv[1], 'rb')
    if not INDEX:
        INDEX = open(os.path.splitext(argv[0])[0] + '.html').read()
    # http.server.test starts the server with our handler class.
    server.test(HandlerClass=HTTPRequestHandler)
| bsd-3-clause | Python |
|
3cbb02ebb1ba195f373c4b9238a49c30039f821e | revert changes | matanco/lottosend-sdk,matanco/lottosend-sdk | python/lottosend.py | python/lottosend.py | import json
import urllib2
class LottosendSDK:
    """Thin Python 2 (urllib2) client for the Lottosend HTTP API.

    All endpoints authenticate with a token header; configure ``token``
    and the endpoint URLs on the instance before calling any method.
    """

    #= Imports
    #= Constructor
    def __init__(self):
        # All of these are expected to be filled in by the caller.
        self.token = ''
        self.lottosend_api = ''
        self.results_api = ''
        self.auto_login_url = ''

    # Sign up a user in the Lottosend system.
    def signupViaApi(self,first_name, last_name, prefix, phone, email, address, country, passwd, a_aid):
        # NOTE(review): ``prefix`` is accepted but never sent — confirm
        # whether it should be part of the payload.
        params = dict()
        params = {
            'web_user': {
                'email': email,
                'first_name': first_name,
                'last_name': last_name,
                'phone': phone,
                'password': passwd,
                'country': country,
                'address': address,
                'aid': a_aid
            }
        }

        req = urllib2.Request(self.lottosend_api,
            headers = {
                "Authorization": 'Token token=%s' % self.token,
                "Content-Type": "application/json",
                "Accept": "*/*"
            }, data = json.dumps(params))

        return urllib2.urlopen(req).read()

    # Obtain a user token to re-sign-in (auto-login).
    def obtainToken(self,id):
        req = urllib2.Request('%s/%s/token'%(self.lottosend_api,id),
            headers = {
                "Authorization": 'Token token=%s' % self.token,
                "Content-Type": "application/json",
                "Accept": "*/*"
            })

        return urllib2.urlopen(req).read()

    # Get all users' info since the given sync timestamp.
    def getUsersInfo(self):
        req = urllib2.Request('%s/?last_synced_timestamp=1'%self.lottosend_api,
            headers = {
                "Authorization": 'Token token=%s' % self.token,
                "Content-Type": "application/json",
                "Accept": "*/*"
            })

        return urllib2.urlopen(req).read()

    # Get user transactions since the given sync timestamp.
    def getUsersTransactions(self):
        req = urllib2.Request('%s/transactions/?last_synced_timestamp=1'%self.lottosend_api,
            headers = {
                "Authorization": 'Token token=%s' % self.token,
                "Content-Type": "application/json",
                "Accept": "*/*"
            })

        return urllib2.urlopen(req).read()
|
490af74a5b52d8014a8c3e13cfa6f015a4927cf4 | add a merge migration to ensure the two lead nodes don't cause a crash during a deploy | masschallenge/django-accelerator,masschallenge/django-accelerator | accelerator/migrations/0021_merge_20181011_1153.py | accelerator/migrations/0021_merge_20181011_1153.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-10-11 15:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0020_criterion_verbose_names'),
('accelerator', '0020_remove_is_open_from_program_family'),
]
operations = [
]
| mit | Python |
|
ba25fafa6b4572f1b7c8c7a901f5f7b75753c3c6 | Add Exercise 8.7. | jcrist/pydy,oliverlee/pydy,jcrist/pydy,Shekharrajak/pydy,jcrist/pydy,Shekharrajak/pydy,oliverlee/pydy,jcrist/pydy,skidzo/pydy,Shekharrajak/pydy,skidzo/pydy,Shekharrajak/pydy,skidzo/pydy,skidzo/pydy,oliverlee/pydy,jcrist/pydy,jcrist/pydy,jcrist/pydy | Kane1985/Chapter4/Ex8.7.py | Kane1985/Chapter4/Ex8.7.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 8.7 from Kane 1985.
"""
from __future__ import division
from collections import OrderedDict
from sympy import diff, solve, simplify, symbols
from sympy import pi, sin, cos
from sympy.physics.mechanics import ReferenceFrame, Point
from sympy.physics.mechanics import dot
from sympy.physics.mechanics import dynamicsymbols
from sympy.physics.mechanics import MechanicsStrPrinter
def msprint(expr):
pr = MechanicsStrPrinter()
return pr.doprint(expr)
def subs(x, *args, **kwargs):
if not hasattr(x, 'subs'):
if hasattr(x, '__iter__'):
return map(lambda x: subs(x, *args, **kwargs), x)
return x.subs(*args, **kwargs)
def partial_velocities(system, generalized_speeds, frame,
kde_map=None, constraint_map=None, express_frame=None):
partials = {}
if express_frame is None:
express_frame = frame
for p in system:
if isinstance(p, Point):
v = p.vel(frame)
elif isinstance(p, ReferenceFrame):
v = p.ang_vel_in(frame)
if kde_map is not None:
v = v.subs(kde_map)
if constraint_map is not None:
v = v.subs(constraint_map)
v_r_p = OrderedDict((u, v.diff(u, express_frame))
for u in generalized_speeds)
partials[p] = v_r_p
return partials
def generalized_active_forces(partials, force_pairs):
# use the same frame used in calculating partial velocities
v = partials.values()[0] # dict of partial velocities of the first item
ulist = v.keys() # list of generalized speeds in case user wants it
Flist = [0] * len(ulist)
for p, f in force_pairs:
for i, u in enumerate(ulist):
if partials[p][u] and f:
Flist[i] += dot(partials[p][u], f)
return Flist, ulist
## --- Declare symbols ---
# Define the system with 6 generalized speeds as follows:
q1, q2, q3 = dynamicsymbols('q1:4')
q1d, q2d, q3d = dynamicsymbols('q1:4', level=1)
u1, u2, u3 = dynamicsymbols('u1:4')
g, m, L, t = symbols('g m L t')
Q, R, S = symbols('Q R S')
# --- ReferenceFrames ---
N = ReferenceFrame('N')
# --- Define Points and set their velocities ---
# Simplify the system to 7 points, where each point is the aggregations of
# rods that are parallel horizontally.
pO = Point('O')
pO.set_vel(N, 0)
pP1 = pO.locatenew('P1', L/2*(cos(q1)*N.x + sin(q1)*N.y))
pP2 = pP1.locatenew('P2', L/2*(cos(q1)*N.x + sin(q1)*N.y))
pP3 = pP2.locatenew('P3', L/2*(cos(q2)*N.x - sin(q2)*N.y))
pP4 = pP3.locatenew('P4', L/2*(cos(q2)*N.x - sin(q2)*N.y))
pP5 = pP4.locatenew('P5', L/2*(cos(q3)*N.x + sin(q3)*N.y))
pP6 = pP5.locatenew('P6', L/2*(cos(q3)*N.x + sin(q3)*N.y))
for p in [pP1, pP2, pP3, pP4, pP5, pP6]:
p.set_vel(N, p.pos_from(pO).diff(t, N))
## --- Define kinematic differential equations/pseudo-generalized speeds ---
kde = [u1 - L*q1d, u2 - L*q2d, u3 - L*q3d]
kde_map = solve(kde, [q1d, q2d, q3d])
## --- Define contact/distance forces ---
forces = [(pP1, 6*m*g*N.x),
(pP2, S*N.y + 5*m*g*N.x),
(pP3, 6*m*g*N.x),
(pP4, -Q*N.y + 5*m*g*N.x),
(pP5, 6*m*g*N.x),
(pP6, R*N.y + 5*m*g*N.x)]
partials = partial_velocities([pP1, pP2, pP3, pP4, pP5, pP6], [u1, u2, u3], N, kde_map)
Fr, _ = generalized_active_forces(partials, forces)
print("Generalized active forces:")
for i, f in enumerate(Fr, 1):
print("F{0} = {1}".format(i, simplify(f)))
| bsd-3-clause | Python |
|
46cef615f9d10279ea4907a542a87e4af22b37cd | Add A* pathfinding algorithm to utilities. | Beskhue/enactive-agents,Beskhue/enactive-agents,Beskhue/enactive-agents | enactiveagents/utilities/pathfinding.py | enactiveagents/utilities/pathfinding.py | """
Module containing pathfinding utilities.
"""
import model
import Queue
class Pathfinding(object):
    """Static A* pathfinding helpers over the grid world."""

    @staticmethod
    def get_neighbours(world, position):
        """
        Get all walkable neighbours (8-connected) of a given position (cell).

        :param world: The world
        :param position: The given position (cell)
        """
        neighbours = []
        for dx in [-1, 0, 1]:
            for dy in [-1, 0, 1]:
                if dx == 0 and dy == 0:
                    continue
                # Skip cells outside the world bounds.
                if (position.get_x() + dx < 0
                        or position.get_y() + dy < 0
                        or position.get_x() + dx >= world.get_width()
                        or position.get_y() + dy >= world.get_height()):
                    continue
                new_position = model.world.Position(position)
                new_position.add((dx, dy))
                # Skip cells occupied by a collidable entity.
                blocked = any(entity.collidable()
                              for entity in world.get_entities_at(new_position))
                if not blocked:
                    neighbours.append(new_position)
        return neighbours

    @staticmethod
    def heuristic(start, goal):
        """
        Calculate the heuristic cost to get from start to the goal.

        Manhattan distance.  NOTE(review): with diagonal moves costing 1,
        this can overestimate, making A* inadmissible — confirm whether
        Chebyshev distance is intended.

        :param start: The starting position
        :param goal: The goal position
        """
        return abs(start.get_x() - goal.get_x()) + abs(start.get_y() - goal.get_y())

    @staticmethod
    def reconstruct_path(backtrack, goal):
        """Walk the backtrack map from goal towards start.

        Returns the path in goal-to-start order, excluding the start cell
        (whose backtrack entry is None).
        """
        path = []
        current = goal
        while backtrack[current] != None:
            path.append(current)
            current = backtrack[current]
        return path

    @staticmethod
    def find_path(world, start, goal):
        """
        Implements the A* algorithm to find a path from the start to the goal.

        BUG FIX: the original called ``priority_queue.put(item, priority)``,
        which passes the priority as PriorityQueue's ``block`` argument —
        priorities were silently discarded and ordering fell back to
        comparing the queued items themselves.  Entries are now
        ``(priority, tie, node)`` tuples; the monotonically increasing tie
        counter keeps tuple comparison from ever reaching the node objects.

        :param world: The world
        :param start: The starting position
        :param goal: The goal position
        """
        priority_queue = Queue.PriorityQueue()
        tie = 0
        priority_queue.put((0, tie, start))

        backtrack = {start: None}
        cost_to = {start: 0}

        while not priority_queue.empty():
            current = priority_queue.get()[2]

            if current == goal:
                # The goal has been found, so stop searching
                break

            for neighbour in Pathfinding.get_neighbours(world, current):
                cost_to_neighbour = cost_to[current] + 1
                if neighbour not in cost_to or cost_to_neighbour < cost_to[neighbour]:
                    cost_to[neighbour] = cost_to_neighbour
                    backtrack[neighbour] = current
                    priority = cost_to_neighbour + Pathfinding.heuristic(neighbour, goal)
                    tie += 1
                    priority_queue.put((priority, tie, neighbour))

        return (Pathfinding.reconstruct_path(backtrack, goal), cost_to[goal])
|
0ba701bd4459273df726e33709ae0e441bd4a767 | migrate username field to 150 chars | nimbis/django-shop,divio/django-shop,nimbis/django-shop,khchine5/django-shop,jrief/django-shop,awesto/django-shop,khchine5/django-shop,awesto/django-shop,nimbis/django-shop,jrief/django-shop,divio/django-shop,jrief/django-shop,jrief/django-shop,khchine5/django-shop,khchine5/django-shop,divio/django-shop,nimbis/django-shop,awesto/django-shop | email_auth/migrations/0003_django110.py | email_auth/migrations/0003_django110.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-27 09:08
from __future__ import unicode_literals
from django import VERSION
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('email_auth', '0002_auto_20160327_1119'),
]
operations = []
if VERSION >= (1, 10):
import django.contrib.auth.validators
operations.append(migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.ASCIIUsernameValidator()], verbose_name='username'),
))
| bsd-3-clause | Python |
|
ccb3d15521c89c52119e2df35c07d379b4f23940 | Add tkinter app | HackBinghamton/club,HackBinghamton/club,HackBinghamton/club | demo/tkinter/hello-tkinter.py | demo/tkinter/hello-tkinter.py | import tkinter as tk
class App(tk.Frame):
'''
A demo application.
The App is a subclass of tk.Frame.
tk.Frame is a widget that lets you 'pack in' other widgets.
This way we can add in more than one widget to our application.
'''
def __init__(self, parent=None):
self.parent = parent
# Attach our App to a parent widget
super().__init__(parent)
# Set the parent widget size
parent.geometry('500x500')
# Place our App on the screen
self.pack()
# Add in the rest of our widgets
self.add_hello_label()
self.add_button()
self.add_slider()
self.add_counter()
self.add_entry()
def add_hello_label(self):
'''
Adds a Label to the bottom of our frame.
'''
# We set the Label text and font here
self.hello = tk.Label(self, text='Hello, World!', font=('Times New Roman', 20))
# This tells tkinter where to place the Label
self.hello.pack(side='bottom')
def add_button(self):
'''
Adds a Button to the top of our frame.
'''
# We set the button text and command here
self.change = tk.Button(self, text='Change the text', command=self.change_text)
# pack() defaults as pack(side='top')
self.change.pack()
def change_text(self):
'''
A command to change the Label's properties.
'''
# Properties of a widget are accessed like a dictionary
self.hello['text'] = 'This is changed text!'
self.hello['fg'] = 'white'
self.hello['bg'] = 'black'
def add_slider(self):
'''
Adds a slider to the top of the frame.
'''
self.slider = tk.Scale(self,
# Define the minimum and maximum slider values
from_=10,
to=30,
# The default is a vertical slider
orient=tk.HORIZONTAL,
# The command gets called every time the slider is moved
command=self.scale_text)
# Set the sliders initial value
self.slider.set(20)
self.slider.pack()
def scale_text(self, val):
'''
Changes the font size of our label.
'''
# Font size is not a property of the label, so we have
# use the config() method
self.hello.config(font=('Times New Roman', val))
def inc(self, event):
self.number['text'] = str(1+int(self.number['text']))
def dec(self, event):
self.number['text'] = str(-1+int(self.number['text']))
def add_counter(self):
'''
Creates a label whose value can be updated with arrow keys'
'''
self.number = tk.Label(self, text='0', font=(None, 20))
self.number.pack(side='bottom')
# Root handles keyboard entry here
self.parent.bind('<Up>', self.inc)
self.parent.bind('<Down>', self.dec)
def clear_entry(self, event):
'''
Empties the entry box and prints out its contents
'''
print(self.entry.get())
self.entry.delete(0, 'end')
def add_entry(self):
    """Create a labelled text-entry box that submits on Return."""
    self.entry_label = tk.Label(self, text='Input')
    self.entry_label.pack(side='left')
    self.entry = tk.Entry(self, bd=5)  # bd: border width in pixels
    self.entry.pack(side='right')
    # Pressing Enter inside the box triggers clear_entry().
    self.entry.bind('<Return>', self.clear_entry)
# An empty root widget to build our application from
root = tk.Tk()
# Create our app and attach it to the root
app = App(parent=root)
# Run the app; mainloop() blocks until the window is closed.
app.mainloop()
| mit | Python |
|
2cb7e09df0a8ec6fda707cccd1e9f8f00e15083c | Adjust the sorting of restaurants. | clementlefevre/hunger-game,clementlefevre/hunger-game,clementlefevre/hunger-game | migrations/versions/9ef49beab95_.py | migrations/versions/9ef49beab95_.py | """empty message
Revision ID: 9ef49beab95
Revises: 4b7b5a7ddc5c
Create Date: 2016-02-07 15:00:45.614000
"""
# revision identifiers, used by Alembic.
revision = '9ef49beab95'
down_revision = '4b7b5a7ddc5c'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision; intentionally empty (no schema change)."""
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
def downgrade():
    """Revert this revision; intentionally empty (no schema change)."""
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
| mit | Python |
|
ed20a93e917cfdddc5cd49cc6446b6e80fb4573d | Migrate symbtr uuid field to django type | MTG/dunya,MTG/dunya,MTG/dunya,MTG/dunya | makam/migrations/0007_auto_20150812_1615.py | makam/migrations/0007_auto_20150812_1615.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_extensions.db.fields
class Migration(migrations.Migration):
    # Converts makam_symbtr.uuid from a char-based django_extensions UUIDField
    # to Django's native models.UUIDField, casting the column in raw SQL
    # (postgres-specific `::uuid` cast) between the two AlterField steps.

    dependencies = [
        ('makam', '0006_auto_20150727_1631'),
    ]

    operations = [
        # Re-declare the char-based field first (presumably to normalize the
        # column state before the raw cast below -- TODO confirm).
        migrations.AlterField(
            model_name='symbtr',
            name='uuid',
            field=django_extensions.db.fields.UUIDField(max_length=36, editable=False, blank=True),
        ),
        # Cast the column to the native postgres uuid type in place.
        migrations.RunSQL('alter table makam_symbtr alter COLUMN uuid type uuid USING ("uuid"::uuid)'),
        # Point Django at the native, indexed UUIDField.
        migrations.AlterField(
            model_name='symbtr',
            name='uuid',
            field=models.UUIDField(db_index=True),
        ),
    ]
| agpl-3.0 | Python |
|
e142530eef5754d4314d97f0d9e144f348d3909a | add docs_create_missing_stubs | madmongo1/hunter,ErniBrown/hunter,Knitschi/hunter,ruslo/hunter,mchiasson/hunter,dvirtz/hunter,tatraian/hunter,mchiasson/hunter,isaachier/hunter,dmpriso/hunter,fwinnen/hunter,dvirtz/hunter,dan-42/hunter,fwinnen/hunter,vdsrd/hunter,xsacha/hunter,akalsi87/hunter,ingenue/hunter,dan-42/hunter,ikliashchou/hunter,ErniBrown/hunter,madmongo1/hunter,xsacha/hunter,isaachier/hunter,caseymcc/hunter,dan-42/hunter,RomanYudintsev/hunter,stohrendorf/hunter,dmpriso/hunter,Knitschi/hunter,xsacha/hunter,ruslo/hunter,shekharhimanshu/hunter,ruslo/hunter,pretyman/hunter,RomanYudintsev/hunter,RomanYudintsev/hunter,shekharhimanshu/hunter,ruslo/hunter,ikliashchou/hunter,tatraian/hunter,xsacha/hunter,ingenue/hunter,isaachier/hunter,NeroBurner/hunter,madmongo1/hunter,NeroBurner/hunter,mchiasson/hunter,NeroBurner/hunter,vdsrd/hunter,ikliashchou/hunter,tatraian/hunter,ingenue/hunter,ErniBrown/hunter,fwinnen/hunter,ikliashchou/hunter,isaachier/hunter,NeroBurner/hunter,pretyman/hunter,mchiasson/hunter,shekharhimanshu/hunter,ErniBrown/hunter,vdsrd/hunter,stohrendorf/hunter,stohrendorf/hunter,dan-42/hunter,akalsi87/hunter,pretyman/hunter,caseymcc/hunter,ingenue/hunter,caseymcc/hunter,akalsi87/hunter,dmpriso/hunter,Knitschi/hunter,pretyman/hunter,madmongo1/hunter,dvirtz/hunter | maintenance/docs_create_missing_stubs.py | maintenance/docs_create_missing_stubs.py | import os
import subprocess
# hardcoded paths
HUNTER_DIR='..'
PACKAGES_DIR=os.path.join(HUNTER_DIR, 'cmake/projects')
DOCS_PKG_DIR=os.path.join(HUNTER_DIR, 'docs', 'packages', 'pkg')
# get all wiki entries
docs_filenames = [x for x in os.listdir(DOCS_PKG_DIR) if x.endswith('.rst')]
docs_entries = [x[:-4] for x in docs_filenames]
# get all hunter package entries
pkg_entries = [x for x in os.listdir(PACKAGES_DIR) if os.path.isdir(os.path.join(PACKAGES_DIR, x))]
pkg_entries_lower = [x.lower() for x in pkg_entries]
# packages both in hunter and wiki
pkg_match = [x for x in pkg_entries if x in docs_entries]
# packages only in hunter
pkg_only_hunter = [x for x in pkg_entries if x not in pkg_match]
# output directories
packages_dir = 'packages'
tmp_dir = 'packages/tmp'
only_hunter_dir = 'packages/only_hunter'
# create if not exist
for d in [packages_dir, tmp_dir, only_hunter_dir]:
if not os.path.exists(d):
os.mkdir(d)
# header for rst files
header_format_string = """.. spelling::
{}
.. _pkg.{}:
{}
{}
"""
# create dummy entries for packages only in hunter
for entry in pkg_only_hunter:
source_md = os.path.join(WIKI_DIR, 'pkg.' + entry.lower() + '.md')
tmp_rst = os.path.join(tmp_dir, entry + '.rst')
target_rst = os.path.join(only_hunter_dir, entry + '.rst')
underscores = "=" * len(entry)
header = header_format_string.format(entry, entry, entry, underscores)
#print(header)
with open(target_rst, 'w') as f:
f.write(header)
print("pkg_match entries: ", len(pkg_match))
print("pkg_only_hunter entries: ", len(pkg_only_hunter)) | bsd-2-clause | Python |
|
52c50ca6e4c5d2ee75300617c5da118fb1136e76 | Add custom plot style contour_image. | matthewwardrop/python-mplstyles,matthewwardrop/python-mplkit,matthewwardrop/python-mplstyles,matthewwardrop/python-mplkit | mplstyles/plots.py | mplstyles/plots.py | from matplotlib import cm
import matplotlib.pyplot as plt
from mplstyles import cmap as colormap
import numpy as np
def contour_image(x, y, Z, cmap=None, vmax=None, vmin=None,
                  interpolation='nearest', contour_labelsize=9,
                  contour_opts=None, imshow_opts=None,
                  clegendlabels=None, label=False):
    """Plot ``Z`` sampled on grid ``x`` x ``y`` as an image with contour lines.

    ``Z`` is indexed as (x, y) and transposed internally for imshow/contour.
    Returns the (image, contour_set) pair of matplotlib artists.
    """
    # BUG FIX: the original used mutable default arguments ({} / []), which
    # are shared between calls; use None sentinels instead.
    contour_opts = {} if contour_opts is None else contour_opts
    imshow_opts = {} if imshow_opts is None else imshow_opts
    clegendlabels = [] if clegendlabels is None else clegendlabels

    ax = plt.gca()

    # imshow() treats `extent` as the outer pixel edges, so pad by half a
    # grid step on each side to center pixels on the sample points.
    x_delta = float((x[-1] - x[0])) / (len(x) - 1) / 2.
    y_delta = float((y[-1] - y[0])) / (len(y) - 1) / 2.
    extent = (x[0], x[-1], y[0], y[-1])
    extent_delta = (x[0] - x_delta, x[-1] + x_delta,
                    y[0] - y_delta, y[-1] + y_delta)

    ax.set_xlim(x[0], x[-1])
    ax.set_ylim(y[0], y[-1])

    if cmap is None:
        cmap = colormap.reverse(cm.Blues)

    # imshow/contour expect row=y, col=x ordering.
    Z = Z.transpose()

    cs = ax.imshow(Z, interpolation=interpolation, origin='lower',
                   aspect='auto', extent=extent_delta, cmap=cmap,
                   vmax=vmax, vmin=vmin, **imshow_opts)

    # Overlay contour lines on the image.
    X, Y = np.meshgrid(x, y)
    CS = ax.contour(X, Y, Z, extent=extent, origin='lower', **contour_opts)

    # Optionally label contour levels in place.
    if label:
        ax.clabel(CS, fontsize=contour_labelsize)

    # Optionally expose the first len(clegendlabels) contour levels in a legend.
    if len(clegendlabels) > 0:
        for i in range(len(clegendlabels)):
            CS.collections[i].set_label(clegendlabels[i])

    return cs, CS
|
a3e06625c1e16a17c65aefc6bb570d769ec9f56a | Test for bot_update.py. | eunchong/build,eunchong/build,eunchong/build,eunchong/build | tests/bot_update_test.py | tests/bot_update_test.py | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import imp
import json
import os
from subprocess import Popen, PIPE
import sys
import tempfile
import threading
import unittest
# Repo-relative paths: this test lives under build/tests/, so BUILD_DIR is
# the checkout root.
BUILD_DIR = os.path.realpath(os.path.join(
    os.path.dirname(__file__), '..'))
BOT_UPDATE_PATH = os.path.join(BUILD_DIR, 'scripts', 'slave', 'bot_update.py')
SLAVE_DIR = os.path.join(BUILD_DIR, 'slave')

# chromium_utils is not importable as a package from here; load it by path.
chromium_utils = imp.load_source(
    'chromium_utils',
    os.path.join(BUILD_DIR, 'scripts', 'common', 'chromium_utils.py'))
class BotUpdateTest(unittest.TestCase):
    """End-to-end tests for bot_update.py against a test git host.

    Python 2 code (print statements, thr.isAlive, assertItemsEqual).
    """

    # TODO(szager): Maybe replace this with a local temporary gerrit instance.
    GIT_HOST = 'https://t3st-chr0m3.googlesource.com'

    def setUp(self):
        # One scratch workdir per test, named after the test id, under the
        # slave directory where bot_update expects to run.
        # NOTE(review): lstrip('__main__.') strips a *character set*, not a
        # prefix -- works here only because test ids start with that text.
        prefix = self.id().lstrip('__main__.')
        testname = prefix.split('.')[-1]
        self.workdir = tempfile.mkdtemp(dir=SLAVE_DIR, prefix=prefix)
        self.builddir = os.path.join(self.workdir, 'build')
        os.mkdir(self.builddir)
        # Base bot_update command line; each test appends its own flags.
        self.bu_cmd = [
            sys.executable, BOT_UPDATE_PATH, '--force',
            '--output_json', os.path.join(self.builddir, 'out.json'),
            '--master', '%s_master' % testname,
            '--builder_name', '%s_builder' % testname,
            '--slave_name', '%s_slave' % testname ]

    def tearDown(self):
        chromium_utils.RemoveDirectory(self.workdir)

    @staticmethod
    def _subproc_thread_main(cmd, cwd):
        # Runs in a worker thread; stashes the Popen object and its output on
        # the thread object so _subproc() can reach them after join().
        thr = threading.current_thread()
        thr.p = Popen(cmd, stdout=PIPE, stderr=PIPE, cwd=cwd)
        (stdout, stderr) = thr.p.communicate()
        thr.stdout = stdout
        thr.stderr = stderr

    def _subproc(self, cmd, cwd, timeout=15):
        """Run cmd in cwd with a timeout; return (returncode, stdout, stderr)."""
        thr = threading.Thread(
            target=self._subproc_thread_main, args=(cmd, cwd))
        thr.daemon = True
        thr.start()
        thr.join(timeout)
        if thr.isAlive():
            # Timed out: kill the child and fail the test.
            thr.p.terminate()
            self.fail('A subprocess timed out after %d seconds' % timeout)
        return (thr.p.returncode, thr.stdout, thr.stderr)

    @staticmethod
    def _dump_subproc(cmd, cwd, status, stdout, stderr):
        # Pretty-print a failed subprocess invocation for debugging.
        sep = ('#' * 80) + '\n'
        print sep, 'Subprocess failed with status %d.\n' % status
        print cmd, '\n\n... in %s\n' % cwd
        print sep, '# stdout\n', sep, stdout, '\n'
        print sep, '# stderr\n', sep, stderr, '\n', sep

    @staticmethod
    def _get_files(d):
        """Return all file paths under d, relative to d, skipping .git dirs."""
        result = []
        for dirpath, dirnames, filenames in os.walk(d):
            for f in filenames:
                result.append(
                    os.path.join(dirpath.replace(d, '').lstrip('/'), f))
            try:
                # Pruning dirnames in-place stops os.walk from descending.
                dirnames.remove('.git')
            except ValueError:
                pass
        return result

    def test_001_simple(self):
        # Sync a simple solution at a pinned revision; verify the checked-out
        # files and the JSON summary bot_update emits.
        solution = { 'name': 'top',
                     'url': '%s/BotUpdateTest/test_001_top.git' % self.GIT_HOST,
                     'deps_file': 'DEPS' }
        gclient_spec = 'solutions=[%r]' % solution
        self.bu_cmd.extend([
            '--post-flag-day',
            '--specs', gclient_spec,
            '--revision', '91ea82d7125be47db12ccb973a2c6574eca0f342'])
        status, stdout, stderr = self._subproc(self.bu_cmd, self.builddir)
        if status != 0:
            self._dump_subproc(self.bu_cmd, self.builddir, status, stdout, stderr)
        self.assertEqual(status, 0)
        expected_files = [
            'DEPS',
            'file.txt',
            'ext/dep1/file.txt',
            'ext/dep2/file.txt',
        ]
        topdir = os.path.join(self.builddir, 'top')
        self.assertItemsEqual(expected_files, self._get_files(topdir))
        expected_json = {
            'root': 'top',
            'properties': {},
            'did_run': True,
            'patch_root': None
        }
        with open(os.path.join(self.builddir, 'out.json')) as fh:
            actual_json = json.load(fh)
        self.assertDictContainsSubset(expected_json, actual_json)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | Python |
|
e2a0a27e853e1e8c8913e9851d2a7aa0fb18b3ee | add exception test | benoitc/restkit,openprocurement/restkit | tests/exceptions_test.py | tests/exceptions_test.py | # -*- coding: utf-8 -
#
# Copyright (c) 2008 (c) Benoit Chesneau <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import unittest
class ResourceTestCase(unittest.TestCase):
    """Checks that restkit can swap its exception base class at runtime."""

    def testForceException(self):
        # After use_simple_exception(), ResourceError must no longer derive
        # from webob's WSGIHTTPException. Restore the wsgi flavour afterwards
        # so other tests see the default behaviour.
        import webob.exc
        from restkit.errors import use_simple_exception, use_wsgi_exception
        use_simple_exception()
        from restkit.errors import ResourceError
        self.assert_(issubclass(ResourceError, webob.exc.WSGIHTTPException) == False)
        use_wsgi_exception()

    def testWebobException(self):
        # Default behaviour: ResourceError is a webob WSGI exception.
        import webob.exc
        from restkit.errors import ResourceError
        self.assert_(issubclass(ResourceError, webob.exc.WSGIHTTPException) == True)
if __name__ == '__main__':
unittest.main() | mit | Python |
|
9231511307631ad92b896941607c4e5f3c7704ce | Create new script for attaching and releasing the gripper's compressor glove. | thomasweng15/cs473-baxter-project | cs473_baxter/scripts/glove.py | cs473_baxter/scripts/glove.py | #!/usr/bin/env python
import argparse
import rospy
import baxter_interface
class Glove():
def __init__(self, gripper):
self.gripper = Gripper(gripper)
# Verify robot is enabled
print "Getting robot state..."
self._rs = baxter_interface.RobotEnable()
self._init_state = self._rs.state().enabled
print "Enabling robot..."
self._rs.enable()
print "Running. Ctrl-c to quit"
def grip_glove(self):
self.gripper.calibrate()
self.gripper.open()
# set moving force
# set holding force
# prompt for glove
# grip glove
def release_glove(self):
self.gripper.open()
def clean_shutdown(self):
print "\nExiting glove routine..."
if not self._init_state and self._rs.state().enabled:
print "Disabling robot..."
self._rs.disable()
def main():
    """Parse -g/--grip and run the matching glove routine."""
    arg_fmt = argparse.RawDescriptionHelpFormatter
    parser = argparse.ArgumentParser(formatter_class=arg_fmt,
                                     description=main.__doc__)
    parser.add_argument(
        '-g', '--grip', choices=['grip', 'release'], required=True,
        help="grip or release glove"
    )
    args = parser.parse_args(rospy.myargv()[1:])

    g = Glove('right')

    # register shutdown callback
    rospy.on_shutdown(g.clean_shutdown)

    # BUG FIX: `is` compares object identity, which is unreliable for
    # strings; use equality so 'grip' actually selects grip_glove().
    if args.grip == 'grip':
        g.grip_glove()
    else:
        g.release_glove()
if __name__ == '__main__':
main()
| mit | Python |
|
bc2a707ea12716612422959b107b72c84d9dc946 | add test for dump_table_to_json() | wdiv-scrapers/dc-base-scrapers | tests/test_dump_table.py | tests/test_dump_table.py | import scraperwiki
import unittest
from dc_base_scrapers.common import dump_table_to_json
class DumpTableTests(unittest.TestCase):
    """dump_table_to_json must produce identical JSON regardless of column
    declaration order or row insertion order."""

    def test_dump_table(self):
        # Create two tables with the same columns in different order.
        scraperwiki.sqlite.execute("""CREATE TABLE foo (
            b TEXT,
            a INT,
            c TEXT
        );""")
        scraperwiki.sqlite.execute("""CREATE TABLE bar (
            c TEXT,
            b TEXT,
            a INT
        );""")

        # Insert the same content in a different row order.
        foo_records = [
            {'a': 2, 'b': 'foo', 'c': 'foo'},
            {'a': 1, 'b': 'foo', 'c': 'foo'},
            {'a': 3, 'b': 'foo', 'c': 'foo'},
        ]
        for rec in foo_records:
            scraperwiki.sqlite.save(unique_keys='a', table_name='foo', data=rec)
        scraperwiki.sqlite.commit_transactions()

        bar_records = [
            {'a': 2, 'b': 'foo', 'c': 'foo'},
            {'a': 3, 'b': 'foo', 'c': 'foo'},
            {'a': 1, 'b': 'foo', 'c': 'foo'},
        ]
        for rec in bar_records:
            scraperwiki.sqlite.save(unique_keys='a', table_name='bar', data=rec)
        scraperwiki.sqlite.commit_transactions()

        # The JSON representation (keyed on 'a') must be identical.
        foo_json = dump_table_to_json('foo', 'a')
        bar_json = dump_table_to_json('bar', 'a')
        self.assertEqual(foo_json, bar_json)
| mit | Python |
|
5207d3c91d64170d783388a064334e495b3b562c | Add a new test for the latest RegexLexer change, multiple new states including '#pop'. | nsfmc/pygments,nsfmc/pygments,Khan/pygments,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,nsfmc/pygments,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,nsfmc/pygments,Khan/pygments,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,Khan/pygments,dbrgn/pygments-mirror,nsfmc/pygments,nsfmc/pygments,dbrgn/pygments-mirror,Khan/pygments,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,Khan/pygments,nsfmc/pygments,dbrgn/pygments-mirror,Khan/pygments,Khan/pygments,dbrgn/pygments-mirror,nsfmc/pygments,nsfmc/pygments,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,nsfmc/pygments,kirbyfan64/pygments-unofficial,Khan/pygments,Khan/pygments,nsfmc/pygments,Khan/pygments,Khan/pygments,nsfmc/pygments,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,nsfmc/pygments,nsfmc/pygments,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,nsfmc/pygments,dbrgn/pygments-mirror,Khan/pygments,dbrgn/pygments-mirror,Khan/pygments,nsfmc/pygments,Khan/pygments,Khan/pygments,Khan/pygments,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,dbrgn/pygments-mirror,dbrgn/pygments-mirror | tests/test_regexlexer.py | tests/test_regexlexer.py | # -*- coding: utf-8 -*-
"""
Pygments regex lexer tests
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2007 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import unittest
from pygments.token import Text
from pygments.lexer import RegexLexer
class TestLexer(RegexLexer):
"""Test tuple state transitions including #pop."""
tokens = {
'root': [
('a', Text.Root, 'rag'),
('e', Text.Root),
],
'beer': [
('d', Text.Beer, ('#pop', '#pop')),
],
'rag': [
('b', Text.Rag, '#push'),
('c', Text.Rag, ('#pop', 'beer')),
],
}
class TupleTransTest(unittest.TestCase):
def test(self):
lx = TestLexer()
toks = list(lx.get_tokens_unprocessed('abcde'))
self.assertEquals(toks,
[(0, Text.Root, 'a'), (1, Text.Rag, 'b'), (2, Text.Rag, 'c'),
(3, Text.Beer, 'd'), (4, Text.Root, 'e')])
| bsd-2-clause | Python |
|
68afd4f71e1448017a7ed4775d7e70a26ff7c91b | add tests for new validation logic | fabianvf/scrapi,felliott/scrapi,erinspace/scrapi,jeffreyliu3230/scrapi,fabianvf/scrapi,alexgarciac/scrapi,mehanig/scrapi,CenterForOpenScience/scrapi,mehanig/scrapi,erinspace/scrapi,CenterForOpenScience/scrapi,felliott/scrapi | tests/test_validation.py | tests/test_validation.py | from __future__ import unicode_literals
import copy
import pytest
from jsonschema.exceptions import ValidationError
from scrapi.linter import NormalizedDocument
class TestValidation(object):
    """Exercises NormalizedDocument validation and the clean=True scrubbing."""

    def test_validate_with_clean(self):
        # clean=True should strip empty tags/subjects/otherProperties from
        # the shared fixture.
        expected = {
            "description": "This is a test",
            "contributors": [
                {
                    "name": "Test Testerson Jr",
                    "givenName": "Test",
                    "familyName": "Testerson",
                    "additionalName": "",
                    "ORCID": None,
                    "email": ""
                }
            ],
            'title': '',
            'subjects': ['Math'],
            'uris': {
                "canonicalUri": "http://example.com"
            },
            "providerUpdatedDateTime": "2015-02-02T00:00:00Z",
            "shareProperties": {
                "source": "test"
            }
        }
        doc = NormalizedDocument(to_be_validated, clean=True)
        assert doc.attributes == expected

    def test_validate(self):
        # Without clean, the document must round-trip unchanged.
        expected = {
            "description": "This is a test",
            "contributors": [
                {
                    "name": "Test Testerson Jr",
                    "givenName": "Test",
                    "familyName": "Testerson",
                    "additionalName": "",
                    "ORCID": None,
                    "email": ""
                }
            ],
            'title': '',
            'tags': ['', '', ''],
            'subjects': ['', 'Math'],
            'uris': {
                "canonicalUri": "http://example.com"
            },
            "providerUpdatedDateTime": "2015-02-02T00:00:00Z",
            "shareProperties": {
                "source": "test"
            },
            "otherProperties": [
                {
                    "name": "Empty2",
                    "properties": {
                        "Empty2": None
                    }
                }
            ]
        }
        doc = NormalizedDocument(to_be_validated)
        assert doc.attributes == expected

    def test_validate_fails(self):
        # A malformed datetime must fail both eager and deferred validation.
        to_be_tested = copy.deepcopy(to_be_validated)
        to_be_tested['providerUpdatedDateTime'] = 'Yesterday'
        with pytest.raises(ValidationError) as e:
            doc = NormalizedDocument(to_be_tested)

        with pytest.raises(ValidationError) as e:
            doc = NormalizedDocument(to_be_tested, validate=False)
            doc.validate()
# Shared fixture: a raw normalized document. It deliberately contains empty
# strings and empty list entries so the clean=True path has data to strip.
to_be_validated = {
    "description": "This is a test",
    "contributors": [
        {
            "name": "Test Testerson Jr",
            "givenName": "Test",
            "familyName": "Testerson",
            "additionalName": "",
            "ORCID": None,
            "email": ""
        }
    ],
    'title': '',
    'tags': ['', '', ''],
    'subjects': ['', 'Math'],
    'uris': {
        "canonicalUri": "http://example.com"
    },
    "providerUpdatedDateTime": "2015-02-02T00:00:00Z",
    "shareProperties": {
        "source": "test"
    },
    "otherProperties": [
        {
            "name": "Empty2",
            "properties": {
                "Empty2": None
            }
        }
    ]
}
| apache-2.0 | Python |
|
9445433b54fcbd7f56617fff853b761107bc94cc | Test add | goibibo/git-pylint-commit-hook,sebdah/git-pylint-commit-hook,evanunderscore/git-pylint-commit-hook | a.py | a.py | """
Comment
"""
print "apa"
| apache-2.0 | Python |
|
c1b34a71306af1c38f305981dc1d50135b2887d8 | add the missing new executor.py file | overcastcloud/trollius,overcastcloud/trollius,overcastcloud/trollius | asyncio/executor.py | asyncio/executor.py | from .log import logger
__all__ = (
'CancelledError', 'TimeoutError',
'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
)
# Argument for default thread pool executor creation.
_MAX_WORKERS = 5
try:
import concurrent.futures
import concurrent.futures._base
except ImportError:
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
class Future(object):
    """Minimal synchronous stand-in for concurrent.futures.Future.

    The callback runs eagerly in __init__; the instance afterwards only
    reports the stored outcome via result()/exception().
    """

    def __init__(self, callback, args):
        try:
            self._result = callback(*args)
            self._exception = None
        except Exception as err:
            # BUG FIX: a stray bare `raise` here re-raised the exception,
            # making the two assignments below unreachable and breaking the
            # exception()/result() contract. Store the error instead.
            self._result = None
            self._exception = err
        self.callbacks = []

    def cancelled(self):
        # A synchronous future is already done; it can never be cancelled.
        return False

    def done(self):
        return True

    def exception(self):
        return self._exception

    def result(self):
        # Mirror concurrent.futures: re-raise the stored exception, else
        # return the stored value.
        if self._exception is not None:
            raise self._exception
        else:
            return self._result

    def add_done_callback(self, callback):
        # Already done, so invoke the callback immediately.
        callback(self)
# Fallback exception hierarchy mirroring concurrent.futures' names.
class Error(Exception):
    """Base class for all future-related exceptions."""
    pass

class CancelledError(Error):
    """The Future was cancelled."""
    pass

class TimeoutError(Error):
    """The operation exceeded the given deadline."""
    pass
class SynchronousExecutor:
    """Executor stand-in that runs submitted callables immediately.

    submit() blocks until the callable finishes and wraps the outcome in a
    (synchronous) Future.
    """

    def submit(self, callback, *args):
        # Future's constructor executes the callback right away.
        return Future(callback, args)

    def shutdown(self, wait):
        # Nothing to tear down: no threads or queues exist.
        pass
def get_default_executor():
    """Fallback factory: warn loudly, then run everything inline."""
    # BUG FIX: corrected the typo "synchrounous" in the log message.
    logger.error("concurrent.futures module is missing: "
                 "use a synchronous executor as fallback!")
    return SynchronousExecutor()
else:
# concurrent.futures is available: re-export its names directly so the rest
# of the package can use them without caring which branch was taken.
FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED
FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
ALL_COMPLETED = concurrent.futures.ALL_COMPLETED

Future = concurrent.futures.Future
# NOTE(review): Error comes from the private _base module -- confirm this
# stays valid across concurrent.futures versions.
Error = concurrent.futures._base.Error
CancelledError = concurrent.futures.CancelledError
TimeoutError = concurrent.futures.TimeoutError

def get_default_executor():
    # Thread pool used when the caller does not provide an executor.
    return concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS)
| apache-2.0 | Python |
|
fad65e68b3fcfa736ba5d6e62fbe0588100dc153 | Create gdax-myTrades-pagination.py | ccxt/ccxt,ccxt/ccxt,ccxt/ccxt,ccxt/ccxt,ccxt/ccxt | examples/py/gdax-myTrades-pagination.py | examples/py/gdax-myTrades-pagination.py | # -*- coding: utf-8 -*-
import os
import sys

# Make the in-tree ccxt importable when running from the examples directory.
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')

'''
Example snippet to traverse GDAX / CoinBase Pro pagination.
Useful for reaching back more than 100 myTrades; the same works
for fetchClosedOrders.
'''

import ccxt

exchange = ccxt.gdax({
    "apiKey": "123456",
    "secret": "/abcdefghijklmnop/w==",
    "password": "987654321",
    "enableRateLimit": True
})

# use sandbox url
exchange.urls['api'] = exchange.urls['test']

# First request carries no pagination parameter (empty key/value).
param_key = ''
param_value = ''
allMyTrades: list = []

while True:
    myTrades = exchange.fetchMyTrades(symbol='BTC/USD', params={param_key: param_value})

    # GDAX paginates via the cb-after response header; keep requesting with
    # after=<cursor> until the header disappears.
    # NOTE(review): this reads requests' private header _store -- confirm it
    # survives library upgrades.
    if exchange.last_response_headers._store.get('cb-after'):
        param_key = 'after'
        param_value = exchange.last_response_headers._store['cb-after'][1]
        allMyTrades.extend(myTrades)
    else:
        allMyTrades.extend(myTrades)
        break

for trade in allMyTrades:
    print(trade)
| mit | Python |
|
6cc59b5ad1b70e0b303680d9e58c8d8158bec1e6 | Create solution.py | lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges | hackerrank/algorithms/implementation/easy/sock_merchant/py/solution.py | hackerrank/algorithms/implementation/easy/sock_merchant/py/solution.py | #!/bin/python3
import sys
import collections

# First stdin line: number of socks (read for protocol compliance only);
# second line: space-separated sock colors.
n = int(input().strip())
colors = map(int, input().strip().split(' '))

# A pair exists for every two socks of the same color.
tally = collections.Counter(colors)
print(sum(times // 2 for times in tally.values()))
| mit | Python |
|
bb37514e110892a8a896b43173fa6288ec1685d4 | Add script to count the number of times a boolean key occurs and also optionally emit a new YAML file containing only results where the key matches True, False or None. | symbooglix/boogie-runner,symbooglix/boogie-runner | analysis/count_boolean_key.py | analysis/count_boolean_key.py | #!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
import argparse
import os
import logging
import sys
import yaml
try:
# Try to use libyaml which is faster
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
# fall back on python implementation
from yaml import Loader, Dumper
def main(args):
    """Tally a boolean top-level key across YAML results and optionally
    write per-value (True/False/None) YAML files.

    Returns 0 on success, 1 on any input error.
    """
    logging.basicConfig(level=logging.DEBUG)
    parser = argparse.ArgumentParser()
    parser.add_argument('bool_key', help='top level key of boolean type')
    parser.add_argument('result_yml', type=argparse.FileType('r'), help='Input YAML file')
    parser.add_argument('--write-true', dest='write_true', default=None, help='Write results that have bool_key set to true as a YAML file')
    parser.add_argument('--write-false', dest='write_false', default=None, help='Write results that have bool_key set to false as a YAML file')
    parser.add_argument('--write-none', dest='write_none', default=None, help='Write results that have bool_key set to null (None) as a YAML file')
    pargs = parser.parse_args(args)

    logging.info('Loading YAML file')
    results = yaml.load(pargs.result_yml, Loader=Loader)
    logging.info('Loading complete')

    assert isinstance(results, list)
    trueList = [ ]
    falseList = [ ]
    noneList = [ ]
    for r in results:
        if not pargs.bool_key in r:
            logging.error('Key {} not in result'.format(pargs.bool_key))
            return 1

        value = r[pargs.bool_key]
        # Only True/False/None are accepted for the chosen key.
        if (not isinstance(value, bool)) and value != None:
            # BUG FIX: the original logged the literal '{}' because it never
            # called .format() on this message.
            logging.error('Key {} does not map to boolean or None'.format(
                pargs.bool_key))
            return 1

        if value == True:
            trueList.append(r)
        elif value == False:
            falseList.append(r)
        elif value == None:
            noneList.append(r)
        else:
            logging.error('unreachable!')
            return 1

    # print results
    print("Total {} keys: {}".format(pargs.bool_key, len(trueList) + len(falseList) + len(noneList)))
    print("# of True: {}".format(len(trueList)))
    print("# of not True {} ({} false, {} None)".format( len(falseList) + len(noneList), len(falseList) , len(noneList)))

    # Optionally emit a filtered YAML file for each truth value.
    writeFilteredResults(pargs.write_true, trueList, pargs.bool_key)
    writeFilteredResults(pargs.write_false, falseList, pargs.bool_key)
    writeFilteredResults(pargs.write_none, noneList, pargs.bool_key)
    return 0
def writeFilteredResults(fileName, listToUse, key):
    """Dump listToUse to fileName as YAML.

    No-op when fileName is None or the list is empty; exits the process
    rather than overwrite an existing file.
    """
    if fileName != None:
        if os.path.exists(fileName):
            logging.error('Refusing to overwrite {}'.format(fileName))
            sys.exit(1)

        if len(listToUse) == 0:
            logging.info('Result list is empty not writing')
            return

        with open(fileName, 'w') as f:
            # All entries share the same key value, so report the first.
            logging.info('Writing results with "{}" key set to "{}" to file {}'.format(key, str(listToUse[0][key]), fileName))
            yamlString = yaml.dump(listToUse, Dumper=Dumper, default_flow_style=False)
            f.write(yamlString)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | Python |
|
bf01ea13c046d711939c1bb0aaedf9fbbc7c638d | Add initial systemd module | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/modules/systemd.py | salt/modules/systemd.py | '''
Provide the service module for systemd
'''
def __virtual__():
    '''
    Only work on systems which default to systemd
    '''
    # NOTE(review): the osrelease grain is typically a string; comparing it
    # with the integer 15 may not behave as intended -- confirm.
    if __grains__['os'] == 'Fedora' and __grains__['osrelease'] > 15:
        return 'service'
    return False
def start(name):
    '''
    Start the specified service with systemd

    CLI Example::

        salt '*' service.start <service name>
    '''
    # systemctl exits 0 on success; map that to True.
    retcode = __salt__['cmd.retcode']('systemctl start {0}.service'.format(name))
    return not retcode
def stop(name):
    '''
    Stop the specified service with systemd

    CLI Example::

        salt '*' service.stop <service name>
    '''
    # systemctl exits 0 on success; negate the return code for a boolean.
    cmd = 'systemctl stop {0}.service'.format(name)
    return not __salt__['cmd.retcode'](cmd)
def restart(name):
    '''
    Restart the specified service with systemd

    CLI Example::

        salt '*' service.restart <service name>
    '''
    # systemctl exits 0 on success; negate the return code for a boolean.
    cmd = 'systemctl restart {0}.service'.format(name)
    return not __salt__['cmd.retcode'](cmd)
def status(name):
    '''
    Return the status for a service via systemd: the PID of the main
    process if the service is running, or an empty string if it is not.

    CLI Example::

        salt '*' service.status <service name>
    '''
    # BUG FIX: the original ran `systemctl restart` (restarting the service
    # as a side effect of a status query) and left the awk braces unescaped,
    # so str.format raised KeyError on '{print $3}'. Query `status` and
    # escape the literal braces instead.
    cmd = ("systemctl status {0}.service | grep 'Main PID'"
           " | awk '{{print $3}}'").format(name)
    return __salt__['cmd.run'](cmd).strip()
| apache-2.0 | Python |
|
b1738d70e3a90e7bf27c9eeccb25b09403b74f1a | Add transport factory | devicehive/devicehive-python | devicehive/transport.py | devicehive/transport.py | def init(name, data_format, data_format_options, handler, handler_options):
transport_class_name = '%sTransport' % name.title()
transport_module = __import__('devicehive.transports.%s_transport' % name,
fromlist=[transport_class_name])
return getattr(transport_module, transport_class_name)(data_format,
data_format_options,
handler,
handler_options)
| apache-2.0 | Python |
|
9ba1dd92919fb37862e6e94bf55cc25e7be3b009 | add co.py | iimianmian/pyscripts | co.py | co.py | #!/bin/env python3
import functools
def coroutine(f):
    """Decorator that primes a generator-based coroutine.

    Calls next() once on the freshly created generator so callers can
    send() values immediately.
    """
    @functools.wraps(f)
    def primed(*args, **kwargs):
        gen = f(*args, **kwargs)
        next(gen)  # advance to the first yield
        return gen
    return primed
@coroutine
def simple_coroutine():
    """Demo coroutine: prints each received item and announces how it exits.

    GeneratorExit (close) prints 'Normal exit'; any other exception is
    logged and re-raised; 'Any exit' always prints last.
    """
    print('Setting up the coroutine')
    try:
        while True:
            item = yield
            print('Got item: %r' % item)
    except GeneratorExit:
        print('Normal exit')
    except Exception as e:
        print('Exception exit: %r' % e)
        raise
    finally:
        print('Any exit')
print('Creating simple coroutine')
active_coroutine = simple_coroutine()
print()

print('Sending spam')
active_coroutine.send('spam')
print()

print('Close the coroutine')
active_coroutine.close()
print()

print('Creating simple coroutine')
active_coroutine = simple_coroutine()
print()

print('Sending eggs')
active_coroutine.send('eggs')
print()

print('Throwing runtime error')
# NOTE: the coroutine re-raises after logging, so this throw propagates and
# ends the script with a traceback; the final print() is never reached.
active_coroutine.throw(RuntimeError, 'Oops...')
print()
| apache-2.0 | Python |
|
02844d3a2ed329a02afaaf8dc1ad07407768a68b | Create app.py | evanscottgray/slackwow | app.py | app.py | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
from flask import Flask
from flask import request
import requests
app = Flask(__name__)
def get_allergies():
    """Fetch today's allergy report and format it as one line of text."""
    url = 'http://saallergy.info/today'
    headers = {'accept': 'application/json'}
    response = requests.get(url, headers=headers)
    data = response.json()

    # All entries share one date; it stamps the whole report.
    report_date = data['results'][0]['date']
    text = 'Allergies for %s: ' % report_date
    for entry in data['results']:
        text += '%s - %s (%s) | ' % (entry['allergen'], entry['level'],
                                     entry['count'])
    # Trim the trailing " | " separator (leaves one trailing space, matching
    # the original output format).
    return text.rstrip(' ').rstrip('|')
@app.route("/allergies")
def allergies():
allergies_str = get_allergies()
return allergies_str
if __name__ == "__main__":
app.run(host='0.0.0.0')
| mit | Python |
|
43d73b7bdc8b38b3e2e583a0321936ab80c0f4e0 | Add bot.py | porglezomp/PyDankReddit,powderblock/PyDankReddit,powderblock/DealWithItReddit | bot.py | bot.py | import praw
r = praw.Reddit('/u/powderblock Glasses Bot')
for post in r.get_subreddit('all').get_new(limit=5):
print(str(post.url))
| mit | Python |
|
42389c93d11de00b50b08fcd1eca74fbe3941365 | Create banner-generator.py | coonsmatthew/name-generator | banner-generator.py | banner-generator.py | #!/usr/bin/python
#####################################################
# grabs a banner image from flaming text
# and saves it to the project directory as banner.png
#####################################################
import urllib
import random

# Pick two random words from the local word list (Python 2: urllib.urlretrieve).
word_file = "words.txt"
WORDS = open(word_file).read().splitlines()

# BUG FIX: the original stored the phrase in `word1` but interpolated an
# undefined name `mytext` into the URL (NameError). Use one name for both.
# '+' keeps the words separate when URL-encoded.
mytext = random.choice(WORDS) + '+' + random.choice(WORDS)

myurl = "http://www.flamingtext.com/net-fu/proxy_form.cgi?imageoutput=true&script=dance-logo&text=" + mytext
urllib.urlretrieve(myurl, "banner.png")
| mit | Python |
|
45b789010409e4e2e2afc88cb776c8b70e7768ec | Add unit test for DakotaBase | csdms/dakota,csdms/dakota | dakota/tests/test_dakota_base.py | dakota/tests/test_dakota_base.py | #!/usr/bin/env python
#
# Tests for dakota.dakota_base module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper ([email protected])
import os
import filecmp
from nose.tools import *
from dakota.dakota_base import DakotaBase
# Fixtures -------------------------------------------------------------
def setup_module():
    """Called before any tests are performed."""
    banner = '\n*** DakotaBase tests'
    print(banner)
def teardown_module():
    """Called after all tests have completed; nothing to clean up."""
    return None
# Tests ----------------------------------------------------------------
@raises(TypeError)
def test_instantiate():
    """Test whether DakotaBase fails to instantiate."""
    # DakotaBase is abstract, so direct construction must raise TypeError.
    d = DakotaBase()
| mit | Python |
|
debca45d27e414d09c4814bec14d49b22e166274 | Add tool to process familyname and style data. | dougfelt/nototools,anthrotype/nototools,googlefonts/nototools,googlei18n/nototools,googlei18n/nototools,anthrotype/nototools,dougfelt/nototools,dougfelt/nototools,moyogo/nototools,googlei18n/nototools,googlefonts/nototools,googlefonts/nototools,moyogo/nototools,googlefonts/nototools,moyogo/nototools,googlefonts/nototools,anthrotype/nototools | nototools/check_familyname_and_styles.py | nototools/check_familyname_and_styles.py | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Process a family description file.
You can check the file, and generate a list of font file names from it.
This list can be passed to noto_names to generate the name data.
The file is a list of Noto family names (see noto_fonts.py) interspersed with
definitions of what WWS style combinations apply to that file. See
_get_stylenames() for the format. Each style definition applies to each
following family until the next style definition."""
import argparse
import re
from nototools import noto_fonts
_style_re = re.compile(r'--\s+(.*)\s+--')
_extended_style_re = re.compile(r'^([TRBH]+)(?:/([CR]+)(?:/([RI]+))?)?$')
# Below we use the longest names we intend, so that the noto_names code can
# identify which families need extra short abbreviations. The style of
# abbreviation is based on the longest names in the family.
_WEIGHT_NAMES = {
'T': 'Thin',
'R': 'Regular',
'B': 'Bold',
'H': 'ExtraBold' # Nee 'Heavy'. Not 'Black' because 'ExtraBold' is longer.
}
_WIDTH_NAMES = {
'C': 'SemiCondensed', # We use this since it is longer. We don't expect to
# use ExtraCondensed.
'R': ''
}
_ITALIC_NAMES = {
'I': 'Italic',
'R': '',
}
def _get_stylenames(styles):
"""Returns the list of style names for the encoded styles. These are the
(master-ish) style names encoded as weights / widths/ italic, where weights
is any of 'T', 'R', 'B', or 'H', widths any of 'C' or 'R', and italic 'I'.
If there's not an italic then the italic is omitted, if there's only
regular width and no italic then widths are omitted."""
m = _extended_style_re.match(styles)
assert m
weights = m.group(1)
widths = m.group(2) or 'R'
slopes = m.group(3) or 'R'
# print '%s: %s, %s, %s' % (styles, weights, widths, slopes)
names = []
for wd in widths:
width_name = _WIDTH_NAMES[wd]
for wt in weights:
weight_name = _WEIGHT_NAMES[wt]
for it in slopes:
italic_name = _ITALIC_NAMES[it]
final_weight_name = weight_name
if wt == 'R' and (width_name or italic_name):
final_weight_name = ''
names.append(width_name + final_weight_name + italic_name)
return names
def check_familyname(name, styles):
notofont = noto_fonts.get_noto_font('unhinted/' + name + '-Regular.ttf')
if not notofont:
print 'Error: could not parse', name
return False
print name, noto_fonts.noto_font_to_wws_family_id(notofont), styles
return True
def generate_family_filenames(name, styles):
  """Return the ttf filenames for family *name*, one per style.

  *styles* is an encoded style descriptor; see _get_stylenames.
  """
  return [name + '-' + stylename + '.ttf'
          for stylename in _get_stylenames(styles)]
def _for_all_familynames(namefile, fn):
  """Invoke fn(name, styles) for every family listed in namefile.

  '#' starts a comment running to end of line and blank lines are
  skipped.  A '-- styles --' line changes the active style descriptor
  for all families that follow it; the initial descriptor is 'RB'.
  """
  current_styles = 'RB'
  with open(namefile, 'r') as infile:
    for line in infile:
      comment_start = line.find('#')
      if comment_start >= 0:
        line = line[:comment_start]
      line = line.strip()
      if not line:
        continue
      style_match = _style_re.match(line)
      if style_match:
        current_styles = style_match.group(1)
        continue
      # A leading '-' here would be a malformed style line.
      assert line[0] != '-'
      fn(line, current_styles)
def check_familynames(namefile):
  """Return True iff every family entry in namefile parses cleanly."""
  # Use a one-element list so the nested callback can update the flag.
  all_passed = [True]
  def visit(name, styles):
    all_passed[0] &= check_familyname(name, styles)
  _for_all_familynames(namefile, visit)
  return all_passed[0]
def generate_filenames(namefile, outfile):
namelist = []
def fn(name, styles):
namelist.extend(generate_family_filenames(name, styles))
_for_all_familynames(namefile, fn)
allnames = '\n'.join(namelist)
if outfile:
with open(outfile, 'w') as f:
f.write(allnames)
f.write('\n')
else:
print allnames
def main():
  """Command-line driver: check the family/style data and/or write filenames."""
  DEFAULT_NAMEDATA = 'familyname_and_styles.txt'
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-f', '--familynamedata', help='file containing family name/style data'
      ' (default %s)' % DEFAULT_NAMEDATA, metavar='file',
      default=DEFAULT_NAMEDATA)
  parser.add_argument(
      '-c', '--check', help='check family name/style data', action='store_true')
  parser.add_argument(
      '-w', '--write', help='write filenames, default stdout', nargs='?',
      const='stdout', metavar='file')
  args = parser.parse_args()
  if args.check:
    passed = check_familynames(args.familynamedata)
    if not passed:
      print 'Check failed, some files had errors.'
      return
    print 'Check succeeded.'
  if args.write:
    # '-w' with no value gets the const 'stdout', meaning print to stdout.
    outfile = None if args.write == 'stdout' else args.write
    if not outfile and args.check:
      # Blank line separating the check report from the filename listing.
      print
    generate_filenames(args.familynamedata, outfile)
    if outfile:
      print 'Wrote', outfile
# Script entry point.
if __name__ == '__main__':
  main()
| apache-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.