commit (stringlengths 40..40) | subject (stringlengths 4..1.73k) | repos (stringlengths 5..127k) | old_file (stringlengths 2..751) | new_file (stringlengths 2..751) | new_contents (stringlengths 1..8.98k) | old_contents (stringlengths 0..6.59k) | license (stringclasses: 13 values) | lang (stringclasses: 23 values)
---|---|---|---|---|---|---|---|---|
cf7e9dfec0c0cdab913f98ff325210b552610219 | Add new runner, search! | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/runners/search.py | salt/runners/search.py | '''
Runner frontend to search system
'''
# Import salt libs
import salt.search
import salt.output
def query(term):
    '''
    Query the search system
    '''
    search = salt.search.Search(__opts__)
    result = search.query(term)
    salt.output.display_output(result, 'pprint', __opts__)
    return result
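# A hedged usage sketch (not part of the original commit; the master config
# path is an assumption). From the CLI this runner is normally invoked as
# `salt-run search.query <term>`; programmatically:
#     import salt.config
#     import salt.runner
#     opts = salt.config.master_config('/etc/salt/master')
#     salt.runner.RunnerClient(opts).cmd('search.query', ['needle'])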
| apache-2.0 | Python |
|
da704e95b010330efd350e7ed85e51f252b8a453 | add missing migration | aldryn/aldryn-redirects,aldryn/aldryn-redirects | aldryn_redirects/migrations/0002_on_delete_and_verbose_names.py | aldryn_redirects/migrations/0002_on_delete_and_verbose_names.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-22 08:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    dependencies = [
        ('aldryn_redirects', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='redirecttranslation',
            options={'default_permissions': (), 'managed': True, 'verbose_name': 'redirect Translation'},
        ),
        migrations.AlterField(
            model_name='redirect',
            name='site',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aldryn_redirects_redirect_set', to='sites.Site'),
        ),
        migrations.AlterField(
            model_name='redirecttranslation',
            name='language_code',
            field=models.CharField(db_index=True, max_length=15, verbose_name='Language'),
        ),
    ]
| bsd-3-clause | Python |
|
17ae9e25663d029af11236584b4c759c895ae830 | Improve and consolidate condition scripts of Lithium to support timeouts and regex via optparse. r=Jesse | nth10sd/funfuzz,nth10sd/funfuzz,MozillaSecurity/funfuzz,MozillaSecurity/funfuzz,nth10sd/funfuzz,MozillaSecurity/funfuzz | util/fileIngredients.py | util/fileIngredients.py | #!/usr/bin/env python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import with_statement
import re
def fileContains(f, s, isRegex):
    if isRegex:
        return fileContainsRegex(f, re.compile(s, re.MULTILINE))
    else:
        return fileContainsStr(f, s), s
def fileContainsStr(f, s):
    found = False
    with open(f, 'rb') as g:
        for line in g:
            if line.find(s) != -1:
                print line.rstrip()
                found = True
    return found
def fileContainsRegex(f, regex):
    # e.g. ~/fuzzing/lithium/lithium.py crashesat --timeout=30
    #   --regex '^#0\s*0x.* in\s*.*(?:\n|\r\n?)#1\s*' ./js --ion -n 735957.js
    # Note that putting "^" and "$" together is unlikely to work.
    matchedStr = ''
    found = False
    with open(f, 'rb') as g:
        foundRegex = regex.search(g.read())
        if foundRegex:
            matchedStr = foundRegex.group()
            print matchedStr
            found = True
    return found, matchedStr
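if __name__ == '__main__':
    # A hedged usage sketch, not part of the original commit; 'crash.log'
    # is a hypothetical file. Both branches of fileContains return a
    # (found, matchedStr) pair; the regex mirrors the lithium example above.
    found, matched = fileContains('crash.log', 'Assertion failure', False)
    print found, matched
    found, matched = fileContains('crash.log', r'^#0\s*0x', True)
    print found, matched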
| mpl-2.0 | Python |
|
03baa59cea76ab85f661bfa3e8d910fd6a7ae82a | Remove leading slash in redirections | andredias/nikola,x1101/nikola,xuhdev/nikola,xuhdev/nikola,gwax/nikola,x1101/nikola,wcmckee/nikola,xuhdev/nikola,xuhdev/nikola,okin/nikola,getnikola/nikola,knowsuchagency/nikola,knowsuchagency/nikola,okin/nikola,andredias/nikola,okin/nikola,knowsuchagency/nikola,gwax/nikola,getnikola/nikola,getnikola/nikola,wcmckee/nikola,getnikola/nikola,wcmckee/nikola,gwax/nikola,andredias/nikola,x1101/nikola,okin/nikola | nikola/plugins/task/redirect.py | nikola/plugins/task/redirect.py | # -*- coding: utf-8 -*-
# Copyright © 2012-2016 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Generate redirections."""
from __future__ import unicode_literals
import os
from nikola.plugin_categories import Task
from nikola import utils
class Redirect(Task):
"""Generate redirections."""
name = "redirect"
def gen_tasks(self):
"""Generate redirections tasks."""
kw = {
'redirections': self.site.config['REDIRECTIONS'],
'output_folder': self.site.config['OUTPUT_FOLDER'],
'filters': self.site.config['FILTERS'],
}
yield self.group_task()
if kw['redirections']:
for src, dst in kw["redirections"]:
src_path = os.path.join(kw["output_folder"], src.lstrip('/'))
yield utils.apply_filters({
'basename': self.name,
'name': src_path,
'targets': [src_path],
'actions': [(utils.create_redirect, (src_path, dst))],
'clean': True,
'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.redirect')],
}, kw["filters"])
| # -*- coding: utf-8 -*-
# Copyright © 2012-2016 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Generate redirections."""
from __future__ import unicode_literals
import os
from nikola.plugin_categories import Task
from nikola import utils
class Redirect(Task):
"""Generate redirections."""
name = "redirect"
def gen_tasks(self):
"""Generate redirections tasks."""
kw = {
'redirections': self.site.config['REDIRECTIONS'],
'output_folder': self.site.config['OUTPUT_FOLDER'],
'filters': self.site.config['FILTERS'],
}
yield self.group_task()
if kw['redirections']:
for src, dst in kw["redirections"]:
src_path = os.path.join(kw["output_folder"], src)
yield utils.apply_filters({
'basename': self.name,
'name': src_path,
'targets': [src_path],
'actions': [(utils.create_redirect, (src_path, dst))],
'clean': True,
'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.redirect')],
}, kw["filters"])
| mit | Python |
21ef2114975a315815d960fd1f28c5e4036fb935 | Update browsermark to use results.AddValue(..) | crosswalk-project/chromium-crosswalk-efl,chuan9/chromium-crosswalk,bright-sparks/chromium-spacewalk,krieger-od/nwjs_chromium.src,M4sse/chromium.src,littlstar/chromium.src,markYoungH/chromium.src,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,ondra-novak/chromium.src,Jonekee/chromium.src,M4sse/chromium.src,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,crosswalk-project/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,ltilve/chromium,fujunwei/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,Jonekee/chromium.src,Chilledheart/chromium,fujunwei/chromium-crosswalk,M4sse/chromium.src,dushu1203/chromium.src,littlstar/chromium.src,dednal/chromium.src,Fireblend/chromium-crosswalk,dednal/chromium.src,Just-D/chromium-1,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,bright-sparks/chromium-spacewalk,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,ltilve/chromium,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,Chilledheart/chromium,hgl888/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,littlstar/chromium.src,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,littlstar/chromium.src,fujunwei/chromium-crosswalk,M4sse/chromium.src,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,ltilve/chromium,hgl888/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,markYoungH/chromium.src,ltilve/chromium,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,M4sse/chromium.src,dednal/chromium.src,ltilve/chromium,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,chuan9/chromium-crosswalk,bright-sparks/chromium-spacewalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,ltilve/chromium,jaruba/chromium.src,littlstar/chromium.src,jaruba/chromium.src,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,chuan9/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,Just-D/chromium-1,Jonekee/chromium.src,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,ondra-novak/chromium.src,dushu1203/chromium.src,Just-D/chromium-1,axinging/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,dednal/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,M4sse/chromium.src,dednal/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,Just-D/chromium-1,krieger-od/nwjs_chromium.src,ltilve/chromium,ltilve/chromium,ondra-novak/chromium.src,markYoungH/chromium.src,Chilledheart/chromium,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,M4sse/chromium.src,dednal/chromium.src,ltilve/chromium,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,chuan9/chromium-crosswalk,bright-sparks/chromium-spacewalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,ltilve/chromium,jaruba/chromium.src,littlstar/chromium.src,jaruba/chromium.src,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,chuan9/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,Just-D/chromium-1,Jonekee/chromium.src,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,ondra-novak/chromium.src,dushu1203/chromium.src,Just-D/chromium-1,axinging/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,dednal/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,M4sse/chromium.src,dednal/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk,markYoungH/chromium.src,M4sse/chromium.src,dushu1203/chromium.src,Jonekee/chromium.src,Chilledheart/chromium,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,PeterWangIntel/chromium-crosswalk,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,ltilve/chromium,jaruba/chromium.src,ltilve/chromium,markYoungH/chromium.src,M4sse/chromium.src | tools/perf/benchmarks/browsermark.py | tools/perf/benchmarks/browsermark.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Browsermark CSS, DOM, WebGL, JS, resize and page load benchmarks.
Browsermark benchmark suite have five test groups:
a) CSS group: measures your browsers 2D and 3D performance, and finally executes
CSS Crunch test
b) DOM group: measures variety of areas, like how well your browser traverse in
Document Object Model Tree or how fast your browser can create dynamic content
c) General group: measures areas like resize and page load times
d) Graphics group: tests browsers Graphics Processing Unit power by measuring
WebGL and Canvas performance
e) Javascript group: executes number crunching by doing selected Array and
String operations
Additionally Browsermark will test your browsers conformance, but conformance
tests are not included in this suite.
"""
import os
from telemetry import benchmark
from telemetry.page import page_measurement
from telemetry.page import page_set
from telemetry.value import scalar
class _BrowsermarkMeasurement(page_measurement.PageMeasurement):
    def MeasurePage(self, _, tab, results):
        # Select nearest server(North America=1) and start test.
        js_start_test = """
            for (var i=0; i < $('#continent a').length; i++) {
                if (($('#continent a')[i]).getAttribute('data-id') == '1') {
                    $('#continent a')[i].click();
                    $('.start_test.enabled').click();
                }
            }
            """
        tab.ExecuteJavaScript(js_start_test)
        tab.WaitForJavaScriptExpression(
            'window.location.pathname.indexOf("results") != -1', 600)
        result = int(tab.EvaluateJavaScript(
            'document.getElementsByClassName("score")[0].innerHTML'))
        results.AddValue(
            scalar.ScalarValue(results.current_page, 'Score', 'score', result))
@benchmark.Disabled
class Browsermark(benchmark.Benchmark):
    """Browsermark suite tests CSS, DOM, resize, page load, WebGL and JS."""
    test = _BrowsermarkMeasurement
    def CreatePageSet(self, options):
        ps = page_set.PageSet(
            file_path=os.path.abspath(__file__),
            archive_data_file='../page_sets/data/browsermark.json',
            make_javascript_deterministic=False)
        ps.AddPageWithDefaultRunNavigate('http://browsermark.rightware.com/tests/')
        return ps
| # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Browsermark CSS, DOM, WebGL, JS, resize and page load benchmarks.
Browsermark benchmark suite have five test groups:
a) CSS group: measures your browsers 2D and 3D performance, and finally executes
CSS Crunch test
b) DOM group: measures variety of areas, like how well your browser traverse in
Document Object Model Tree or how fast your browser can create dynamic content
c) General group: measures areas like resize and page load times
d) Graphics group: tests browsers Graphics Processing Unit power by measuring
WebGL and Canvas performance
e) Javascript group: executes number crunching by doing selected Array and
String operations
Additionally Browsermark will test your browsers conformance, but conformance
tests are not included in this suite.
"""
import os
from telemetry import benchmark
from telemetry.page import page_measurement
from telemetry.page import page_set
class _BrowsermarkMeasurement(page_measurement.PageMeasurement):
    def MeasurePage(self, _, tab, results):
        # Select nearest server(North America=1) and start test.
        js_start_test = """
            for (var i=0; i < $('#continent a').length; i++) {
                if (($('#continent a')[i]).getAttribute('data-id') == '1') {
                    $('#continent a')[i].click();
                    $('.start_test.enabled').click();
                }
            }
            """
        tab.ExecuteJavaScript(js_start_test)
        tab.WaitForJavaScriptExpression(
            'window.location.pathname.indexOf("results") != -1', 600)
        result = int(tab.EvaluateJavaScript(
            'document.getElementsByClassName("score")[0].innerHTML'))
        results.Add('Score', 'score', result)
@benchmark.Disabled
class Browsermark(benchmark.Benchmark):
    """Browsermark suite tests CSS, DOM, resize, page load, WebGL and JS."""
    test = _BrowsermarkMeasurement
    def CreatePageSet(self, options):
        ps = page_set.PageSet(
            file_path=os.path.abspath(__file__),
            archive_data_file='../page_sets/data/browsermark.json',
            make_javascript_deterministic=False)
        ps.AddPageWithDefaultRunNavigate('http://browsermark.rightware.com/tests/')
        return ps
| bsd-3-clause | Python |
b0a6192649dd47548e007410b9f1a60ec23466de | Add files via upload | sainzad/stackOverflowCodeIdentifier | XMLAnalyze2.py | XMLAnalyze2.py | # Author: Andrew Sainz
#
# Purpose: XMLParser is designed to iterate through a collection of Post data collected from Stack Overflow
# forums. Data collected to analyze the code-tagged information to find the language of the code
# being utilized.
#
# How to use: To run from command line input "python XMLParser.py [XML file name].xml"
import xml.etree.ElementTree as ET
import sys
import re
from nltk.util import ngrams
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.classify import PositiveNaiveBayesClassifier
def parseBodyForTagCode(body):
    try:
        # Code is a string that contains all code tag data within the body
        # ex. code = ['<code>EXCEPT</code>, <code>LEFT JOIN</code>']
        code = [body[m.start():m.end()] for m in re.finditer('<code>(.+?)</code>', body)]
        # print(code)
    except AttributeError:
        code = None
    return code
def features(sentence):
    words = sentence.lower().split()
    return dict(('contains(%s)' % w, True) for w in words)
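# Illustrative note (not part of the upload): features() builds the
# bag-of-words dict consumed by the classifier below, e.g.
#     features('public static void')
#     == {'contains(public)': True, 'contains(static)': True,
#         'contains(void)': True}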
# Known list tag fields
knownJavaTags = []
knownJavaMention = []
knownC = []
knownCSharp = []
knownPython = []
xmldoc = sys.argv[1]
tree = ET.parse(xmldoc)
root = tree.getroot()
# print (root.attrib)
myList = []
# for each row in the xml document gather body information
for row in root:
    # Body holds all comment information from post
    body = row.get('Body')
    rowId = row.get('Id')
    # Tags for comment post
    tags = row.get('Tags')
    # parse body to find code tags
    code = parseBodyForTagCode(body)
    # Encode list information about code into UTF8
    codeUni = repr([x.encode('UTF8') for x in code])
    # If code isn't present, ignore post and move to next post
    if codeUni == '[]':
        continue
    cleanCode = ""
    for element in codeUni:
        print (element is str)
        element.decode()
        cleanCode = element + cleanCode
    cleanCode = re.sub('<code>|</code>', '', cleanCode)
    print (cleanCode)
    if tags != None:
        # Assign all known code to list
        if ("<java>" in tags):
            knownJavaTags.append(codeUni)
        if ("<python>" in tags) or ("python" in body):
            knownPython.append(rowId+'`'+codeUni+'`'+tags)
        if ("<C>" in tags) or ("C" in body):
            knownC.append(rowId+'`'+codeUni+'`'+tags)
        if ("<C#>" in tags) or ("C#" in body):
            knownCSharp.append(rowId+'`'+codeUni+'`'+tags)
        # Known post tags are added to myList
        myList.append(rowId+'`'+codeUni+'`'+tags)
    else:
        # unknown code tag is added to myList
        myList.append(rowId+'`'+codeUni)
        if "java" in body:
            knownJavaMention.append(codeUni)
# Assign positive features
positive_featuresets = list(map(features, knownJavaTags))
unlabeled_featuresets = list(map(features, knownJavaMention))
classifier = PositiveNaiveBayesClassifier.train(positive_featuresets, unlabeled_featuresets)
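# Hedged note (not part of the upload): the trained classifier labels unseen
# snippets, mirroring the commented-out call further down, e.g.
#     classifier.classify(features('public static void main'))
# returns True for Java-like input and False otherwise.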
# Ngram section
# print(myList)
############################################################################
for item in myList:
    allCodeTags = [item[m.start():m.end()] for m in re.finditer('<code>(.+?)</code>', item)]
    for code in allCodeTags:
        cleanCode = re.sub('<code>|</code>', '', code)
        # print (cleanCode)
        # print(classifier.classify(features(cleanCode)))
        trigrams = ngrams(cleanCode.split(), 3)
        # for grams in trigrams:
        #     print (grams)
        # break
| mit | Python |
|
8204a8b84cdcd515ea1dcf7ab67574b6db5baca6 | Add WS caller | ncarro/openacademy-project | web_services/ws_test.py | web_services/ws_test.py | import functools
import xmlrpclib
HOST = 'localhost'
PORT = 8069
DB = 'odoo_curso'
USER = 'admin'
PASS = 'admin'
ROOT = 'http://%s:%d/xmlrpc/' % (HOST,PORT)
# 1. Login
uid = xmlrpclib.ServerProxy(ROOT + 'common').login(DB,USER,PASS)
print "Logged in as %s (uid:%d)" % (USER,uid)
call = functools.partial(
    xmlrpclib.ServerProxy(ROOT + 'object').execute,
    DB, uid, PASS)
# 2. Read the sessions
model = 'openacademy.session'
domain = []
method_name = 'search_read'
sessions = call(model, method_name, domain, ['name','seats','taken_seats'])
for session in sessions:
print "Session %s (%s seats), taken seats %d" % (session['name'], session['seats'], session['taken_seats'])
# 3. Create a new session
course_id = call('openacademy.course', 'search', [('name','ilike','Functional')])[0]
session_id = call(model, 'create', {
    'name': 'My session loca',
    'course_id': course_id,
})
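# A hedged follow-up sketch (not part of the original commit): 'create'
# returns the new record's database id, so a read confirms the write:
#     created = call(model, 'read', [session_id], ['name', 'course_id'])
#     print "Created session %d: %s" % (session_id, created[0]['name'])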
| apache-2.0 | Python |
|
605fb4c6726d0c66bada870bffe526d493195b33 | Create USN.py | JadedCoder712/Final-Project | USN.py | USN.py | #Spooky scary skeletons send shivers down your spine
#You are a gunner in the Navy. Destroy the Commies.
| mit | Python |
|
129e548ac0be8ee3a60dd85aca9d095456b7d3a6 | Add new py-testresources package (#14031) | LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack | var/spack/repos/builtin/packages/py-testresources/package.py | var/spack/repos/builtin/packages/py-testresources/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTestresources(PythonPackage):
"""Testresources, a pyunit extension for managing expensive test resources.
"""
homepage = "https://launchpad.net/testresources"
url = "https://pypi.io/packages/source/t/testresources/testresources-2.0.1.tar.gz"
version('2.0.1', sha256='ee9d1982154a1e212d4e4bac6b610800bfb558e4fb853572a827bc14a96e4417')
depends_on('py-setuptools', type='build')
| lgpl-2.1 | Python |
|
3e7b9b69e68c8594eac92d88f0579aab40d7d5ae | Test aborting queued live migration | mahak/nova,openstack/nova,mahak/nova,mahak/nova,openstack/nova,openstack/nova | nova/tests/functional/libvirt/test_live_migration.py | nova/tests/functional/libvirt/test_live_migration.py | # Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from lxml import etree
from nova.tests.functional import integrated_helpers
from nova.tests.functional.libvirt import base as libvirt_base
class LiveMigrationQueuedAbortTest(
    libvirt_base.LibvirtMigrationMixin,
    libvirt_base.ServersTestBase,
    integrated_helpers.InstanceHelperMixin
):
    """Functional test for bug 1949808.
    This test is used to confirm that the VM's state is reverted properly
    when a queued live migration is aborted.
    """
    api_major_version = 'v2.1'
    microversion = '2.74'
    ADMIN_API = True
    def setUp(self):
        super().setUp()
        # We will allow only one live migration to be processed at any
        # given period of time
        self.flags(max_concurrent_live_migrations='1')
        self.src_hostname = self.start_compute(hostname='src')
        self.dest_hostname = self.start_compute(hostname='dest')
        self.src = self.computes[self.src_hostname]
        self.dest = self.computes[self.dest_hostname]
        # Live migration's execution could be locked if needed
        self.lock_live_migration = threading.Lock()
    def _migrate_stub(self, domain, destination, params, flags):
        # Execute only if live migration is not locked
        with self.lock_live_migration:
            self.dest.driver._host.get_connection().createXML(
                params['destination_xml'],
                'fake-createXML-doesnt-care-about-flags')
            conn = self.src.driver._host.get_connection()
            # Because migrateToURI3 is spawned in a background thread,
            # this method does not block the upper nova layers. Because
            # we don't want nova to think the live migration has
            # finished until this method is done, the last thing we do
            # is make fakelibvirt's Domain.jobStats() return
            # VIR_DOMAIN_JOB_COMPLETED.
            server = etree.fromstring(
                params['destination_xml']
            ).find('./uuid').text
            dom = conn.lookupByUUIDString(server)
            dom.complete_job()
    def test_queued_live_migration_abort(self):
        # Lock live migrations
        self.lock_live_migration.acquire()
        # Start instances: first one would be used to occupy
        # executor's live migration queue, second one would be used
        # to actually confirm that queued live migrations are
        # aborted properly.
        self.server_a = self._create_server(
            host=self.src_hostname, networks='none')
        self.server_b = self._create_server(
            host=self.src_hostname, networks='none')
        # Issue live migration requests for both servers. We expect that
        # server_a live migration would be running, but locked by
        # self.lock_live_migration and server_b live migration would be
        # queued.
        self._live_migrate(
            self.server_a,
            migration_expected_state='running',
            server_expected_state='MIGRATING'
        )
        self._live_migrate(
            self.server_b,
            migration_expected_state='queued',
            server_expected_state='MIGRATING'
        )
        # Abort live migration for server_b
        serverb_migration = self.api.api_get(
            '/os-migrations?instance_uuid=%s' % self.server_b['id']
        ).body['migrations'].pop()
        self.api.api_delete(
            '/servers/%s/migrations/%s' % (self.server_b['id'],
                                           serverb_migration['id']))
        self._wait_for_migration_status(self.server_b, ['cancelled'])
        # Unlock live migrations and confirm that server_a becomes
        # active again after successful live migration
        self.lock_live_migration.release()
        self._wait_for_state_change(self.server_a, 'ACTIVE')
        # FIXME(artom) Assert the server_b never comes out of 'MIGRATING'
        self.assertRaises(
            AssertionError,
            self._wait_for_state_change, self.server_b, 'ACTIVE')
        self._wait_for_state_change(self.server_b, 'MIGRATING')
| apache-2.0 | Python |
|
226238bf4c672a58bb6c066f79301701d594a5c0 | Add notobuilder script | googlei18n/noto-source,googlei18n/noto-source,googlefonts/noto-source,googlefonts/noto-source,googlei18n/noto-source,googlefonts/noto-source | scripts/notobuilder.py | scripts/notobuilder.py | """Build a Noto font from one or more source files.
By default, places unhinted TTF, hinted TTF, OTF and (if possible) variable
fonts into the ``output/`` directory.
Currently does not support building from Monotype sources.
"""
import logging
import os
import re
import sys
from gftools.builder import GFBuilder
from gftools.builder.autohint import autohint
class NotoBuilder(GFBuilder):
    def __init__(self, sources):
        family = self.get_family_name(sources[0])
        self.config = {
            "sources": sources,
            "familyName": family,
            "buildVariable": True,
            "autohintTTF": False,  # We will, our own way
            "buildWebfont": False,
            "vfDir": "output/%s/unhinted/variable-ttf" % family,
            "otDir": "output/%s/unhinted/otf" % family,
            "ttDir": "output/%s/unhinted/ttf" % family,
        }
        self.outputs = set()
        self.logger = logging.getLogger("GFBuilder")
        self.fill_config_defaults()
    def get_family_name(self, source=None):
        if not source:
            source = self.config["sources"][0]
        source, _ = os.path.splitext(os.path.basename(source))
        fname = re.sub(r"([a-z])([A-Z])", r"\1 \2", source)
        fname = re.sub("-?MM$", "", fname)
        return fname
    def post_process_ttf(self, filename):
        super().post_process_ttf(filename)
        self.outputs.add(filename)
        hinted_dir = "output/%s/hinted/ttf" % self.get_family_name()
        os.makedirs(hinted_dir, exist_ok=True)
        hinted = filename.replace("unhinted", "hinted")
        try:
            autohint(filename, hinted)
            self.outputs.add(hinted)
        except Exception as e:
            self.logger.error("Couldn't autohint %s: %s" % (filename, e))
    def post_process(self, filename):
        super().post_process(filename)
        self.outputs.add(filename)
    def build_variable(self):
        try:
            super().build_variable()
        except Exception as e:
            self.logger.error("Couldn't build variable font: %s" % e)
if __name__ == '__main__':
    import argparse
    # https://stackoverflow.com/a/20422915
    class ActionNoYes(argparse.Action):
        def __init__(self, option_strings, dest, default=None, required=False, help=None):
            if default is None:
                raise ValueError('You must provide a default with Yes/No action')
            if len(option_strings) != 1:
                raise ValueError('Only single argument is allowed with YesNo action')
            opt = option_strings[0]
            if not opt.startswith('--'):
                raise ValueError('Yes/No arguments must be prefixed with --')
            opt = opt[2:]
            opts = ['--' + opt, '--no-' + opt]
            super(ActionNoYes, self).__init__(opts, dest, nargs=0, const=None,
                                              default=default, required=required, help=help)
        def __call__(self, parser, namespace, values, option_strings=None):
            if option_strings.startswith('--no-'):
                setattr(namespace, self.dest, False)
            else:
                setattr(namespace, self.dest, True)
    parser = argparse.ArgumentParser(description='Build a Noto font')
    parser.add_argument('sources', metavar='FILE', nargs='+',
                        help='source files')
    parser.add_argument('--variable', action=ActionNoYes, default=True,
                        help='build a variable font')
    parser.add_argument('--otf', action=ActionNoYes, default=True,
                        help='build an OTF')
    parser.add_argument('--verbose', '-v', action="store_true", help='verbose logging')
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.INFO)
    builder = NotoBuilder(args.sources)
    builder.config["buildVariable"] = args.variable
    builder.config["buildOTF"] = args.otf
    builder.build()
    print("Produced the following files:")
    for o in builder.outputs:
        print("* " + o)
| apache-2.0 | Python |
|
f8712c62ad069b815ff775bd758bdbf693bdbdb7 | Add some constants. | williamgibb/pyFuckery,williamgibb/pyFuckery | src/pyfuckery/constants.py | src/pyfuckery/constants.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# XXX Update Docstring
"""
pyFuckery - constants.py
Created on 2/12/17.
"""
# Stdlib
import logging
import re
# Third Party Code
# Custom Code
log = logging.getLogger(__name__)
# Brainfuck tokens
SYM_PTR_INC = '>'
SYM_PTR_DEC = '<'
SYM_DATA_INC = '+'
SYM_DATA_DEC = '-'
SYM_IO_OUTPUT = '.'
SYM_IO_INPUT = ','
SYM_JMP_FWD = '['
SYM_JMP_BACKWARD = ']'
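# A hedged sketch (not part of the original commit): with these symbols a
# raw source string can be filtered down to the valid Brainfuck tokens:
#     TOKENS = {SYM_PTR_INC, SYM_PTR_DEC, SYM_DATA_INC, SYM_DATA_DEC,
#               SYM_IO_OUTPUT, SYM_IO_INPUT, SYM_JMP_FWD, SYM_JMP_BACKWARD}
#     tokens = [c for c in '+-[>.<], comment text' if c in TOKENS]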
| mit | Python |
|
c97e44697444b15686bd0a6b5158c90630958238 | Add LRU example | voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts | lru.py | lru.py | from datetime import datetime
class LRUCacheItem(object):
"""Data structure of items stored in cache"""
def __init__(self, key, item):
self.key = key
self.item = item
self.timestamp = datetime.now()
class LRUCache(object):
"""A sample class that implements LRU algorithm"""
def __init__(self, length, delta=None):
self.length = length
self.delta = delta
self.hash = {}
self.item_list = []
def insertItem(self, item):
"""Insert new items to cache"""
if item.key in self.hash:
# Move the existing item to the head of item_list.
item_index = self.item_list.index(item)
self.item_list[:] = self.item_list[:item_index] + self.item_list[item_index+1:]
self.item_list.insert(0, item)
else:
# Remove the last item if the length of cache exceeds the upper bound.
if len(self.item_list) > self.length:
self.removeItem(self.item_list[-1])
# If this is a new item, just append it to
# the front of item_list.
self.hash[item.key] = item
self.item_list.insert(0, item)
def removeItem(self, item):
"""Remove those invalid items"""
del self.hash[item.key]
del self.item_list[self.item_list.index(item)]
def validateItem(self):
"""Check if the items are still valid."""
def _outdated_items():
now = datetime.now()
for item in self.item_list:
time_delta = now - item.timestamp
if time_delta.seconds > self.delta:
yield item
map(lambda x: self.removeItem(x), _outdated_items())
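if __name__ == '__main__':
    # A hedged usage sketch, not part of the original commit.
    cache = LRUCache(length=2)
    for key in ('a', 'b', 'c', 'd'):
        cache.insertItem(LRUCacheItem(key, key.upper()))
    # insertItem evicts the tail only once the list already holds more than
    # `length` items, so after four inserts the oldest key 'a' is gone.
    print([item.key for item in cache.item_list])  # -> ['d', 'c', 'b']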
| mit | Python |
|
dd7ffbf97f9ae8426d7f60e465941f3f70bccdd6 | add file | curtisxk38/cs3240-labdemo | new.py | new.py | print("test")
| mit | Python |
|
494c8b88727dc958a7ba37f76d4c470837d26e1d | Define register files | nickdrozd/ecio-lisp,nickdrozd/ecio-lisp | reg.py | reg.py | EXP = 'EXP'
VAL = 'VAL'
ENV = 'ENV'
UNEV = 'UNEV'
FUNC = 'FUNC'
ARGL = 'ARGL'
CONT = 'CONT'
CURR = 'CURR'
STACK = 'STACK'
| mit | Python |
|
1c46aa8a03e577ddb3db55a11df3db70905110d2 | Add serial_logger.py | ethanhart/arduino-climate | serial_logger.py | serial_logger.py | #!/usr/bin/env python
# encoding: utf-8
# Log serial monitor data
# TO-DO: add options for serial device, baud rate
import serial
import datetime
ser = serial.Serial('/dev/cu.usbmodemfa131', 9600)
now = datetime.datetime.now()
def get_date_string():
    day = now.day
    month = now.month
    year = now.year
    current_day = "{0}-{1}-{2}".format(year, month, day)
    return current_day
while True:
    current_date = get_date_string()
    filename = current_date + '.temperature.log'
    with open(filename, 'a') as log:
        try:
            temp = ser.readline()
            #temp = 76
            now = datetime.datetime.now()
            iso = now.isoformat()
            data = "{0} {1}".format(iso, temp)
            print data.strip()
            log.write(data)
            #print now, temp
        except:
            pass
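# A hedged sketch for the TO-DO above (not part of the original commit):
# the device and baud rate could be taken from the command line, e.g.
#     import argparse
#     ap = argparse.ArgumentParser()
#     ap.add_argument('--device', default='/dev/cu.usbmodemfa131')
#     ap.add_argument('--baud', type=int, default=9600)
#     args = ap.parse_args()
#     ser = serial.Serial(args.device, args.baud)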
| mit | Python |
|
0df0daf7f52015258c3607bb2822c1c77c5e8207 | add tensorflow sample | soy-curd/Snippets,soy-curd/Snippets,soy-curd/Snippets,soy-curd/Snippets,soy-curd/Snippets,soy-curd/Snippets,soy-curd/Snippets,soy-curd/Snippets | python/other/flow.py | python/other/flow.py | import tensorflow as tf
a = tf.constant(1, name="a")
b = tf.constant(1, name="b")
c = a + b
print(c)
graph = tf.get_default_graph()
print(graph.as_graph_def())
with tf.Session() as sess:
    print(sess.run(c))
| mit | Python |
|
fdc900d5da48ae9aea1c7537e026dc2d46c62bc8 | add some reuseable aggregation code | oss/shrunk,oss/shrunk,oss/shrunk,oss/shrunk,oss/shrunk | shrunk/aggregations.py | shrunk/aggregations.py |
def match_short_url(url):
return {"$match": {"short_url":url}}
def match_id(id):
return {"$match": {"short_url":url}}
#monthly visits aggregations phases
group_ips = {"$group": {
    "_id": "$source_ip",
    "times": {
        "$addToSet": "$time"
    },
    "count": {
        "$sum": 1
    }
}}
take_first_visit = {"$project": {
    "time": {
        "$arrayElemAt": ["$times", 0]
    },
    "count": 1
}}
#this monthly sort can probably get abstracted and reused
group_months = {"$group": {
    "_id": {
        "month": {"$month": "$time"},
        "year": {"$year": "$time"}
    },
    "first_time_visits": {
        "$sum": 1
    },
    "all_visits": {
        "$sum": "$count"
    }
}}
make_sortable = {"$project": {
    "month": "$_id.month",
    "year": "$_id.year",
    "first_time_visits": 1,
    "all_visits": 1
}}
chronological_sort = {"$sort": {
    "year": 1,
    "month": 1
}}
clean_results = {"$project": {
    "first_time_visits": 1,
    "all_visits": 1
}}
monthly_visits_aggregation = [group_ips, take_first_visit, group_months,  # process data
                              make_sortable, chronological_sort, clean_results]  # sort
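# A hedged usage sketch (not part of the original commit): the phases above
# compose into a pymongo aggregate() call; the 'visits' collection name and
# the short url 'aB3dE' are illustrative only:
#     results = db.visits.aggregate(
#         [match_short_url('aB3dE')] + monthly_visits_aggregation)
#     for month in results:
#         print(month['first_time_visits'], month['all_visits'])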
| mit | Python |
|
f9da8c4aa061223dac5147f6eaec6ad3419d1d6a | Add cli module to accept a language option | hackebrot/cookiedozer,hackebrot/cookiedozer | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/cli.py | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/cli.py | import os
os.environ["KIVY_NO_ARGS"] = "1"
import click
from {{cookiecutter.repo_name}}.{{cookiecutter.repo_name}} import {{cookiecutter.app_class_name}}
@click.command()
@click.option(
    '-l', '--language', help='Default language of the App', default='en',
    type=click.Choice(['en', 'de'])
)
def main(language):
    """Run {{cookiecutter.app_class_name}} with the given language setting.
    """
    {{cookiecutter.app_class_name}}(language).run()
| mit | Python |
|
d7ea709e50510016bb448cb45e159528e416f08b | Create dianping_spider.py | EclipseXuLu/DataHouse,EclipseXuLu/DataHouse | DataHouse/crawler/dianping/dianping_spider.py | DataHouse/crawler/dianping/dianping_spider.py | """
a web spider for da zhong dian ping
"""
import time
import requests
from bs4 import BeautifulSoup
from lxml import etree
import pandas as pd
from pymongo import MongoClient
CITY_FILEPATH = 'city.xml'
CATEGORY_FILEPATH = 'type.xml'
SLEEP_TIME = 2
class City(object):
    def __init__(self, pinyin, id, name):
        self.pinyin = pinyin
        self.id = id
        self.name = name
    def __str__(self):
        return '{pinyin = ' + self.pinyin + '; id = ' + self.id + '; name = ' + self.name + '}'
    def __repr__(self):
        return self.__str__()
class Category(object):
    def __init__(self, id, name):
        self.id = id
        self.name = name
    def __str__(self):
        return '{id = ' + self.id + '; name = ' + self.name + '}'
    def __repr__(self):
        return self.__str__()
def parse_city_xml(city_xml_filepath):
    """
    parse the city_xml_filepath file in res directory, and return city list
    :param city_xml_filepath:
    :return:
    """
    citylist = []
    tree = etree.parse(city_xml_filepath)
    for _ in tree.xpath('//city'):
        city = City(_.get('pinyin').strip(), _.get('id').strip(), _.text.strip())
        citylist.append(city)
    return citylist
def parse_category_xml(type_xml_filepath):
    """
    parse the type_xml_filepath file in res directory, and return category list
    :param type_xml_filepath:
    :return:
    """
    categorylist = []
    tree = etree.parse(type_xml_filepath)
    for _ in tree.xpath('//type'):
        category = Category(_.get('id').strip(), _.text.strip())
        categorylist.append(category)
    return categorylist
def crawl(start_num, city, category):
    headers = {
        'Host': 'mapi.dianping.com',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'
    }
    payload = {
        'start': start_num,
        'categoryid': category.id,
        'sortid': 0,
        'maptype': 0,
        'cityid': city.id
    }
    main_url = 'http://mapi.dianping.com/searchshop.json'
    response = requests.get(main_url, params=payload, timeout=20, headers=headers)
    if response.status_code == 200:
        return response.json()
    else:
        print('Error!')
        return None
def insert_item(item):
    """
    insert an object into mongodb
    :param item:
    :return:
    """
    client = MongoClient()
    db = client.dianping.hotel
    result = db.insert_one(item)
if __name__ == '__main__':
    categorylist = parse_category_xml(CATEGORY_FILEPATH)
    citylist = parse_city_xml(CITY_FILEPATH)
    for city in citylist:
        for category in categorylist:
            # data = []
            max_num = 25  # can be assigned to any number
            start_num = 0
            while start_num < max_num:
                try:
                    dat = crawl(start_num, city, category)
                    if dat is not None:
                        max_num = dat['recordCount']
                        start_num += 25
                        print(dat['list'])
                        for _ in dat['list']:
                            _['cityName'] = city.name
                            _['categoryName'] = category.name
                            insert_item(_)
                        # data.append(dat['list'])
                    time.sleep(SLEEP_TIME)
                    # df = pd.DataFrame(data, )
                    # df = pd.DataFrame(data)
                    # df.to_excel('./food.xlsx', 'Food', index=False)
                except:
                    pass
| mit | Python |
|
195b74304fa1c5eab3bc2e16df1346c2f92916f8 | Test py | exonum/exonum,exonum/exonum,alekseysidorov/exonum,alekseysidorov/exonum,alekseysidorov/exonum,alekseysidorov/exonum,exonum/exonum,exonum/exonum | testnet/tests/configs_api_test.py | testnet/tests/configs_api_test.py | #!/usr/bin/env python3
import unittest
import datetime
from exonum import ExonumApi, random_hex
class ConfigsApi(ExonumApi):
    def new_config_propose(self, config, height, actual_from_height):
        tx, c = self.send_transaction("config/propose", {"config": config, "height": height, "actual_from_height": actual_from_height})
        return (self.get_config_propose(tx))
    def new_config_vote(self):
        tx, _ = self.send_transaction(
            "config/vote", {"config_propose_hash": hash})
    def get_config_propose(self, hash):
        r = self.get("config/propose/" + hash)
        return r.json()
    def get_config_vote(self, pubkey):
        r = self.get("config/vote/" + pubkey)
        return r.json()
class ConfigsApiTest(ConfigsApi):
    def setUp(self):
        super().setUp()
        self.host = "http://127.0.0.1:8400/api/v1"
        self.times = 120
    def create_many_proposes(self, txs):
        final_tx = None
        print()
        print(" - Create {} config_proposes".format(txs))
        start = datetime.datetime.now()
        for i in range(txs):
            r, c = self.post_transaction(
                "wallets/create", {"name": "name_" + str(i)})
            final_tx = r["tx_hash"]
        tx = self.wait_for_transaction(final_tx)
        self.assertNotEqual(tx, None)
        finish = datetime.datetime.now()
        delta = finish - start
        ms = delta.seconds * 1000 + delta.microseconds / 1000
        print(" - Committed, txs={}, total time: {}s".format(txs, ms / 1000))
        start = datetime.datetime.now()
        for i in range(txs):
            info = self.find_user(cookies[i])
            self.assertEqual(info["name"], "name_" + str(i))
        finish = datetime.datetime.now()
        delta = finish - start
        ms = delta.seconds * 1000 + delta.microseconds / 1000
        print(" - All users found, total time: {}s".format(ms / 1000))
    def test_create_config_propose(self):
        r, c = self.create_user("My First User")
        self.assertEqual(r["name"], "My First User")
        self.assertEqual(r["balance"], 0)
    def test_create_proposes_1_10(self):
        self.create_many_proposes(10)
    def test_create_proposes_2_100(self):
        self.create_many_proposes(100)
    def test_create_proposes_3_1000(self):
        self.create_many_proposes(1000)
    def test_create_proposes_4_5000(self):
        self.create_many_proposes(5000)
    def test_create_proposes_5_10000(self):
        self.create_many_proposes(10000)
if __name__ == '__main__':
    unittest.main(verbosity=2, buffer=None)
| apache-2.0 | Python |
|
1f24571c358941932860eab9b46b386adc7c7ecc | Add script to output the users with unassigned tickets | EuroPython/epcon,matrixise/epcon,malemburg/epcon,barrachri/epcon,barrachri/epcon,matrixise/epcon,matrixise/epcon,artcz/epcon,artcz/epcon,matrixise/epcon,malemburg/epcon,barrachri/epcon,artcz/epcon,matrixise/epcon,PythonSanSebastian/epcon,artcz/epcon,barrachri/epcon,EuroPython/epcon,malemburg/epcon,artcz/epcon,PythonSanSebastian/epcon,PythonSanSebastian/epcon,malemburg/epcon,barrachri/epcon,matrixise/epcon,EuroPython/epcon,PythonSanSebastian/epcon,artcz/epcon,malemburg/epcon,PythonSanSebastian/epcon,EuroPython/epcon,malemburg/epcon,barrachri/epcon,PythonSanSebastian/epcon | p3/management/commands/users_with_unassigned_tickets.py | p3/management/commands/users_with_unassigned_tickets.py | # -*- coding: utf-8 -*-
""" Print information of the users who got unassigned tickets."""
from django.core.management.base import BaseCommand, CommandError
from django.core import urlresolvers
from conference import models
from conference import utils
from p3 import models as p3_models
from conference import models as conf_models
from assopy import models as assopy_models
from collections import defaultdict, OrderedDict
from optparse import make_option
import operator
import simplejson as json
import traceback
### Globals
### Helpers
def get_all_order_tickets():
    orders = assopy_models.Order.objects.filter(_complete=True)
    order_tkts = [ordi.ticket for order in orders for ordi in order.orderitem_set.all() if ordi.ticket is not None]
    conf_order_tkts = [ot for ot in order_tkts if ot.fare.code.startswith('T')]
    return conf_order_tkts
def get_assigned_ticket(ticket_id):
    return p3_models.TicketConference.objects.filter(ticket=ticket_id)
def has_assigned_ticket(ticket_id):
    return bool(get_assigned_ticket(ticket_id))
#
# def is_ticket_assigned_to_someone_else(ticket, user):
# tickets = p3_models.TicketConference.objects.filter(ticket_id=ticket.id)
#
# if not tickets:
# return False
# #from IPython.core.debugger import Tracer
# #Tracer()()
# #raise RuntimeError('Could not find any ticket with ticket_id {}.'.format(ticket))
#
# if len(tickets) > 1:
# raise RuntimeError('You got more than one ticket from a ticket_id.'
# 'Tickets obtained: {}.'.format(tickets))
#
# tkt = tickets[0]
# if tkt.ticket.user_id != user.id:
# return True
#
# if not tkt.assigned_to:
# return False
#
# if tkt.assigned_to == user.email:
# return False
# else:
# return True
###
class Command(BaseCommand):
    option_list = BaseCommand.option_list + (
        make_option('--emails',
                    action='store_true',
                    dest='emails',
                    default=False,
                    help='Will print user emails.',
                    ),
        # make_option('--option',
        #             action='store',
        #             dest='option_attr',
        #             default=0,
        #             type='int',
        #             help='Help text',
        #             ),
    )
    def handle(self, *args, **options):
        try:
            conference = args[0]
        except IndexError:
            raise CommandError('conference not specified')
        tkts = get_all_order_tickets()
        # unassigned tickets
        un_tkts = [t for t in tkts if not has_assigned_ticket(t.id)]
        # users with unassigned tickets
        users = set()
        for ut in un_tkts:
            users.add(ut.user)
        output = []
        if options['emails']:
            output = sorted([usr.email.encode('utf-8') for usr in users])
        else:
            output = sorted([usr.get_full_name().encode('utf-8') for usr in users])
        #for ot in order_tkts:
        #    tkt = get_conference_ticket(ot.id)
        #from IPython.core.debugger import Tracer
        #Tracer()()
        print(', '.join(output))
| bsd-2-clause | Python |
|
aa720214722ca6ea445cf4ba38aa5f51ef7772b4 | add random user for notes | ThinkmanWang/NotesServer,ThinkmanWang/NotesServer,ThinkmanWang/NotesServer | add_random_user.py | add_random_user.py | #!/usr/bin/python
#coding=utf-8
import sys
import MySQLdb
from DBUtils.PooledDB import PooledDB
import hashlib
import time
import random
g_dbPool = PooledDB(MySQLdb, 5, host='function-hz.com', user='notes', passwd='welc0me', db='db_notes', port=3306, charset = "utf8", use_unicode = True);
def create_random_user(user_name, szPwd):
    # create user by cell phone number and send dynamic password
    conn = g_dbPool.connection()
    cur = conn.cursor()
    count = cur.execute("insert into user(user_name, password) values (%s, %s) " \
                        , (user_name, hashlib.md5(szPwd).hexdigest()))
    conn.commit()
    if (1 == count):
        return True
    else:
        return False
if __name__ == '__main__':
    print ("start add random user")
    for i in range(1, 5000000):
        szPhone = str(random.randint(11111111111, 99999999999))
        szPwd = "123456"
        print ("create user %d %s ==> %s" % (i, szPhone, szPwd))
        # nPhone = random.randint(11111111111, 99999999999)
        create_random_user(szPhone, szPwd)
| apache-2.0 | Python |
|
05aa314ac9b5d38bb7a30e30aced9b27b2797888 | Add tests for non-async constructs | github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql | python/ql/test/experimental/dataflow/tainttracking/defaultAdditionalTaintStep/test_syntax.py | python/ql/test/experimental/dataflow/tainttracking/defaultAdditionalTaintStep/test_syntax.py | # Add taintlib to PATH so it can be imported during runtime without any hassle
import sys; import os; sys.path.append(os.path.dirname(os.path.dirname((__file__))))
from taintlib import *
# This has no runtime impact, but allows autocomplete to work
from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from ..taintlib import *
# Actual tests
class Context:
    def __enter__(self):
        return TAINTED_STRING
    def __exit__(self, exc_type, exc, tb):
        pass
def test_with():
    ctx = Context()
    taint(ctx)
    with ctx as tainted:
        ensure_tainted(tainted) # $ tainted
class Iter:
    def __iter__(self):
        return self
    def __next__(self):
        raise StopIteration
def test_for():
    iter = Iter()
    taint(iter)
    for tainted in iter:
        ensure_tainted(tainted) # $ tainted
# Make tests runable
test_with()
test_for()
| mit | Python |
|
388c51ea5f83f718b885d784b566bc1873998c3a | add management command used to find all duplicate districts | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | custom/icds_reports/management/commands/find_duplicate_district_topojsons.py | custom/icds_reports/management/commands/find_duplicate_district_topojsons.py | from django.core.management import BaseCommand
from custom.icds_reports.utils.topojson_util.topojson_util import get_topojson_file_for_level, \
get_district_topojson_data
class Command(BaseCommand):
help = "Prints out any districts whose names are duplicated across states."
def handle(self, *args, **kwargs):
district_topojson_data = get_district_topojson_data()
districts_to_states = {}
districts_with_duplicates = set()
for state, data in district_topojson_data.items():
for district_name in data['districts']:
if district_name in districts_to_states:
districts_with_duplicates.add(district_name)
districts_to_states[district_name].append(state)
else:
districts_to_states[district_name] = [state]
print('District Name: [States]\n')
for duplicate_district in districts_with_duplicates:
print(f'{duplicate_district}: {", ".join(districts_to_states[duplicate_district])}')
| bsd-3-clause | Python |
|
8634db8fe61f819cf24023514d94e4ebfc7e819f | Add Stats() class | auth0/auth0-python,auth0/auth0-python | auth0/v2/stats.py | auth0/v2/stats.py | from .rest import RestClient
class Stats(object):
"""Auth0 stats endpoints
Args:
domain (str): Your Auth0 domain, e.g: 'username.auth0.com'
jwt_token (str): An API token created with your account's global
keys. You can create one by using the token generator in the
API Explorer: https://auth0.com/docs/api/v2
"""
def __init__(self, domain, jwt_token):
self.domain = domain
self.client = RestClient(jwt=jwt_token)
def _url(self, action):
return 'https://%s/api/v2/stats/%s' % (self.domain, action)
def active_users(self):
return self.client.get(self._url('active-users'))
def daily_stats(self, from_date=None, to_date=None):
return self.client.get(self._url('daily'), params={'from': from_date,
'to': to_date})
| mit | Python |
|
c1fcf54b63de95c85a9505d83062d8b320b1cbdf | Add python cloudfront update_distribution example to replace ACM Certificate | awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples | python/example_code/cloudfront/update_distribution_certificate.py | python/example_code/cloudfront/update_distribution_certificate.py | # Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
import sys
#support for python 2 and 3 input types
def read(output):
if sys.version_info[0] < 3:
return(raw_input(output))
else:
return(input(output))
# Create CloudFront client
cf = boto3.client('cloudfront')
# List distributions with the pagination interface
print("\nAvailable CloudFront Distributions:\n")
paginator = cf.get_paginator('list_distributions')
for distributionlist in paginator.paginate():
for distribution in distributionlist['DistributionList']['Items']:
print("Domain: " + distribution['DomainName'])
print("Distribution Id: " + distribution['Id'])
print("Certificate Source: " + distribution['ViewerCertificate']['CertificateSource'])
if (distribution['ViewerCertificate']['CertificateSource'] == "acm"):
print("Certificate ARN: " + distribution['ViewerCertificate']['Certificate'])
print("")
print('Enter the Distribution Id of the CloudFront Distribution who\'s ACM Certificate you would like to replace. ')
distribution_id = read('Note that certificate source must be ACM - DistributionId: ')
distribution_config_response=cf.get_distribution_config(Id=distribution_id)
distribution_config=distribution_config_response['DistributionConfig']
distribution_etag=distribution_config_response['ETag']
if (distribution_config['ViewerCertificate']['CertificateSource'] != "acm"):
print("\nThe DistributionId you have entered is not currently using an ACM Certificate, exiting...\n")
exit()
old_cert_arn=distribution_config['ViewerCertificate']['ACMCertificateArn']
new_cert_arn=read("Please enter the ARN of the new ACM Certificate you would like to attach to Distribution " + distribution_id + ": ")
print("Replacing: " + old_cert_arn + "\nwith: " + new_cert_arn + "\n")
distribution_config['ViewerCertificate']['ACMCertificateArn']=new_cert_arn
distribution_config['ViewerCertificate']['Certificate']=new_cert_arn
cf.update_distribution(DistributionConfig=distribution_config,Id=distribution_id,IfMatch=distribution_etag)
| apache-2.0 | Python |
|
b861b70e72b582a1bd3ae3ae6fa8ae2478b4ebe4 | add the tests | UDST/synthpop,synthicity/synthpop,sfcta/synthpop,SEMCOG/synthpop,bhargavasana/synthpop,hanase/synthpop | popgen/test/test_categorizer.py | popgen/test/test_categorizer.py | import pytest
import numpy as np
from ..census_helpers import Census
from .. import categorizer as cat
@pytest.fixture
def c():
return Census("827402c2958dcf515e4480b7b2bb93d1025f9389")
@pytest.fixture
def acs_data(c):
population = ['B01001_001E']
sex = ['B01001_002E', 'B01001_026E']
race = ['B02001_0%02dE' % i for i in range(1, 11)]
male_age_columns = ['B01001_0%02dE' % i for i in range(3, 26)]
female_age_columns = ['B01001_0%02dE' % i for i in range(27, 50)]
all_columns = population + sex + race + male_age_columns + \
female_age_columns
df = c.block_group_query(all_columns, "06", "075", tract="030600")
return df
@pytest.fixture
def pums_data(c):
return c.download_population_pums("06", "07506")
def test_categorize(acs_data, pums_data):
p_acs_cat = cat.categorize(acs_data, {
("population", "total"): "B01001_001E",
("age", "19 and under"): "B01001_003E + B01001_004E + B01001_005E + "
"B01001_006E + B01001_007E + B01001_027E + "
"B01001_028E + B01001_029E + B01001_030E + "
"B01001_031E",
("age", "20 to 35"): "B01001_008E + B01001_009E + B01001_010E + "
"B01001_011E + B01001_012E + B01001_032E + "
"B01001_033E + B01001_034E + B01001_035E + "
"B01001_036E",
("age", "35 to 60"): "B01001_013E + B01001_014E + B01001_015E + "
"B01001_016E + B01001_017E + B01001_037E + "
"B01001_038E + B01001_039E + B01001_040E + "
"B01001_041E",
("age", "above 60"): "B01001_018E + B01001_019E + B01001_020E + "
"B01001_021E + B01001_022E + B01001_023E + "
"B01001_024E + B01001_025E + B01001_042E + "
"B01001_043E + B01001_044E + B01001_045E + "
"B01001_046E + B01001_047E + B01001_048E + "
"B01001_049E",
("race", "white"): "B02001_002E",
("race", "black"): "B02001_003E",
("race", "asian"): "B02001_005E",
("race", "other"): "B02001_004E + B02001_006E + B02001_007E + "
"B02001_008E",
("sex", "male"): "B01001_002E",
("sex", "female"): "B01001_026E"
}, index_cols=['NAME'])
assert len(p_acs_cat) == 3
assert len(p_acs_cat.columns) == 11
assert len(p_acs_cat.columns.names) == 2
assert p_acs_cat.columns[0][0] == "age"
assert np.all(cat.sum_accross_category(p_acs_cat) < 2)
def age_cat(r):
if r.AGEP <= 19:
return "19 and under"
elif r.AGEP <= 35:
return "20 to 35"
elif r.AGEP <= 60:
return "35 to 60"
return "above 60"
def race_cat(r):
if r.RAC1P == 1:
return "white"
elif r.RAC1P == 2:
return "black"
elif r.RAC1P == 6:
return "asian"
return "other"
def sex_cat(r):
if r.SEX == 1:
return "male"
return "female"
_, jd_persons = cat.joint_distribution(
pums_data,
cat.category_combinations(p_acs_cat.columns),
{"age": age_cat, "race": race_cat, "sex": sex_cat}
)
jd_persons | bsd-3-clause | Python |
|
151293037b941aba874fb2641c1bf982e2143beb | Create solution.py | lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges | hackerrank/algorithms/implementation/medium/the_time_in_words/py/solution.py | hackerrank/algorithms/implementation/medium/the_time_in_words/py/solution.py | #!/bin/python3
import sys
def solution(hrs, min):
lookup = {
0: 'zero',
1: 'one',
2: 'two',
3: 'three',
4: 'four',
5: 'five',
6: 'six',
7: 'seven',
8: 'eight',
9: 'nine',
10: 'ten',
11: 'eleven',
12: 'twelve',
13: 'thirteen',
14: 'fourteen',
15: 'quarter',
16: 'sixteen',
17: 'seventeen',
18: 'eighteen',
19: 'nineteen',
20: 'twenty',
21: 'twenty one',
22: 'twenty two',
23: 'twenty three',
24: 'twenty four',
25: 'twenty five',
26: 'twenty six',
27: 'twenty seven',
28: 'twenty eight',
29: 'twenty nine',
30: 'half',
}
if min == 0:
return "{} o' clock".format(lookup[hrs])
elif min <= 30:
if min == 15 or min == 30:
return "{} past {}".format(lookup[min], lookup[hrs])
else:
return "{} minute{} past {}".format(lookup[min], '' if min == 1 else 's', lookup[hrs])
rem = 60 - min
    nxt = hrs % 12 + 1  # wrap 12 -> 1 on a 12-hour clock
    if rem == 15 or rem == 30:
        return "{} to {}".format(lookup[rem], lookup[nxt])
    return "{} minute{} to {}".format(lookup[rem], '' if rem == 1 else 's', lookup[nxt])
h = int(input().strip())
m = int(input().strip())
s = solution(h, m)
print(s)
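# Quick self-checks (sketch; values mirror the HackerRank samples):
# assert solution(5, 0) == "five o' clock"
# assert solution(5, 1) == "one minute past five"
# assert solution(5, 47) == "thirteen minutes to six"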
| mit | Python |
|
8ba0fcfa893e007f1c6cc794a36bd3604498c380 | add rapiro.py | oga00000001/RapiroTools | rapiroController.kivy/rapiro.py | rapiroController.kivy/rapiro.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import serial
import os
import time
import datetime
import threading
_str = ''
if os.name == 'posix':
com = serial.Serial('/dev/ttyAMA0', 57600, timeout = 0.05)
else:
com = sys.stdout
def a2dist(v):
d = 26.59*pow(v/1024.0*5.0,-1.209)
return(d)
def rxData():
global _str
while (1):
n = com.inWaiting()
#print n, _str
if n > 0:
_str += com.read(n)
def command(data):
inst = data.split(',')
r = ''
try:
t = inst[0]
s = inst[1]
except:
t = 'x'
s = 'Not define'
if t == 'a':
"""
Arduino
"""
com.write(s)
r = com.readline()
elif t == 'p':
"""
Raspberry pi
"""
os.system(s)
else:
pass
return(t, s, r)
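# Examples (derived from the dispatch above): command('a,#M0') writes the
# '#M0' command to the Arduino over serial; command('p,ls') runs `ls` on the Pi.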
def main():
#print(command('a,#M0'))
#print(command('a,#Z'))
#print(command('a,#PS02A090S05A000T001'))
print(command('a,#M0'))
print(command('a,#Q'))
print(command('a,#A6'))
#print(command('a,#A1'))
#print(command('a,#A2'))
#print(command('a,#A3'))
#print(command('a,#A4'))
#print(command('a,#A5'))
print(command('a,#A6'))
#print(command('a,#A7'))
print(command('a,#C'))
print(command('a,#D'))
if __name__ == '__main__':
#t1 = threading.Thread(target=rxData)
#t1.setDaemon(True)
#t1.start()
main()
| mit | Python |
|
a2848885e85ad6d9685bb8ae35747300ed4b6b8b | Add a BaseTokenizer | mozilla/spicedham,mozilla/spicedham | spicedham/tokenizer.py | spicedham/tokenizer.py | class BaseTokenizer(object):
def __init__(self, config):
pass
def tokenize(self, text):
return [text]
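# Example subclass (illustrative only, not part of spicedham's API):
# class WhitespaceTokenizer(BaseTokenizer):
#     def tokenize(self, text):
#         return text.split()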
| mpl-2.0 | Python |
|
75a882bf38c88d73e38d13fbb8b1499ff4ae4ea6 | Add migration for changing users added by OSF for meetings with emails for fullnames to their guid | felliott/osf.io,adlius/osf.io,felliott/osf.io,adlius/osf.io,saradbowman/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,adlius/osf.io,felliott/osf.io,mattclark/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,pattisdr/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,cslzchen/osf.io,baylee-d/osf.io,HalcyonChimera/osf.io,mfraezz/osf.io,aaxelb/osf.io,caseyrollins/osf.io,caseyrollins/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,caseyrollins/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,aaxelb/osf.io,baylee-d/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,mattclark/osf.io,mfraezz/osf.io,baylee-d/osf.io,pattisdr/osf.io | scripts/remove_after_use/set_meetings_users_fullnames_to_guids.py | scripts/remove_after_use/set_meetings_users_fullnames_to_guids.py | import sys
import logging
import django
from django.db import transaction
django.setup()
from osf.models import OSFUser
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def main():
dry_run = '--dry' in sys.argv
with transaction.atomic():
users = OSFUser.objects.filter(fullname__regex=r'^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$', tags__name='osf4m')
logger.info('{} users found added by OSF 4 Meetings with emails for fullnames'.format(users.count()))
for user in users:
user.fullname = user._id
if not dry_run:
user.save()
if __name__ == '__main__':
main()
| apache-2.0 | Python |
|
a6b35a9a94b2e4b32c2236258812b44e81184515 | Add management command for resyncing mobile worker location user data | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq | corehq/apps/users/management/commands/fix_location_user_data.py | corehq/apps/users/management/commands/fix_location_user_data.py | from corehq.apps.locations.models import Location
from corehq.apps.users.models import CommCareUser
from dimagi.utils.couch.database import iter_docs
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
args = "domain"
help = "Fix location user data for mobile workers."
def process_user(self, user):
if user.location_id:
user.set_location(Location.get(user.location_id))
else:
user.unset_location()
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError("Usage: python manage.py fix_location_user_data %s" % self.args)
domain = args[0]
ids = (
CommCareUser.ids_by_domain(domain, is_active=True) +
CommCareUser.ids_by_domain(domain, is_active=False)
)
for doc in iter_docs(CommCareUser.get_db(), ids):
user = CommCareUser.wrap(doc)
try:
self.process_user(user)
except Exception as e:
print "Error processing user %s: %s" % (user._id, e)
| bsd-3-clause | Python |
|
a31ef338ef4029be92b0c578bdd12706a0f1c17d | Move zpool grains into salt.grains.zpool | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/grains/zpool.py | salt/grains/zpool.py | # -*- coding: utf-8 -*-
'''
ZFS grain provider
:maintainer: Jorge Schrauwen <[email protected]>
:maturity: new
:depends: salt.utils, salt.module.cmdmod
:platform: illumos,freebsd,linux
.. versionadded:: Oxygen
'''
from __future__ import absolute_import
# Import python libs
import logging
# Import salt libs
import salt.utils.dictupdate
import salt.utils.path
import salt.utils.platform
# Solve the Chicken and egg problem where grains need to run before any
# of the modules are loaded and are generally available for any usage.
import salt.modules.cmdmod
__virtualname__ = 'zfs'
__salt__ = {
'cmd.run': salt.modules.cmdmod.run,
'cmd.run_all': salt.modules.cmdmod.run_all,
}
log = logging.getLogger(__name__)
def __virtual__():
'''
Figure out if we need to be loaded
'''
# Don't load on windows, NetBSD, or proxy
# NOTE: ZFS on Windows is in development
# NOTE: ZFS on NetBSD is in development
if salt.utils.platform.is_windows() or salt.utils.platform.is_netbsd() or 'proxyminion' in __opts__:
return False
# Don't load if we do not have the zpool command
if not salt.utils.path.which('zpool'):
return False
return True
def _zpool_data(zpool_cmd):
'''
Provide grains about zpools
'''
# collect zpool data
grains = {}
for zpool in __salt__['cmd.run']('{zpool} list -H -o name,size'.format(zpool=zpool_cmd)).splitlines():
if 'zpool' not in grains:
grains['zpool'] = {}
zpool = zpool.split()
grains['zpool'][zpool[0]] = zpool[1]
# return grain data
return grains
def zpool():
'''
Provide grains for zfs/zpool
'''
grains = {}
zpool_cmd = salt.utils.path.which('zpool')
grains = salt.utils.dictupdate.update(grains, _zpool_data(zpool_cmd), merge_lists=True)
return grains
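# Example of the resulting grain (illustrative pool names and sizes):
#     {'zpool': {'rpool': '49.8G', 'tank': '1.81T'}}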
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
| apache-2.0 | Python |
|
bb8a448e5e3f935f5ee4f8da9c78bcb651486c15 | Create ZigZagConversion_002.py | Chasego/cod,cc13ny/algo,cc13ny/Allin,Chasego/codi,Chasego/codirit,Chasego/codi,Chasego/cod,Chasego/cod,Chasego/codirit,Chasego/codirit,cc13ny/Allin,Chasego/codi,cc13ny/algo,cc13ny/algo,Chasego/cod,Chasego/codi,cc13ny/Allin,Chasego/codirit,Chasego/cod,Chasego/codi,cc13ny/Allin,Chasego/codirit,cc13ny/Allin,cc13ny/algo,cc13ny/algo | leetcode/006-ZigZag-Conversion/ZigZagConversion_002.py | leetcode/006-ZigZag-Conversion/ZigZagConversion_002.py | class Solution:
# @param {string} s
# @param {integer} numRows
# @return {string}
def convert(self, s, numRows):
if numRows < 2:
return s
halfsize = numRows - 1
size = 2 * halfsize
res = ''
for i in range(numRows):
j, cnt = i, 1
while j < len(s):
res += s[j]
if j % halfsize == 0:
j += size
else:
j = cnt * size - j
cnt += 1
return res
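# Example: Solution().convert("PAYPALISHIRING", 3) returns "PAHNAPLSIIGYIR".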
| mit | Python |
|
95fcdd4f2f65a330adcb115b7ce6d6084efc6ae8 | Add examples. | tillahoffmann/util | examples/sampling/metropolis.py | examples/sampling/metropolis.py | #!/usr/bin/env python
import numpy as np
from util import MetropolisSampler, log_gaussian
from matplotlib import pyplot as plt
def __main__():
np.random.seed(4)
# Generate parameters
num_dims = 3
mu = np.random.normal(0, 3, num_dims)
cov = np.diag(np.random.gamma(.5, size=num_dims))
# Create a sampler
sampler = MetropolisSampler(lambda x: -log_gaussian(x, mu, cov)[0], cov / num_dims)
# Draw samples
sampler.sample(mu, 1000)
# Show the trace
sampler.trace_plot(values=mu)
plt.show()
if __name__ == '__main__':
__main__()
| mit | Python |
|
415717bddb00ca650bef61a5c6054a7b47575b56 | Implement unit test for break. | iksteen/jaspyx,ztane/jaspyx | jaspyx/tests/visitor/test_break.py | jaspyx/tests/visitor/test_break.py | import ast
from jaspyx.ast_util import ast_store, ast_load
from jaspyx.tests.visitor.v8_helper import V8Helper
class TestBreak(V8Helper):
def test_break(self):
assert self.run(
[
ast.Assign(
[ast_store('i')],
ast.Num(0),
),
ast.While(
ast.Compare(
ast_load('i'),
[ast.Lt()],
[ast.Num(10)]
),
[
ast.Break(),
],
[]
)
],
'i',
int
) == 0
| mit | Python |
|
a1e451ab3525c5a0852782d1990f848b2329cb72 | add sinawb token | AsherYang/ThreeLine,AsherYang/ThreeLine,AsherYang/ThreeLine | server/crawler/sinawb/TokenConstant.py | server/crawler/sinawb/TokenConstant.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Author: AsherYang
Email: [email protected]
Date: 2017/9/22.
Desc: sinaWeibo appkey
@see: http://open.weibo.com/apps/2489615368/info/basic?action=review
"""
domain="https://api.weibo.com/2/"
token=""
appkey = "2489615368"
secret = "dbb84df92e9a9c8f8e10d9985a8038a8" | apache-2.0 | Python |
|
1a9b6c7c58c5960df18335552780c3ca668dea5e | add evaluation script for ihm | YerevaNN/mimic3-benchmarks | evaluation/evalutate_ihm.py | evaluation/evalutate_ihm.py | import sklearn.utils as sk_utils
from mimic3models import metrics
import numpy as np
import pandas as pd
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('prediction', type=str)
parser.add_argument('--test_listfile', type=str, default='../data/in-hospital-mortality/test/listfile.csv')
parser.add_argument('--n_iters', type=int, default=10000)
args = parser.parse_args()
pred_df = pd.read_csv(args.prediction, index_col=False)
test_df = pd.read_csv(args.test_listfile, index_col=False)
df = test_df.merge(pred_df, left_on='stay', right_on='stay', how='left', suffixes=['_l', '_r'])
assert (df['prediction'].isnull().sum() == 0)
assert (df['y_true_l'].equals(df['y_true_r']))
n_samples = df.shape[0]
data = np.zeros((n_samples, 2))
data[:, 0] = np.array(df['prediction'])
data[:, 1] = np.array(df['y_true_l'])
auroc_score = metrics.print_metrics_binary(data[:, 1], data[:, 0], verbose=0)["auroc"]
aucs = []
for i in range(args.n_iters):
cur_data = sk_utils.resample(data, n_samples=len(data))
cur_auc = metrics.print_metrics_binary(cur_data[:, 1], cur_data[:, 0], verbose=0)["auroc"]
aucs += [cur_auc]
print "{} iterations".format(args.n_iters)
print "ROC of AUC = {}".format(auroc_score)
print "mean = {}".format(np.mean(aucs))
print "median = {}".format(np.median(aucs))
print "std = {}".format(np.std(aucs))
print "2.5% percentile = {}".format(np.percentile(aucs, 2.5))
print "97.5% percentile = {}".format(np.percentile(aucs, 97.5))
if __name__ == "__main__":
main()
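# Usage (sketch; predictions.csv is a placeholder for your model's output,
# which must contain 'stay', 'prediction' and 'y_true' columns):
#   python evalutate_ihm.py predictions.csv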
| mit | Python |
|
e3c17a893ef4e0790af05cc238ac9038923b115a | Create docs directory for sphinx integration | NCRA-TIFR/gadpu,NCRA-TIFR/gadpu | docs/__init__.py | docs/__init__.py | #TODO:Create DOCS files for later integration using Sphinx
| mit | Python |
|
c206cfd940dd8ba58edb86f16691bcf50b6e5e30 | Add modgraph.py demo from Michael Hohn <[email protected]> | tkelman/graphviz,tkelman/graphviz,MjAbuz/graphviz,BMJHayward/graphviz,ellson/graphviz,jho1965us/graphviz,kbrock/graphviz,kbrock/graphviz,MjAbuz/graphviz,pixelglow/graphviz,BMJHayward/graphviz,jho1965us/graphviz,jho1965us/graphviz,tkelman/graphviz,ellson/graphviz,tkelman/graphviz,kbrock/graphviz,BMJHayward/graphviz,pixelglow/graphviz,ellson/graphviz,kbrock/graphviz,jho1965us/graphviz,ellson/graphviz,ellson/graphviz,tkelman/graphviz,pixelglow/graphviz,BMJHayward/graphviz,tkelman/graphviz,jho1965us/graphviz,BMJHayward/graphviz,pixelglow/graphviz,kbrock/graphviz,kbrock/graphviz,BMJHayward/graphviz,jho1965us/graphviz,tkelman/graphviz,BMJHayward/graphviz,pixelglow/graphviz,pixelglow/graphviz,BMJHayward/graphviz,jho1965us/graphviz,kbrock/graphviz,MjAbuz/graphviz,MjAbuz/graphviz,BMJHayward/graphviz,pixelglow/graphviz,MjAbuz/graphviz,ellson/graphviz,MjAbuz/graphviz,tkelman/graphviz,ellson/graphviz,jho1965us/graphviz,MjAbuz/graphviz,MjAbuz/graphviz,pixelglow/graphviz,MjAbuz/graphviz,ellson/graphviz,BMJHayward/graphviz,pixelglow/graphviz,ellson/graphviz,MjAbuz/graphviz,jho1965us/graphviz,kbrock/graphviz,kbrock/graphviz,MjAbuz/graphviz,ellson/graphviz,pixelglow/graphviz,kbrock/graphviz,jho1965us/graphviz,pixelglow/graphviz,tkelman/graphviz,kbrock/graphviz,tkelman/graphviz,ellson/graphviz,BMJHayward/graphviz,jho1965us/graphviz,tkelman/graphviz | tclpkg/gv/demo/modgraph.py | tclpkg/gv/demo/modgraph.py | #!/usr/bin/python
# display the kernel module dependencies
# author: Michael Hohn <[email protected]>
# based on: modgraph.tcl by John Ellson <[email protected]>
import sys
# sys.path.append('/usr/lib/graphviz/python')
sys.path.append('/usr/lib64/graphviz/python')
import gv
modules = open("/proc/modules", 'r').readlines()
G = gv.digraph("G")
gv.setv(G, 'rankdir', 'LR')
gv.setv(G, 'nodesep', '0.05')
gv.setv(G, 'node', 'shape', 'box')
gv.setv(G, 'node', 'width', '0')
gv.setv(G, 'node', 'height', '0')
gv.setv(G, 'node', 'margin', '.03')
gv.setv(G, 'node', 'fontsize', '8')
gv.setv(G, 'node', 'fontname', 'helvetica')
gv.setv(G, 'edge', 'arrowsize', '.4')
for rec in modules:
fields = rec.split(' ')
n = gv.node(G, fields[0])
for usedby in fields[3].split(','):
if (usedby != '-') & (usedby != ''):
gv.edge(n, gv.node(G, usedby))
gv.layout(G, 'dot')
# The 'xlib' renderer is provided by graphviz-cairo
gv.render(G, 'xlib')
| epl-1.0 | Python |
|
5c7a4547558e6f6959ae1878f56efef8716456c4 | add script to convert distances into probabilities | hbredin/byzance | scripts/distance2probability.py | scripts/distance2probability.py | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 Hervé BREDIN
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Convert distance to probability
Usage:
distance2probability.py train <distance_matrix> <groundtruth_matrix> <d2p_model>
distance2probability.py apply <distance_matrix> <d2p_model> <probability_matrix>
distance2probability.py (-h | --help)
distance2probability.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
from pyannote.algorithms.stats.llr import LLRIsotonicRegression
import numpy as np
import pickle
def do_train(distance_matrix, groundtruth_matrix, d2p_model):
# load distance matrix
x = np.load(distance_matrix)
# load groundtruth matrix
y = np.load(groundtruth_matrix)
# train isotonic regression
ir = LLRIsotonicRegression()
ir.fit(x, y)
    # save regression (d2p_model is a file path, so open it for writing first)
    with open(d2p_model, 'wb') as f:
        pickle.dump(ir, f)
def do_apply(distance_matrix, d2p_model, probability_matrix):
# load distance matrix
x = np.load(distance_matrix)
    # load regression (d2p_model is a file path, so open it for reading first)
    with open(d2p_model, 'rb') as f:
        ir = pickle.load(f)
# apply isotonic regression
y = ir.apply(x)
# save probability matrix
np.save(probability_matrix, y)
if __name__ == '__main__':
arguments = docopt(__doc__, version='0.1')
print arguments
if arguments['train']:
distance_matrix = arguments['<distance_matrix>']
groundtruth_matrix = arguments['<groundtruth_matrix>']
d2p_model = arguments['<d2p_model>']
do_train(distance_matrix, groundtruth_matrix, d2p_model)
if arguments['apply']:
distance_matrix = arguments['<distance_matrix>']
d2p_model = arguments['<d2p_model>']
probability_matrix = arguments['<probability_matrix>']
do_apply(distance_matrix, d2p_model, probability_matrix)
| mit | Python |
|
2197e16cf20bba5d373f4b7a250b8f1190be8ede | Add focus attribute example. | acrisci/i3ipc-python | examples/focused-windows.py | examples/focused-windows.py | #!/usr/bin/env python3
from argparse import ArgumentParser
import i3ipc
i3 = i3ipc.Connection()
def focused_windows():
tree = i3.get_tree()
workspaces = tree.workspaces()
for workspace in workspaces:
container = workspace
while container:
if not hasattr(container, 'focus') \
or not container.focus:
break
container_id = container.focus[0]
container = container.find_by_id(container_id)
if container:
coname = container.name
wsname = workspace.name
print('WS', wsname +':', coname)
if __name__ == '__main__':
parser = ArgumentParser(description = 'Print the names of the focused window of each workspace.')
parser.parse_args()
focused_windows()
| bsd-3-clause | Python |
|
e7146bbee86ea744d080f18a4f27def9cb26e33e | add corpus_test1.py to see how to parse music21 songs | fretboardfreak/potty_oh,fretboardfreak/potty_oh | experiments/corpus_test1.py | experiments/corpus_test1.py | #!/usr/bin/env python3
# Copyright 2016 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An experiment to help determine the best way to use music21 objects.
The music21 libaries have a lot of purposes beyond what I need so for now I
think all I need is to know how to access the note pitches and their positions
and durations within the work. From those three bits of info I can then
construct a waveform representing that music given a tempo to define the length
of a quarter note.
"""
import numpy
from music21 import corpus
from potty_oh.common import get_cmd_line_parser
from potty_oh.common import call_main
def main():
parser = get_cmd_line_parser(description=__doc__)
parser.parse_args()
work_path = numpy.random.choice(corpus.getComposer('bach'))
work = corpus.parse(work_path)
for note in work.flat.notes:
print('{} [{}]: {} {}'.format(note.offset, note.duration.quarterLength,
note.pitch, note.frequency))
return 0
if __name__ == "__main__":
call_main(main)
| apache-2.0 | Python |
|
6ccdf23c67af632a46017d63b5f51d2c207be0ab | Add file | SuLab/scheduled-bots,SuLab/scheduled-bots,SuLab/scheduled-bots | scheduled_bots/scripts/merge_duplicate_gene_proteins.py | scheduled_bots/scripts/merge_duplicate_gene_proteins.py | from tqdm import tqdm
from wikidataintegrator.wdi_core import WDItemEngine, MergeError
from wikidataintegrator.wdi_login import WDLogin
from scheduled_bots.local import WDUSER, WDPASS
login = WDLogin(WDUSER, WDPASS)
s_protein = """
SELECT DISTINCT ?item1 ?item2 ?value {{
?item1 wdt:P352 ?value .
?item2 wdt:P352 ?value .
?item1 wdt:P31|wdt:P279 wd:Q8054 .
?item2 wdt:P31|wdt:P279 wd:Q8054 .
FILTER NOT EXISTS {{?item1 wdt:P703 wd:Q15978631}}
FILTER( ?item1 != ?item2 && STR( ?item1 ) < STR( ?item2 ) ) .
}}"""
s_gene = """
SELECT DISTINCT ?item1 ?item2 ?value {{
?item1 wdt:P351 ?value .
?item2 wdt:P351 ?value .
?item1 wdt:P703 ?taxon1 .
?item2 wdt:P703 ?taxon2 .
FILTER( ?item1 != ?item2 && STR( ?item1 ) < STR( ?item2 ) && ?taxon1 = ?taxon2) .
FILTER NOT EXISTS {{?item1 wdt:P703 wd:Q15978631}}
}}"""
s = s_gene
items = [{k: v['value'].split("/")[-1] for k, v in x.items()} for x in
WDItemEngine.execute_sparql_query(s)['results']['bindings']]
for x in tqdm(items):
try:
WDItemEngine.merge_items(from_id=x['item2'], to_id=x['item1'], login_obj=login, ignore_conflicts='statement|description|sitelink')
except MergeError as e:
print(e)
pass
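# Note: ignore_conflicts mirrors the wbmergeitems API's ignoreconflicts
# parameter, letting the merge proceed despite clashing statements,
# descriptions or sitelinks.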
| mit | Python |
|
495da73f305a2a0e79a28d251b5b93caea06656d | Add UglifyJS as a filter. | Carlangueitor/django-mediagenerator,adieu/django-mediagenerator,Carlangueitor/django-mediagenerator,adieu/django-mediagenerator | mediagenerator/filters/uglifier.py | mediagenerator/filters/uglifier.py | from django.conf import settings
from django.utils.encoding import smart_str
from mediagenerator.generators.bundles.base import Filter
class Uglifier(Filter):
def __init__(self, **kwargs):
super(Uglifier, self).__init__(**kwargs)
assert self.filetype == 'js', (
'Uglifier only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
def get_output(self, variation):
# We import this here, so App Engine Helper users don't get import
# errors.
from subprocess import Popen, PIPE
for input in self.get_input(variation):
args = ['uglifyjs']
try:
args = args + settings.UGLIFIER_OPTIONS
except AttributeError:
pass
try:
cmd = Popen(args,
stdin=PIPE, stdout=PIPE, stderr=PIPE,
universal_newlines=True)
output, error = cmd.communicate(smart_str(input))
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
yield output.decode('utf-8')
except Exception, e:
raise ValueError("Failed to run UglifyJs. "
"Please make sure you have Node.js and UglifyJS installed "
"and that it's in your PATH.\n"
"Error was: %s" % e)
| bsd-3-clause | Python |
|
660a3c5f3f8a4c63c21c27ce58c5639d37409ae1 | add thing to filter tracts | kyleconroy/bitewise,kyleconroy/bitewise,kyleconroy/bitewise | filter_tracts.py | filter_tracts.py | import json
calif_tracts_data = open('tracts.json')
calif_tracts = json.load(calif_tracts_data)
sf_tracts = []
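# County FIPS code "075" below identifies San Francisco County.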
for r in calif_tracts["features"]:
if r["properties"]["COUNTY"] == "075":
sf_tracts.append(r)
calif_tracts_data.close()
print json.dumps({"type": "FeatureCollection", "features": sf_tracts}) | mit | Python |
|
68a720ab539c6ba94fdf181328f27be453a9097f | Add examine_latent.py script | KelvinLu/krotos-convnet | examine_latent.py | examine_latent.py | # Examine the Million Song Dataset. Running this script will visualize each Echo
# Nest Taste Profile songs' latent features with t-SNE.
import os, subprocess
import matplotlib.pyplot as plt
from matplotlib.text import Annotation
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import TSNE
from sklearn.cluster import MiniBatchKMeans, AgglomerativeClustering
from krotos.msd.latent.features import LatentFeatures
from krotos.paths import ROOT_PATH
from krotos.debug import report, report_newline
report("Get LatentFeatures instance...")
lf = LatentFeatures()
report("Scaling latent features")
latents = lf.Y
# latents = StandardScaler().fit_transform(latents)
samples = 5000
report("Selecting {0} random samples...".format(samples))
sample_idxs = np.random.choice(latents.shape[0], size=samples, replace=False)
latents = latents[sample_idxs, :]
report("Performing t-SNE embedding...")
model = TSNE(n_components=2, method='barnes_hut')
embedding = model.fit_transform(latents)
report("Performing clustering...")
n_clusters = 10
db = AgglomerativeClustering(
n_clusters=n_clusters,
linkage='average',
affinity='cosine'
).fit(latents)
cluster_labels = db.labels_
cmap = plt.get_cmap('jet', n_clusters)
report("Getting norms...")
norms = np.linalg.norm(latents, axis=1)
# min_max_norms = (norms - np.min(norms)) / (np.max(norms) - np.min(norms))
report("Retrieving song labels...")
song_labels = {}
echonest = lf._echonest
unique_tracks_path = os.path.join(ROOT_PATH, 'msd/resources/unique_tracks.txt')
if not os.path.exists(unique_tracks_path): raise Exception("unique_tracks.txt not found.")
with open(unique_tracks_path, 'r') as unique_tracks:
i = 0
for line in unique_tracks:
_, song_id, artist, track = line.strip().split("<SEP>")
song_labels[song_id] = (artist + ' - ' + track)
i += 1
report("{0:7d} song labels...".format(i), sameline=True)
report_newline()
sid_mismatches_path = os.path.join(ROOT_PATH, 'msd/resources/sid_mismatches.txt')
if not os.path.exists(sid_mismatches_path): raise Exception("sid_mismatches.txt not found.")
with open(sid_mismatches_path, 'r') as sid_mismatches:
i = 0
for line in sid_mismatches:
song_labels[line[8:26]] = "<bad data: mismatched song>"
i += 1
report("{0:5d} erroneous song labels noted...".format(i), sameline=True)
report_newline()
report("Plotting...")
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(embedding[:, 0], embedding[:, 1], c=cluster_labels, cmap=cmap, s=100, alpha=0.5, linewidths=0.0, picker=True)
last_annotation = None
last_latent = None
last_ind = None
def report_top(idx, n):
report("\tGlobal closest songs:")
closest = sorted(zip(*lf.closest(lf.Y[idx], n=n)), key=lambda x: x[1], reverse=True)
for echonest_id, score in closest:
report("\t\t{0:7.5}: {1}".format(score, song_labels[echonest_id]))
def onpick(event):
global last_annotation
global last_latent
global last_ind
ind = event.ind[0]
idx = sample_idxs[ind]
x, y = embedding[ind]
track_id_echonest = echonest.get_track_id(idx)
label = unicode(song_labels[track_id_echonest], errors='ignore')
if last_ind == ind:
report_top(idx, 5)
return
last_ind = ind
if last_latent is not None:
s = np.dot(last_latent, latents[ind]) / (np.linalg.norm(last_latent) * np.linalg.norm(latents[ind]))
d = np.linalg.norm(last_latent - latents[ind])
report('Cosine similarity: {0: 7.5f}'.format(s))
report('L2 distance: {0: .5f}'.format(d))
last_latent = latents[ind]
report('Selected point at (x={1: 7.3f}, y={2: 7.3f}, norm={3: 7.3f}): \t{0}.'.format(label, x, y, norms[ind]))
if last_annotation is not None: last_annotation.remove()
last_annotation = Annotation(
label,
xy=(x, y),
xytext=(-20, 20),
textcoords='offset points',
ha='right',
va='bottom',
bbox=dict(boxstyle = 'round,pad=0.5', fc='white', alpha=0.6),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0', alpha=0.4)
)
ax.add_artist(last_annotation)
fig.canvas.draw()
fig.canvas.mpl_connect('pick_event', onpick)
report("Displaying plot")
plt.show()
report_newline()
| mit | Python |
|
e687dce8c8441728f1af6336497f7a131730db4f | Add untracked campaigns.py | mattclark/osf.io,brandonPurvis/osf.io,DanielSBrown/osf.io,adlius/osf.io,samchrisinger/osf.io,jnayak1/osf.io,ticklemepierce/osf.io,GageGaskins/osf.io,felliott/osf.io,abought/osf.io,HalcyonChimera/osf.io,ticklemepierce/osf.io,monikagrabowska/osf.io,acshi/osf.io,cslzchen/osf.io,mfraezz/osf.io,aaxelb/osf.io,cwisecarver/osf.io,mluo613/osf.io,monikagrabowska/osf.io,zamattiac/osf.io,KAsante95/osf.io,Johnetordoff/osf.io,Ghalko/osf.io,cwisecarver/osf.io,laurenrevere/osf.io,SSJohns/osf.io,mfraezz/osf.io,amyshi188/osf.io,samchrisinger/osf.io,erinspace/osf.io,rdhyee/osf.io,KAsante95/osf.io,TomBaxter/osf.io,monikagrabowska/osf.io,KAsante95/osf.io,rdhyee/osf.io,SSJohns/osf.io,RomanZWang/osf.io,HalcyonChimera/osf.io,zachjanicki/osf.io,brianjgeiger/osf.io,RomanZWang/osf.io,laurenrevere/osf.io,abought/osf.io,rdhyee/osf.io,mluke93/osf.io,wearpants/osf.io,kwierman/osf.io,mluke93/osf.io,Ghalko/osf.io,zamattiac/osf.io,RomanZWang/osf.io,mattclark/osf.io,adlius/osf.io,acshi/osf.io,billyhunt/osf.io,sloria/osf.io,felliott/osf.io,amyshi188/osf.io,caseyrollins/osf.io,caneruguz/osf.io,cslzchen/osf.io,leb2dg/osf.io,brianjgeiger/osf.io,asanfilippo7/osf.io,felliott/osf.io,leb2dg/osf.io,samanehsan/osf.io,alexschiller/osf.io,mluke93/osf.io,Nesiehr/osf.io,billyhunt/osf.io,SSJohns/osf.io,Nesiehr/osf.io,sloria/osf.io,saradbowman/osf.io,jnayak1/osf.io,brandonPurvis/osf.io,samanehsan/osf.io,caseyrollins/osf.io,wearpants/osf.io,Ghalko/osf.io,erinspace/osf.io,zamattiac/osf.io,rdhyee/osf.io,doublebits/osf.io,asanfilippo7/osf.io,abought/osf.io,wearpants/osf.io,cwisecarver/osf.io,alexschiller/osf.io,crcresearch/osf.io,zachjanicki/osf.io,felliott/osf.io,hmoco/osf.io,TomHeatwole/osf.io,adlius/osf.io,samchrisinger/osf.io,ticklemepierce/osf.io,Johnetordoff/osf.io,zachjanicki/osf.io,mattclark/osf.io,zachjanicki/osf.io,erinspace/osf.io,GageGaskins/osf.io,pattisdr/osf.io,alexschiller/osf.io,Johnetordoff/osf.io,DanielSBrown/osf.io,adlius/osf.io,baylee-d/osf.io,amyshi188/osf.io,mluke93/osf.io,alexschiller/osf.io,samchrisinger/osf.io,jnayak1/osf.io,monikagrabowska/osf.io,jnayak1/osf.io,HalcyonChimera/osf.io,billyhunt/osf.io,caseyrollins/osf.io,abought/osf.io,KAsante95/osf.io,kch8qx/osf.io,icereval/osf.io,doublebits/osf.io,monikagrabowska/osf.io,brandonPurvis/osf.io,saradbowman/osf.io,hmoco/osf.io,billyhunt/osf.io,HalcyonChimera/osf.io,chrisseto/osf.io,CenterForOpenScience/osf.io,wearpants/osf.io,hmoco/osf.io,danielneis/osf.io,caneruguz/osf.io,GageGaskins/osf.io,aaxelb/osf.io,amyshi188/osf.io,emetsger/osf.io,pattisdr/osf.io,brianjgeiger/osf.io,RomanZWang/osf.io,danielneis/osf.io,RomanZWang/osf.io,mfraezz/osf.io,caneruguz/osf.io,doublebits/osf.io,crcresearch/osf.io,CenterForOpenScience/osf.io,ticklemepierce/osf.io,TomBaxter/osf.io,mluo613/osf.io,doublebits/osf.io,asanfilippo7/osf.io,SSJohns/osf.io,sloria/osf.io,mfraezz/osf.io,alexschiller/osf.io,mluo613/osf.io,chrisseto/osf.io,kch8qx/osf.io,aaxelb/osf.io,aaxelb/osf.io,acshi/osf.io,chrisseto/osf.io,chennan47/osf.io,brandonPurvis/osf.io,binoculars/osf.io,Johnetordoff/osf.io,icereval/osf.io,acshi/osf.io,kch8qx/osf.io,GageGaskins/osf.io,GageGaskins/osf.io,cslzchen/osf.io,binoculars/osf.io,asanfilippo7/osf.io,Ghalko/osf.io,hmoco/osf.io,chennan47/osf.io,kwierman/osf.io,brandonPurvis/osf.io,kch8qx/osf.io,pattisdr/osf.io,KAsante95/osf.io,cwisecarver/osf.io,baylee-d/osf.io,DanielSBrown/osf.io,kch8qx/osf.io,brianjgeiger/osf.io,billyhunt/osf.io,kwierman/osf.io,zamattiac/osf.io,acshi/osf.io,TomBaxter/osf.io,baylee-d/osf.io,TomHeatwole/
osf.io,Nesiehr/osf.io,emetsger/osf.io,laurenrevere/osf.io,leb2dg/osf.io,doublebits/osf.io,icereval/osf.io,leb2dg/osf.io,crcresearch/osf.io,samanehsan/osf.io,cslzchen/osf.io,caneruguz/osf.io,CenterForOpenScience/osf.io,danielneis/osf.io,danielneis/osf.io,emetsger/osf.io,chennan47/osf.io,CenterForOpenScience/osf.io,Nesiehr/osf.io,emetsger/osf.io,DanielSBrown/osf.io,mluo613/osf.io,TomHeatwole/osf.io,samanehsan/osf.io,kwierman/osf.io,mluo613/osf.io,binoculars/osf.io,chrisseto/osf.io,TomHeatwole/osf.io | framework/auth/campaigns.py | framework/auth/campaigns.py | import httplib as http
from framework.exceptions import HTTPError
from website import mails
VALID_CAMPAIGNS = (
'prereg',
)
EMAIL_TEMPLATE_MAP = {
'prereg': mails.CONFIRM_EMAIL_PREREG
}
def email_template_for_campaign(campaign, default=None):
if campaign in VALID_CAMPAIGNS:
try:
return EMAIL_TEMPLATE_MAP[campaign]
except KeyError as e:
if default:
return default
else:
raise e
def campaign_for_user(user):
campaigns = [tag for tag in user.system_tags if tag in VALID_CAMPAIGNS]
if campaigns:
return campaigns[0]
def campaign_url_for(campaign):
# Defined inside this function to ensure a request context
REDIRECT_MAP = {
'prereg': '/prereg/'
}
if campaign not in VALID_CAMPAIGNS:
raise HTTPError(http.BAD_REQUEST)
else:
try:
return REDIRECT_MAP[campaign]
except KeyError:
raise HTTPError(http.NOT_FOUND)
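# Example (derived from the mappings above): campaign_url_for('prereg')
# returns '/prereg/'; an unknown campaign raises HTTPError(400).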
| apache-2.0 | Python |
|
96ed06f1f3dab3aa9d0f8150c41a5c1b943a86b0 | Add test for config module | StrellaGroup/frappe,frappe/frappe,StrellaGroup/frappe,frappe/frappe,StrellaGroup/frappe,frappe/frappe | frappe/tests/test_config.py | frappe/tests/test_config.py | # Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import unittest
import frappe
from frappe.config import get_modules_from_all_apps_for_user
class TestConfig(unittest.TestCase):
def test_get_modules(self):
frappe_modules = frappe.get_all("Module Def", filters={"app_name": "frappe"}, pluck="name")
all_modules_data = get_modules_from_all_apps_for_user()
first_module_entry = all_modules_data[0]
all_modules = [x["module_name"] for x in all_modules_data]
self.assertIn("links", first_module_entry)
self.assertIsInstance(all_modules_data, list)
self.assertFalse([x for x in frappe_modules if x not in all_modules])
| mit | Python |
|
d7bfed84d773e7ccbd23e910a533f70b4dd02184 | Add module entrypoint | thismachinechills/grub2systemd | g2sd/__main__.py | g2sd/__main__.py | from .g2sd import cmd
if __name__ == "__main__":
cmd()
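# Enables `python -m g2sd` to invoke the CLI entry point.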
| agpl-3.0 | Python |
|
7421cecfd6b304692eb19d76d3f90a61a950bc83 | add get_reviewers | RRSCDS/douban-mining | get_reviewers.py | get_reviewers.py |
import sys
import urllib2
import time
from lxml import html
def get_reviewers(bookid, star=1):
allstar10_list = []
for tag in ['collections', 'doings', 'wishes']:
reached_end = False
i = 0
while not reached_end:
print "start %d" % i
page_url = "http://book.douban.com/subject/%s/%s?start=%d" % (bookid, tag, i)
response = urllib2.urlopen(page_url)
page_html = response.read()
tree = html.fromstring(page_html)
reviews_element_list = tree.xpath('//*[@id="' + tag + '_tab"]//table')
if len(reviews_element_list) < 20:
reached_end = True
reviewer_list = tree.xpath('//*[@id="' + tag + '_tab"]//table/tr/td/div[@class="pl2"]/a')
reviewers = [ el.attrib['href'] for el in reviewer_list ]
review_list = tree.xpath('//*[@id="' + tag + '_tab"]//table/tr/td/p[@class="pl"]/span[last()]')
reviews = [ el.attrib['class'] for el in review_list ]
review_stars = "allstar%d0" % star
allstar10_list.extend([reviewer for (reviewer,review) in zip(reviewers, reviews) if review == review_stars])
i += 20
time.sleep(1)
return allstar10_list
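# Example call (sketch; the book id is a placeholder):
#   get_reviewers('1084336', star=5) -> reviewer profile URLs for 5-star ratings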
if __name__ == "__main__":
bookid = sys.argv[1]
allstar10_list = get_reviewers( bookid )
for i in allstar10_list:
print i
| mit | Python |
|
258a8d38d590f856e144b1e725fe38619c6758ea | Create notes_extractor.py | agancsos/python,agancsos/python | notes_extractor/notes_extractor.py | notes_extractor/notes_extractor.py | #!/usr/bin/env python3
###############################################################################
# Name : extract_notes.py #
# Version : v. 1.0.0.0 #
# Author : Abel Gancsos #
# Description : Helps extract data about Apple Notes. #
###############################################################################
import os, sys, sqlite3;
class INNote:
identifier=None;name=None;
def __init__(self, row=None):
if row != None:
self.identifier = row[0];
self.name = row[1];
pass;
class NotesExtractor:
notes_path=None;connection=None;cursor=None;
def __init__(self, params=dict()):
self.notes_path = params["-p"] if "-p" in params.keys() else "{0}/Library/Group Containers/group.com.apple.notes/NoteStore.sqlite".format(os.environ['HOME']);
assert os.path.exists(self.notes_path), "Notes cache must exist...";
self.connection = sqlite3.connect(self.notes_path);
self.cursor = self.connection.cursor();
def ensure_close(self):
self.connection.commit();
self.connection.close();
def search(self, keyword=""):
notes = list();
self.cursor.execute("SELECT ZIDENTIFIER, ZTITLE1 FROM ZICCLOUDSYNCINGOBJECT WHERE ZTITLE1 LIKE '%{0}%'".format(keyword));
rows = self.cursor.fetchall();
for row in rows: notes.append(INNote(row));
return notes;
pass;
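# Usage (sketch): python3 notes_extractor.py -n "meeting"
# prints matching note titles alongside their notes:// deep links.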
if __name__ == "__main__":
params = dict();
for i in range(0, len(sys.argv) - 1): params[sys.argv[i]] = sys.argv[i + 1];
session = NotesExtractor(params);
notes = session.search(params["-n"] if "-n" in params.keys() else "");
for note in notes: print("{1}\t\tnotes://showNote?identifier={0}".format(note.identifier, note.name));
session.ensure_close();
| mit | Python |
|
c0d135fc40142561e4a2409e47b34c367a6a7ef4 | add script to read device logs from rms dump | dimagi/commcare,dimagi/commcare,dimagi/commcare-core,dimagi/commcare-core,dimagi/commcare,dimagi/commcare-core | util/scripts/devicelogs.py | util/scripts/devicelogs.py | from rmsdump import *
def read_log_entry (log_entry):
return tuple(log_entry.val[i].val for i in range(0, 3))
def print_log (log_atom):
print '%s> %s: %s' % (log_atom[0].strftime('%Y-%m-%d %H:%M:%S'), log_atom[1], log_atom[2])
if __name__ == "__main__":
data = sys.stdin.read()
stream = DataStream(data)
(rmses, num_rms, err) = extract_rms(stream)
log_rmses = [rms for rms in rmses if rms['name'].startswith('LOG_') and rms['name'] != 'LOG_IX']
log_entries = []
for log_rms in log_rmses:
log_entries.extend([rec['content'][1] for rec in log_rms['records']])
log_digest = [read_log_entry(le) for le in log_entries]
for la in sorted(log_digest, key=lambda la: la[0]):
print_log(la)
| apache-2.0 | Python |
|
f1ccab2168dea1b0827f4ca929f0036e84170a76 | Add tests for cross domain xhr view | praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go | go/base/tests/test_views.py | go/base/tests/test_views.py | """Test for go.base.utils."""
from mock import patch, Mock
from django.core.urlresolvers import reverse
from go.base.tests.utils import VumiGoDjangoTestCase
class BaseViewsTestCase(VumiGoDjangoTestCase):
def cross_domain_xhr(self, url):
return self.client.post(reverse('cross_domain_xhr'), {'url': url})
@patch('requests.get')
def test_cross_domain_xhr(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr('http://domain.com')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('http://domain.com',))
self.assertEqual(kwargs, {'auth': None})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
@patch('requests.get')
def test_basic_auth_cross_domain_xhr(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr('http://username:[email protected]')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('http://domain.com',))
self.assertEqual(kwargs, {'auth': ('username', 'password')})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
@patch('requests.get')
def test_basic_auth_cross_domain_xhr_with_https_and_port(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr(
'https://username:[email protected]:443/foo')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('https://domain.com:443/foo',))
self.assertEqual(kwargs, {'auth': ('username', 'password')})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
| bsd-3-clause | Python |
|
b67cc70a6cf04e605ad93933dd9d8a88db94f093 | add a simple flask app | gsathya/flow,gsathya/flow | backend/app.py | backend/app.py | from flask import Flask
import db
@app.route("/")
def hello():
return "Hello World!"
if __name__ == "__main__":
#db.process_db()
app = Flask(__name__)
app.run(debug=True)
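# Run directly (sketch): `python app.py`, then browse to http://127.0.0.1:5000/.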
| mit | Python |
|
ce894232e3d8b00be3520f9f8d34ceb706b8dd75 | Create removelast2pagesofpdf.py | vicyangworld/AutoOfficer | RemoverLast2PagesOfPDF/removelast2pagesofpdf.py | RemoverLast2PagesOfPDF/removelast2pagesofpdf.py | import PyPDF2, os
import sys
import CmdFormat
import shutil
CDMF = CmdFormat.CmdFormat("PDF分离器")
class PDFMerger(object):
"""docstring for PDFMerger"""
def __init__(self,ROOTPATH):
self.__ROOTPATH = ROOTPATH+"\\"
self.__countriesCount = 0
self.__currentCoutry=""
self.bRegenerate = True
def __messages(self):
CDMF.set_cmd_color(CmdFormat.FOREGROUND_RED | CmdFormat.FOREGROUND_GREEN | \
CmdFormat.FOREGROUND_BLUE | CmdFormat.FOREGROUND_INTENSITY)
print("\n")
print("========================== 欢迎使用 ==================================")
def __quiry(self,mes):
while True:
content = CDMF.print_green_input_text(mes)
if content=="y" or content=="Y" or content=="n" or content=="N":
break
if content=="y" or content=="Y":
return True
else:
return False
def Run(self):
self.__messages()
allFiles = os.listdir(self.__ROOTPATH)
CDMF.print_blue_text("扫描待统计村民资料...,")
nNumFile = 0;
nNumNoContent = 0;
for fileOrDir in allFiles:
if fileOrDir.startswith(('1','2','3','4','5','6','7','8','9','0')) and fileOrDir.endswith('.pdf'):
nNumFile = nNumFile + 1
CDMF.print_blue_text("扫描完毕!共有 "+str(nNumFile) + " 户的资料,",end='')
CDMF.print_blue_text("需要统计的有 "+str(nNumFile) + " 户.")
#多个村
bdeleteOrg = self.__quiry("是否删掉原文件(请输入y或n):")
index = 1
for file in allFiles:
filefull = os.path.join(self.__ROOTPATH,file)
if not os.path.isdir(filefull):
if filefull.endswith('.pdf'): #找到以.pdf结尾的文件
(filepath,tempfilename) = os.path.split(filefull)
(filename,extension) = os.path.splitext(tempfilename)
if filename.startswith(('1','2','3','4','5','6','7','8','9','0')):
                        pdfWriter = PyPDF2.PdfFileWriter() # create an empty output PDF
                        inPDFfile = open(filefull,'rb')
                        pdfReader = PyPDF2.PdfFileReader(inPDFfile) # open each source PDF read-only
                        for pageNum in range(pdfReader.numPages):
                            if pageNum<pdfReader.numPages-2:
                                pdfWriter.addPage(pdfReader.getPage(pageNum)) # copy every page except the last two
                        outPdfName = self.__ROOTPATH+'\\'+'Res_'+filename+'.pdf'
                        pdfOutput = open(outPdfName,'wb')
                        pdfWriter.write(pdfOutput) # write the copied pages into the output PDF
                        pdfOutput.close()
                        inPDFfile.close()
                        outPdfName="" # reset outPdfName
                        CDMF.print_yellow_text(str(index)+'/'+str(nNumFile)+' ---> '+file+" done!")
index += 1
if bdeleteOrg:
os.remove(filefull)
if __name__ == '__main__':
ROOTPATH = os.getcwd()
Job = PDFMerger(ROOTPATH)
Job.Run()
CDMF.print_yellow_text("任务完成!")
quit = input("按任意键退出...")
| mit | Python |
|
2b25b9ba1c9417e3e25a91055a65551210eb5313 | Add meal migrations | teamtaverna/core | app/timetables/migrations/0002_meal.py | app/timetables/migrations/0002_meal.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-16 17:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timetables', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Meal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=60, unique=True)),
('start_time', models.TimeField()),
('end_time', models.TimeField()),
],
),
]
| mit | Python |
|
835d9628513a80215641bc4c63eae1fae7b8442b | rewrite portforwarding api | cboling/xos,cboling/xos,cboling/xos,cboling/xos,cboling/xos | xos/api/utility/portforwarding.py | xos/api/utility/portforwarding.py | from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import serializers
from rest_framework import generics
from rest_framework.views import APIView
from core.models import *
from django.forms import widgets
from django.core.exceptions import PermissionDenied
from xos.exceptions import XOSNotFound
from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
from django.db.models import Q
class PortForwarding(Port):
class Meta:
proxy = True
app_label = "core"
def __init__(self, *args, **kwargs):
super(PortForwarding, self).__init__(*args, **kwargs)
class PortForwardingSerializer(serializers.Serializer):
id = serializers.CharField(read_only=True)
ip = serializers.CharField(read_only=True)
ports = serializers.CharField(read_only=True, source="network.ports")
hostname = serializers.CharField(read_only=True, source="instance.node.name")
class Meta:
model = PortForwarding
fields = ('id', 'ip', 'ports', 'hostname')
class PortForwardingViewSet(XOSViewSet):
base_name = "list"
method_name = "portforwarding"
method_kind = "viewset"
serializer_class = PortForwardingSerializer
def get_queryset(self):
        queryset = Port.objects.exclude(Q(network__isnull=True) |
                                        Q(instance__isnull=True) |
                                        Q(instance__node__isnull=True) |
                                        Q(network__ports__exact='') |
                                        Q(ip__isnull=True) | Q(ip__exact=''))
node_name = self.request.query_params.get('node_name', None)
if node_name is not None:
queryset = queryset.filter(instance__node__name = node_name)
return queryset
| apache-2.0 | Python |
|
f1826b2cf4c4103efe52713a57dc2fcabda1a45d | fix migration for real | anushbmx/kitsune,mythmon/kitsune,NewPresident1/kitsune,MikkCZ/kitsune,silentbob73/kitsune,anushbmx/kitsune,Osmose/kitsune,YOTOV-LIMITED/kitsune,MikkCZ/kitsune,H1ghT0p/kitsune,silentbob73/kitsune,brittanystoroz/kitsune,brittanystoroz/kitsune,YOTOV-LIMITED/kitsune,safwanrahman/kitsune,H1ghT0p/kitsune,feer56/Kitsune2,mythmon/kitsune,safwanrahman/kitsune,feer56/Kitsune2,safwanrahman/kitsune,NewPresident1/kitsune,NewPresident1/kitsune,anushbmx/kitsune,Osmose/kitsune,feer56/Kitsune2,silentbob73/kitsune,mythmon/kitsune,YOTOV-LIMITED/kitsune,feer56/Kitsune2,anushbmx/kitsune,brittanystoroz/kitsune,mozilla/kitsune,mozilla/kitsune,mythmon/kitsune,NewPresident1/kitsune,H1ghT0p/kitsune,H1ghT0p/kitsune,Osmose/kitsune,safwanrahman/kitsune,YOTOV-LIMITED/kitsune,mozilla/kitsune,MikkCZ/kitsune,silentbob73/kitsune,brittanystoroz/kitsune,Osmose/kitsune,MikkCZ/kitsune,mozilla/kitsune | kitsune/questions/migrations/0006_ios_questionlocale.py | kitsune/questions/migrations/0006_ios_questionlocale.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def create_questionlocale(apps, schema_editor):
Product = apps.get_model('products', 'Product')
QuestionLocale = apps.get_model('questions', 'QuestionLocale')
p, created = Product.objects.get_or_create(slug='ios', defaults={
'title': 'Firefox for iOS',
'description': 'Firefox for iPhone, iPad and iPod touch devices',
'display_order': 0,
'visible': False})
ql, created = QuestionLocale.objects.get_or_create(locale='en-US')
ql.products.add(p)
class Migration(migrations.Migration):
dependencies = [
('questions', '0005_change_locale_sr_Cyrl_to_sr'),
('products', '0001_initial'),
]
operations = [
migrations.RunPython(create_questionlocale),
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def create_questionlocale(apps, schema_editor):
Product = apps.get_model('products', 'Product')
QuestionLocale = apps.get_model('questions', 'QuestionLocale')
p = Product.objects.get_or_create(slug='ios', defaults={
'title': 'Firefox for iOS',
'description': 'Firefox for iPhone, iPad and iPod touch devices',
'display_order': 0,
'visible': False})
QuestionLocale.objects.get_or_create(locale='en-US', product=p)
class Migration(migrations.Migration):
dependencies = [
('questions', '0005_change_locale_sr_Cyrl_to_sr'),
('products', '0001_initial'),
]
operations = [
migrations.RunPython(create_questionlocale),
]
| bsd-3-clause | Python |
4178691ed3826239721f2d9a6435ef90cfb5cf82 | Add color to input | wzyuliyang/initpy,janusnic/initpy,Parkayun/initpy | flask_init/run.py | flask_init/run.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import os
import six
import templates
from .creator import Creator
from .exceptions import InvalidFolderName
def color_input(color, text):
return six.moves.input(color+text+'\033[0m')
def color_print(color, text):
six.print_(color+text+'\033[0m')
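# ANSI SGR color codes used below: 31 red, 34 blue, 35 magenta; '\033[0m' resets.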
def main():
name = color_input("\033[35m", "> Project name [flask_proj]: ")
name = name or 'flask_proj'
module = color_input("\033[35m", "> Module name [common]: ")
module = module or 'common'
creator = Creator(os.getcwd())
try:
creator.create_folder(creator.root_path, name)
proj_path = os.path.join(creator.root_path, name)
except InvalidFolderName:
six.print_("\nInvalid Project Name, use another name!")
else:
creator.create_file(proj_path, "manage.py", templates.manager)
creator.create_folder(proj_path, "requirements")
creator.create_file(os.path.join(proj_path, "requirements"), "dev.txt",
templates.requirements)
app_init = templates.app_init.substitute(module=module)
creator.create_module(proj_path, "app", app_init)
app_path = os.path.join(proj_path, "app")
creator.create_folder(app_path, "templates")
template_path = os.path.join(app_path, "templates")
creator.create_file(template_path, "base.html", templates.base_html)
creator.create_folder(template_path, module)
creator.create_file(os.path.join(template_path, module),
"index.html", templates.module_html)
module_init = templates.module_init.substitute(module=module)
creator.create_folder(app_path, "static")
creator.create_module(app_path, module, module_init)
module_view = templates.module_views.substitute(module=module)
module_path = os.path.join(app_path, module)
creator.create_file(module_path, "views.py", module_view)
creator.create_file(module_path, "models.py", templates.blank)
color_print("\033[31m", "\n".join(creator.errors))
color_print("\033[34m", "Complete!")
six.print_("You can install package using ", end="")
color_print("\033[34m", "pip install -r requirements/dev.txt")
six.print_("You can run using ", end="")
color_print("\033[34m", "python manage.py run")
if __name__ == '__main__':
main()
| #!/usr/bin/python
# -*- coding:utf-8 -*-
import os
import six
import templates
from .creator import Creator
from .exceptions import InvalidFolderName
def main():
name = six.moves.input('Input project name (default is "flask_proj"): ')
name = name or 'flask_proj'
module = six.moves.input('Input module name (default is "common"): ')
module = module or 'common'
creator = Creator(os.getcwd())
try:
creator.create_folder(creator.root_path, name)
proj_path = os.path.join(creator.root_path, name)
except InvalidFolderName:
six.print_("\nInvalid Project Name, use another name!")
else:
creator.create_file(proj_path, "manage.py", templates.manager)
creator.create_folder(proj_path, "requirements")
creator.create_file(os.path.join(proj_path, "requirements"), "dev.txt",
templates.requirements)
app_init = templates.app_init.substitute(module=module)
creator.create_module(proj_path, "app", app_init)
app_path = os.path.join(proj_path, "app")
creator.create_folder(app_path, "templates")
template_path = os.path.join(app_path, "templates")
creator.create_file(template_path, "base.html", templates.base_html)
creator.create_folder(template_path, module)
creator.create_file(os.path.join(template_path, module),
"index.html", templates.module_html)
module_init = templates.module_init.substitute(module=module)
creator.create_folder(app_path, "static")
creator.create_module(app_path, module, module_init)
module_view = templates.module_views.substitute(module=module)
module_path = os.path.join(app_path, module)
creator.create_file(module_path, "views.py", module_view)
creator.create_file(module_path, "models.py", templates.blank)
six.print_("\n".join(creator.errors))
six.print_("You can install package "
"\"pip install -r requirements/dev.txt\"")
six.print_("You can run \"python manage.py run\"")
if __name__ == '__main__':
main()
| mit | Python |
48933f27c098b05276271a62ed3c970e4d5721b0 | add missing file | radical-cybertools/radical.repex | src/radical/repex/utils.py | src/radical/repex/utils.py |
import radical.utils as ru
# ------------------------------------------------------------------------------
#
def expand_ln(to_link, src_sbox, tgt_sbox, rid, cycle):
expand = {'rid' : rid,
'cycle': cycle}
if not src_sbox: src_sbox = '.'
if not tgt_sbox: tgt_sbox = '.'
ret = list()
for data in ru.as_list(to_link):
src, tgt = data.split('>')
try:
src = src.strip() % expand
tgt = tgt.strip() % expand
except:
raise RuntimeError('expansion error: %s : %s : %s' % (src, tgt, expand))
ret.append('%s/%s > %s/%s' % (src_sbox, src, tgt_sbox, tgt))
return ret
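# Worked example (sketch):
#   expand_ln('in_%(cycle)d.txt > out.txt', 'src', 'tgt', 0, 2)
# yields ['src/in_2.txt > tgt/out.txt'].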
# ------------------------------------------------------------------------------
#
def last_task(replica):
cs = replica.current_stage
if cs >= len(replica.stages):
cs -= 1
assert(cs < len(replica.stages))
tasks = replica.stages[cs].tasks
assert(tasks)
assert(len(tasks) == 1)
return list(tasks)[0]
# ------------------------------------------------------------------------------
| mit | Python |
|
88788c215c619ab894e21243d584541f311dbfb9 | Add eventlet test check to new tests __init__.py | varunarya10/oslo.concurrency,JioCloud/oslo.concurrency | oslo_concurrency/tests/__init__.py | oslo_concurrency/tests/__init__.py | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
if os.environ.get('TEST_EVENTLET'):
import eventlet
eventlet.monkey_patch()
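# Set TEST_EVENTLET to any non-empty value before running the tests to
# exercise the suite with eventlet monkey patching enabled.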
| apache-2.0 | Python |
|
112c3a5a7728aea9be59b4bab1c26932e5faceaf | replace simple_api.py, set filename via commandline param, git add files that dont exist | niryariv/yuvalim,niryariv/yuvalim | import_fusion.py | import_fusion.py | #!/usr/bin/python
import json
import requests
import sys
import codecs
import subprocess
from datetime import datetime
from optparse import OptionParser
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-f", dest="output_file", help="output filename (will be stored under the data/ directory)")
(options, args) = parser.parse_args()
if not options.output_file:
print "must include -f <output filename>"
exit()
OUTPUT_PATH = 'data/' + options.output_file
with open("api.json") as f:
keys = json.loads(f.read())
server_key = keys["ServerKey"]
tablename = keys['fusion_table']
endpoint = 'https://www.googleapis.com/fusiontables/v1/query?sql=SELECT * FROM '
apicall = "".join([endpoint, tablename, "&key=", server_key])
raw = requests.get(apicall)
if not raw.ok:
print("something wrong with the apicall\n would print the requests object for inspection and debugging:")
print('dir:',dir(raw))
print('status code:',raw.status_code)
print('text:', raw.text)
sys.exit()
data = raw.json()
geojson = {"type": "FeatureCollection", "features": []}
for place in data['rows']:
geojson['features'].append(
{
"geometry": {
"type": "Point",
"coordinates": [
place[6],
place[5]
]
},
"type": "Feature",
"properties": {
"city": place[1],
"name": place[0],
"district":place[2],
"subdistrict":place[3],
"address":place[4],
"operator": place[16],
"days": [
place[8],
place[9],
place[10],
place[11],
place[12],
place[13]
],
"phones" : place[7],
"notes": place[15],
"error" : place[17]
}
}
)
with codecs.open(OUTPUT_PATH,'wb+', 'utf-8') as f:
output = "stations=" + json.dumps(geojson, indent=4, ensure_ascii=False)
f.write(output)
f.close()
subprocess.call(['git', 'add', OUTPUT_PATH])
subprocess.call(['git','commit', OUTPUT_PATH, '-m', 'commiting updated geojson from Fusion table %s' % datetime.now().strftime("%d/%m/%Y %H:%M")])
| bsd-3-clause | Python |
|
43d23f19933e898254d58c4874e6f0c0ac3b1cc6 | Add example config file | pyaiot/pyaiot,pyaiot/pyaiot,pyaiot/pyaiot,pyaiot/pyaiot | config-example.py | config-example.py | # Example configuration file for for Pyaiot
# Configuration options are shared between all pyaiot components.
# Debug
# Enable debug logging for all components.
#debug = False
# Broker host:
# Other component connect to this host for their broker connection. The
# dashboard passes this hostname to the clients for their broker connection.
#broker_host = 'localhost'
# Broker port number:
# This is the TCP port number the broker's websocket listens on. Other
# components use this configuration option to determine which port number to
# connect to.
#broker_port = 8020
# Key file
# The key file is necessary to authenticate different components to the broker.
# Both the broker and the other components use the path specified to find the
# key file for authentication.
#key_file = '~/.pyaiot/keys'
# coap port
# The coap component listens on this port for CoAP messages from nodes
#coap_port = 5683
# MQTT host
# The hostname of the MQTT broker. The mqtt component connects to this hostname
# for the MQTT broker connection.
#mqtt_host = 'localhost'
# MQTT port
# The port the MQTT broker listens on. The MQTT component connects to this port
# on the MQTT broker.
#mqtt_port = 1886
# Gateway port
# This port is used by the websocket gateway to listen on. Websocket nodes
# connect to this port to connect with the websocket gateway.
#gateway_port = 8001
# max time
# Both the CoAP broker and the MQTT broker remove nodes from the broker after
# this many seconds without any messages from a node.
#max_time = 120
# Web Port
# The web interface listens on this port for HTTP connections.
#web_port = 8080
# Broker SSL
# When enabled, the URI to the broker is supplied with wss to indicate to use
# SSL to connect to the broker. Use this when you have a reverse proxy in front
# of the dashboard to handle SSL termination.
#broker_ssl=False
# Camera URL
# The HTTP clients get this URL for their connection to webcam images. If None
# is configured, no webcam functionality is enabled.
#camera_url = None
# Title
# The title of the web page.
#title = 'IoT Dashboard'
# Logo
# The logo for the navbar of the dashboard. Should be an URL to the image. If
# None is configured, no logo is shown.
#logo = None
# Favicon
# Optionally show a favicon on the dashboard. Should be an URL to an image. If
# None is configured, no favicon is passed to the web page.
#favicon = None
| bsd-3-clause | Python |
|
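Since the config file above is plain Python, one way to consume it is to execute it and merge the resulting names over defaults. Using runpy here is an assumption about the loader, not pyaiot's actual code.

import runpy

defaults = {'broker_host': 'localhost', 'broker_port': 8020, 'web_port': 8080}
overrides = runpy.run_path('config-example.py')
# keep only the known option names, letting the file override the defaults
config = dict(defaults, **{k: v for k, v in overrides.items() if k in defaults})
print(config['broker_port'])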
783c3e740c154fb27b247b1cb8af5c853a8be973 | add basic_routing_test on experimental scenario | kadhikari/navitia,Tisseo/navitia,ballouche/navitia,ballouche/navitia,CanalTP/navitia,kadhikari/navitia,kadhikari/navitia,xlqian/navitia,TeXitoi/navitia,lrocheWB/navitia,kinnou02/navitia,Tisseo/navitia,pbougue/navitia,CanalTP/navitia,pbougue/navitia,antoine-de/navitia,ballouche/navitia,pbougue/navitia,Tisseo/navitia,kinnou02/navitia,patochectp/navitia,kadhikari/navitia,Tisseo/navitia,antoine-de/navitia,lrocheWB/navitia,pbougue/navitia,lrocheWB/navitia,patochectp/navitia,antoine-de/navitia,xlqian/navitia,patochectp/navitia,patochectp/navitia,ballouche/navitia,kinnou02/navitia,TeXitoi/navitia,CanalTP/navitia,kinnou02/navitia,xlqian/navitia,xlqian/navitia,xlqian/navitia,antoine-de/navitia,TeXitoi/navitia,CanalTP/navitia,TeXitoi/navitia,Tisseo/navitia,lrocheWB/navitia,CanalTP/navitia | source/jormungandr/tests/routing_tests_experimental.py | source/jormungandr/tests/routing_tests_experimental.py | # Copyright (c) 2001-2015, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
import logging
from tests.tests_mechanism import AbstractTestFixture
from tests_mechanism import dataset
from check_utils import *
from nose.tools import eq_
import jormungandr.scenarios.experimental
from jormungandr.instance import Instance
def check_journeys(resp):
    assert not resp.get('journeys') or sum([1 for j in resp['journeys'] if j['type'] == "best"]) == 1


@dataset(["main_routing_test"])
class TestJourneysExperimental(AbstractTestFixture):
    """
    Test the experimental scenario
    All the tests are defined in "TestJourneys" class, we only change the scenario

    NOTE: for the moment we cannot import all routing tests, so we only get 2, but we need to add some more
    """
    def setup(self):
        logging.debug('setup for experimental')
        from jormungandr import i_manager
        dest_instance = i_manager.instances['main_routing_test']
        self.old_scenario = dest_instance._scenario
        dest_instance._scenario = jormungandr.scenarios.experimental.Scenario()

    def teardown(self):
        from jormungandr import i_manager
        i_manager.instances['main_routing_test']._scenario = self.old_scenario

    def test_journeys(self):
        #NOTE: we query /v1/coverage/main_routing_test/journeys and not directly /v1/journeys
        #not to use the jormungandr database
        response = self.query_region(journey_basic_query, display=True)

        check_journeys(response)
        is_valid_journey_response(response, self.tester, journey_basic_query)

    def test_error_on_journeys(self):
        """ if we got an error with kraken, an error should be returned"""
        query_out_of_production_bound = "journeys?from={from_coord}&to={to_coord}&datetime={datetime}"\
            .format(from_coord="0.0000898312;0.0000898312",  # coordinate of S in the dataset
                    to_coord="0.00188646;0.00071865",  # coordinate of R in the dataset
                    datetime="20110614T080000")  # 2011 should not be in the production period

        response, status = self.query_no_assert("v1/coverage/main_routing_test/" + query_out_of_production_bound)

        assert status != 200, "the response should not be valid"
        check_journeys(response)
        assert response['error']['id'] == "date_out_of_bounds"
        assert response['error']['message'] == "date is not in data production period"

        #and no journey is to be provided
        assert 'journeys' not in response or len(response['journeys']) == 0


@dataset(["main_ptref_test"])
class TestJourneysExperimentalWithPtref(AbstractTestFixture):
    """Test the experimental scenario with ptref_test data"""
    def setup(self):
        logging.debug('setup for experimental')
        from jormungandr import i_manager
        dest_instance = i_manager.instances['main_ptref_test']
        self.old_scenario = dest_instance._scenario
        dest_instance._scenario = jormungandr.scenarios.experimental.Scenario()

    def teardown(self):
        from jormungandr import i_manager
        i_manager.instances['main_ptref_test']._scenario = self.old_scenario

    def test_strange_line_name(self):
        response = self.query("v1/coverage/main_ptref_test/journeys"
                              "?from=stop_area:stop2&to=stop_area:stop1"
                              "&datetime=20140107T100000", display=True)

        check_journeys(response)
        eq_(len(response['journeys']), 1)
| agpl-3.0 | Python |
|
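The setup/teardown pair above swaps the instance's scenario by hand; the same idea can be written as a context manager that restores the original scenario even if a test fails. This is a sketch, not navitia API.

from contextlib import contextmanager

@contextmanager
def use_scenario(instance, scenario):
    # temporarily install `scenario` on the instance, then restore the old one
    old = instance._scenario
    instance._scenario = scenario
    try:
        yield instance
    finally:
        instance._scenario = old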
b1caa89d75aecc564d504e5baffd0dc7619cd587 | Create foursq_friends.py | chenyang03/Foursquare_Crawler,chenyang03/Foursquare_Crawler | foursq_friends.py | foursq_friends.py | import json
from foursq_utils import *
def fetch_usr_friends(user_id):
    super_token = 'QEJ4AQPTMMNB413HGNZ5YDMJSHTOHZHMLZCAQCCLXIX41OMP'
    url = 'https://api.foursquare.com/v2/users/' + str(user_id) + '/friends?oauth_token=' + super_token + '&v=20210115'
    try:
        raw = get_raw_info(url)
        data = json.loads(raw)
        if data['meta']['code'] != 200:
            return -1
        friends_info = data['response']['friends']
        friendsUID = []
        if 'items' in friends_info.keys():
            for item in friends_info['items']:
                friendsUID.append(item['id'])
            friends_info.setdefault('friendsUID', friendsUID)
        else:
            friends_info.setdefault('friendsUID', [])
        return friends_info
    except Exception:
        return -1
| mit | Python |
|
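Hypothetical call site for the function above; it assumes get_raw_info from foursq_utils works and that the hard-coded OAuth token is still valid.

info = fetch_usr_friends(123456)
if info == -1:
    print 'lookup failed'
else:
    print 'first friend ids:', info['friendsUID'][:5]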
139524072cc56d19ce887aaa95705dff8a952cc2 | Add lc035_search_insert_position.py | bowen0701/algorithms_data_structures | lc035_search_insert_position.py | lc035_search_insert_position.py | """Leetcode 35. Search Insert Position
Easy
URL: https://leetcode.com/problems/search-insert-position/
Given a sorted array and a target value, return the index if the target is found.
If not, return the index where it would be if it were inserted in order.
You may assume no duplicates in the array.
Example 1:
Input: [1,3,5,6], 5
Output: 2
Example 2:
Input: [1,3,5,6], 2
Output: 1
Example 3:
Input: [1,3,5,6], 7
Output: 4
Example 4:
Input: [1,3,5,6], 0
Output: 0
"""
class Solution(object):
    def searchInsert(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        pass


def main():
    pass


if __name__ == '__main__':
    main()
| bsd-2-clause | Python |
|
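The committed file deliberately leaves searchInsert() as a stub; one standard solution is a binary search, which Python's bisect module provides directly. A sketch, not the author's answer:

import bisect

def search_insert(nums, target):
    # bisect_left returns the leftmost index at which target could be
    # inserted while keeping nums sorted - exactly the required position
    return bisect.bisect_left(nums, target)

assert search_insert([1, 3, 5, 6], 5) == 2
assert search_insert([1, 3, 5, 6], 2) == 1
assert search_insert([1, 3, 5, 6], 7) == 4
assert search_insert([1, 3, 5, 6], 0) == 0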
1669cd22d6c8ee5bcb37c6770c98ddcf8848d901 | Make lots of xyz with differing natoms visualizable | patrickmelix/Python4ChemistryTools | pad-trajectory.py | pad-trajectory.py | #!/usr/bin/env python3
#
# Script to generate an ext-xyz trajectory from individual ext-xyz files with varying atom numbers using ASE.
# Appends 'X' atoms at origin to obtain frames with equal lengths
# by Patrick Melix
# 2020/06/08
#
from ase import io, Atom
import os
def main(inList, outFile='traj.xyz', outFormat='extxyz'):
    # if output exists mv to .bak
    if os.path.isfile(outFile):
        print('ATTENTION: {:} exists, moving to *.bak'.format(outFile))
        os.rename(outFile, outFile+'.bak')

    traj = []
    for inFile in inList:
        if not os.path.isfile(inFile):
            raise ValueError('File {:} does not exist'.format(inFile))
        print(inFile)
        traj.append(io.read(inFile))

    maxLen = max([len(frame) for frame in traj])
    for i in range(len(traj)):
        if len(traj[i]) < maxLen:
            for j in range(maxLen-len(traj[i])):
                traj[i].append(Atom('X'))

    with open(outFile, 'w') as f:
        for frame in traj:
            frame.write(f, format=outFormat)
    return


#########################
# Main
########################
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='Combine different lengths of XYZ')
    parser.add_argument('--outformat', help='Output ASE Format', default='extxyz')
    parser.add_argument('--outfile', help='Output File', default='traj.xyz')
    parser.add_argument('-files', type=str, nargs='+', default=[], help='All the XYZ Files')
    args = parser.parse_args()
    main(args.files, args.outfile, args.outformat)
| mit | Python |
|
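Besides the command line handled in the __main__ block, main() can be called directly; the file names below are placeholders and ASE must be installed.

main(['frame_000.xyz', 'frame_001.xyz'], outFile='padded.xyz', outFormat='extxyz')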
900b09803f5c49b8645ba7f3d47eb17515061377 | Create heads_and_legs.py | Kunalpod/codewars,Kunalpod/codewars | heads_and_legs.py | heads_and_legs.py | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Heads and Legs
#Problem level: 8 kyu
def animals(heads, legs):
    if heads == 0 and legs == 0:
        return (0, 0)
    y = legs//2 - heads
    x = heads - y
    if x < 0 or y < 0 or legs % 2 != 0:
        return "No solutions"
    return (x, y)
| mit | Python |
|
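Quick sanity checks: with x two-legged and y four-legged animals, x + y = heads and 2x + 4y = legs give y = legs//2 - heads, which is exactly what the function computes.

assert animals(72, 200) == (44, 28)     # 44*2 + 28*4 == 200
assert animals(0, 0) == (0, 0)
assert animals(1, 3) == "No solutions"  # odd leg count is impossible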
fed98c8a9723c6fe18c123015b51714dc4ccdf68 | add migrations | bane138/nonhumanuser,bane138/nonhumanuser,bane138/nonhumanuser,bane138/nonhumanuser | actual_play/migrations/0006_game_thumbnail.py | actual_play/migrations/0006_game_thumbnail.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-10-20 22:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('actual_play', '0005_auto_20161010_1313'),
    ]

    operations = [
        migrations.AddField(
            model_name='game',
            name='thumbnail',
            field=models.ImageField(blank=True, null=True, upload_to='actual_play/image/%Y/%m/%d'),
        ),
    ]
| mit | Python |
|
5c9ffc4a0ab9f8aed3071a0bf4ad0fc69070b628 | Create inside_market.py | MKTSTK/Runover | inside_market.py | inside_market.py | import pandas as pd
import numpy as np
# update current state of our bid and ask
# iterate thru each trade and determine if a fill was generated
# id
# price
# qty
# side - bid/ask
# status - live, canceled, rejected
LIVE = 0
CANCELED = 1
REJECTED = 2
FILLED = 3
PARTIAL = 4
BID = 5
ASK = 6
MINUS_INF = -9999999
PLUS_INF = 9999999
# represents a limit order in our inside market
class order():
    # class-level counter used to hand out sequential order ids
    id = 0

    def __init__(self, price, qty, side, status):
        self.id = order.id
        order.id += 1
        self.price = price
        self.qty = qty
        self.side = side
        self.status = status

    def cancel(self):
        self.status = CANCELED

    def modify(self, new_price, new_qty=-1):
        self.price = new_price
        if new_qty > 0:
            self.qty = new_qty

    def evaluate(self, trade_price):
        if self.side == BID:
            if trade_price < self.price:
                self.status = FILLED
                return True, self.price
            else:
                return False, 0.0
        else:
            if trade_price > self.price:
                self.status = FILLED
                return True, self.price
            else:
                return False, 0.0


class inside_market():
    def __init__(self, bid_price, ask_price):
        if bid_price < ask_price:
            self.bid = order(bid_price, 1, BID, LIVE)
            self.ask = order(ask_price, 1, ASK, LIVE)
            self.status = 1
        else:
            self.status = -1

    def update(self, side, new_price):
        if side == BID:
            if new_price < self.ask.price:
                self.bid.price = new_price
                return True, "MODIFIED ORDER ID = ", self.bid.id
            else:
                return False, "FAILED TO MODIFY ORDER ID = ", self.bid.id, " RESULTING BID WOULD HAVE CROSSED OUR ASK"
        else:
            if new_price > self.bid.price:
                self.ask.price = new_price
                return True, "MODIFIED ORDER ID = ", self.ask.id
            else:
                return False, "FAILED TO MODIFY ORDER ID = ", self.ask.id, " RESULTING ASK WOULD HAVE CROSSED OUR BID"

    def evaluate(self, trade_price):
        bid_fill, bid_fill_price = self.bid.evaluate(trade_price)
        ask_fill, ask_fill_price = self.ask.evaluate(trade_price)
        if bid_fill == True:
            return BID, bid_fill_price
        elif ask_fill == True:
            return ASK, ask_fill_price
        else:
            return None, 0.0

    def shift(self, increment):
        self.bid.price += increment
        self.ask.price += increment

    def exit(self, side, increment):
        if side == BID:
            # shift the bid down to minus_inf to not buy anymore
            self.bid.price = MINUS_INF
            self.ask.price -= increment
        else:
            # shift the ask up to plus_inf to not sell anymore
            self.ask.price = PLUS_INF
            self.bid.price += increment
| bsd-3-clause | Python |
|
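A hypothetical walk-through of the classes above: quote a 99/101 market, watch a trade below the bid generate a fill, then pull the bid.

mkt = inside_market(99.0, 101.0)
side, fill_price = mkt.evaluate(98.5)   # trade prints through our bid
assert side == BID and fill_price == 99.0
mkt.exit(BID, 0.5)                      # stop buying, tighten the ask
print "bid %s / ask %s" % (mkt.bid.price, mkt.ask.price)  # -9999999 / 100.5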
0dd3894fb8816f6f904e5c7d204ab2672b304588 | Add earth mesh module | thomasgibson/firedrake-hybridization | gravity_waves/earth_mesh.py | gravity_waves/earth_mesh.py | from __future__ import absolute_import, print_function, division
from firedrake import *
__all__ = ["generate_earth_mesh"]
def generate_earth_mesh(r_level, num_layers, thickness, hexes=False):
    """Generates an Earth-like spherical mesh for the gravity wave
    problem.

    :arg r_level: An ``int`` denoting the number of refinement
                  levels.
    :arg num_layers: An ``int`` denoting the number of mesh layers.
    :arg thickness: The thickness of the spherical shell (in meters).
    :arg hexes: A ``bool`` indicating whether to generate a hexahedral mesh.

    Returns: A Firedrake extruded spherical mesh.
    """
    earth_radius = 6.371e6
    layer_height = thickness / num_layers
    if hexes:
        spherical_base = CubedSphereMesh(earth_radius,
                                         refinement_level=r_level)
    else:
        spherical_base = IcosahedralSphereMesh(earth_radius,
                                               refinement_level=r_level)

    earth_mesh = ExtrudedMesh(spherical_base, layers=num_layers,
                              layer_height=layer_height,
                              extrusion_type="radial")
    return earth_mesh
| mit | Python |
|
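Hypothetical usage (needs a working Firedrake installation): a 10 km deep shell on an icosahedral base with 4 refinement levels and 16 layers.

mesh = generate_earth_mesh(r_level=4, num_layers=16, thickness=1.0e4)
print(mesh.num_cells())  # cell count grows quickly with r_level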
5578d11f45e9c41ab9c4311f2bed48b9c24d9bf5 | Create file for Nonterminal have method | PatrikValkovic/grammpy | tests/grammar_term-nonterm_test/NonterminalHaveTest.py | tests/grammar_term-nonterm_test/NonterminalHaveTest.py | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
""" | mit | Python |
|
b40c6ce73c439e7d74b867702fdd2c4cd7ad8b15 | add testrunner to automactically create/delete a test db during python and django tests. | sirex/couchdbkit,ghickman/couchdbkit,ghickman/couchdbkit,benoitc/couchdbkit,sirex/couchdbkit,benoitc/couchdbkit,benoitc/couchdbkit,ghickman/couchdbkit | couchdbkit/ext/django/testrunner.py | couchdbkit/ext/django/testrunner.py | from django.test.simple import DjangoTestSuiteRunner
from django.conf import settings
from couchdbkit.ext.django import loading as loading
from couchdbkit.resource import ResourceNotFound
class CouchDbKitTestSuiteRunner(DjangoTestSuiteRunner):
    """
    A test suite runner for couchdbkit. This offers the exact same functionality
    as the default django test suite runner, except that it connects all the couchdbkit
    django-extended models to a test database. The test database is deleted at the
    end of the tests. To use this, just add this file to your project and the following
    line to your settings.py file:

    TEST_RUNNER = 'myproject.testrunner.CouchDbKitTestSuiteRunner'
    """
    dbs = []

    def get_test_db_name(self, dbname):
        return "%s_test" % dbname

    def setup_databases(self, **kwargs):
        print "overriding the couchdbkit database settings to use a test database!"

        # first pass: just implement this as a monkey-patch to the loading module
        # overriding all the existing couchdb settings
        self.dbs = [(app, self.get_test_db_name(url)) for app, url in getattr(settings, "COUCHDB_DATABASES", [])]

        old_handler = loading.couchdbkit_handler
        couchdbkit_handler = loading.CouchdbkitHandler(self.dbs)
        loading.couchdbkit_handler = couchdbkit_handler
        loading.register_schema = couchdbkit_handler.register_schema
        loading.get_schema = couchdbkit_handler.get_schema
        loading.get_db = couchdbkit_handler.get_db

        # register our dbs with the extension document classes
        for app, value in old_handler.app_schema.items():
            for name, cls in value.items():
                cls.set_db(loading.get_db(app))

        return super(CouchDbKitTestSuiteRunner, self).setup_databases(**kwargs)

    def teardown_databases(self, old_config, **kwargs):
        deleted_databases = []
        skipcount = 0
        for app, item in self.dbs:
            app_label = app.split('.')[-1]
            db = loading.get_db(app_label)
            if db.dbname in deleted_databases:
                skipcount += 1
                continue
            try:
                db.server.delete_db(db.dbname)
                deleted_databases.append(db.dbname)
                print "deleted database %s for %s" % (db.dbname, app_label)
            except ResourceNotFound:
                print "database %s not found for %s! it was probably already deleted." % (db.dbname, app_label)
        if skipcount:
            print "skipped deleting %s app databases that were already deleted" % skipcount
        return super(CouchDbKitTestSuiteRunner, self).teardown_databases(old_config, **kwargs) | mit | Python |
|
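A sketch of the settings.py wiring the docstring asks for; the app label and database URL are placeholders, and COUCHDB_DATABASES uses the (app, url) pairs the runner iterates over.

COUCHDB_DATABASES = [
    ('myapp', 'http://127.0.0.1:5984/myapp'),
]
TEST_RUNNER = 'myproject.testrunner.CouchDbKitTestSuiteRunner'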
159156cb962ad0c8f4ea6f022471c75f33306f7e | Add unit tests for snapshots_client | Juniper/tempest,Tesora/tesora-tempest,masayukig/tempest,openstack/tempest,Tesora/tesora-tempest,bigswitch/tempest,masayukig/tempest,zsoltdudas/lis-tempest,Juniper/tempest,sebrandon1/tempest,cisco-openstack/tempest,openstack/tempest,LIS/lis-tempest,vedujoshi/tempest,izadorozhna/tempest,zsoltdudas/lis-tempest,vedujoshi/tempest,sebrandon1/tempest,LIS/lis-tempest,izadorozhna/tempest,cisco-openstack/tempest,bigswitch/tempest | tempest/tests/services/compute/test_snapshots_client.py | tempest/tests/services/compute/test_snapshots_client.py | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslotest import mockpatch
from tempest_lib import exceptions as lib_exc
from tempest.services.compute.json import snapshots_client
from tempest.tests import fake_auth_provider
from tempest.tests.services.compute import base
class TestSnapshotsClient(base.BaseComputeServiceTest):

    FAKE_SNAPSHOT = {
        "createdAt": "2015-10-02T16:27:54.724209",
        "displayDescription": u"Another \u1234.",
        "displayName": u"v\u1234-001",
        "id": "100",
        "size": 100,
        "status": "available",
        "volumeId": "12"
    }

    FAKE_SNAPSHOTS = {"snapshots": [FAKE_SNAPSHOT]}

    def setUp(self):
        super(TestSnapshotsClient, self).setUp()
        fake_auth = fake_auth_provider.FakeAuthProvider()
        self.client = snapshots_client.SnapshotsClient(
            fake_auth, 'compute', 'regionOne')

    def _test_create_snapshot(self, bytes_body=False):
        self.check_service_client_function(
            self.client.create_snapshot,
            'tempest.common.service_client.ServiceClient.post',
            {"snapshot": self.FAKE_SNAPSHOT},
            to_utf=bytes_body, status=200,
            volume_id=self.FAKE_SNAPSHOT["volumeId"])

    def test_create_snapshot_with_str_body(self):
        self._test_create_snapshot()

    def test_create_snapshot_with_bytes_body(self):
        self._test_create_snapshot(bytes_body=True)

    def _test_show_snapshot(self, bytes_body=False):
        self.check_service_client_function(
            self.client.show_snapshot,
            'tempest.common.service_client.ServiceClient.get',
            {"snapshot": self.FAKE_SNAPSHOT},
            to_utf=bytes_body, snapshot_id=self.FAKE_SNAPSHOT["id"])

    def test_show_snapshot_with_str_body(self):
        self._test_show_snapshot()

    def test_show_snapshot_with_bytes_body(self):
        self._test_show_snapshot(bytes_body=True)

    def _test_list_snapshots(self, bytes_body=False, **params):
        self.check_service_client_function(
            self.client.list_snapshots,
            'tempest.common.service_client.ServiceClient.get',
            self.FAKE_SNAPSHOTS, to_utf=bytes_body, **params)

    def test_list_snapshots_with_str_body(self):
        self._test_list_snapshots()

    def test_list_snapshots_with_byte_body(self):
        self._test_list_snapshots(bytes_body=True)

    def test_list_snapshots_with_params(self):
        # pass the query parameter through **params rather than as the
        # positional bytes_body argument
        self._test_list_snapshots(params='fake')

    def test_delete_snapshot(self):
        self.check_service_client_function(
            self.client.delete_snapshot,
            'tempest.common.service_client.ServiceClient.delete',
            {}, status=202, snapshot_id=self.FAKE_SNAPSHOT['id'])

    def test_is_resource_deleted_true(self):
        module = ('tempest.services.compute.json.snapshots_client.'
                  'SnapshotsClient.show_snapshot')
        self.useFixture(mockpatch.Patch(
            module, side_effect=lib_exc.NotFound))
        self.assertTrue(self.client.is_resource_deleted('fake-id'))

    def test_is_resource_deleted_false(self):
        module = ('tempest.services.compute.json.snapshots_client.'
                  'SnapshotsClient.show_snapshot')
        self.useFixture(mockpatch.Patch(
            module, return_value={}))
        self.assertFalse(self.client.is_resource_deleted('fake-id'))
| apache-2.0 | Python |
|
913c9a10b2eb3b3d9de108a82a3251b2c0de0e10 | Add test for Hostname object | CybOXProject/python-cybox | cybox/test/objects/hostname_test.py | cybox/test/objects/hostname_test.py | # Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.objects.hostname_object import Hostname
from cybox.test.objects import ObjectTestCase
class TestHostname(ObjectTestCase, unittest.TestCase):
    object_type = "HostnameObjectType"
    klass = Hostname

    _full_dict = {
        'is_domain_name': True,
        'hostname_value': "www.example.com",
        'naming_system': ["DNS", "NETBIOS"],
        'xsi:type': object_type,
    }


if __name__ == "__main__":
    unittest.main()
| bsd-3-clause | Python |
|
514aca20c6f076a86819d7180f36c3b2e8bcc33b | Add integration test checking compatibility of Keras models with TF optimizers. | keras-team/keras,keras-team/keras | tests/integration_tests/test_tensorflow_integration.py | tests/integration_tests/test_tensorflow_integration.py | from __future__ import print_function
import os
import tempfile
import pytest
import keras
from keras import layers
from keras.utils.test_utils import get_test_data
from keras.utils.test_utils import keras_test
@pytest.mark.skipif(keras.backend.backend() != 'tensorflow', reason='Requires TF backend')
@keras_test
def test_tf_optimizer():
    import tensorflow as tf

    num_hidden = 10
    output_dim = 2
    input_dim = 10
    target = 0.8
    optimizer = tf.train.AdadeltaOptimizer(
        learning_rate=1., rho=0.95, epsilon=1e-08)

    (x_train, y_train), (x_test, y_test) = get_test_data(
        num_train=1000, num_test=200,
        input_shape=(input_dim,),
        classification=True, num_classes=output_dim)

    model = keras.Sequential()
    model.add(layers.Dense(num_hidden,
                           activation='relu',
                           input_shape=(input_dim,)))
    model.add(layers.Dense(output_dim, activation='softmax'))
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    history = model.fit(x_train, y_train, epochs=8, batch_size=16,
                        validation_data=(x_test, y_test), verbose=2)
    assert history.history['val_acc'][-1] >= target

    # Test saving.
    _, fname = tempfile.mkstemp('.h5')
    model.save(fname)
    model = keras.models.load_model(fname)
    assert len(model.weights) == 4
    os.remove(fname)


if __name__ == '__main__':
    pytest.main([__file__])
| apache-2.0 | Python |
|
ebffda0ec0f2619ad1071bb1d00d87ce08d59498 | Add support for SymDIVINE tool | sosy-lab/benchexec,martin-neuhaeusser/benchexec,sosy-lab/benchexec,IljaZakharov/benchexec,dbeyer/benchexec,IljaZakharov/benchexec,ultimate-pa/benchexec,martin-neuhaeusser/benchexec,sosy-lab/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,IljaZakharov/benchexec,ultimate-pa/benchexec,martin-neuhaeusser/benchexec,dbeyer/benchexec,sosy-lab/benchexec,IljaZakharov/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,martin-neuhaeusser/benchexec | benchexec/tools/symdivine.py | benchexec/tools/symdivine.py | """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
import os
class Tool(benchexec.tools.template.BaseTool):
    """
    SymDIVINE wrapper object
    """

    BINS = ['symdivine', 'run_symdivine.py', 'compile_benchmark.py', 'lart', 'libz3.so']

    def executable(self):
        """
        Find the path to the executable file that will get executed.
        This method always needs to be overridden,
        and most implementations will look similar to this one.
        The path returned should be relative to the current directory.
        """
        return util.find_executable(self.BINS[0])

    def version(self, executable):
        return self._version_from_tool(executable)

    def name(self):
        """
        Return the name of the tool, formatted for humans.
        """
        return 'SymDIVINE'

    def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
        """
        Compose the command line to execute from the name of the executable,
        the user-specified options, and the inputfile to analyze.
        This method can get overridden, if, for example, some options should
        be enabled or if the order of arguments must be changed.
        All paths passed to this method (executable, tasks, and propertyfile)
        are either absolute or have been made relative to the designated working directory.

        @param executable: the path to the executable of the tool (typically the result of executable())
        @param options: a list of options, in the same order as given in the XML-file.
        @param tasks: a list of tasks, that should be analysed with the tool in one run.
                      In most cases we have only _one_ inputfile.
        @param propertyfile: contains a specification for the verifier.
        @param rlimits: This dictionary contains resource-limits for a run,
                        for example: time-limit, soft-time-limit, hard-time-limit, memory-limit, cpu-core-limit.
                        All entries in rlimits are optional, so check for existence before usage!
        """
        directory = os.path.dirname(executable)

        # Ignore propertyfile since we run only reachability
        return [os.path.join('.', directory, self.BINS[1]), directory] + options + tasks

    def determine_result(self, returncode, returnsignal, output, isTimeout):
        """
        Parse the output of the tool and extract the verification result.
        This method always needs to be overridden.
        If the tool gave a result, this method needs to return one of the
        benchexec.result.RESULT_* strings.
        Otherwise an arbitrary string can be returned that will be shown to the user
        and should give some indication of the failure reason
        (e.g., "CRASH", "OUT_OF_MEMORY", etc.).
        """
        join_output = '\n'.join(output)
        if isTimeout:
            return 'TIMEOUT'

        if returncode == 2:
            return 'Pre-run phase failed: ' + join_output

        if join_output is None:
            return 'ERROR - no output'
        elif 'Safe.' in join_output:
            return result.RESULT_TRUE_PROP
        elif 'Error state' in join_output:
            return result.RESULT_FALSE_REACH
        else:
            return result.RESULT_UNKNOWN

    def program_files(self, executable):
        """
        OPTIONAL, this method is only necessary for situations when the benchmark environment
        needs to know all files belonging to a tool
        (to transport them to a cloud service, for example).
        Returns a list of files or directories that are necessary to run the tool.
        """
        directory = os.path.dirname(executable)
        return map(lambda x: os.path.join('.', directory, x), self.BINS)
| apache-2.0 | Python |
|
3840fbe6ca33e48b9bdbd78e85830a13606f612c | Create efi-smc.py | jacobsalmela/apple-efi-smc-scraper | efi-smc.py | efi-smc.py | #!/usr/bin/python
from lxml import html
import requests
# Get the EFI/SMC table from Apple's Website
page = requests.get('http://support.apple.com/en-us/HT1237')
tree = html.fromstring(page.text)
# Count the number of rows which will be used in looping
rows = tree.xpath('//*[@id="kbtable"]/tbody/tr')
# For each row:
for i in range(1, len(rows) + 1):  # XPath row indices are 1-based
    # Get the friendly name, model, EFI version, SMC version, and the download URLs
    friendly_name = tree.xpath('//*[@id="kbtable"]/tbody/tr[%(i)s]/td[1]/text()' % locals())
    model = tree.xpath('//*[@id="kbtable"]/tbody/tr[%(i)s]/td[2]/p/text()' % locals())
    efi_version = tree.xpath('//*[@id="kbtable"]/tbody/tr[%(i)s]/td[3]/p/a/text()' % locals())
    efi_url = tree.xpath('//*[@id="kbtable"]/tbody/tr[%(i)s]/td[3]/p/a/@href' % locals())
    smc_version = tree.xpath('//*[@id="kbtable"]/tbody/tr[%(i)s]/td[4]/p/a/text()' % locals())
    smc_url = tree.xpath('//*[@id="kbtable"]/tbody/tr[%(i)s]/td[4]/a/@href' % locals())

    # Print everything in a human-readable format
    if not friendly_name:
        continue
    else:
        print friendly_name[0]

    if not model:
        model = tree.xpath('//*[@id="kbtable"]/tbody/tr[%(i)s]/td[2]/text()' % locals())
        print model[0]
    else:
        print model[0]

    if not efi_version:
        efi_version = tree.xpath('//*[@id="kbtable"]/tbody/tr[%(i)s]/td[3]/a/text()' % locals())
        if not efi_version:
            print 'No EFI'
        else:
            print efi_version[0]
            print efi_url[0]
    else:
        print efi_version[0]

    if not smc_version:
        smc_version = tree.xpath('//*[@id="kbtable"]/tbody/tr[%(i)s]/td[4]/a/text()' % locals())
        if not smc_version:
            print 'No SMC'
        else:
            print smc_version[0]
            print smc_url[0]
    else:
        print smc_version[0]

    print '\n'
| mit | Python |
|
cdc6b62400f66d1b2747b5668a6618c961deb962 | create game class | vivekpabani/powerball-game | powerball/game.py | powerball/game.py | #!/usr/bin/env python
from collections import Counter
from .player import Player
class Game:

    def __init__(self, players=None):
        """
        Initialize the game instance.

        players may be initialized by argument or by calling the begin method.
        winning_numbers is initialized with an empty list. It should be populated
        by the generate_winning_numbers method when called.

        :param players (list): list of players
        """
        self.players = players if players is not None else list()
        self.winning_numbers = list()
| mit | Python |
|
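A sketch of the generate_winning_numbers method the docstring promises; drawing 5 distinct white balls from 1-69 plus one red ball from 1-26 follows the real Powerball rules and is an assumption about this project's intent.

import random

def generate_winning_numbers(self):
    # five distinct white balls, then the Powerball (which may repeat a white number)
    self.winning_numbers = sorted(random.sample(range(1, 70), 5))
    self.winning_numbers.append(random.randint(1, 26))
    return self.winning_numbers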
b5d1be9069507feaeb41cfcf9cd774a244ffe49c | Add Activity model | piotr-dobrogost/sqlalchemy-continuum,kvesteri/sqlalchemy-continuum,rmoorman/sqlalchemy-continuum,avilaton/sqlalchemy-continuum | sqlalchemy_continuum/ext/activity_stream.py | sqlalchemy_continuum/ext/activity_stream.py | import sqlalchemy as sa
from sqlalchemy.ext.declarative import declared_attr  # needed for the mixin columns below
from sqlalchemy_utils import generic_relationship, JSONType


class Activity(object):
    @declared_attr
    def actor_id(self):
        return sa.Column(
            sa.Integer,
            sa.ForeignKey('user.id'),
            index=True
        )

    @declared_attr
    def actor(self):
        return sa.orm.relationship('User')

    verb = sa.Column(sa.Unicode(255))

    data = sa.Column(JSONType)

    # This is used to discriminate between the linked tables.
    object_type = sa.Column(sa.Unicode(255))
    # This is used to point to the primary key of the linked row.
    object_id = sa.Column(sa.Integer)
    object = generic_relationship(object_type, object_id)

    # This is used to discriminate between the linked tables.
    target_type = sa.Column(sa.Unicode(255))
    # This is used to point to the primary key of the linked row.
    target_id = sa.Column(sa.Integer)
    target = generic_relationship(target_type, target_id)
| bsd-3-clause | Python |
|
98b738e21918d1b6c4f2193cf229c518c9913974 | add standalone affordance server script | rdeits/director,mitdrc/director,mithrandir123/director,openhumanoids/director,empireryan/director,manuelli/director,RobotLocomotion/director,RobotLocomotion/director,rdeits/director,mithrandir123/director,edowson/director,patmarion/director,RobotLocomotion/director,patmarion/director,edowson/director,empireryan/director,mithrandir123/director,mithrandir123/director,rdeits/director,RussTedrake/director,RobotLocomotion/director,openhumanoids/director,patmarion/director,RussTedrake/director,patmarion/director,edowson/director,gizatt/director,RobotLocomotion/director,manuelli/director,empireryan/director,gizatt/director,edowson/director,openhumanoids/director,mithrandir123/director,gizatt/director,RussTedrake/director,rdeits/director,mitdrc/director,empireryan/director,empireryan/director,patmarion/director,gizatt/director,mitdrc/director,edowson/director,openhumanoids/director,RussTedrake/director,manuelli/director,manuelli/director,rdeits/director,mitdrc/director,RussTedrake/director,mitdrc/director,gizatt/director,openhumanoids/director,manuelli/director | src/python/scripts/affordanceServer.py | src/python/scripts/affordanceServer.py | from ddapp import consoleapp
from ddapp import lcmobjectcollection
from ddapp.timercallback import TimerCallback
import datetime
def main():
    app = consoleapp.ConsoleApp()

    meshCollection = lcmobjectcollection.LCMObjectCollection('MESH_COLLECTION_COMMAND')
    affordanceCollection = lcmobjectcollection.LCMObjectCollection('AFFORDANCE_COLLECTION_COMMAND')

    meshCollection.sendEchoRequest()
    affordanceCollection.sendEchoRequest()

    def printCollection():
        print
        print '----------------------------------------------------'
        print datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print '%d affordances' % len(affordanceCollection.collection)
        for desc in affordanceCollection.collection.values():
            print
            print 'name:', desc['Name']
            print 'type:', desc['classname']

    timer = TimerCallback(targetFps=0.2)
    timer.callback = printCollection
    timer.start()

    #app.showPythonConsole()
    app.start()


if __name__ == '__main__':
    main()
| bsd-3-clause | Python |
|
8e4d60645fb45e37c7a947b3a86219e5fd15c194 | Add py-geeup package (#12367) | iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack | var/spack/repos/builtin/packages/py-geeup/package.py | var/spack/repos/builtin/packages/py-geeup/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyGeeup(PythonPackage):
    """Simple Client for Earth Engine Uploads with Selenium Support."""

    homepage = "https://github.com/samapriya/geeup"
    url = "https://pypi.io/packages/source/g/geeup/geeup-0.2.4.tar.gz"

    version('0.2.4', sha256='20f62306ea900d7fa28a97cc92204716212dc030c50a6ac8214772a61a1a83fe')

    depends_on('[email protected]:', type='build')
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'test'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('py-pysmartdl', type=('build', 'run'))
    depends_on('[email protected]', type=('build', 'run'), when='^python@:3.3')
    depends_on('[email protected]:', type=('build', 'run'), when='^[email protected]:')
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
| lgpl-2.1 | Python |
|
09aada1e7b734bde947a7031f97ecce34b8c65b2 | Create magical_marvelous_tour.py | mschruf/python | Google_Code_Jam/2014/Round_3/A/magical_marvelous_tour.py | Google_Code_Jam/2014/Round_3/A/magical_marvelous_tour.py | #!/usr/bin/python -tt
"""Solves Google Code Jam 2014 Round 3 Problem A
(https://code.google.com/codejam/contest/3024486/dashboard#s=p0)
"Magical, Marvelous Tour"
"""
import sys
def read_input():
    """Parses problem data from stdin.

    Args:
        None

    Returns:
        List of test cases, in order specified, each of which is list of
        integers representing values of n, p, q, r, s, in that order
    """
    lines = sys.stdin.read().splitlines()

    num_test_cases = int(lines[0])
    assert num_test_cases == len(lines) - 1

    test_cases = []
    for line in lines[1:]:
        test_cases.append([int(x) for x in line.split()])

    return test_cases


def get_best_range(devices):
    """Determines best choice of range for Arnar.

    Args:
        devices: list of integers where value of element i is number of
            transistors in device i

    Returns:
        Tuple consisting of tuple of indices defining range and integer of
        number of transistors in range Arnar will choose
    """
    # Since Solveig will always choose interval with most transistors, Arnar's
    # best chance of winning is with whatever partition which maximizes the
    # number of transistors in the interval with the second-highest number of
    # transistors.

    # brute-force approach: try all possible partitions; however, we don't
    # recompute the number of devices in each of the three intervals from
    # scratch; rather, we set them at the start and update them with at most
    # one addition or subtraction as we change the partitions
    num_best = 0
    range_best = (0, 0)

    # generate array with cumulative transistor sum for each device index, i.e.
    # element i reflects number of transistors in devices with indices < i
    cumulative_sums = [0]
    for i in range(1, len(devices) + 1):
        cumulative_sums.append(cumulative_sums[i - 1] + devices[i - 1])

    for i in range(len(devices)):
        for j in range(i, len(devices)):
            interval_sums = [cumulative_sums[i],
                             cumulative_sums[j + 1] - cumulative_sums[i],
                             cumulative_sums[len(devices)] -
                             cumulative_sums[j + 1]]
            assert sum(interval_sums) == cumulative_sums[len(devices)]
            # NOTE: following is faster than sorting list of 3 elements and
            # adding elements 0 and 1
            num_arnar = cumulative_sums[len(devices)] - max(interval_sums)
            if num_arnar > num_best:
                num_best = num_arnar
                range_best = (i, j)

    return (range_best, num_best)


def main():
    test_cases = read_input()

    i = 1
    for test_case in test_cases:
        (n, p, q, r, s) = test_case
        devices = [(x * p + q) % r + s for x in range(n)]
        num_transistors_total = sum(devices)
        ((range_start, range_end), num_transistors_arnar) = \
            get_best_range(devices)
        probability_win = num_transistors_arnar / float(num_transistors_total)
        print 'Case #%d: %.10f' % (i, probability_win)
        i += 1


if __name__ == '__main__':
    main()
| cc0-1.0 | Python |
|
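A worked check of get_best_range(): for devices [1, 2, 3] (total 6), the best Arnar can do is a split whose largest interval is 3, so Solveig takes 3 and Arnar keeps 6 - 3 = 3; that is first reached with the middle interval covering indices 0..1.

assert get_best_range([1, 2, 3]) == ((0, 1), 3)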
c50628d1cf984be774cdf1bc6728b9c1cb3f94fa | Create Assignment2Solution.py | WUSTL-GIS-Programming-spring-2014/classinfo,WUSTL-GIS-Programming-spring-2014/classinfo | Assignments/Assignment2Solution.py | Assignments/Assignment2Solution.py | # Your name here
# Assignment 2: Process a folder of shapefiles
# Using the os library, find all shapefiles, and only shapefiles, in a given folder and buffer them as before.
# Catch exceptions to handle invalid shapefiles.
import arcpy
import os
def main(inputfolder, prefix, outputfolder):
    """Buffer all shapefiles in inputfolder, appending with prefix and output to outputfolder."""
    filelist = os.listdir(inputfolder)
    for f in filelist:
        if f.endswith('.shp'):
            try:
                input = inputfolder + f
                output = outputfolder + prefix + f
                arcpy.Buffer_analysis(input, output, u'500 Feet')
            except Exception as e:
                print "Unable to buffer", f
                print e
    return outputfolder


if __name__ == '__main__':
    # Arguments must be supplied in the __main__ block, not in the function called.
    inf = u'C:\\Facilities\\'
    p = u'Buffered_'
    outf = u'C:\\Facilities\\'
    # Print output location to standard output
    print "Output written to", main(inf, p, outf)
| unlicense | Python |
|
4f32369efb0b2cd8540cc78132cadfbed6e68ae8 | Read and write xls files | alimanfoo/petlx,alimanfoo/petlx,alimanfoo/petlx | src/petlx/xls.py | src/petlx/xls.py | """
Read and write xls files, using xlrd.
"""
import os
import petl
from petlx.util import UnsatisfiedDependency
dep_message = """
The package xlrd is required. pip install xlrd.
"""
def fromxls(filename, sheetname):
    """
    Extract a table from a sheet in an Excel (.xls) file.

    N.B., the sheet name is case sensitive, so watch out for, e.g., 'Sheet1'.

    The package xlrd is required. Try ``pip install xlrd``.
    """
    return XLSView(filename, sheetname)


class XLSView(petl.util.RowContainer):

    def __init__(self, filename, sheetname='Sheet1'):
        self.filename = filename
        self.sheetname = sheetname

    def __iter__(self):
        try:
            import xlrd
        except ImportError as e:
            raise UnsatisfiedDependency(e, dep_message)
        wb = xlrd.open_workbook(filename=self.filename)
        ws = wb.sheet_by_name(self.sheetname)
        return (ws.row_values(rownum) for rownum in range(0, ws.nrows))
import sys
from petlx.integration import integrate
integrate(sys.modules[__name__]) | mit | Python |
|
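Hypothetical usage (requires xlrd and an existing workbook); the returned table is lazy, so rows are only read when it is iterated.

from petl import look
tbl = fromxls('example.xls', 'Sheet1')  # sheet name is case sensitive
print look(tbl)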
24d742e444c84df99629d8a6aff7ca7e6c90f995 | Add adhoc script to detect jobs with stuck ActiveInvocations list. | luci/luci-go,luci/luci-go,luci/luci-go,luci/luci-go,luci/luci-go,luci/luci-go | scheduler/misc/detect_stuck_active_invs.py | scheduler/misc/detect_stuck_active_invs.py | #!/usr/bin/env python
# Copyright 2018 The LUCI Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finds jobs with old entries (>1d) in ActiveInvocations list.
Usage:
prpc login
./detect_stuck_active_invs.py luci-scheduler-dev.appspot.com
Requires the caller to be in 'administrators' group.
"""
import json
import subprocess
import sys
import time
def prpc(host, method, body):
p = subprocess.Popen(
['prpc', 'call', host, method],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
out, _ = p.communicate(json.dumps(body))
if p.returncode:
raise Exception('Call to %s failed' % method)
return json.loads(out)
def check_job(host, job_ref):
print 'Checking %s/%s' % (job_ref['project'], job_ref['job'])
state = prpc(host, 'internal.admin.Admin.GetDebugJobState', job_ref)
active_invs = state.get('activeInvocations', [])
if not active_invs:
print ' No active invocations'
return []
stuck = []
for inv_id in active_invs:
print ' ...checking %s' % inv_id
inv = prpc(host, 'scheduler.Scheduler.GetInvocation', {
'jobRef': job_ref,
'invocationId': inv_id,
})
started = time.time() - int(inv['startedTs']) / 1000000.0
if started > 24 * 3600:
print ' it is stuck!'
stuck.append((job_ref, inv_id))
return stuck
def main():
if len(sys.argv) != 2:
print >> sys.stderr, 'Usage: %s <host>' % sys.argv[0]
return 1
host = sys.argv[1]
stuck = []
for job in prpc(host, 'scheduler.Scheduler.GetJobs', {})['jobs']:
stuck.extend(check_job(host, job['jobRef']))
if not stuck:
print 'No invocations are stuck'
return
print
print 'All stuck invocations: '
for job_ref, inv_id in stuck:
print '%s/%s %s' % (job_ref['project'], job_ref['job'], inv_id)
return 0
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | Python |
|
cf78037980a9345c12b1e2562bc4eda63cea95b3 | Add a simple regression test to go with r143260. CommandInterpreter::PreprocessCommand() should not infinite loop when a target has not been specified yet. | apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb | test/functionalities/backticks/TestBackticksWithoutATarget.py | test/functionalities/backticks/TestBackticksWithoutATarget.py | """
Test that backticks without a target work (and do not infinite-loop).
"""
import os, time
import unittest2
import lldb
from lldbtest import *
class BackticksWithNoTargetTestCase(TestBase):

    mydir = "functionalities/backticks"

    def test_backticks_no_target(self):
        """A simple test of backticks without a target."""
        self.expect("print `1+2-3`",
                    substrs = [' = 0'])


if __name__ == '__main__':
    import atexit
    lldb.SBDebugger.Initialize()
    atexit.register(lambda: lldb.SBDebugger.Terminate())
    unittest2.main()
| apache-2.0 | Python |
|
28e226a47d16fb6a52c937031be19d8832e7e5c4 | Bump development version | nephila/django-ckeditor-filebrowser-filer,GasimGasimzada/django-ckeditor-filebrowser-filer,nephila/django-ckeditor-filebrowser-filer,GasimGasimzada/django-ckeditor-filebrowser-filer | ckeditor_filebrowser_filer/__init__.py | ckeditor_filebrowser_filer/__init__.py | # -*- coding: utf-8 -*-
__version__ = '0.2.0.b1'
| # -*- coding: utf-8 -*-
__version__ = '0.1.1'
| bsd-3-clause | Python |
ff2fb40e961fe7b0c3f6dd6e91d8fb79a865a631 | Hide the AuditorReader role in the system role assignment modal by placing it in an "implied" scope. | josthkko/ggrc-core,uskudnik/ggrc-core,selahssea/ggrc-core,prasannav7/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,vladan-m/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,hasanalom/ggrc-core,hasanalom/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,uskudnik/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,jmakov/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,jmakov/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core,vladan-m/ggrc-core,vladan-m/ggrc-core,vladan-m/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core,plamut/ggrc-core,uskudnik/ggrc-core,hyperNURb/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,VinnieJohns/ggrc-core,vladan-m/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,hyperNURb/ggrc-core,hasanalom/ggrc-core,uskudnik/ggrc-core,plamut/ggrc-core,hyperNURb/ggrc-core,uskudnik/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,hasanalom/ggrc-core,hasanalom/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,NejcZupec/ggrc-core,jmakov/ggrc-core,edofic/ggrc-core | src/ggrc_basic_permissions/migrations/versions/20131108224846_37b63b122038_hide_the_auditorread.py | src/ggrc_basic_permissions/migrations/versions/20131108224846_37b63b122038_hide_the_auditorread.py |
"""Hide the AuditorReader role by changing its scope.
Revision ID: 37b63b122038
Revises: 1ff082d26157
Create Date: 2013-11-08 22:48:46.956836
"""
# revision identifiers, used by Alembic.
revision = '37b63b122038'
down_revision = '1ff082d26157'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
column('scope', sa.String),
)
def upgrade():
op.execute(
roles_table.update()\
.where(roles_table.c.name == 'AuditorReader')\
.values({'scope': 'System Implied'})
)
def downgrade():
op.execute(
roles_table.update()\
.where(roles_table.c.name == 'AuditorReader')\
.values({'scope': 'System'})
)
| apache-2.0 | Python |
|
dc200e50020637650c8a5dfe76895b0a033a8cea | Add tests for verifying that deactivating password works | akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr | akvo/rsr/tests/models/test_login_logging.py | akvo/rsr/tests/models/test_login_logging.py | # -*- coding: utf-8 -*-
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
from datetime import timedelta
from django.forms import ValidationError
from django.conf import settings
from django.test import Client
from akvo.rsr.models import LoginLog
from akvo.rsr.models.login_log import MAX_FAILED_LOGINS
from akvo.rsr.tests.base import BaseTestCase
class LoginLoggingTestCase(BaseTestCase):
    """Tests for the login logging model"""

    def setUp(self):
        self.email = '[email protected]'
        self.password = 'password'
        self.user = self.create_user(self.email, self.password)
        self.c = Client(HTTP_HOST=settings.RSR_DOMAIN)

    def test_successful_login_creates_log_entry(self):
        # When
        self.c.login(username=self.email, password=self.password)

        # Then
        logs = LoginLog.objects.filter(email=self.email)
        self.assertTrue(logs.exists())
        self.assertTrue(logs.first().success)

    def test_failed_login_creates_log_entry(self):
        # When
        with self.assertRaises(ValidationError):
            self.c.login(username=self.email, password='')

        # Then
        logs = LoginLog.objects.filter(email=self.email)
        self.assertTrue(logs.exists())
        self.assertFalse(logs.first().success)

    def test_password_deactivates_after_max_attempts(self):
        # Given
        for _ in range(MAX_FAILED_LOGINS - 1):
            with self.assertRaises(ValidationError):
                self.c.login(username=self.email, password='')

        # When
        with self.assertRaises(ValidationError) as assertion:
            self.c.login(username=self.email, password='')

        # Then
        self.assertIn('Login has been disabled', assertion.exception.message)

    def test_logins_post_password_deactivation_ignored(self):
        # When
        for _ in range(MAX_FAILED_LOGINS + 10):
            with self.assertRaises(ValidationError):
                self.c.login(username=self.email, password='')
        with self.assertRaises(ValidationError) as assertion:
            self.c.login(username=self.email, password=self.password)

        # Then
        self.assertIn('Login has been disabled', assertion.exception.message)
        logs = LoginLog.objects.filter(email=self.email)
        self.assertEqual(MAX_FAILED_LOGINS, logs.count())

    def test_login_works_after_deactivation_time(self):
        # Given
        for _ in range(MAX_FAILED_LOGINS + 10):
            with self.assertRaises(ValidationError):
                self.c.login(username=self.email, password='')
        # HACK: Set the creation time of these login attempts to older than login_disable_time
        time_delta = settings.LOGIN_DISABLE_TIME * 2
        creation_time = LoginLog.objects.first().created_at - timedelta(seconds=time_delta)
        LoginLog.objects.update(created_at=creation_time)

        # When
        self.c.login(username=self.email, password=self.password)

        # Then
        log_entry = LoginLog.objects.filter(email=self.email).first()
        self.assertTrue(log_entry.success)
| agpl-3.0 | Python |
|
e28a6423f63a169b46ebe46e9690d3858f953909 | Add tests | mathjazz/pontoon,mrinaljain/mozindia,elin-moco/ffclub,rtucker-mozilla/mozpackager,hfeeki/djiaoshoujia,akatsoulas/oneanddone,zofuthan/airmozilla,glogiotatidis/snippets-service,glogiotatidis/mozlex,linearregression/socorro,mozilla/bramble,kumar303/pto-planner,rtucker-mozilla/mozilla_inventory,jotes/pontoon,mozilla/dragnet,kumar303/rockit,bobsilverberg/oneanddone,peterbe/WhistlePig,akatsoulas/snippets-service,glogiotatidis/mozlex,Mte90/remo,anu7495/airmozilla,bensternthal/snippets-service,peterbe/WhistlePig,vivekanand1101/pontoon,Mte90/remo,hfeeki/djiaoshoujia,kenrick95/airmozilla,luser/socorro,rhelmer/inquisitor,rtucker-mozilla/mozpackager,bobsilverberg/oneanddone,lmorchard/badg.us,kumar303/bangotest,mozilla/sheriffs,mozilla/firefox-flicks,rhelmer/mopad,AdrianGaudebert/socorro,mozilla/remo,kumar303/pto-planner,lcamacho/airmozilla,mozilla/make.mozilla.org,Serg09/socorro,mozilla/remo,mrinaljain/mozindia,anu7495/airmozilla,luser/socorro,anjalymehla/airmozilla,peterbe/bramble,schalkneethling/snippets-service,LegNeato/cwsy,chirilo/airmozilla,mozilla/badges.mozilla.org,kumar303/bangotest,linearregression/socorro,akatsoulas/remo,mythmon/airmozilla,mozilla/make.mozilla.org,akatsoulas/oneanddone,kenrick95/airmozilla,mozilla/bramble,mozilla/gameon,Tchanders/socorro,abdullah2891/remo,jotes/pontoon,tgavankar/PlaydohSlideSync,rtucker-mozilla/mozilla_inventory,jlongster/jlongster-django,mozilla/playdoh,rtucker-mozilla/inventory,bensternthal/snippets-service,akatsoulas/mcp,lmorchard/badger2,abdullah2891/remo,Osmose/snippets-service-prototype,ehsan/airmozilla,Nolski/airmozilla,mozilla/peekaboo,mozilla/socorro,mozilla/dragnet,EricSekyere/airmozilla,chirilo/remo,akatsoulas/oneanddone,schalkneethling/snippets-service,mathjazz/pontoon,kumar303/rockit,a-buck/airmozilla,peterbe/WhistlePig,Tayamarn/socorro,jgmize/nucleus,abdullah2891/remo,EricSekyere/airmozilla,flamingspaz/remo,rtucker-mozilla/mozpackager,mozilla/BanHammer,akatsoulas/oneanddone,mozilla/make.mozilla.org,mozmar/snippets-service,mozilla/pontoon,Tchanders/socorro,rhelmer/socorro,johngian/remo,spthaolt/socorro,lmorchard/badg.us,KaiRo-at/socorro,akeym/cyder,yglazko/socorro,kumar303/rockit,mozilla/pontoon,mozilla/inventory,tannishk/airmozilla,mozilla/peekaboo,sudheesh001/pontoon,tessie/oneanddone,tannishk/airmozilla,mozilla/inventory,linearregression/socorro,rtucker-mozilla/inventory,jgmize/nucleus,jicksy/oneanddone_test,m8ttyB/oneanddone,Osmose/snippets-service,AdrianGaudebert/socorro,jicksy/oneanddone_test,mrinaljain/mozindia,Tayamarn/socorro,chirilo/remo,mozilla/peekaboo,linearregression/socorro,anu7495/airmozilla,chirilo/remo,jotes/pontoon,AutomatedTester/bisectcloud,m8ttyB/socorro,mozilla/snippets-service,mozilla/firefox-flicks,mozmar/snippets-service,Nolski/airmozilla,rtucker-mozilla/inventory,vivekanand1101/pontoon,mozilla/pto,jlongster/jlongster-django,Mte90/remo,Osmose/trephub,ehsan/airmozilla,yglazko/socorro,flamingspaz/remo,drkitty/cyder,murrown/cyder,Osmose/trephub,AdrianGaudebert/socorro-crashstats,cliqz/socorro,brianloveswords/webpagemaker,mozilla/kitchensinkserver,chirilo/airmozilla,vereseproject/verese,adngdb/socorro,rtucker-mozilla/WhistlePig,glogiotatidis/oneanddone,mozilla/socorro,akatsoulas/mozsignal,jlongster/jlongster-django,mathjazz/pontoon,mozilla/mozilla-badges,AutomatedTester/bisectcloud,m8ttyB/oneanddone,bugzPDX/airmozilla,lcamacho/airmozilla,lmorchard/whuru,murrown/cyder,bobsilverberg/oneanddone,linearregression/socorro,rhelmer/socorro,participedia/pontoon,drkitty/cyder,fla
mingspaz/remo,mozilla/bramble,linearregression/socorro,peterbe/peekaboo,lmorchard/gitshipped,EricSekyere/airmozilla,lonnen/socorro,muffinresearch/solitude,participedia/pontoon,lcamacho/airmozilla,AdrianGaudebert/socorro-crashstats,mozilla/BanHammer,mozilla/playdoh,m8ttyB/socorro,mastizada/pontoon,mozilla/pto,bugzPDX/airmozilla,twobraids/socorro,rtucker-mozilla/inventory,OSU-Net/cyder,lmorchard/whuru,zofuthan/airmozilla,tgavankar/PlaydohSlideSync,johngian/remo,rtucker-mozilla/mozpackager,XioNoX/BanHammer,Tchanders/socorro,mozilla/sheriffs,rhelmer/mopad,vivekanand1101/pontoon,ehsan/airmozilla,anu7495/airmozilla,mastizada/pontoon,participedia/pontoon,lonnen/socorro,mozilla/pontoon,blossomica/airmozilla,Osmose/snippets-service-prototype,MozillaIndia/mozindia,Tchanders/socorro,kumar303/bangotest,KaiRo-at/socorro,bobsilverberg/oneanddone,deepankverma/badges.mozilla.org,rhelmer/socorro,mozilla/badges.mozilla.org,akatsoulas/mozsignal,johngian/remo,Jobava/mirror-pontoon,muffinresearch/solitude,a-buck/airmozilla,adngdb/socorro,peterbe/bramble,bugzPDX/airmozilla,mozilla/airmozilla,Osmose/charas-playdoh,MozillaIndia/mozindia,mathjazz/pontoon,Tayamarn/socorro,LegNeato/cwsy,Osmose/pontoon,tannishk/airmozilla,hfeeki/djiaoshoujia,XioNoX/BanHammer,anjalymehla/airmozilla,peterbe/bramble,mozilla/gameon,bugzPDX/airmozilla,akatsoulas/snippets-service,Osmose/snippets-service,mozilla/badges.mozilla.org,LegNeato/cwsy,luser/socorro,peterbe/bramble,sudheesh001/pontoon,mozilla/socorro,tgavankar/unifolio,KaiRo-at/socorro,mythmon/airmozilla,mozilla/pontoon,abdullah2891/remo,mythmon/airmozilla,mozilla/remo,m8ttyB/socorro,peterbe/airmozilla,schalkneethling/snippets-service,Nolski/airmozilla,mozilla/dragnet,kumar303/rockit,mastizada/pontoon,hfeeki/djiaoshoujia,elin-moco/metrics,adini121/oneanddone,akatsoulas/mcp,mozilla/mozilla-badges,tessie/oneanddone,m8ttyB/socorro,tgavankar/unifolio,rhelmer/socorro,elin-moco/ffclub,lonnen/socorro,gkoberger/extend-siri,Osmose/snippets-service-prototype,mozilla/snippets-service,ehsan/airmozilla,murrown/cyder,mozilla/kitchensinkserver,twobraids/socorro,spthaolt/socorro,lmorchard/badg.us,Osmose/mnotes,elin-moco/ffclub,mozilla/socorro,Tayamarn/socorro,mozilla/BanHammer,akatsoulas/mozsignal,yglazko/socorro,tessie/oneanddone,twobraids/socorro,EricSekyere/airmozilla,VarnaSuresh/oneanddone,akatsoulas/remo,gkoberger/extend-siri,rtucker-mozilla/mozilla_inventory,deepankverma/badges.mozilla.org,mozilla/playdoh,lmorchard/whuru,yshlin/tildeslash,mozilla/pto,peterbe/airmozilla,mozilla/inventory,tsmrachel/remo,m8ttyB/socorro,mozilla/snippets-service,jotes/pontoon,mozilla/bramble,luser/socorro,AdrianGaudebert/socorro,lmorchard/gitshipped,Nolski/airmozilla,akeym/cyder,glogiotatidis/oneanddone,tannishk/airmozilla,zeeman/cyder,mozilla/pto,mozilla/inventory,mozilla/badg.us,twobraids/socorro,bensternthal/snippets-service,akeym/cyder,m8ttyB/pontoon,yshlin/tildeslash,adngdb/socorro,yfdyh000/pontoon,OSU-Net/cyder,pcabido/socorro,mathjazz/pontoon,peterbe/WhistlePig,a-buck/airmozilla,pcabido/socorro,MozillaIndia/mozindia,lcamacho/airmozilla,m8ttyB/pontoon,lcamacho/airmozilla,flamingspaz/remo,bsmedberg/socorro,MozillaIndia/mozindia,kenrick95/airmozilla,OSU-Net/cyder,AdrianGaudebert/socorro-crashstats,cliqz/socorro,mozilla/firefox-flicks,chirilo/airmozilla,glogiotatidis/snippets-service,mythmon/airmozilla,rhelmer/socorro,rhelmer/mopad,rtucker-mozilla/WhistlePig,spthaolt/socorro,zeeman/cyder,tessie/oneanddone,Jobava/mirror-pontoon,mozilla/badges.mozilla.org,glogiotatidis/mozlex,cliqz/socorro,XioNoX/BanHammer,elin-m
oco/metrics,pcabido/socorro,chirilo/airmozilla,brianloveswords/webpagemaker,bsmedberg/socorro,anu7495/airmozilla,rtucker-mozilla/mozilla_inventory,AutomatedTester/bisectcloud,adngdb/socorro,rtucker-mozilla/mozpackager,akatsoulas/remo,glogiotatidis/oneanddone,sudheesh001/pontoon,rhelmer/inquisitor,mozilla/kitchensinkserver,lmorchard/badger2,kumar303/bangotest,m8ttyB/pontoon,vereseproject/verese,jledbetter/cookdoh,cliqz/socorro,mozilla/airmozilla,lmorchard/gitshipped,jledbetter/cookdoh,glogiotatidis/oneanddone,tgavankar/PlaydohSlideSync,johngian/remo,mrinaljain/mozindia,rtucker-mozilla/mozpackager,tsmrachel/remo,spthaolt/socorro,zeeman/cyder,pcabido/socorro,mozilla/kitchensinkserver,Jobava/mirror-pontoon,pcabido/socorro,Osmose/mnotes,mozmar/snippets-service,tgavankar/PlaydohSlideSync,akeym/cyder,mastizada/pontoon,yfdyh000/pontoon,mozilla/inventory,spthaolt/socorro,VarnaSuresh/oneanddone,luser/socorro,mozilla/socorro,AutomatedTester/bisectcloud,bsmedberg/socorro,Serg09/socorro,glogiotatidis/snippets-service,KaiRo-at/socorro,blossomica/airmozilla,jgmize/nucleus,chirilo/remo,peterbe/peekaboo,Osmose/pontoon,drkitty/cyder,zofuthan/airmozilla,chirilo/airmozilla,blossomica/airmozilla,mozilla/BanHammer,Tayamarn/socorro,drkitty/cyder,Osmose/charas-playdoh,tannishk/airmozilla,lmorchard/badg.us,Jobava/mirror-pontoon,glogiotatidis/snippets-service,Osmose/yip,mozilla/badg.us,mozilla/pontoon,Serg09/socorro,Osmose/snippets-service-prototype,KaiRo-at/socorro,VarnaSuresh/oneanddone,adini121/oneanddone,rhelmer/inquisitor,tgavankar/unifolio,mozmar/snippets-service,schalkneethling/snippets-service,EricSekyere/airmozilla,deepankverma/badges.mozilla.org,akatsoulas/remo,m8ttyB/oneanddone,akatsoulas/mcp,zofuthan/airmozilla,zeeman/cyder,akatsoulas/snippets-service,Tchanders/socorro,mozilla/firefox-flicks,kenrick95/airmozilla,mozilla/peekaboo,peterbe/airmozilla,vereseproject/verese,Tchanders/socorro,Osmose/pontoon,m8ttyB/pontoon,Mte90/remo,a-buck/airmozilla,rtucker-mozilla/mozilla_inventory,participedia/pontoon,zofuthan/airmozilla,mozilla/airmozilla,tgavankar/unifolio,jgmize/nucleus,yglazko/socorro,anjalymehla/airmozilla,brianloveswords/webpagemaker,lmorchard/badger2,mozilla/snippets-service,yglazko/socorro,KaiRo-at/socorro,lmorchard/badger2,adngdb/socorro,twobraids/socorro,yfdyh000/pontoon,mozilla/make.mozilla.org,lmorchard/whuru,AdrianGaudebert/socorro-crashstats,mozilla/gameon,glogiotatidis/mozlex,mozilla/playdoh,kenrick95/airmozilla,adini121/oneanddone,vivekanand1101/pontoon,AdrianGaudebert/socorro,Osmose/pontoon,m8ttyB/socorro,tsmrachel/remo,cliqz/socorro,OSU-Net/cyder,deepankverma/badges.mozilla.org,rhelmer/socorro,elin-moco/metrics,bsmedberg/socorro,mozilla/socorro,mozilla/airmozilla,mozilla/remo,mozilla/badg.us,Osmose/yip,murrown/cyder,yglazko/socorro,cliqz/socorro,Osmose/mnotes,Serg09/socorro,gkoberger/extend-siri,rtucker-mozilla/WhistlePig,Serg09/socorro,ehsan/airmozilla,AdrianGaudebert/socorro,VarnaSuresh/oneanddone,adngdb/socorro,blossomica/airmozilla,mozilla/gameon,bensternthal/snippets-service,spthaolt/socorro,mozilla/mozilla-badges,yfdyh000/pontoon,luser/socorro,Osmose/snippets-service,Osmose/trephub,peterbe/peekaboo,LegNeato/cwsy,adini121/oneanddone,pcabido/socorro,bsmedberg/socorro,Tayamarn/socorro,tsmrachel/remo,Osmose/charas-playdoh,Osmose/mnotes,brianloveswords/webpagemaker,elin-moco/ffclub,Nolski/airmozilla,AdrianGaudebert/socorro,vereseproject/verese,Serg09/socorro,rhelmer/mopad,mythmon/airmozilla,jicksy/oneanddone_test,akatsoulas/snippets-service,lmorchard/gitshipped,XioNoX/BanHammer,mozilla/she
riffs,Osmose/snippets-service,kumar303/pto-planner,twobraids/socorro,m8ttyB/oneanddone,gkoberger/extend-siri,sudheesh001/pontoon,yshlin/tildeslash,anjalymehla/airmozilla,anjalymehla/airmozilla,rtucker-mozilla/inventory,lonnen/socorro | apps/commons/tests/test_accepted_locales.py | apps/commons/tests/test_accepted_locales.py | import os
import shutil
from django.conf import settings
import test_utils
import manage
class AcceptedLocalesTest(test_utils.TestCase):
"""Test lazy evaluation of locale related settings.
Verify that some localization-related settings are lazily evaluated based
on the current value of the DEV variable. Depending on the value,
DEV_LANGUAGES or PROD_LANGUAGES should be used.
"""
locale = manage.path('locale')
locale_bkp = manage.path('locale_bkp')
@classmethod
def setup_class(cls):
"""Create a directory structure for locale/.
Back up the existing locale/ directory and create the following
hierarchy in its place:
- locale/en-US/LC_MESSAGES
- locale/fr/LC_MESSAGES
- locale/templates/LC_MESSAGES
- locale/empty_file
Also, set PROD_LANGUAGES to ('en-US',).
"""
if os.path.exists(cls.locale_bkp):
raise Exception('A backup of locale/ exists at %s which might '
'mean that previous tests didn\'t end cleanly. '
'Skipping the test suite.' % cls.locale_bkp)
cls.DEV = settings.DEV
cls.PROD_LANGUAGES = settings.PROD_LANGUAGES
cls.DEV_LANGUAGES = settings.DEV_LANGUAGES
settings.PROD_LANGUAGES = ('en-US',)
os.rename(cls.locale, cls.locale_bkp)
for loc in ('en-US', 'fr', 'templates'):
os.makedirs(os.path.join(cls.locale, loc, 'LC_MESSAGES'))
open(os.path.join(cls.locale, 'empty_file'), 'w').close()
@classmethod
def teardown_class(cls):
"""Remove the testing locale/ dir and bring back the backup."""
settings.DEV = cls.DEV
settings.PROD_LANGUAGES = cls.PROD_LANGUAGES
settings.DEV_LANGUAGES = cls.DEV_LANGUAGES
shutil.rmtree(cls.locale)
os.rename(cls.locale_bkp, cls.locale)
def test_build_dev_languages(self):
"""Test that the list of dev locales is built properly.
On dev instances, the list of accepted locales should correspond to
the per-locale directories in locale/.
"""
settings.DEV = True
assert (settings.DEV_LANGUAGES == ['en-US', 'fr'] or
settings.DEV_LANGUAGES == ['fr', 'en-US']), \
'DEV_LANGUAGES do not correspond to the contents of locale/.'
def test_dev_languages(self):
"""Test the accepted locales on dev instances.
On dev instances, allow locales defined in DEV_LANGUAGES.
"""
settings.DEV = True
# simulate the successful result of the DEV_LANGUAGES list
# comprehension defined in settings.
settings.DEV_LANGUAGES = ['en-US', 'fr']
assert settings.LANGUAGE_URL_MAP == {'en-us': 'en-US', 'fr': 'fr'}, \
('DEV is True, but DEV_LANGUAGES are not used to define the '
'allowed locales.')
def test_prod_languages(self):
"""Test the accepted locales on prod instances.
On stage/prod instances, allow locales defined in PROD_LANGUAGES.
"""
settings.DEV = False
assert settings.LANGUAGE_URL_MAP == {'en-us': 'en-US'}, \
('DEV is False, but PROD_LANGUAGES are not used to define the '
'allowed locales.')
| bsd-3-clause | Python |
|
bb9fd71dc06ac39b461b4109e341fe7cd4172c76 | use self.create_socket() | community-ssu/telepathy-gabble,mlundblad/telepathy-gabble,Ziemin/telepathy-gabble,community-ssu/telepathy-gabble,jku/telepathy-gabble,mlundblad/telepathy-gabble,community-ssu/telepathy-gabble,mlundblad/telepathy-gabble,community-ssu/telepathy-gabble,Ziemin/telepathy-gabble,jku/telepathy-gabble,Ziemin/telepathy-gabble,jku/telepathy-gabble,Ziemin/telepathy-gabble | tests/twisted/file-transfer/test-receive-file-and-disconnect.py | tests/twisted/file-transfer/test-receive-file-and-disconnect.py | import socket
from file_transfer_helper import exec_file_transfer_test, ReceiveFileTest
class ReceiveFileAndDisconnectTest(ReceiveFileTest):
def receive_file(self):
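        # create_socket() comes from the ReceiveFileTest helper; presumably it
        # chooses the right address family instead of hardcoding AF_UNIX here.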
s = self.create_socket()
s.connect(self.address)
# disconnect
self.conn.Disconnect()
self.q.expect('dbus-signal', signal='StatusChanged', args=[2, 1])
return True
if __name__ == '__main__':
exec_file_transfer_test(ReceiveFileAndDisconnectTest)
| import socket
from file_transfer_helper import exec_file_transfer_test, ReceiveFileTest
class ReceiveFileAndDisconnectTest(ReceiveFileTest):
def receive_file(self):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(self.address)
# disconnect
self.conn.Disconnect()
self.q.expect('dbus-signal', signal='StatusChanged', args=[2, 1])
return True
if __name__ == '__main__':
exec_file_transfer_test(ReceiveFileAndDisconnectTest)
| lgpl-2.1 | Python |
5dfd7b1534e19242ab778d535e2de13b424578f7 | Add examples | Jufik/python-fixer | example.py | example.py | #!/usr/bin/python3
#
# Copyright (c) 2016, Fabian Affolter <[email protected]>
# Released under the MIT license. See LICENSE file for details.
#
import fixerio
# Our base currency is the Czech Koruna instead of the default (EUR).
BASE = 'CZK'
exchange = fixerio.Fixer(base=BASE)
print('Current exchange rates:')
for currency, rate in exchange.convert().get('rates').items():
print('{} : {}'.format(currency, rate))
print('Current exchange rates for CHF:')
# Check if the target currency exists
if exchange.currency_available('CHF'):
print(exchange.convert().get('rates')['CHF'])
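# Note: each convert() call above hits the fixer.io API again; when several
# lookups are needed, caching the response once is presumably cheaper:
#     rates = exchange.convert().get('rates')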
| mit | Python |
|
5afdd7775dd1aa232d3ca8fa2852f4a36918f224 | add management command to fix forms whose non-ascii chars are corrupted | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/cleanup/management/commands/fix_corrupted_forms.py | corehq/apps/cleanup/management/commands/fix_corrupted_forms.py | # encoding: utf-8
from __future__ import absolute_import
from __future__ import unicode_literals
from django.core.management import BaseCommand
from six.moves import input
from corehq.apps.app_manager.dbaccessors import get_apps_by_id
SUSPICIOUS_STRINGS = [
international_character.encode('utf-8').decode('latin1')
for international_character in [
'á', 'é', 'í', 'ó', 'ú',
'Á', 'É', 'Í', 'Ó', 'Ú',
'’',
] # TODO - add more common non-ascii characters
]
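# Worked example: 'á' encodes to 0xC3 0xA1 in UTF-8, so
# 'á'.encode('utf-8').decode('latin1') == 'Ã¡' -- the tell-tale mojibake this
# command scans form sources for.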
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('app_id')
parser.add_argument('form_id')
parser.add_argument(
'--cleanup',
action='store_true',
dest='cleanup',
default=False,
)
# https://dimagi-dev.atlassian.net/browse/HI-747
def handle(self, domain, app_id, form_id, cleanup=False, **options):
app = get_apps_by_id(domain, app_id)[0]
form = app.get_form(form_id)
source = form.source
if any(suspicious_string in source for suspicious_string in SUSPICIOUS_STRINGS):
print('FORM CONTAINS SUSPICIOUS STRING')
if cleanup:
if 'y' == input('Did you confirm that there are no app updates to publish? [y/N]'):
print('Cleaning form...')
form.source = source.encode('latin1').decode('utf-8')
app.save()
print('Done.')
else:
print('Aborting...')
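# Assumed invocation via Django's manage.py, mirroring the arguments above:
#   ./manage.py fix_corrupted_forms <domain> <app_id> <form_id> --cleanup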
| bsd-3-clause | Python |
|
3cf56093b9d132a5089a70a12feb73c4be987da8 | Add mtnpatch.py, a script to parse and import a full monotone diff | demsey/openenigma2,xifengchuo/openembedded,nx111/openembeded_openpli2.1_nx111,hulifox008/openembedded,nzjrs/overo-openembedded,sampov2/audio-openembedded,SIFTeam/openembedded,nvl1109/openembeded,openembedded/openembedded,JrCs/opendreambox,BlackPole/bp-openembedded,scottellis/overo-oe,popazerty/openembedded-cuberevo,buglabs/oe-buglabs,sutajiokousagi/openembedded,nlebedenco/mini2440,crystalfontz/openembedded,JrCs/opendreambox,demsey/openenigma2,thebohemian/openembedded,buglabs/oe-buglabs,nx111/openembeded_openpli2.1_nx111,mrchapp/arago-oe-dev,nx111/openembeded_openpli2.1_nx111,bticino/openembedded,mrchapp/arago-oe-dev,BlackPole/bp-openembedded,scottellis/overo-oe,nzjrs/overo-openembedded,JamesAng/goe,nx111/openembeded_openpli2.1_nx111,BlackPole/bp-openembedded,sentient-energy/emsw-oe-mirror,xifengchuo/openembedded,dellysunnymtech/sakoman-oe,SIFTeam/openembedded,troth/oe-ts7xxx,John-NY/overo-oe,John-NY/overo-oe,rascalmicro/openembedded-rascal,rascalmicro/openembedded-rascal,popazerty/openembedded-cuberevo,bticino/openembedded,anguslees/openembedded-android,JamesAng/goe,sutajiokousagi/openembedded,libo/openembedded,demsey/openenigma2,Martix/Eonos,philb/pbcl-oe-2010,dellysunnymtech/sakoman-oe,nvl1109/openembeded,demsey/openembedded,openpli-arm/openembedded,dellysunnymtech/sakoman-oe,libo/openembedded,demsey/openembedded,JamesAng/goe,YtvwlD/od-oe,JrCs/opendreambox,trini/openembedded,sentient-energy/emsw-oe-mirror,BlackPole/bp-openembedded,scottellis/overo-oe,SIFTeam/openembedded,sentient-energy/emsw-oe-mirror,JamesAng/goe,YtvwlD/od-oe,crystalfontz/openembedded,openpli-arm/openembedded,openpli-arm/openembedded,nlebedenco/mini2440,buglabs/oe-buglabs,trini/openembedded,KDAB/OpenEmbedded-Archos,nvl1109/openembeded,sampov2/audio-openembedded,rascalmicro/openembedded-rascal,thebohemian/openembedded,yyli/overo-oe,sampov2/audio-openembedded,sledz/oe,dellysunnymtech/sakoman-oe,philb/pbcl-oe-2010,anguslees/openembedded-android,openembedded/openembedded,crystalfontz/openembedded,xifengchuo/openembedded,dellysunnymtech/sakoman-oe,demsey/openembedded,popazerty/openembedded-cuberevo,JamesAng/oe,Martix/Eonos,popazerty/openembedded-cuberevo,philb/pbcl-oe-2010,YtvwlD/od-oe,nvl1109/openembeded,Martix/Eonos,BlackPole/bp-openembedded,nvl1109/openembeded,SIFTeam/openembedded,bticino/openembedded,anguslees/openembedded-android,demsey/openembedded,crystalfontz/openembedded,sampov2/audio-openembedded,Martix/Eonos,John-NY/overo-oe,dellysunnymtech/sakoman-oe,popazerty/openembedded-cuberevo,bticino/openembedded,openembedded/openembedded,giobauermeister/openembedded,crystalfontz/openembedded,KDAB/OpenEmbedded-Archos,sledz/oe,nzjrs/overo-openembedded,demsey/openenigma2,JamesAng/oe,nx111/openembeded_openpli2.1_nx111,John-NY/overo-oe,dave-billin/overo-ui-moos-auv,nvl1109/openembeded,xifengchuo/openembedded,sampov2/audio-openembedded,nx111/openembeded_openpli2.1_nx111,dave-billin/overo-ui-moos-auv,anguslees/openembedded-android,openembedded/openembedded,philb/pbcl-oe-2010,nvl1109/openembeded,KDAB/OpenEmbedded-Archos,bticino/openembedded,sledz/oe,xifengchuo/openembedded,yyli/overo-oe,troth/oe-ts7xxx,xifengchuo/openembedded,dave-billin/overo-ui-moos-auv,sampov2/audio-openembedded,Martix/Eonos,thebohemian/openembedded,JrCs/opendreambox,John-NY/overo-oe,xifengchuo/openembedded,JrCs/opendreambox,sledz/oe,mrchapp/arago-oe-dev,xifengchuo/openembedded,nzjrs/overo-openembedded,scottellis/overo-oe,YtvwlD/od-oe,John-NY/overo-oe,
troth/oe-ts7xxx,dellysunnymtech/sakoman-oe,Martix/Eonos,anguslees/openembedded-android,KDAB/OpenEmbedded-Archos,demsey/openembedded,demsey/openenigma2,mrchapp/arago-oe-dev,openembedded/openembedded,mrchapp/arago-oe-dev,dave-billin/overo-ui-moos-auv,hulifox008/openembedded,sutajiokousagi/openembedded,hulifox008/openembedded,BlackPole/bp-openembedded,thebohemian/openembedded,John-NY/overo-oe,openembedded/openembedded,buglabs/oe-buglabs,sutajiokousagi/openembedded,SIFTeam/openembedded,demsey/openenigma2,dave-billin/overo-ui-moos-auv,openembedded/openembedded,YtvwlD/od-oe,popazerty/openembedded-cuberevo,buglabs/oe-buglabs,libo/openembedded,JamesAng/goe,JamesAng/oe,troth/oe-ts7xxx,KDAB/OpenEmbedded-Archos,sledz/oe,yyli/overo-oe,anguslees/openembedded-android,JrCs/opendreambox,nlebedenco/mini2440,dave-billin/overo-ui-moos-auv,bticino/openembedded,nlebedenco/mini2440,giobauermeister/openembedded,rascalmicro/openembedded-rascal,JamesAng/oe,hulifox008/openembedded,sentient-energy/emsw-oe-mirror,giobauermeister/openembedded,YtvwlD/od-oe,openpli-arm/openembedded,scottellis/overo-oe,giobauermeister/openembedded,libo/openembedded,troth/oe-ts7xxx,philb/pbcl-oe-2010,BlackPole/bp-openembedded,sledz/oe,mrchapp/arago-oe-dev,scottellis/overo-oe,sutajiokousagi/openembedded,SIFTeam/openembedded,JamesAng/oe,trini/openembedded,sutajiokousagi/openembedded,dave-billin/overo-ui-moos-auv,rascalmicro/openembedded-rascal,trini/openembedded,nzjrs/overo-openembedded,giobauermeister/openembedded,crystalfontz/openembedded,KDAB/OpenEmbedded-Archos,JamesAng/goe,trini/openembedded,nlebedenco/mini2440,giobauermeister/openembedded,dellysunnymtech/sakoman-oe,JamesAng/oe,rascalmicro/openembedded-rascal,trini/openembedded,demsey/openenigma2,openembedded/openembedded,buglabs/oe-buglabs,SIFTeam/openembedded,yyli/overo-oe,popazerty/openembedded-cuberevo,dellysunnymtech/sakoman-oe,anguslees/openembedded-android,sentient-energy/emsw-oe-mirror,YtvwlD/od-oe,sledz/oe,libo/openembedded,sampov2/audio-openembedded,YtvwlD/od-oe,crystalfontz/openembedded,openembedded/openembedded,openpli-arm/openembedded,nx111/openembeded_openpli2.1_nx111,Martix/Eonos,nlebedenco/mini2440,libo/openembedded,giobauermeister/openembedded,openpli-arm/openembedded,demsey/openembedded,yyli/overo-oe,yyli/overo-oe,sutajiokousagi/openembedded,thebohemian/openembedded,libo/openembedded,openpli-arm/openembedded,thebohemian/openembedded,nzjrs/overo-openembedded,nlebedenco/mini2440,nx111/openembeded_openpli2.1_nx111,nzjrs/overo-openembedded,sentient-energy/emsw-oe-mirror,hulifox008/openembedded,rascalmicro/openembedded-rascal,KDAB/OpenEmbedded-Archos,popazerty/openembedded-cuberevo,buglabs/oe-buglabs,philb/pbcl-oe-2010,JamesAng/goe,openembedded/openembedded,mrchapp/arago-oe-dev,rascalmicro/openembedded-rascal,xifengchuo/openembedded,bticino/openembedded,troth/oe-ts7xxx,yyli/overo-oe,giobauermeister/openembedded,yyli/overo-oe,trini/openembedded,openembedded/openembedded,JrCs/opendreambox,troth/oe-ts7xxx,demsey/openembedded,sentient-energy/emsw-oe-mirror,JrCs/opendreambox,JamesAng/oe,buglabs/oe-buglabs,thebohemian/openembedded,giobauermeister/openembedded,JrCs/opendreambox,hulifox008/openembedded,philb/pbcl-oe-2010,hulifox008/openembedded,scottellis/overo-oe | contrib/mtnpatch.py | contrib/mtnpatch.py | #!/usr/bin/env python
import sys, os, string, getopt
mtncmd = "monotone"
def main(argv = None):
if argv is None:
argv = sys.argv
opts, list = getopt.getopt(sys.argv[1:], ':R')
if len(list) < 1:
print "You must specify a file"
return 2
reverse = False
for o, a in opts:
if o == "-R":
reverse = True
if os.path.exists(list[0]):
input = open(list[0], 'r')
renameFrom = ""
cmd = ""
if reverse:
print "patch -R -p0 < %s" % list[0]
else:
print "patch -p0 < %s" % list[0]
for line in input:
if len(line) > 0:
if line[0] == '#':
parts = line.split()
if len(parts) > 2:
cmd = parts[1]
                        # deal with whitespace in filenames (badly)
fileName = parts[2]
i = 3
                        while i < len(parts) and fileName.count('"') % 2:
                            fileName += " %s" % parts[i]
                            i += 1
if cmd == "delete_file":
if reverse:
print "%s add %s" % (mtncmd, fileName)
else:
print "%s drop -e %s" % (mtncmd, fileName)
elif cmd == "add_file":
if reverse:
print "%s drop -e %s" % (mtncmd, fileName)
else:
print "%s add %s" % (mtncmd, fileName)
elif cmd == "rename_file":
renameFrom = fileName
elif cmd == "to" and renameFrom != "":
if reverse:
print "%s rename -e %s %s" % (mtncmd, fileName, renameFrom)
else:
print "%s rename -e %s %s" % (mtncmd, renameFrom, fileName)
renameFrom = ""
else:
cmd = ""
if __name__ == "__main__":
sys.exit(main())
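# Example (assumed workflow -- review the generated commands before running):
#   python mtnpatch.py full-changes.diff | sh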
| mit | Python |
|
4430b1957b642e87cd263455e371bf1d634101b0 | Add buildone command | nirbheek/cerbero-old,nirbheek/cerbero-old,flexVDI/cerbero,sdroege/cerbero,brion/cerbero,nzjrs/cerbero,centricular/cerbero,OptoFidelity/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,atsushieno/cerbero,atsushieno/cerbero,GStreamer/cerbero,shoreflyer/cerbero,fluendo/cerbero,ylatuya/cerbero,ikonst/cerbero,nirbheek/cerbero,justinjoy/cerbero,brion/cerbero,lubosz/cerbero,nzjrs/cerbero,centricular/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,atsushieno/cerbero,EricssonResearch/cerbero,cee1/cerbero-mac,ford-prefect/cerbero,AlertMe/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,superdump/cerbero,nirbheek/cerbero,GStreamer/cerbero,AlertMe/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,BigBrother-International/gst-cerbero,BigBrother-International/gst-cerbero,multipath-rtp/cerbero,davibe/cerbero,EricssonResearch/cerbero,EricssonResearch/cerbero,AlertMe/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,ford-prefect/cerbero,ford-prefect/cerbero,ramaxlo/cerbero,AlertMe/cerbero,superdump/cerbero,cee1/cerbero-mac,nicolewu/cerbero,justinjoy/cerbero,nzjrs/cerbero,GStreamer/cerbero,cee1/cerbero-mac,nirbheek/cerbero-old,shoreflyer/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,sdroege/cerbero,ikonst/cerbero,ylatuya/cerbero,nzjrs/cerbero,lubosz/cerbero,lubosz/cerbero,EricssonResearch/cerbero,jackjansen/cerbero-2013,ford-prefect/cerbero,fluendo/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,davibe/cerbero,superdump/cerbero,EricssonResearch/cerbero,nicolewu/cerbero,BigBrother-International/gst-cerbero,GStreamer/cerbero,fluendo/cerbero,multipath-rtp/cerbero,flexVDI/cerbero,GStreamer/cerbero,multipath-rtp/cerbero,nirbheek/cerbero,sdroege/cerbero,brion/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,jackjansen/cerbero,centricular/cerbero,ikonst/cerbero,centricular/cerbero,OptoFidelity/cerbero,jackjansen/cerbero-2013,nzjrs/cerbero,superdump/cerbero,nicolewu/cerbero,jackjansen/cerbero,jackjansen/cerbero,multipath-rtp/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,jackjansen/cerbero-2013,centricular/cerbero,justinjoy/cerbero,ramaxlo/cerbero,brion/cerbero,justinjoy/cerbero,brion/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,ikonst/cerbero,OptoFidelity/cerbero,atsushieno/cerbero,multipath-rtp/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,cee1/cerbero-mac,fluendo/cerbero,fluendo/cerbero,sdroege/cerbero,sdroege/cerbero,BigBrother-International/gst-cerbero,ylatuya/cerbero,lubosz/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,shoreflyer/cerbero,nirbheek/cerbero-old,flexVDI/cerbero,ylatuya/cerbero,davibe/cerbero,ramaxlo/cerbero,jackjansen/cerbero-2013,nirbheek/cerbero,jackjansen/cerbero,ramaxlo/cerbero,OptoFidelity/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,flexVDI/cerbero,AlertMe/cerbero,ramaxlo/cerbero,shoreflyer/cerbero,jackjansen/cerbero-2013,shoreflyer/cerbero,atsushieno/cerbero,BigBrother-International/gst-cerbero,ikonst/cerbero,flexVDI/cerbero,davibe/cerbero | cerbero/commands/buildone.py | cerbero/commands/buildone.py | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#from cerbero.oven import Oven
from cerbero.commands import Command, register_command
from cerbero.cookbook import CookBook
from cerbero.errors import FatalError
from cerbero.oven import Oven
from cerbero.utils import _, N_, ArgparseArgument
class BuildOne(Command):
doc = N_('Build or rebuild a single recipe without its dependencies')
name = 'buildone'
def __init__(self):
Command.__init__(self,
[ArgparseArgument('recipe', nargs=1,
help=_('name of the recipe to build')),
])
def run(self, config, args):
cookbook = CookBook.load(config)
recipe_name = args.recipe[0]
recipe = cookbook.get_recipe(recipe_name)
if recipe is None:
            raise FatalError(_("Recipe %s not found") % recipe_name)
oven = Oven(recipe, cookbook, force=True, no_deps=True)
oven.start_cooking()
register_command(BuildOne)
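# Typical use (assuming cerbero's usual command-line front end):
#   ./cerbero-uninstalled buildone <recipe-name>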
| lgpl-2.1 | Python |
|
6244e0b40d847687b7ff875a48fb08060efc97bf | Solve Within PyCharm | RaminGiahi/Pyomo | Newsolver.py | Newsolver.py | from __future__ import division
from pyomo.environ import *
from pyomo.opt import SolverFactory
model = AbstractModel()
model.M = Set()
model.N = Set()
model.n = Param()
model.c = Param(model.M, model.N)
model.x = Var(model.M, model.N, domain=Binary)
model.u = Var(model.M, domain=NonNegativeIntegers)
def obj_rule(model):
    return sum(model.c[i,j]*model.x[i,j] for (i,j) in model.M*model.N if i!=j)
model.obj = Objective(rule=obj_rule)
def const1(model,j):
return sum(model.x[i,j] for i in model.M if i!=j) == 1
model.cons = Constraint(model.N, rule= const1)
def const2(model,i):
return sum(model.x[i,j] for j in model.N if j!=i) ==1
model.cons2 = Constraint(model.M, rule=const2)
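# Miller-Tucker-Zemlin (MTZ) subtour elimination: the u variables impose an
# ordering on the visited cities so the assignment above cannot split into
# disjoint sub-tours (city 1 is the fixed start, hence the i,j >= 2 guard).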
def const3(model,i,j):
if i==j or i <2 or j<2:
return Constraint.Skip
return model.u[i]-model.u[j]+model.n*model.x[i,j] <= model.n-1
model.cons3 = Constraint(model.M, model.N, rule=const3)
instance = model.create_instance("salesman.dat")
instance.pprint()
opt = SolverFactory('glpk')
results = opt.solve(instance, tee=True)
results.write()
instance.solutions.load_from(results)
for v in instance.component_objects(Var, active=True):
print ("Variable",v)
varobject = getattr(instance, str(v))
for index in varobject:
print (" ",index, varobject[index].value) | unlicense | Python |
|
00f766b24865e8010411105794f20bc0ef39a6dc | Add py-sphinxcontrib-devhelp package (#13278) | iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack | var/spack/repos/builtin/packages/py-sphinxcontrib-devhelp/package.py | var/spack/repos/builtin/packages/py-sphinxcontrib-devhelp/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySphinxcontribDevhelp(PythonPackage):
"""sphinxcontrib-devhelp is a sphinx extension which outputs
Devhelp document."""
homepage = "http://sphinx-doc.org/"
url = "https://pypi.io/packages/source/s/sphinxcontrib-devhelp/sphinxcontrib-devhelp-1.0.1.tar.gz"
version('1.0.1', sha256='6c64b077937330a9128a4da74586e8c2130262f014689b4b89e2d08ee7294a34')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
def test(self):
# Requires sphinx, creating a circular dependency
pass
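# Typical use once the package is merged (standard Spack workflow, assumed):
#   spack install py-sphinxcontrib-devhelp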
| lgpl-2.1 | Python |