commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang |
---|---|---|---|---|---|---|---|---|
38eb6221ca41446c0c4fb1510354bdc4f00ba5f1 | Remove children via uid rather than name | waltermoreira/serfnode,waltermoreira/serfnode,waltermoreira/serfnode | serfnode/build/handler/launcher.py | serfnode/build/handler/launcher.py | #!/usr/bin/env python
import functools
import os
import signal
import sys
import docker_utils
def handler(name, signum, frame):
print('Should kill', name)
try:
cid = open('/child_{}'.format(name)).read().strip()
docker_utils.client.remove_container(cid, force=True)
except Exception:
pass
sys.exit(0)
def launch(name, args):
try:
cid = open('/child_{}'.format(name)).read().strip()
except IOError:
cid = name
try:
os.unlink('/child_{}'.format(name))
except OSError:
pass
try:
docker_utils.client.remove_container(cid, force=True)
except Exception:
pass
args.insert(0, '--cidfile=/child_{}'.format(name))
docker_utils.docker('run', *args)
if __name__ == '__main__':
name = sys.argv[1]
args = sys.argv[2:]
signal.signal(signal.SIGINT, functools.partial(handler, name))
launch(name, args)
| #!/usr/bin/env python
import functools
import os
import signal
import sys
import docker_utils
def handler(name, signum, frame):
print('Should kill', name)
try:
docker_utils.client.remove_container(name, force=True)
except Exception:
pass
sys.exit(0)
def launch(name, args):
try:
os.unlink('/child_{}'.format(name))
except OSError:
pass
try:
docker_utils.client.remove_container(name, force=True)
except Exception:
pass
args.insert(0, '--cidfile=/child_{}'.format(name))
docker_utils.docker('run', *args)
if __name__ == '__main__':
name = sys.argv[1]
args = sys.argv[2:]
signal.signal(signal.SIGINT, functools.partial(handler, name))
launch(name, args)
| mit | Python |
bd3d8738fc00b2d36aafe5749e88826845441541 | fix handling of pages (closes #685) | eirmag/weboob,frankrousseau/weboob,willprice/weboob,Boussadia/weboob,yannrouillard/weboob,nojhan/weboob-devel,willprice/weboob,franek/weboob,yannrouillard/weboob,sputnick-dev/weboob,sputnick-dev/weboob,laurent-george/weboob,frankrousseau/weboob,laurent-george/weboob,laurent-george/weboob,eirmag/weboob,franek/weboob,Boussadia/weboob,RouxRC/weboob,eirmag/weboob,Boussadia/weboob,nojhan/weboob-devel,sputnick-dev/weboob,Boussadia/weboob,RouxRC/weboob,willprice/weboob,Konubinix/weboob,franek/weboob,Konubinix/weboob,nojhan/weboob-devel,RouxRC/weboob,frankrousseau/weboob,yannrouillard/weboob,Konubinix/weboob | weboob/backends/orange/browser.py | weboob/backends/orange/browser.py | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Nicolas Duhamel
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
#~ from .pages.compose import ClosePage, ComposePage, ConfirmPage, SentPage
#~ from .pages.login import LoginPage
from .pages import LoginPage, ComposePage, ConfirmPage
from weboob.tools.browser import BaseBrowser, BrowserIncorrectPassword
__all__ = ['OrangeBrowser']
class OrangeBrowser(BaseBrowser):
DOMAIN = 'orange.fr'
PAGES = {
'http://id.orange.fr/auth_user/bin/auth_user.cgi.*': LoginPage,
'http://id.orange.fr/auth_user/bin/auth0user.cgi.*': LoginPage,
'http://smsmms1.orange.fr/./Sms/sms_write.php.*' : ComposePage,
'http://smsmms1.orange.fr/./Sms/sms_write.php?command=send' : ConfirmPage,
}
def get_nb_remaining_free_sms(self):
self.location("http://smsmms1.orange.fr/M/Sms/sms_write.php")
return self.page.get_nb_remaining_free_sms()
def home(self):
self.location("http://smsmms1.orange.fr/M/Sms/sms_write.php")
def is_logged(self):
self.location("http://smsmms1.orange.fr/M/Sms/sms_write.php", no_login=True)
return not self.is_on_page(LoginPage)
def login(self):
if not self.is_on_page(LoginPage):
self.location('http://id.orange.fr/auth_user/bin/auth_user.cgi?url=http://www.orange.fr', no_login=True)
self.page.login(self.username, self.password)
if not self.is_logged():
raise BrowserIncorrectPassword()
def post_message(self, message, sender):
if not self.is_on_page(ComposePage):
self.home()
self.page.post_message(message, sender)
| # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Nicolas Duhamel
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
#~ from .pages.compose import ClosePage, ComposePage, ConfirmPage, SentPage
#~ from .pages.login import LoginPage
from .pages import LoginPage, ComposePage, ConfirmPage
from weboob.tools.browser import BaseBrowser, BrowserIncorrectPassword
__all__ = ['OrangeBrowser']
class OrangeBrowser(BaseBrowser):
DOMAIN = 'orange.fr'
PAGES = {
'http://id.orange.fr/auth_user/bin/auth_user.cgi.*': LoginPage,
'http://id.orange.fr/auth_user/bin/auth0user.cgi.*': LoginPage,
'http://smsmms1.orange.fr/M/Sms/sms_write.php.*' : ComposePage,
'http://smsmms1.orange.fr/M/Sms/sms_write.php?command=send' : ConfirmPage,
}
def get_nb_remaining_free_sms(self):
self.location("http://smsmms1.orange.fr/M/Sms/sms_write.php")
return self.page.get_nb_remaining_free_sms()
def home(self):
self.location("http://smsmms1.orange.fr/M/Sms/sms_write.php")
def is_logged(self):
self.location("http://smsmms1.orange.fr/M/Sms/sms_write.php", no_login=True)
return not self.is_on_page(LoginPage)
def login(self):
if not self.is_on_page(LoginPage):
self.location('http://id.orange.fr/auth_user/bin/auth_user.cgi?url=http://www.orange.fr', no_login=True)
self.page.login(self.username, self.password)
if not self.is_logged():
raise BrowserIncorrectPassword()
def post_message(self, message, sender):
if not self.is_on_page(ComposePage):
self.home()
self.page.post_message(message, sender)
| agpl-3.0 | Python |
f631099894a02cb79b5be372894ed1f589849a8d | test for datetime.datetime type from dframe_dateconv | BMJHayward/infusionsoft_xpmt | test/pandaservtest.py | test/pandaservtest.py | import unittest, sys, os
from datetime import datetime
import pandas as pd
import src.pandaserv as pandaserv
import numpy as np
class Testpandaserv(unittest.TestCase):
def setUp(self):
self.dates = pd.date_range('20130101', periods=6)
self.df = pd.DataFrame(
np.random.randn(6,4), index=self.dates, columns=list('ABCD'))
self.df2 = pd.DataFrame({ 'A' : 1.,
'B' : pd.Timestamp('20130102'),
'C' : pd.Series(1,index=list(range(4)),dtype='float32'),
'D' : np.array([3] * 4,dtype='int32'),
'E' : pd.Categorical(["test","train","test","train"]),
'F' : 'foo' })
def test_dframe_dateconv(self):
print('Unfinished test, PASS.')
pandaserv.dframe_dateconv(self.df2, 'B')
        for singledate in self.df2['B']:
self.assertIsInstance(singledate, datetime)
def test_dframe_currencystrip(self):
print('Unfinished test, PASS.')
def test_make_sheets(self):
print('Unfinished test, PASS.')
def test_clean_sheets(self):
print('Unfinished test, PASS.')
if __name__ == '__main__':
unittest.main() | import unittest, sys, os
from datetime import datetime
import pandas as pd
import src.pandaserv as pandaserv
import numpy as np
class Testpandaserv(unittest.TestCase):
def setUp(self):
self.dates = pd.date_range('20130101', periods=6)
self.df = pd.DataFrame(
np.random.randn(6,4), index=self.dates, columns=list('ABCD'))
self.df2 = pd.DataFrame({ 'A' : 1.,
'B' : pd.Timestamp('20130102'),
'C' : pd.Series(1,index=list(range(4)),dtype='float32'),
'D' : np.array([3] * 4,dtype='int32'),
'E' : pd.Categorical(["test","train","test","train"]),
'F' : 'foo' })
def test_dframe_dateconv(self):
print('Unfinished test, PASS.')
pandaserv.dframe_dateconv(self.df, 'D')
self.assertIsInstance(self.df['D'], datetime)
def test_dframe_currencystrip(self):
print('Unfinished test, PASS.')
def test_make_sheets(self):
print('Unfinished test, PASS.')
def test_clean_sheets(self):
print('Unfinished test, PASS.')
if __name__ == '__main__':
unittest.main() | mit | Python |
91238b6b0f0b14a6d0f7707aa0b388cedfd5894c | set default false allow_cnpj_multi_ie | akretion/l10n-brazil,OCA/l10n-brazil,OCA/l10n-brazil,OCA/l10n-brazil,akretion/l10n-brazil,akretion/l10n-brazil | l10n_br_base/models/res_config.py | l10n_br_base/models/res_config.py | # -*- coding: utf-8 -*-
from openerp import fields, models
from openerp.tools.safe_eval import safe_eval
class res_config(models.TransientModel):
_inherit = 'base.config.settings'
allow_cnpj_multi_ie = fields.Boolean(
string=u'Permitir o cadastro de Customers com CNPJs iguais',
default=False,
)
def get_default_allow_cnpj_multi_ie(self, cr, uid, fields, context=None):
icp = self.pool.get('ir.config_parameter')
return {
'allow_cnpj_multi_ie': safe_eval(icp.get_param(
cr, uid, 'l10n_br_base_allow_cnpj_multi_ie', 'False')),
}
def set_allow_cnpj_multi_ie(self, cr, uid, ids, context=None):
config = self.browse(cr, uid, ids[0], context=context)
icp = self.pool.get('ir.config_parameter')
icp.set_param(cr, uid, 'l10n_br_base_allow_cnpj_multi_ie',
repr(config.allow_cnpj_multi_ie))
| # -*- coding: utf-8 -*-
from openerp import fields, models
from openerp.tools.safe_eval import safe_eval
class res_config(models.TransientModel):
_inherit = 'base.config.settings'
allow_cnpj_multi_ie = fields.Boolean(
string=u'Permitir o cadastro de Customers com CNPJs iguais',
default=True,
)
def get_default_allow_cnpj_multi_ie(self, cr, uid, fields, context=None):
icp = self.pool.get('ir.config_parameter')
return {
'allow_cnpj_multi_ie': safe_eval(icp.get_param(
cr, uid, 'l10n_br_base_allow_cnpj_multi_ie', 'False')),
}
def set_allow_cnpj_multi_ie(self, cr, uid, ids, context=None):
config = self.browse(cr, uid, ids[0], context=context)
icp = self.pool.get('ir.config_parameter')
icp.set_param(cr, uid, 'l10n_br_base_allow_cnpj_multi_ie',
repr(config.allow_cnpj_multi_ie))
| agpl-3.0 | Python |
bc9c782317eac99716bc961e42e6072f0e5616cf | Add dummy var in order to work around issue 1 https://github.com/LinuxTeam-teilar/cronos.teilar.gr/issues/1 | LinuxTeam-teilar/cronos.teilar.gr,LinuxTeam-teilar/cronos.teilar.gr,LinuxTeam-teilar/cronos.teilar.gr | apps/__init__.py | apps/__init__.py | # -*- coding: utf-8 -*-
from django.conf import settings
from django.core.mail import send_mail
'''
For unknown reason, the logger is NOT able to find a handler
unless a settings.VARIABLE is called!!
https://github.com/LinuxTeam-teilar/cronos.teilar.gr/issues/1
I leave that here till the bug is fixed
'''
settings.DEBUG
def mail_cronos_admin(title, message):
'''
Wrapper function of send_mail
'''
try:
send_mail(title, message, '[email protected]', [settings.ADMIN[0][1]])
except:
pass
class CronosError(Exception):
'''
Custom Exception class
'''
def __init__(self, value):
self.value = value
def __unicode__(self):
return repr(self.value)
def log_extra_data(username = None, request = None, form = None, cronjob = None):
'''
Extra data needed by the custom formatter
All values default to None
    It provides three values: client_ip, username and cronjob name
    Username can be passed directly as an argument, or it can be retrieved
    from either the request var or the form
'''
log_extra_data = {
'client_ip': request.META.get('REMOTE_ADDR','None') if request else '',
'username': username if username else '',
'cronjob': cronjob if cronjob else '',
}
if not username:
if form:
log_extra_data['username'] = form.data.get('username', 'None')
else:
try:
if request.user.is_authenticated():
'''
Handle logged in users
'''
log_extra_data['username'] = request.user.name
else:
'''
Handle anonymous users
'''
log_extra_data['username'] = 'Anonymous'
except AttributeError:
pass
return log_extra_data
| # -*- coding: utf-8 -*-
from django.conf import settings
from django.core.mail import send_mail
def mail_cronos_admin(title, message):
'''
Wrapper function of send_mail
'''
try:
send_mail(title, message, '[email protected]', [settings.ADMIN[0][1]])
except:
pass
class CronosError(Exception):
'''
Custom Exception class
'''
def __init__(self, value):
self.value = value
def __unicode__(self):
return repr(self.value)
def log_extra_data(username = None, request = None, form = None, cronjob = None):
'''
Extra data needed by the custom formatter
All values default to None
    It provides three values: client_ip, username and cronjob name
    Username can be passed directly as an argument, or it can be retrieved
    from either the request var or the form
'''
log_extra_data = {
'client_ip': request.META.get('REMOTE_ADDR','None') if request else '',
'username': username if username else '',
'cronjob': cronjob if cronjob else '',
}
if not username:
if form:
log_extra_data['username'] = form.data.get('username', 'None')
else:
try:
if request.user.is_authenticated():
'''
Handle logged in users
'''
log_extra_data['username'] = request.user.name
else:
'''
Handle anonymous users
'''
log_extra_data['username'] = 'Anonymous'
except AttributeError:
pass
return log_extra_data
| agpl-3.0 | Python |
d006711787d018ed401ba003d3472b8a0e843437 | Add documentation for ignoring empty strings | Sakartu/stringinfo | stringinfo.py | stringinfo.py | #!/usr/bin/env python3
# -*- coding: utf8 -*-
"""
Usage:
stringinfo [options] [--] [STRING]...
Options:
  STRING         The strings for which you want information. If none are given, read from stdin up to EOF. Empty strings are ignored.
--list List all plugins, with their descriptions and whether they're default or not
--all Run all plugins, even the ones that aren't default
--verbose Print debugging messages
--file INFILE Read inputs from inputfile, removing trailing newlines. BEWARE: leading/trailing whitespace is preserved!
Plugins:
"""
import colorama
from docopt import docopt
import sys
import veryprettytable
import plugins
from plugins.util import color
__author__ = 'peter'
def main():
args = docopt(__doc__ + plugins.usage_table())
# Find plugins
ps = plugins.get_plugins(args)
if args['--list']:
table = veryprettytable.VeryPrettyTable()
table.field_names = ('Name', 'Default', 'Description')
table.align = 'l'
for p in ps:
table.add_row((p.__name__,
color(p.default),
p.description))
print(table)
return
if args['--file']:
args['STRING'] = [x.strip('\n\r') for x in open(args['--file'], 'r')]
if not args['STRING']:
args['STRING'] = [sys.stdin.read()]
args['STRING'] = filter(None, args['STRING'])
# Initialize colorama
colorama.init()
# For each plugin, check if it's applicable and if so, run it
for p in ps:
plugin = p(args)
if plugin.sentinel():
print(plugin.header)
print(plugin.handle())
else:
if args['--verbose']:
print('Sentinel failed for {0}'.format(p.__name__))
if __name__ == '__main__':
main() | #!/usr/bin/env python3
# -*- coding: utf8 -*-
"""
Usage:
stringinfo [options] [--] [STRING]...
Options:
  STRING         The strings for which you want information. If none are given, read from stdin up to EOF.
--list List all plugins, with their descriptions and whether they're default or not
--all Run all plugins, even the ones that aren't default
--verbose Print debugging messages
--file INFILE Read inputs from inputfile, removing trailing newlines. BEWARE: leading/trailing whitespace is preserved!
Plugins:
"""
import colorama
from docopt import docopt
import sys
import veryprettytable
import plugins
from plugins.util import color
__author__ = 'peter'
def main():
args = docopt(__doc__ + plugins.usage_table())
# Find plugins
ps = plugins.get_plugins(args)
if args['--list']:
table = veryprettytable.VeryPrettyTable()
table.field_names = ('Name', 'Default', 'Description')
table.align = 'l'
for p in ps:
table.add_row((p.__name__,
color(p.default),
p.description))
print(table)
return
if args['--file']:
args['STRING'] = [x.strip('\n\r') for x in open(args['--file'], 'r')]
if not args['STRING']:
args['STRING'] = [sys.stdin.read()]
args['STRING'] = filter(None, args['STRING'])
# Initialize colorama
colorama.init()
# For each plugin, check if it's applicable and if so, run it
for p in ps:
plugin = p(args)
if plugin.sentinel():
print(plugin.header)
print(plugin.handle())
else:
if args['--verbose']:
print('Sentinel failed for {0}'.format(p.__name__))
if __name__ == '__main__':
main() | mit | Python |
9e7cd9f13abb29ff8458407b905d522548eaf5c9 | Refactor check_executables_have_shebangs for git ls-files reuse | pre-commit/pre-commit-hooks | pre_commit_hooks/check_executables_have_shebangs.py | pre_commit_hooks/check_executables_have_shebangs.py | """Check that executable text files have a shebang."""
import argparse
import shlex
import sys
from typing import Generator
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Sequence
from typing import Set
from pre_commit_hooks.util import cmd_output
from pre_commit_hooks.util import zsplit
EXECUTABLE_VALUES = frozenset(('1', '3', '5', '7'))
def check_executables(paths: List[str]) -> int:
if sys.platform == 'win32': # pragma: win32 cover
return _check_git_filemode(paths)
else: # pragma: win32 no cover
retv = 0
for path in paths:
if not has_shebang(path):
_message(path)
retv = 1
return retv
class GitLsFile(NamedTuple):
mode: str
filename: str
def git_ls_files(paths: Sequence[str]) -> Generator[GitLsFile, None, None]:
outs = cmd_output('git', 'ls-files', '-z', '--stage', '--', *paths)
for out in zsplit(outs):
metadata, filename = out.split('\t')
mode, _, _ = metadata.split()
yield GitLsFile(mode, filename)
def _check_git_filemode(paths: Sequence[str]) -> int:
seen: Set[str] = set()
for ls_file in git_ls_files(paths):
is_executable = any(b in EXECUTABLE_VALUES for b in ls_file.mode[-3:])
if is_executable and not has_shebang(ls_file.filename):
_message(ls_file.filename)
seen.add(ls_file.filename)
return int(bool(seen))
def has_shebang(path: str) -> int:
with open(path, 'rb') as f:
first_bytes = f.read(2)
return first_bytes == b'#!'
def _message(path: str) -> None:
print(
f'{path}: marked executable but has no (or invalid) shebang!\n'
f" If it isn't supposed to be executable, try: "
f'`chmod -x {shlex.quote(path)}`\n'
f' If it is supposed to be executable, double-check its shebang.',
file=sys.stderr,
)
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('filenames', nargs='*')
args = parser.parse_args(argv)
return check_executables(args.filenames)
if __name__ == '__main__':
exit(main())
| """Check that executable text files have a shebang."""
import argparse
import shlex
import sys
from typing import List
from typing import Optional
from typing import Sequence
from typing import Set
from pre_commit_hooks.util import cmd_output
from pre_commit_hooks.util import zsplit
EXECUTABLE_VALUES = frozenset(('1', '3', '5', '7'))
def check_executables(paths: List[str]) -> int:
if sys.platform == 'win32': # pragma: win32 cover
return _check_git_filemode(paths)
else: # pragma: win32 no cover
retv = 0
for path in paths:
if not _check_has_shebang(path):
_message(path)
retv = 1
return retv
def _check_git_filemode(paths: Sequence[str]) -> int:
outs = cmd_output('git', 'ls-files', '-z', '--stage', '--', *paths)
seen: Set[str] = set()
for out in zsplit(outs):
metadata, path = out.split('\t')
tagmode = metadata.split(' ', 1)[0]
is_executable = any(b in EXECUTABLE_VALUES for b in tagmode[-3:])
if is_executable and not _check_has_shebang(path):
_message(path)
seen.add(path)
return int(bool(seen))
def _check_has_shebang(path: str) -> int:
with open(path, 'rb') as f:
first_bytes = f.read(2)
return first_bytes == b'#!'
def _message(path: str) -> None:
print(
f'{path}: marked executable but has no (or invalid) shebang!\n'
f" If it isn't supposed to be executable, try: "
f'`chmod -x {shlex.quote(path)}`\n'
f' If it is supposed to be executable, double-check its shebang.',
file=sys.stderr,
)
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('filenames', nargs='*')
args = parser.parse_args(argv)
return check_executables(args.filenames)
if __name__ == '__main__':
exit(main())
| mit | Python |
5eeab4e458e7af3895525dcc08017eb855308723 | remove extra s typo | appeltel/AutoCMS,appeltel/AutoCMS,appeltel/AutoCMS | autocms/stats.py | autocms/stats.py | """Harvesting of persistent statistical records."""
import os
import time
import importlib
from .core import load_records
def harvest_default_stats(records, config):
"""Add a row to the long term statistics record for a given test."""
now = int(time.time())
harvest_time = now - int(config['AUTOCMS_STAT_INTERVAL'])*3600
stat_records = [job for job in records if job.completed and
job.end_time > harvest_time]
runtimes = [job.run_time() for job in stat_records if job.is_success()]
if len(runtimes) == 0:
max_runtime = 0
min_runtime = 0
mean_runtime = 0
else:
max_runtime = max(runtimes)
min_runtime = min(runtimes)
mean_runtime = sum(runtimes)/float(len(runtimes))
successes = sum(1 for job in stat_records if job.is_success())
failures = sum(1 for job in stat_records if not job.is_success())
return "{} {} {} {} {} {}".format(now, successes, failures, min_runtime,
mean_runtime, max_runtime)
def append_stats_row(row, testname, config):
"""Add a line to the persistent statistics log of a test."""
statfile = os.path.join(config['AUTOCMS_BASEDIR'], testname,
'statistics.dat')
with open(statfile, 'a') as stat_handle:
stat_handle.write(row + '\n')
def perform_stats_harvesting(testname, config):
"""Analyze job records for given test and create row of statistics."""
records = load_records(testname, config)
# use a custom row if the test has configured one
harvest_stats = harvest_default_stats
try:
test_custom = importlib.import_module('autocms.custom.' + testname)
if hasattr(test_custom, 'harvest_stats'):
            harvest_stats = getattr(test_custom, 'harvest_stats')
except ImportError:
pass
row = harvest_stats(records, config)
append_stats_row(row, testname, config)
| """Harvesting of persistent statsitical records."""
import os
import time
import importlib
from .core import load_records
def harvest_default_stats(records, config):
"""Add a row to the long term statistics record for a given test."""
now = int(time.time())
harvest_time = now - int(config['AUTOCMS_STAT_INTERVAL'])*3600
stat_records = [job for job in records if job.completed and
job.end_time > harvest_time]
runtimes = [job.run_time() for job in stat_records if job.is_success()]
if len(runtimes) == 0:
max_runtime = 0
min_runtime = 0
mean_runtime = 0
else:
max_runtime = max(runtimes)
min_runtime = min(runtimes)
mean_runtime = sum(runtimes)/float(len(runtimes))
successes = sum(1 for job in stat_records if job.is_successs())
failures = sum(1 for job in stat_records if not job.is_successs())
return "{} {} {} {} {} {}".format(now, successes, failures, min_runtime,
mean_runtime, max_runtime)
def append_stats_row(row, testname, config):
"""Add a line to the persistent statistics log of a test."""
statfile = os.path.join(config['AUTOCMS_BASEDIR'], testname,
'statistics.dat')
with open(statfile, 'a') as stat_handle:
stat_handle.write(row + '\n')
def perform_stats_harvesting(testname, config):
"""Analyze job records for given test and create row of statistics."""
records = load_records(testname, config)
# use a custom row if the test has configured one
harvest_stats = harvest_default_stats
try:
test_custom = importlib.import_module('autocms.custom.' + testname)
if hasattr(test_custom, 'harvest_stats'):
            harvest_stats = getattr(test_custom, 'harvest_stats')
except ImportError:
pass
row = harvest_stats(records, config)
append_stats_row(row, testname, config)
| mit | Python |
2d392d8c107c9055e6b62bb365158b1001872cde | Fix deprecation warnings. | zafarali/emdp | emdp/analytic.py | emdp/analytic.py | """
Tools to get analytic solutions from MDPs
"""
import numpy as np
def calculate_P_pi(P, pi):
r"""
calculates P_pi
P_pi(s,t) = \sum_a pi(s,a) p(s, a, t)
:param P: transition matrix of size |S|x|A|x|S|
:param pi: matrix of size |S| x |A| indicating the policy
:return: a matrix of size |S| x |S|
"""
return np.einsum('sat,sa->st', P, pi)
def calculate_R_pi(R, pi):
r"""
calculates R_pi
R_pi(s) = \sum_a pi(s,a) r(s,a)
:param R: reward matrix of size |S| x |A|
:param pi: matrix of size |S| x |A| indicating the policy
:return:
"""
return np.einsum('sa,sa->s', R, pi)
def calculate_successor_representation(P_pi, gamma):
"""
Calculates the successor representation
(I- gamma*P_pi)^{-1}
:param P_pi:
:param gamma:
:return:
"""
return np.linalg.inv(np.eye(P_pi.shape[0]) - gamma * P_pi)
def calculate_V_pi_from_successor_representation(Phi, R_pi):
return np.einsum('st,t->s', Phi, R_pi)
def calculate_V_pi(P, R, pi, gamma):
r"""
Calculates V_pi from the successor representation using the analytic form:
(I- gamma*P_pi)^{-1} * R_pi
where P_pi(s,t) = \sum_a pi(s,a) p(s, a, t)
and R_pi(s) = \sum_a pi(s,a) r(s,a)
:param P: Transition matrix
:param R: Reward matrix
:param pi: policy matrix
:param gamma: discount factor
:return:
"""
P_pi = calculate_P_pi(P, pi)
R_pi = calculate_R_pi(R, pi)
Phi = calculate_successor_representation(P_pi, gamma)
return calculate_V_pi_from_successor_representation(Phi, R_pi)
| """
Tools to get analytic solutions from MDPs
"""
import numpy as np
def calculate_P_pi(P, pi):
"""
calculates P_pi
P_pi(s,t) = \sum_a pi(s,a) p(s, a, t)
:param P: transition matrix of size |S|x|A|x|S|
:param pi: matrix of size |S| x |A| indicating the policy
:return: a matrix of size |S| x |S|
"""
return np.einsum('sat,sa->st', P, pi)
def calculate_R_pi(R, pi):
"""
calculates R_pi
R_pi(s) = \sum_a pi(s,a) r(s,a)
:param R: reward matrix of size |S| x |A|
:param pi: matrix of size |S| x |A| indicating the policy
:return:
"""
return np.einsum('sa,sa->s', R, pi)
def calculate_successor_representation(P_pi, gamma):
"""
Calculates the successor representation
(I- gamma*P_pi)^{-1}
:param P_pi:
:param gamma:
:return:
"""
return np.linalg.inv(np.eye(P_pi.shape[0]) - gamma * P_pi)
def calculate_V_pi_from_successor_representation(Phi, R_pi):
return np.einsum('st,t->s', Phi, R_pi)
def calculate_V_pi(P, R, pi, gamma):
"""
Calculates V_pi from the successor representation using the analytic form:
(I- gamma*P_pi)^{-1} * R_pi
where P_pi(s,t) = \sum_a pi(s,a) p(s, a, t)
and R_pi(s) = \sum_a pi(s,a) r(s,a)
:param P: Transition matrix
:param R: Reward matrix
:param pi: policy matrix
:param gamma: discount factor
:return:
"""
P_pi = calculate_P_pi(P, pi)
R_pi = calculate_R_pi(R, pi)
Phi = calculate_successor_representation(P_pi, gamma)
return calculate_V_pi_from_successor_representation(Phi, R_pi) | mit | Python |
c8f774ea3455af057736166757f831407711ae67 | Bump to 0.4. | gtaylor/EVE-Market-Data-Structures | emds/__init__.py | emds/__init__.py | __version__ = '0.4' | __version__ = '0.3' | mit | Python |
033ec1c5c7d44c54136541aa0e1bd8c73e3c1163 | update test_unitcell | dschick/udkm1Dsim,dschick/udkm1Dsim | test/test_unitCell.py | test/test_unitCell.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from udkm1Dsim.atoms import Atom
from udkm1Dsim.unitCell import UnitCell
from pint import UnitRegistry
u = UnitRegistry()
u.default_format = '~P'
from numpy import array
def test_unit_cell():
Fe = Atom('Fe')
uc = UnitCell('uc', 'Unit Cell', 2.86*u.angstrom, heat_capacity=10*(u.J/u.kg/u.K),
lin_therm_exp=1e-6/u.K, therm_cond=1*(u.W/u.m/u.K),
opt_pen_depth=11*u.nm, sound_vel=5*(u.nm/u.ps))
uc.add_atom(Fe, 'lambda strain: 0*(strain+1)')
uc.add_atom(Fe, 'lambda strain: 0.5*(strain+1)')
assert uc.id == 'uc'
assert uc.name == 'Unit Cell'
assert uc.a_axis == 2.86*u.angstrom
assert uc.b_axis == 2.86*u.angstrom
assert uc.c_axis == 2.86*u.angstrom
assert uc.heat_capacity[0](300) == 10
assert uc.int_heat_capacity[0](300) == 3000
assert uc.lin_therm_exp[0](300) == 1e-6
assert uc.int_lin_therm_exp[0](300) == 0.0003
assert uc.therm_cond[0](300) == 1
assert uc.opt_pen_depth == 11*u.nm
assert uc.sound_vel == 5*(u.nm/u.ps)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from udkm1Dsim.atoms import Atom
from udkm1Dsim.unitCell import UnitCell
from pint import UnitRegistry
u = UnitRegistry()
u.default_format = '~P'
from numpy import array
def test_unit_cell():
Fe = Atom('Fe')
uc = UnitCell('uc', 'Unit Cell', 2.86*u.angstrom, heat_capacity=10*(u.J/u.kg/u.K),
lin_therm_exp=1e-6/u.K, therm_cond=1*(u.W/u.m/u.K),
opt_pen_depth=11*u.nm, sound_vel=5*(u.nm/u.ps))
uc.add_atom(Fe, 'lambda strain: 0*(strain+1)')
uc.add_atom(Fe, 'lambda strain: 0.5*(strain+1)')
assert uc.id == 'uc'
assert uc.name == 'Unit Cell'
assert uc.a_axis == 2.86*u.angstrom
assert uc.b_axis == 2.86*u.angstrom
assert uc.c_axis == 2.86*u.angstrom
assert uc.heat_capacity[0](300) == 10
assert uc.int_heat_capacity[0](300) == 3000
assert uc.lin_therm_exp[0](300) == 1e-6
assert uc.int_lin_therm_exp[0](300) == 0.0003
assert uc.therm_cond[0](300) == 1
assert uc.opt_pen_depth == 11*u.nm
assert uc.sound_vel == 5*(u.nm/u.ps)
assert uc.get_property_dict(types='phonon') == {'_c_axis': 2.86e-10,
'_mass': 2.2674165653283783e-26,
'_phonon_damping': 0.0,
'int_lin_therm_exp_str':
['lambda T : 1.0e-6*T'],
'spring_const': array([6]),
'num_sub_systems': 1}
| mit | Python |
b6d161e54e9b398f79f417ac14ec65e5fdb609d3 | remove pre tag in emit init | BrianHicks/emit,BrianHicks/emit,BrianHicks/emit | emit/__init__.py | emit/__init__.py | __version__ = '0.4.0'
from emit.router.core import Router
| __version__ = '0.4.0pre'
from emit.router.core import Router
| mit | Python |
19fa44530adf1fd5456a0be93ea0dddd7e43eb8c | Remove junk import. | sippy/rtp_cluster,sippy/rtp_cluster | Cli_server_tcp.py | Cli_server_tcp.py | # Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from twisted.internet.protocol import Factory
from twisted.internet import reactor
from Cli_session import Cli_session
class Cli_server_tcp(Factory):
command_cb = None
lport = None
accept_list = None
def __init__(self, command_cb, address):
self.command_cb = command_cb
self.protocol = Cli_session
self.lport = reactor.listenTCP(address[1], self, interface = address[0])
def buildProtocol(self, addr):
if self.accept_list != None and addr.host not in self.accept_list:
return None
p = Factory.buildProtocol(self, addr)
p.command_cb = self.command_cb
p.raddr = addr
return p
def shutdown(self):
self.lport.stopListening()
if __name__ == '__main__':
def callback(clm, cmd):
print cmd
return False
laddr = ('127.0.0.1', 12345)
f = Cli_server_tcp(callback, laddr)
reactor.run()
| # Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from twisted.internet.protocol import Factory
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from Cli_session import Cli_session
class Cli_server_tcp(Factory):
command_cb = None
lport = None
accept_list = None
def __init__(self, command_cb, address):
self.command_cb = command_cb
self.protocol = Cli_session
self.lport = reactor.listenTCP(address[1], self, interface = address[0])
def buildProtocol(self, addr):
if self.accept_list != None and addr.host not in self.accept_list:
return None
p = Factory.buildProtocol(self, addr)
p.command_cb = self.command_cb
p.raddr = addr
return p
def shutdown(self):
self.lport.stopListening()
if __name__ == '__main__':
def callback(clm, cmd):
print cmd
return False
laddr = ('127.0.0.1', 12345)
f = Cli_server_tcp(callback, laddr)
reactor.run()
| bsd-2-clause | Python |
0070417e170ff67248918243d6aaea248a5d024c | Fix Q3 in exercise 6 checking code | Kaggle/learntools,Kaggle/learntools | learntools/computer_vision/ex6.py | learntools/computer_vision/ex6.py | from learntools.core import *
import tensorflow as tf
# Free
class Q1(CodingProblem):
_solution = ""
_hint = ""
def check(self):
pass
class Q2A(ThoughtExperiment):
_hint = "Remember that whatever transformation you apply needs at least to keep the classes distinct, but otherwise should more or less keep the appearance of the images the same. If you rotated a picture of a forest, would it still look like a forest?"
_solution = """It seems to this author that any of the transformations from the first problem might be appropriate, provided the parameter values were reasonable. A picture of a forest that had been rotated or shifted or stretched would still look like a forest, and contrast adjustments could perhaps make up for differences in light and shadow. Rotations especially could be taken through the full range, since there's no real concept of "up or down" for pictures taken straight overhead."""
class Q2B(ThoughtExperiment):
_hint = "Remember that whatever transformation you apply needs at least to keep the classes distinct, but otherwise should more or less keep the appearance of the images the same. If you rotated a picture of a forest, would it still look like a forest?"
_solution = "It seems to this author that any of the transformations from the first problem might be appropriate, provided the parameter values were reasonable. A picture of a forest that had been rotated or shifted or stretched would still look like a forest, and contrast adjustments could perhaps make up for differences in light and shadow."
Q2 = MultipartProblem(Q2A, Q2B)
class Q3(CodingProblem):
_solution = ""
_hint = ""
def check(self):
pass
qvars = bind_exercises(globals(), [
Q1, Q2, Q3,
],
var_format='q_{n}',
)
__all__ = list(qvars)
| from learntools.core import *
import tensorflow as tf
# Free
class Q1(CodingProblem):
_solution = ""
_hint = ""
def check(self):
pass
class Q2A(ThoughtExperiment):
_hint = "Remember that whatever transformation you apply needs at least to keep the classes distinct, but otherwise should more or less keep the appearance of the images the same. If you rotated a picture of a forest, would it still look like a forest?"
_solution = """It seems to this author that any of the transformations from the first problem might be appropriate, provided the parameter values were reasonable. A picture of a forest that had been rotated or shifted or stretched would still look like a forest, and contrast adjustments could perhaps make up for differences in light and shadow. Rotations especially could be taken through the full range, since there's no real concept of "up or down" for pictures taken straight overhead."""
class Q2B(ThoughtExperiment):
_hint = "Remember that whatever transformation you apply needs at least to keep the classes distinct, but otherwise should more or less keep the appearance of the images the same. If you rotated a picture of a forest, would it still look like a forest?"
_solution = "It seems to this author that any of the transformations from the first problem might be appropriate, provided the parameter values were reasonable. A picture of a forest that had been rotated or shifted or stretched would still look like a forest, and contrast adjustments could perhaps make up for differences in light and shadow."
Q2 = MultipartProblem(Q2A, Q2B)
class Q3(CodingProblem):
pass
qvars = bind_exercises(globals(), [
Q1, Q2, Q3,
],
var_format='q_{n}',
)
__all__ = list(qvars)
| apache-2.0 | Python |
f178b2378661a25cebe9753cf84d6ea9f3c081a8 | Improve doc for MultivalueEnum. | kissgyorgy/enum34-custom | enum34_custom.py | enum34_custom.py | from enum import Enum, EnumMeta
from functools import total_ordering
class _MultiValueMeta(EnumMeta):
def __init__(self, cls, bases, classdict):
# make sure we only have tuple values, not single values
for member in self.__members__.values():
if not isinstance(member.value, tuple):
raise TypeError('{} = {!r}, should be tuple!'
.format(member.name, member.value))
def __call__(cls, value):
"""Return the appropriate instance with any of the values listed."""
for member in cls:
if value in member.value:
return member
else:
raise ValueError("%s is not a valid %s" % (value, cls.__name__))
class MultiValueEnum(Enum, metaclass=_MultiValueMeta):
"""Enum subclass where members can have multiple values.
    You can reference a member by any of the values in its associated tuple.
"""
@total_ordering
class OrderableMixin:
"""Mixin for comparable Enums. The order is the definition order
from smaller to bigger.
"""
def __eq__(self, other):
if self.__class__ is other.__class__:
return self.value == other.value
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
names = self.__class__._member_names_
return names.index(self.name) < names.index(other.name)
return NotImplemented
| from enum import Enum, EnumMeta
from functools import total_ordering
class _MultiValueMeta(EnumMeta):
def __init__(self, cls, bases, classdict):
# make sure we only have tuple values, not single values
for member in self.__members__.values():
if not isinstance(member.value, tuple):
raise TypeError('{} = {!r}, should be tuple!'
.format(member.name, member.value))
def __call__(cls, value):
"""Return the appropriate instance with any of the values listed."""
for member in cls:
if value in member.value:
return member
else:
raise ValueError("%s is not a valid %s" % (value, cls.__name__))
class MultiValueEnum(Enum, metaclass=_MultiValueMeta):
"""Enum subclass where members are declared as tuples."""
@total_ordering
class OrderableMixin:
"""Mixin for comparable Enums. The order is the definition order
from smaller to bigger.
"""
def __eq__(self, other):
if self.__class__ is other.__class__:
return self.value == other.value
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
names = self.__class__._member_names_
return names.index(self.name) < names.index(other.name)
return NotImplemented
| mit | Python |
cf472cc43c4473ad7403bda64302d1170ee6874e | Save user timezone | argoroots/Entu,argoroots/Entu,argoroots/Entu | controllers/preferences.py | controllers/preferences.py | from pytz.gae import pytz
from bo import *
from database.person import *
class ShowPreferences(boRequestHandler):
def get(self):
self.view('preferences', 'preferences.html', {
'person': Person().current,
'preferences': UserPreferences().current,
'timezones': pytz.common_timezones,
})
def post(self):
field = self.request.get('field').strip()
value = self.request.get('value').strip()
UserPreferences().set(field, value)
def main():
Route([
('/preferences', ShowPreferences),
])
if __name__ == '__main__':
main() | from bo import *
from database import *
class ShowPreferences(boRequestHandler):
def get(self):
self.view('preferences', 'preferences.html', {
'person': Person().current,
'preferences': UserPreferences().current,
})
def post(self):
UserPreferences().set_language(self.request.get('language'))
def main():
Route([
('/preferences', ShowPreferences),
])
if __name__ == '__main__':
main() | mit | Python |
ab4cc4fb85c8616de0be53d0a95ad8096ac0cc0c | set up the page and layout for Dashboard:Team Application Overview | SkillSmart/ConferenceManagementSystem,SkillSmart/ConferenceManagementSystem,SkillSmart/ConferenceManagementSystem,SkillSmart/ConferenceManagementSystem,SkillSmart/ConferenceManagementSystem | Dashboard/urls.py | Dashboard/urls.py | from django.conf.urls import url
# View Imports
from . import views
app_name = "dashboard"
urlpatterns = [
url(r'^$', views.DashboardIndex.as_view(), name='index'),
url(r'^experts/$', views.expert_management, name='manage_experts'),
url(r'^experts/(?P<username>.+)/$', views.expert_management, name="expert_detail"),
url(r'^sessions/$', views.SessionManagement.as_view(), name="manage_sessions"),
url(r'^teams/$', views.applicationoverview_team, name='manage_teams'),
url(r'^teams/(?P<slug>[-\w]+)/$', views.team_management, name='team_detail'),
url(r'^venues/$', views.VenueManagement.as_view(), name='manage_venues'),
url(r'^venues/(?P<slug>[-\w]+)/$', views.VenueManagement.as_view(), name='manage_venue_detail'),
url(r'^shifts/$', views.ShiftManagement.as_view(), name='manage_shifts'),
]
| from django.conf.urls import url
# View Imports
from . import views
app_name = "dashboard"
urlpatterns = [
url(r'^$', views.DashboardIndex.as_view(), name='index'),
url(r'^experts/$', views.expert_management, name='manage_experts'),
url(r'^experts/(?P<username>.+)/$', views.expert_management, name="expert_detail"),
url(r'^sessions/$', views.SessionManagement.as_view(), name="manage_sessions"),
url(r'^teams/$', views.applicationoverview_team, name='manage_teams'),
url(r'^teams/(?P<slug>.+)/$', views.team_management, name='team_detail'),
url(r'^venues/$', views.VenueManagement.as_view(), name='manage_venues'),
url(r'^venues/(?P<slug>[-\w]+)/$', views.VenueManagement.as_view(), name='manage_venue_detail'),
url(r'^shifts/$', views.ShiftManagement.as_view(), name='manage_shifts'),
]
| mit | Python |
f0bec02a6e2516ffd11d43b089576c0463d8d51f | Update denormalizer | barberscore/barberscore-api,barberscore/barberscore-api,barberscore/barberscore-api,dbinetti/barberscore-django,dbinetti/barberscore-django,dbinetti/barberscore,dbinetti/barberscore,barberscore/barberscore-api | project/apps/api/management/commands/denormalize.py | project/apps/api/management/commands/denormalize.py | from django.core.management.base import (
BaseCommand,
)
from apps.api.models import (
Convention,
Contest,
Contestant,
Appearance,
Performance,
Group,
Singer,
Director,
Judge,
)
class Command(BaseCommand):
help = "Command to denormailze data."
def handle(self, *args, **options):
vs = Convention.objects.all()
for v in vs:
v.save()
ts = Contest.objects.all()
for t in ts:
t.save()
cs = Contestant.objects.all()
for c in cs:
c.save()
as_ = Appearance.objects.all()
for a in as_:
a.save()
ps = Performance.objects.all()
for p in ps:
p.save()
ss = Singer.objects.all()
for s in ss:
s.save()
js = Judge.objects.all()
for j in js:
j.save()
ds = Director.objects.all()
for d in ds:
d.save()
return "Done"
| from django.core.management.base import (
BaseCommand,
)
from apps.api.models import (
Convention,
Contest,
Contestant,
Performance,
Group,
Person,
Singer,
Director,
)
class Command(BaseCommand):
help = "Command to denormailze data."
def handle(self, *args, **options):
vs = Convention.objects.all()
for v in vs:
v.save()
ts = Contest.objects.all()
for t in ts:
t.save()
cs = Contestant.objects.all()
for c in cs:
c.save()
ps = Performance.objects.all()
for p in ps:
p.save()
gs = Group.objects.all()
for g in gs:
g.save()
rs = Person.objects.all()
for r in rs:
r.save()
ss = Singer.objects.all()
for s in ss:
s.save()
ds = Director.objects.all()
for d in ds:
d.save()
return "Done"
| bsd-2-clause | Python |
60e2503bde822fdcea91c3d0a8e6ddb0f67d0d79 | update scripts | cmu-db/db-webcrawler,cmu-db/cmdbac,cmu-db/db-webcrawler,cmu-db/db-webcrawler,cmu-db/cmdbac,cmu-db/cmdbac,cmu-db/db-webcrawler,cmu-db/cmdbac,cmu-db/cmdbac,cmu-db/db-webcrawler | scripts/deploy_repos.py | scripts/deploy_repos.py | #!/usr/bin/env python
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, "core"))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cmudbac.settings")
import django
django.setup()
from django.db.models import Q
from library.models import *
import utils
def main():
if len(sys.argv) != 3:
return
deploy_id = int(sys.argv[1])
total_deployer = int(sys.argv[2])
database = Database.objects.get(name='MySQL')
for repo in Repository.objects.filter(project_type = 3):
# for repo in Repository.objects.filter(project_type = 1).filter(latest_attempt__result = 'OK').filter(latest_attempt__log__contains = "[Errno 13] Permission denied: '/var/log/mysql/mysql.log'"):
if repo.id % total_deployer != deploy_id - 1:
continue
n = len(Attempt.objects.filter(repo = repo).filter(result = 'OK'))
if n == 0:
continue
print 'Attempting to deploy {} using {} ...'.format(repo, repo.project_type.deployer_class)
try:
utils.vagrant_deploy(repo, deploy_id, database)
except:
pass
if __name__ == '__main__':
main()
| #!/usr/bin/env python
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, "core"))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cmudbac.settings")
import django
django.setup()
from django.db.models import Q
from library.models import *
import utils
def main():
if len(sys.argv) != 3:
return
deploy_id = int(sys.argv[1])
total_deployer = int(sys.argv[2])
database = Database.objects.get(name='MySQL')
for repo in Repository.objects.filter(project_type = 3).filter(Q(latest_attempt__result = 'DE') | Q(latest_attempt__result = 'OK')):
# for repo in Repository.objects.filter(project_type = 1).filter(latest_attempt__result = 'OK').filter(latest_attempt__log__contains = "[Errno 13] Permission denied: '/var/log/mysql/mysql.log'"):
if repo.id % total_deployer != deploy_id - 1:
continue
print 'Attempting to deploy {} using {} ...'.format(repo, repo.project_type.deployer_class)
try:
utils.vagrant_deploy(repo, deploy_id, database)
except:
pass
if __name__ == '__main__':
main()
| apache-2.0 | Python |
bca6f6041e9f49d1d25d7a9c4cb88080d88c45b1 | Comment concerning differences in keys per path | saulshanabrook/django-dumper | dumper/invalidation.py | dumper/invalidation.py | import dumper.utils
def invalidate_paths(paths):
'''
Invalidate all pages for a certain path.
'''
for path in paths:
for key in all_cache_keys_from_path(path):
dumper.utils.cache.delete(key)
def all_cache_keys_from_path(path):
'''
Each path can actually have multiple cached entries, varying based on different HTTP
methods. So a GET request will have a different cached response from a HEAD request.
In order to invalidate a path, we must first know all the different cache keys that the
path might have been cached at. This returns those keys
'''
return [dumper.utils.cache_key(path, method) for method in dumper.settings.CACHABLE_METHODS]
| import dumper.utils
def invalidate_paths(paths):
'''
Invalidate all pages for a certain path.
'''
for path in paths:
for key in all_cache_keys_from_path(path):
dumper.utils.cache.delete(key)
def all_cache_keys_from_path(path):
return [dumper.utils.cache_key(path, method) for method in dumper.settings.CACHABLE_METHODS]
| mit | Python |
2b7dbcd01a4d208f83204fc4323ddff055e4a87e | Move common tests to a base reusable class. | yasserglez/ngram_profile | test_ngram_profile.py | test_ngram_profile.py | # -*- coding: utf-8 -*-
import os
import json
import unittest
import ngram_profile
class CommonNGramProfileTests(object):
profileClass = None
def test_init(self):
profile = self.profileClass()
self.assertEqual(len(profile), 0)
def test_json_roundtrip(self):
json_profile = '{"a": 0.5, "b": 0.3, "c": 0.2}'
tmp_file = 'test_json_roundtrip.json'
with open(tmp_file, 'w') as fd:
fd.write(json_profile)
profile = self.profileClass.from_json(tmp_file)
os.remove(tmp_file)
self.assertEqual(len(profile), 3)
self.assertEqual(profile[u'a'], 0.5)
self.assertEqual(profile[u'b'], 0.3)
self.assertEqual(profile[u'c'], 0.2)
profile.save_as_json(tmp_file)
with open(tmp_file, 'r') as fd:
self.assertEqual(json.load(fd), json.loads(json_profile))
os.remove(tmp_file)
def test_normalize_unicode_output(self):
profile = self.profileClass()
normalized = profile.normalize(u'abc')
self.assertTrue(isinstance(normalized, unicode))
class TestNGramProfile(CommonNGramProfileTests, unittest.TestCase):
profileClass = ngram_profile.NGramProfile
def test_normalize(self):
profile = self.profileClass()
x = u'abc'
y = profile.normalize(x)
self.assertEqual(x, y)
def test_tokenize(self):
profile = ngram_profile.NGramProfile()
self.assertRaises(NotImplementedError, profile.tokenize, u'')
class TestCharNGramProfile(CommonNGramProfileTests, unittest.TestCase):
profileClass = ngram_profile.CharNGramProfile
def test_tokenize(self):
self.fail('not yet implemented')
if __name__ == '__main__':
unittest.main()
| # -*- coding: utf-8 -*-
import os
import json
import unittest
from ngram_profile import NGramProfile, CharNGramProfile
class TestNGramProfile(unittest.TestCase):
def test_init(self):
profile = NGramProfile()
self.assertEqual(len(profile), 0)
def test_json_roundtrip(self):
json_profile = '{"a": 0.5, "b": 0.3, "c": 0.2}'
tmp_file = 'test_json_roundtrip.json'
with open(tmp_file, 'w') as fd:
fd.write(json_profile)
profile = NGramProfile.from_json(tmp_file)
os.remove(tmp_file)
self.assertEqual(len(profile), 3)
self.assertEqual(profile[u'a'], 0.5)
self.assertEqual(profile[u'b'], 0.3)
self.assertEqual(profile[u'c'], 0.2)
profile.save_as_json(tmp_file)
with open(tmp_file, 'r') as fd:
self.assertEqual(json.load(fd), json.loads(json_profile))
os.remove(tmp_file)
def test_normalize(self):
profile = NGramProfile()
x = u'abc'
y = profile.normalize(x)
self.assertTrue(isinstance(y, unicode))
self.assertEqual(x, y)
def test_tokenize(self):
profile = NGramProfile()
self.assertRaises(NotImplementedError, profile.tokenize, u'')
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python |
d1232473ecb31eb2b85b67e54d5939093233f2bf | Print client pk in list_sessions command | nguyenduchien1994/django-ncharts,nguyenduchien1994/django-ncharts,nguyenduchien1994/django-ncharts,nguyenduchien1994/django-ncharts,nguyenduchien1994/django-ncharts | ncharts/management/commands/list_sessions.py | ncharts/management/commands/list_sessions.py | from django.core.management.base import NoArgsCommand
from ncharts.models import ClientState
from ncharts import views as nc_views
from django.contrib.sessions.models import Session
class Command(NoArgsCommand):
def handle_noargs(self, **options):
sessions = Session.objects.all()
print("#sessions=%d" % len(sessions))
for sess in sessions:
sess_dict = sess.get_decoded()
print("session keys=%s" % (repr([k for k in sess_dict.keys()])))
for sessk in sess_dict:
if len(sessk) > 5 and sessk[0:5] == "pdid_":
print("session, sessk=%s, client_id=%d" % (sessk, sess_dict[sessk]))
| from django.core.management.base import NoArgsCommand
from ncharts.models import ClientState
from ncharts import views as nc_views
from django.contrib.sessions.models import Session
class Command(NoArgsCommand):
def handle_noargs(self, **options):
sessions = Session.objects.all()
print("#sessions=%d" % len(sessions))
for sess in sessions:
sess_dict = sess.get_decoded()
print("session keys=%s" % (repr([k for k in sess_dict.keys()])))
for sessk in sess_dict:
if len(sessk) > 5 and sessk[0:5] == "pdid_":
print("session, sessk=%s" % (sessk))
| bsd-2-clause | Python |
e72da8231e7a5b05f098db1f78b66b8cb57f27ba | remove checking in autots import (#5489) | yangw1234/BigDL,intel-analytics/BigDL,intel-analytics/BigDL,yangw1234/BigDL,yangw1234/BigDL,yangw1234/BigDL,intel-analytics/BigDL,intel-analytics/BigDL | python/chronos/src/bigdl/chronos/autots/__init__.py | python/chronos/src/bigdl/chronos/autots/__init__.py | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
import os
if os.getenv("LD_PRELOAD", "null") != "null":
warnings.warn("Users of `bigdl.chronos.autots` should "
"unset bigdl-nano environment variables!"
"Please run `source bigdl-nano-unset-env` "
"in your bash terminal")
try:
# TODO: make this a LazyImport
from .autotsestimator import AutoTSEstimator
from .tspipeline import TSPipeline
except ImportError:
pass
| #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from bigdl.nano.utils.log4Error import invalidInputError
import os
if os.getenv("LD_PRELOAD", "null") != "null":
invalidInputError(False,
errMsg="Users of `bigdl.chronos.autots` should "
"unset bigdl-nano environment variables!",
fixMsg="Please run `source bigdl-nano-unset-env` "
"in your bash terminal")
try:
# TODO: make this a LazyImport
from .autotsestimator import AutoTSEstimator
from .tspipeline import TSPipeline
except ImportError:
pass
| apache-2.0 | Python |
b52a937356f2112ecd5adcdf79ac6430169a735f | fix file close bug causing errors in pypy | Abhinav117/pymtl,Abhinav117/pymtl,Abhinav117/pymtl,Abhinav117/pymtl | new_pymtl/translation_tools/verilator_sim.py | new_pymtl/translation_tools/verilator_sim.py | #===============================================================================
# verilator_sim.py
#===============================================================================
#from verilator_cython import verilog_to_pymtl
from verilator_cffi import verilog_to_pymtl
import verilog
import os
import sys
import filecmp
#------------------------------------------------------------------------------
# get_verilated
#------------------------------------------------------------------------------
def get_verilated( model_inst ):
    model_inst.elaborate()
    model_name = model_inst.class_name
    # Translate the PyMTL module to Verilog, if we've already done
    # translation check if there's been any changes to the source
    verilog_file = model_name + '.v'
    temp_file = verilog_file + '.tmp'
    #verilog.translate( model_inst, open( verilog_file, 'w+' ) )
    #cached = False
    # Write the output to a temporary file
    fd = open( temp_file, 'w+' )
    verilog.translate( model_inst, fd )
    fd.close()
    # Check if the temporary file matches an existing file (caching)
    cached = False
    if os.path.exists( verilog_file ):
        cached = filecmp.cmp( temp_file, verilog_file )
        if not cached:
            os.system( ' diff %s %s'%( temp_file, verilog_file ))
    # Rename temp to actual output
    os.rename( temp_file, verilog_file )
    # Verilate the module only if we've updated the verilog source
    if not cached:
        print "NOT CACHED", verilog_file
        verilog_to_pymtl( model_inst, verilog_file )
    # Use some trickery to import the verilated version of the model
    sys.path.append( os.getcwd() )
    __import__( 'W' + model_name )
    imported_module = sys.modules[ 'W'+model_name ]
    # Get the model class from the module, instantiate and elaborate it
    model_class = imported_module.__dict__[ model_name ]
    model_inst = model_class()
    return model_inst
| #===============================================================================
# verilator_sim.py
#===============================================================================
#from verilator_cython import verilog_to_pymtl
from verilator_cffi import verilog_to_pymtl
import verilog
import os
import sys
import filecmp
#------------------------------------------------------------------------------
# get_verilated
#------------------------------------------------------------------------------
def get_verilated( model_inst ):
    model_inst.elaborate()
    model_name = model_inst.class_name
    # Translate the PyMTL module to Verilog, if we've already done
    # translation check if there's been any changes to the source
    verilog_file = model_name + '.v'
    temp_file = verilog_file + '.tmp'
    #verilog.translate( model_inst, open( verilog_file, 'w+' ) )
    #cached = False
    # Caching avoids regeneration/recompilation
    if os.path.exists( verilog_file ):
        verilog.translate( model_inst, open( temp_file, 'w+' ) )
        cached = filecmp.cmp( temp_file, verilog_file )
        if not cached:
            os.system( ' diff %s %s'%( temp_file, verilog_file ))
            os.rename( temp_file, verilog_file )
    else:
        verilog.translate( model_inst, open( verilog_file, 'w+' ) )
        cached = False
    # Verilate the module only if we've updated the verilog source
    if not cached:
        print "NOT CACHED", verilog_file
        verilog_to_pymtl( model_inst, verilog_file )
    # Use some trickery to import the verilated version of the model
    sys.path.append( os.getcwd() )
    __import__( 'W' + model_name )
    imported_module = sys.modules[ 'W'+model_name ]
    # Get the model class from the module, instantiate and elaborate it
    model_class = imported_module.__dict__[ model_name ]
    model_inst = model_class()
    return model_inst
| bsd-3-clause | Python |
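The bug fixed above comes from relying on garbage collection to close a file: CPython closes it promptly when the last reference dies, but PyPy may not, so a later read can see a partially flushed file. A hedged sketch of the deterministic alternative (plain Python, illustrative filename):

with open("example.tmp", "w+") as fd:
    fd.write("contents\n")
# fd is guaranteed closed here on CPython and PyPy alike.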
b0806c0b8b950a3007107cc58fb21e504cf09427 | Move serial device path to settings | ojarva/home-info-display,ojarva/home-info-display,ojarva/home-info-display,ojarva/home-info-display | homedisplay/control_milight/management/commands/listen_433.py | homedisplay/control_milight/management/commands/listen_433.py | from control_milight.utils import process_automatic_trigger
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
import serial
import time
class Command(BaseCommand):
    args = ''
    help = 'Listen for 433MHz radio messages'
    ITEM_MAP = {
        "5236713": "kitchen",
        "7697747": "hall",
        "1328959": "front-door",
        "247615": "unused-magnetic-switch",
        "8981913": "table",
    }
    def handle(self, *args, **options):
        s = serial.Serial(settings.ARDUINO_433, 9600)
        sent_event_map = {}
        while True:
            line = s.readline()
            print "- %s" % line
            if line.startswith("Received "):
                id = line.split(" ")[1]
                if id in self.ITEM_MAP:
                    item_name = self.ITEM_MAP[id]
                    if item_name in sent_event_map:
                        if sent_event_map[item_name] > time.time() - 5:
                            print "Too recent event: %s" % item_name
                            continue
                    process_automatic_trigger(item_name)
                    sent_event_map[item_name] = time.time()
                else:
                    print "Unknown id: %s" % id
| from django.core.management.base import BaseCommand, CommandError
from control_milight.utils import process_automatic_trigger
import serial
import time
class Command(BaseCommand):
    args = ''
    help = 'Listen for 433MHz radio messages'
    ITEM_MAP = {
        "5236713": "kitchen",
        "7697747": "hall",
        "1328959": "front-door",
        "247615": "unused-magnetic-switch",
        "8981913": "table",
    }
    def handle(self, *args, **options):
        s = serial.Serial("/dev/tty.usbserial-A9007LzM", 9600)
        sent_event_map = {}
        while True:
            line = s.readline()
            print "- %s" % line
            if line.startswith("Received "):
                id = line.split(" ")[1]
                if id in self.ITEM_MAP:
                    item_name = self.ITEM_MAP[id]
                    if item_name in sent_event_map:
                        if sent_event_map[item_name] > time.time() - 5:
                            print "Too recent event: %s" % item_name
                            continue
                    process_automatic_trigger(item_name)
                    sent_event_map[item_name] = time.time()
                else:
                    print "Unknown id: %s" % id
| bsd-3-clause | Python |
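A self-contained sketch of the five-second debounce used in handle() above: remember the last trigger time per item and suppress repeats inside the window (names are illustrative):

import time

def debounced(last_seen, item, window=5.0):
    # Return True if `item` may fire now; record the firing time in place.
    now = time.time()
    if item in last_seen and now - last_seen[item] < window:
        return False
    last_seen[item] = now
    return True

seen = {}
print(debounced(seen, "kitchen"))  # True: first event fires
print(debounced(seen, "kitchen"))  # False: suppressed inside the window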
5d083a15a71aac24c3c4d29dd753067a93c62495 | Fix id builtin being overwritten | Encrylize/EasyEuler | EasyEuler/data.py | EasyEuler/data.py | import collections
import json
import os
from jinja2 import Environment, FileSystemLoader
from EasyEuler import paths
class ProblemList(collections.Sequence):
    def __init__(self, problems):
        self._problems = problems
    def get(self, problem_id):
        if problem_id < 1 or len(self) < problem_id:
            # We don't want a negative index,
            # because it'll wrap back around.
            return None
        return self[problem_id]
    def __getitem__(self, problem_id):
        return self._problems[problem_id - 1]
    def __len__(self):
        return len(self._problems)
class ConfigurationDictionary(collections.Mapping):
    def __init__(self, configs):
        self._config = {}
        for config in configs:
            self._config = self._update(self._config, config)
    def _update(self, config, updates):
        for key, value in updates.items():
            if isinstance(value, collections.Mapping):
                updated = self._update(config.get(key, {}), value)
                config[key] = updated
            else:
                config[key] = value
        return config
    def get_language(self, key, value):
        for name, options in self._config['languages'].items():
            if options[key] == value:
                return {'name': name, **options}
        return None
    def __getitem__(self, key):
        return self._config[key]
    def __iter__(self):
        raise NotImplementedError
    def __len__(self):
        raise NotImplementedError
config_list = []
for CONFIG_PATH in paths.CONFIGS:
    if os.path.exists(CONFIG_PATH):
        with open(CONFIG_PATH) as conf:
            config_list.append(json.load(conf))
with open(paths.PROBLEMS) as f:
    problem_list = json.load(f)
config = ConfigurationDictionary(config_list)
problems = ProblemList(problem_list)
templates = Environment(loader=FileSystemLoader(paths.TEMPLATES))
| import collections
import json
import os
from jinja2 import Environment, FileSystemLoader
from EasyEuler import paths
class ProblemList(collections.Sequence):
    def __init__(self, problems):
        self._problems = problems
    def get(self, id):
        if id < 1 or len(self) < id:
            # We don't want a negative index, because it'll wrap back around.
            return None
        return self[id]
    def __getitem__(self, id):
        return self._problems[id - 1]
    def __len__(self):
        return len(self._problems)
class ConfigurationDictionary(collections.Mapping):
    def __init__(self, configs):
        self._config = {}
        for config in configs:
            self._config = self._update(self._config, config)
    def _update(self, config, updates):
        for key, value in updates.items():
            if isinstance(value, collections.Mapping):
                updated = self._update(config.get(key, {}), value)
                config[key] = updated
            else:
                config[key] = value
        return config
    def get_language(self, key, value):
        for name, options in self._config['languages'].items():
            if options[key] == value:
                return {'name': name, **options}
        return None
    def __getitem__(self, key):
        return self._config[key]
    def __iter__(self):
        raise NotImplementedError
    def __len__(self):
        raise NotImplementedError
config_list = []
for CONFIG_PATH in paths.CONFIGS:
    if os.path.exists(CONFIG_PATH):
        with open(CONFIG_PATH) as conf:
            config_list.append(json.load(conf))
with open(paths.PROBLEMS) as f:
    problem_list = json.load(f)
config = ConfigurationDictionary(config_list)
problems = ProblemList(problem_list)
templates = Environment(loader=FileSystemLoader(paths.TEMPLATES))
| mit | Python |
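A standalone sketch of the recursive merge implemented by ConfigurationDictionary._update above: nested mappings are merged key by key, with later configs overriding earlier ones (this sketch uses collections.abc, the Python 3 home of Mapping):

import collections.abc

def merge(config, updates):
    # Recurse into nested mappings; plain values simply overwrite.
    for key, value in updates.items():
        if isinstance(value, collections.abc.Mapping):
            config[key] = merge(config.get(key, {}), value)
        else:
            config[key] = value
    return config

print(merge({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3}))
# {'a': {'x': 1, 'y': 2}, 'b': 3}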
444b9b9d134e55378dd780e0093a5c2a27a95a09 | Expand authors + manifest cleaning | OCA/bank-payment,OCA/bank-payment | account_payment_partner/__openerp__.py | account_payment_partner/__openerp__.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# Account Payment Partner module for OpenERP
# Copyright (C) 2014 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Account Payment Partner',
    'version': '8.0.0.1.0',
    'category': 'Banking addons',
    'license': 'AGPL-3',
    'summary': 'Adds payment mode on partners and invoices',
    'description': """
Account Payment Partner
=======================
This module adds several fields:
* the *Supplier Payment Mode* and *Customer Payment Mode* on Partners,
* the *Payment Mode* on Invoices.
On a Payment Order, in the wizard *Select Invoices to Pay*, the invoices will
be filtered per Payment Mode.
""",
    'author': "Akretion, "
              "Serv. Tecnol. Avanzados - Pedro M. Baeza, "
              "Odoo Community Association (OCA)",
    'website': 'https://github.com/OCA/bank-payment',
    'depends': ['account_banking_payment_export'],
    'data': [
        'views/res_partner_view.xml',
        'views/account_invoice_view.xml',
        'security/ir.model.access.csv',
    ],
    'demo': ['demo/partner_demo.xml'],
    'installable': True,
}
| # -*- encoding: utf-8 -*-
##############################################################################
#
# Account Payment Partner module for OpenERP
# Copyright (C) 2014 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Account Payment Partner',
    'version': '8.0.0.1.0',
    'category': 'Banking addons',
    'license': 'AGPL-3',
    'summary': 'Adds payment mode on partners and invoices',
    'description': """
Account Payment Partner
=======================
This module adds several fields:
* the *Supplier Payment Mode* and *Customer Payment Mode* on Partners,
* the *Payment Mode* on Invoices.
On a Payment Order, in the wizard *Select Invoices to Pay*, the invoices will
be filtered per Payment Mode.
""",
    'author': "Akretion,Odoo Community Association (OCA)",
    'website': 'http://www.akretion.com',
    'contributors': ['Pedro M. Baeza <[email protected]>'],
    'depends': ['account_banking_payment_export'],
    'data': [
        'views/res_partner_view.xml',
        'views/account_invoice_view.xml',
        'security/ir.model.access.csv',
    ],
    'demo': ['demo/partner_demo.xml'],
    'installable': True,
}
| agpl-3.0 | Python |
483a66a693fd119192c12ee63c56a1da406fa3ca | fix templates path | arturtamborski/wypok,arturtamborski/wypok,arturtamborski/wypok,arturtamborski/wypok | accounts/views.py | accounts/views.py | from django.shortcuts import render
from django.urls import reverse
def profile(response, profile):
    return render(response, 'account/profile.html')
| from django.shortcuts import render
from django.urls import reverse
def profile(response, profile):
    return render(response, 'accounts/profile.html')
| mit | Python |
da9bab1d15d3f54d2ac65701e533b9bc34ebfea5 | remove test skip | cupy/cupy,cupy/cupy,cupy/cupy,cupy/cupy | tests/cupy_tests/array_api_tests/test_sorting_functions.py | tests/cupy_tests/array_api_tests/test_sorting_functions.py | import pytest
from cupy import array_api as xp
@pytest.mark.parametrize(
"obj, axis, expected",
[
([0, 0], -1, [0, 1]),
([0, 1, 0], -1, [1, 0, 2]),
([[0, 1], [1, 1]], 0, [[1, 0], [0, 1]]),
([[0, 1], [1, 1]], 1, [[1, 0], [0, 1]]),
],
)
def test_stable_desc_argsort(obj, axis, expected):
"""
Indices respect relative order of a descending stable-sort
See https://github.com/numpy/numpy/issues/20778
"""
x = xp.asarray(obj)
out = xp.argsort(x, axis=axis, stable=True, descending=True)
assert xp.all(out == xp.asarray(expected))
| import pytest
from cupy import array_api as xp
@pytest.mark.parametrize(
"obj, axis, expected",
[
([0, 0], -1, [0, 1]),
([0, 1, 0], -1, [1, 0, 2]),
([[0, 1], [1, 1]], 0, [[1, 0], [0, 1]]),
([[0, 1], [1, 1]], 1, [[1, 0], [0, 1]]),
],
)
@pytest.mark.skipif(
# https://github.com/cupy/cupy/issues/5701
True, reason="Sorting functions miss arguments kind and order")
def test_stable_desc_argsort(obj, axis, expected):
"""
Indices respect relative order of a descending stable-sort
See https://github.com/numpy/numpy/issues/20778
"""
x = xp.asarray(obj)
out = xp.argsort(x, axis=axis, stable=True, descending=True)
assert xp.all(out == xp.asarray(expected))
| mit | Python |
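An illustrative NumPy counterpart of the behaviour under test: a stable descending argsort can be emulated by stably argsorting the negated values, so equal elements keep their original relative order (valid for signed numeric data):

import numpy as np

x = np.array([0, 1, 0])
order = np.argsort(-x, kind='stable')
print(order)  # [1 0 2], the expected indices in the test above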
6231afb51f5653e210f41d47c66797c4bd4d738d | Make it possible for the user to change username | christophmeissner/volunteer_planner,pitpalme/volunteer_planner,christophmeissner/volunteer_planner,coders4help/volunteer_planner,alper/volunteer_planner,alper/volunteer_planner,alper/volunteer_planner,coders4help/volunteer_planner,klinger/volunteer_planner,pitpalme/volunteer_planner,christophmeissner/volunteer_planner,pitpalme/volunteer_planner,coders4help/volunteer_planner,klinger/volunteer_planner,klinger/volunteer_planner,volunteer-planner/volunteer_planner,volunteer-planner/volunteer_planner,coders4help/volunteer_planner,volunteer-planner/volunteer_planner,klinger/volunteer_planner,volunteer-planner/volunteer_planner,pitpalme/volunteer_planner,christophmeissner/volunteer_planner | accounts/views.py | accounts/views.py | # coding: utf-8
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.views.generic.edit import UpdateView
from django.core.urlresolvers import reverse_lazy
from volunteer_planner.utils import LoginRequiredMixin
@login_required()
def user_account_detail(request):
    user = request.user
    return render(request, 'user_detail.html', {'user': user})
class AccountUpdateView(LoginRequiredMixin, UpdateView):
    """
    Allows a user to update their profile.
    """
    fields = ['first_name', 'last_name', 'username']
    template_name = "user_account_edit.html"
    success_url = reverse_lazy('account_detail')
    def get_object(self, queryset=None):
        return self.request.user
| # coding: utf-8
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.views.generic.edit import UpdateView
from django.core.urlresolvers import reverse_lazy
from volunteer_planner.utils import LoginRequiredMixin
@login_required()
def user_account_detail(request):
    user = request.user
    return render(request, 'user_detail.html', {'user': user})
class AccountUpdateView(LoginRequiredMixin, UpdateView):
    """
    Allows a user to update their profile.
    """
    fields = ['first_name', 'last_name']
    template_name = "user_account_edit.html"
    success_url = reverse_lazy('account_detail')
    def get_object(self, queryset=None):
        return self.request.user
| agpl-3.0 | Python |
0bb558351a58caaca61eb381cc9a3a4ee4b881bb | format code | tomi77/bizzfuzz,tomi77/bizzfuzz | accounts/views.py | accounts/views.py | from django.shortcuts import render, redirect
from accounts.models import UserProfile
def index(request):
    users = UserProfile.objects.all()
    message = request.session.get('message', None)
    info = request.session.get('info', None)
    warning = request.session.get('warning', None)
    alert = request.session.get('alert', None)
    request.session['message'] = None
    request.session['info'] = None
    request.session['warning'] = None
    request.session['alert'] = None
    return render(request, 'accounts/index.html', {
        'users': users,
        'message': message,
        'info': info,
        'warning': warning,
        'alert': alert
    })
def add(request):
    if request.method == 'POST':
        user = UserProfile(username=request.POST.get('username'),
                           birthday=request.POST.get('birthday'))
        user.save()
        return redirect('/')
    return render(request, 'accounts/add.html')
def edit(request, pk):
    user = UserProfile.objects.get(pk=pk)
    if request.method == 'POST':
        user.username = request.POST.get('username', user.username)
        user.birthday = request.POST.get('birthday', user.birthday)
        user.save()
        return redirect('/')
    return render(request, 'accounts/edit.html', {
        'user': user
    })
def view(request, pk):
    user = UserProfile.objects.get(pk=pk)
    return render(request, 'accounts/view.html', {
        'user': user
    })
def delete(request, pk):
    try:
        user = UserProfile.objects.get(pk=pk)
        user.delete()
        request.session['message'] = 'User has been deleted'
    except UserProfile.DoesNotExist:
        request.session['alert'] = 'User does not exist'
    return redirect('/')
| from django.shortcuts import render, redirect
from accounts.models import UserProfile
def index(request):
    users = UserProfile.objects.all()
    message = request.session.get('message', None)
    info = request.session.get('info', None)
    warning = request.session.get('warning', None)
    alert = request.session.get('alert', None)
    request.session['message'] = None
    request.session['info'] = None
    request.session['warning'] = None
    request.session['alert'] = None
    return render(request, 'accounts/index.html', {
        'users': users,
        'message': message,
        'info': info,
        'warning': warning,
        'alert': alert
    })
def add(request):
    if request.method == 'POST':
        user = UserProfile(username=request.POST.get('username'),
                           birthday=request.POST.get('birthday'))
        user.save()
        return redirect('/')
    return render(request, 'accounts/add.html')
def edit(request, pk):
    user = UserProfile.objects.get(pk=pk)
    if request.method == 'POST':
        user.username = request.POST.get('username', user.username)
        user.birthday = request.POST.get('birthday', user.birthday)
        user.save()
        return redirect('/')
    return render(request, 'accounts/edit.html', {
        'user': user
    })
def view(request, pk):
    user = UserProfile.objects.get(pk=pk)
    return render(request, 'accounts/view.html', {
        'user': user
    })
def delete(request, pk):
    try:
        user = UserProfile.objects.get(pk=pk)
        user.delete()
        request.session['message'] = 'User has been deleted'
    except UserProfile.DoesNotExist:
        request.session['alert'] = 'User does not exist'
    return redirect('/')
| mit | Python |
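A common alternative to the hand-rolled session flags in this record is Django's messages framework; a hedged sketch, assuming django.contrib.messages is enabled in settings:

from django.contrib import messages
from django.shortcuts import redirect

def delete_user(request, pk):
    # messages handles storage and one-shot display; no manual session resets.
    messages.success(request, 'User has been deleted')
    return redirect('/')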
7c75da48d6746fc148a79051338c3cd554d75615 | Change variable name to next for logout function | openstax/openstax-cms,Connexions/openstax-cms,openstax/openstax-cms,openstax/openstax-cms,Connexions/openstax-cms,openstax/openstax-cms | accounts/views.py | accounts/views.py | from django.shortcuts import redirect
from django.contrib.auth import logout as auth_logout
from django.conf import settings
def logout(request):
"""Logs out user redirects if in request"""
next = request.GET.get('next', '')
auth_logout(request)
if next:
return redirect('{}/?next={}'.format(settings.OPENSTAX_ACCOUNTS_LOGOUT_URL, next))
else:
return redirect(settings.OPENSTAX_ACCOUNTS_LOGOUT_URL)
| from django.shortcuts import redirect
from django.contrib.auth import logout as auth_logout
from django.conf import settings
def logout(request):
"""Logs out user redirects if in request"""
r = request.GET.get('r', '')
auth_logout(request)
if r:
return redirect('{}/?r={}'.format(settings.OPENSTAX_ACCOUNTS_LOGOUT_URL, r))
else:
return redirect(settings.OPENSTAX_ACCOUNTS_LOGOUT_URL)
| agpl-3.0 | Python |
f57326e5f5c7d64d6f7d5f204bcf388de897d5b0 | Revise palindrome function names | bowen0701/algorithms_data_structures | alg_palindrome.py | alg_palindrome.py | """Palindrome: a string that read the same forward and backward.
For example: radar, madam.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def palindrome(a_str):
"""Check palindrom by front & rear match by Deque."""
from ds_deque import Deque
str_deque = Deque()
for s in a_str:
str_deque.add_rear(s)
still_match = True
while str_deque.size() > 1 and still_match:
first = str_deque.remove_front()
last = str_deque.remove_rear()
if first != last:
still_match = False
return still_match
def palindrom_recur(a_str):
"""Check palindrome by recursion."""
if len(a_str) <= 1:
return True
else:
return a_str[0] == a_str[-1] and palindrom_recur(a_str[1:-1])
def main():
    import time
    start_time = time.time()
    a_str = 'madam'
    print('{0}: {1}'.format(a_str, palindrome(a_str)))
    a_str = 'Bowen'
    print('{0}: {1}'.format(a_str, palindrome(a_str)))
    a_str = 'toot'
    print('{0}: {1}'.format(a_str, palindrome(a_str)))
    print('Time for palindrome(): {}'
          .format(time.time() - start_time))
    start_time = time.time()
    a_str = 'madam'
    print('{0}: {1}'.format(a_str, palindrom_recur(a_str)))
    a_str = 'Bowen'
    print('{0}: {1}'.format(a_str, palindrom_recur(a_str)))
    a_str = 'toot'
    print('{0}: {1}'.format(a_str, palindrom_recur(a_str)))
    print('Time for palindrom_recur(): {}'
          .format(time.time() - start_time))
if __name__ == '__main__':
    main()
| """Palindrome: a string that read the same forward and backward.
For example: radar, madam.
"""
from __future__ import print_function
def match_palindrome(a_str):
"""Check palindrom by front & rear match by Deque."""
from ds_deque import Deque
str_deque = Deque()
for s in a_str:
str_deque.add_rear(s)
still_match = True
while str_deque.size() > 1 and still_match:
first = str_deque.remove_front()
last = str_deque.remove_rear()
if first != last:
still_match = False
return still_match
def match_palindrom_recur(a_str):
"""Check palindrome by recursion."""
if len(a_str) <= 1:
return True
else:
return a_str[0] == a_str[-1] and match_palindrom_recur(a_str[1:-1])
def main():
import time
start_time = time.time()
a_str = 'madam'
print('{0}: {1}'.format(a_str, match_palindrome(a_str)))
a_str = 'Bowen'
print('{0}: {1}'.format(a_str, match_palindrome(a_str)))
a_str = 'toot'
print('{0}: {1}'.format(a_str, match_palindrome(a_str)))
print('Time for match_palindrome(): {}'
.format(time.time() - start_time))
start_time = time.time()
a_str = 'madam'
print('{0}: {1}'.format(a_str, match_palindrom_recur(a_str)))
a_str = 'Bowen'
print('{0}: {1}'.format(a_str, match_palindrom_recur(a_str)))
a_str = 'toot'
print('{0}: {1}'.format(a_str, match_palindrom_recur(a_str)))
print('Time for match_palindrom_recur(): {}'
.format(time.time() - start_time))
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
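For comparison (illustrative, not part of the record): idiomatic Python usually checks palindromes with slicing, which reverses the string in one step:

def is_palindrome(a_str):
    return a_str == a_str[::-1]

print(is_palindrome('madam'), is_palindrome('Bowen'))  # True False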
4f2fb3ac84216096411a5b6583e4fbb22c8e5196 | bump dev version | cggh/scikit-allel | allel/__init__.py | allel/__init__.py | # -*- coding: utf-8 -*-
# flake8: noqa
from allel import model
from allel import stats
from allel import plot
from allel import io
from allel import chunked
from allel import constants
from allel import util
# convenient shortcuts
from allel.model.ndarray import *
from allel.model.chunked import *
# experimental
try:
    import dask.array as _da
    from allel.model.dask import *
except ImportError:
    pass
# deprecated
try:
    import bcolz as _bcolz
    from allel.model.bcolz import *
except ImportError:
    pass
__version__ = '0.21.0.dev3'
| # -*- coding: utf-8 -*-
# flake8: noqa
from allel import model
from allel import stats
from allel import plot
from allel import io
from allel import chunked
from allel import constants
from allel import util
# convenient shortcuts
from allel.model.ndarray import *
from allel.model.chunked import *
# experimental
try:
    import dask.array as _da
    from allel.model.dask import *
except ImportError:
    pass
# deprecated
try:
    import bcolz as _bcolz
    from allel.model.bcolz import *
except ImportError:
    pass
__version__ = '0.21.0.dev2'
| mit | Python |
709f807368ea7915bc5c2f7d6236b3a24df92c8c | Simplify script for recorded ctrl message injection | ynsta/steamcontroller,ynsta/steamcontroller,oneru/steamcontroller,oneru/steamcontroller | scripts/sc-test-cmsg.py | scripts/sc-test-cmsg.py | #!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015 Stany MARCEL <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Steam Controller Control Message tester"""
import sys
import struct
import time
from steamcontroller import SteamController
def dump(sc, sci):
    print(sci)
def _main():
    try:
        sc = SteamController(callback=dump)
        for line in sys.stdin:
            sc.handleEvents()
            words = [int('0x'+x,16) for x in line.split()]
            sc._sendControl(struct.pack('>' + 'I' * len(words), *words))
        sc.run()
    except KeyboardInterrupt:
        pass
    except Exception as e:
        sys.stderr.write(str(e) + '\n')
    print("Bye")
if __name__ == '__main__':
    _main()
| #!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015 Stany MARCEL <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Steam Controller USB Dumper"""
import sys
import struct
import time
from steamcontroller import SteamController
def dump(sc, sci):
    print(sci)
def _main():
    try:
        sc = SteamController(callback=dump)
        sc.handleEvents()
        sc._sendControl(struct.pack('>' + 'I' * 1, 0x81000000))
        sc._sendControl(struct.pack('>' + 'I' * 6, 0x87153284, 0x03180000, 0x31020008, 0x07000707, 0x00301400, 0x2f010000))
        #sc._sendControl(struct.pack('>' + 'I' * 1, 0xad020000))
        #sc._sendControl(struct.pack('>' + 'I' * 1, 0xad020000))
        #sc._sendControl(struct.pack('>' + 'I' * 1, 0xa1000000))
        #sc._sendControl(struct.pack('>' + 'I' * 1, 0xad020000))
        #sc._sendControl(struct.pack('>' + 'I' * 1, 0x8e000000))
        #sc._sendControl(struct.pack('>' + 'I' * 1, 0x85000000))
        #sc._sendControl(struct.pack('>' + 'I' * 1, 0xa1000000))
        #sc._sendControl(struct.pack('>' + 'I' * 1, 0xb4000000))
        #sc._sendControl(struct.pack('>' + 'I' * 5, 0x9610730b, 0xc7191248, 0x074eff14, 0x464e82d6, 0xaa960000))
        #sc._sendControl(struct.pack('>' + 'I' * 1, 0xa1000000))
        #sc._sendControl(struct.pack('>' + 'I' * 5, 0x9610e0b5, 0xda3a1e90, 0x5b325088, 0x0a6224d2, 0x67690000))
        #sc._sendControl(struct.pack('>' + 'I' * 1, 0xa1000000))
        #sc._sendControl(struct.pack('>' + 'I' * 5, 0x96107ef6, 0x0e193e8c, 0xe61d2eda, 0xb80906eb, 0x9fe90000))
        #sc._sendControl(struct.pack('>' + 'I' * 1, 0xa1000000))
        #sc._sendControl(struct.pack('>' + 'I' * 5, 0x96106e4a, 0xa4753ef0, 0x017ab50a, 0x24390f1f, 0x71fa0000))
        #sc._sendControl(struct.pack('>' + 'I' * 1, 0x83000000))
        #sc._sendControl(struct.pack('>' + 'I' * 6, 0xae150100, 0x00000001, 0x02110000, 0x02030000, 0x000a6d92, 0xd2550400))
        sc.run()
    except KeyboardInterrupt:
        pass
    except Exception as e:
        sys.stderr.write(str(e) + '\n')
    print("Bye")
if __name__ == '__main__':
    _main()
| mit | Python |
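A standalone sketch of the message encoding used in both versions above: each whitespace-separated hex word becomes one big-endian unsigned 32-bit value (the input line is illustrative, not a real controller message):

import struct

line = "81000000 0000002f"
words = [int(w, 16) for w in line.split()]
payload = struct.pack('>' + 'I' * len(words), *words)
print(payload.hex())  # 810000000000002f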
6cbec939130ba8e17969e8d13b35765f9683b692 | add exception 2017/06/06 | maxis1314/pyutils,maxis1314/pyutils,maxis1314/pyutils | crawler/tools/MysqlBase.py | crawler/tools/MysqlBase.py | #-*- encoding:UTF-8 -*-
import urllib2
import re
import StringIO
import gzip
import logging
import sqlite3
import logutils
import urllib
import sys
import MySQLdb
reload(sys)
sys.setdefaultencoding('utf8')
class MysqlBase:
    def __init__(self,dbname):
        self.conn=None
        self.reconn=False
        self.dbname=dbname
        self.connect()
    def connect(self):
        if self.conn is not None:
            self.conn.close()
        self.conn=MySQLdb.connect(host='localhost',user='root',passwd='',port=3306)
        self.conn.select_db(self.dbname)
        self.conn.set_character_set('utf8')
    def reconnect(self):
        if self.reconn:
            self.connect()
    def execute(self,sql):
        try:
            self.reconnect()
            cur=self.conn.cursor()
            cur.execute(sql)
            self.conn.commit()
            cur.close()
        except Exception,e:
            print 'db failed, reason:%s' % str(e)
            return None
    def query(self,sql,value=None):
        self.reconnect()
        cur=self.conn.cursor()
        if value is None:
            cur.execute(sql)
        else:
            cur.execute(sql,value)
        alldata = cur.fetchall()
        cur.close()
        return alldata
    def insert(self,sql,value):
        values=[]
        values.append(value)
        self.multi_insert(sql,values)
    def multi_insert(self,sql,values):
        try:
            self.reconnect()
            cur=self.conn.cursor()
            cur.executemany(sql,values)
            self.conn.commit()
            cur.close()
        except Exception,e:
            print 'db failed, reason:%s' % str(e)
            return None
    def multi_insert_test(self):
        values=[]
        for i in range(20):
            values.append((i,'hi rollen'+str(i)))
        self.multi_insert('insert into test values(%s,%s)',values)
| #-*- encoding:UTF-8 -*-
import urllib2
import re
import StringIO
import gzip
import logging
import sqlite3
import logutils
import urllib
import sys
import MySQLdb
reload(sys)
sys.setdefaultencoding('utf8')
class MysqlBase:
    def __init__(self,dbname):
        self.conn=None
        self.reconn=False
        self.dbname=dbname
        self.connect()
    def connect(self):
        if self.conn is not None:
            self.conn.close()
        self.conn=MySQLdb.connect(host='localhost',user='root',passwd='',port=3306)
        self.conn.select_db(self.dbname)
        self.conn.set_character_set('utf8')
    def reconnect(self):
        if self.reconn:
            self.connect()
    def execute(self,sql):
        self.reconnect()
        cur=self.conn.cursor()
        cur.execute(sql)
        self.conn.commit()
        cur.close()
    def query(self,sql,value=None):
        self.reconnect()
        cur=self.conn.cursor()
        if value is None:
            cur.execute(sql)
        else:
            cur.execute(sql,value)
        alldata = cur.fetchall()
        cur.close()
        return alldata
    def insert(self,sql,value):
        values=[]
        values.append(value)
        self.multi_insert(sql,values)
    def multi_insert(self,sql,values):
        self.reconnect()
        cur=self.conn.cursor()
        cur.executemany(sql,values)
        self.conn.commit()
        cur.close()
    def multi_insert_test(self):
        values=[]
        for i in range(20):
            values.append((i,'hi rollen'+str(i)))
        self.multi_insert('insert into test values(%s,%s)',values)
| apache-2.0 | Python |
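A generic DB-API sketch of the bulk-insert pattern in multi_insert(), written against sqlite3 so it runs anywhere (the record targets MySQLdb, whose placeholder style is %s rather than ?):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE test (id INTEGER, msg TEXT)')
values = [(i, 'hi rollen' + str(i)) for i in range(20)]
conn.executemany('INSERT INTO test VALUES (?, ?)', values)  # one parameter binding per row
conn.commit()
print(conn.execute('SELECT COUNT(*) FROM test').fetchone()[0])  # 20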
1ceef7205121141cf3c01826a1bb5d01013e74db | clean cruft | mattvonrocketstein/ymir,mattvonrocketstein/ymir,mattvonrocketstein/ymir,mattvonrocketstein/ymir | ymir/data.py | ymir/data.py | # -*- coding: utf-8 -*-
""" ymir.data
"""
from fabric.colors import green
STATUS_DEAD = ['terminated', 'shutting-down']
OK = green(' ok')
| # -*- coding: utf-8 -*-
""" ymir.data
"""
from fabric.colors import green
DEFAULT_SUPERVISOR_PORT = 9001 # supervisor WUI port
STATUS_DEAD = ['terminated', 'shutting-down']
OK = green(' ok')
| mit | Python |
20c61a39b0f2bc35eabc41f519732e2706c6f59c | test domain is uuid | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/data_dictionary/tests/test_util.py | corehq/apps/data_dictionary/tests/test_util.py | import uuid
from django.test import TestCase
from mock import patch
from corehq.apps.data_dictionary.models import CaseType, CaseProperty
from corehq.apps.data_dictionary.util import generate_data_dictionary
class GenerateDictionaryTest(TestCase):
    domain = uuid.uuid4()
    def tearDown(self):
        CaseType.objects.filter(domain=self.domain).delete()
    @patch('corehq.apps.data_dictionary.util._get_all_case_properties')
    def test_no_types(self, mock):
        mock.return_value = {}
        generate_data_dictionary(self.domain)
        self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 0)
        self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 0)
    @patch('corehq.apps.data_dictionary.util._get_all_case_properties')
    def test_empty_type(self, mock):
        mock.return_value = {'': ['prop']}
        generate_data_dictionary(self.domain)
        self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 0)
        self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 0)
    @patch('corehq.apps.data_dictionary.util._get_all_case_properties')
    def test_no_properties(self, mock):
        mock.return_value = {'type': []}
        generate_data_dictionary(self.domain)
        self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 1)
        self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 0)
    @patch('corehq.apps.data_dictionary.util._get_all_case_properties')
    def test_one_type(self, mock):
        mock.return_value = {'type': ['property']}
        generate_data_dictionary(self.domain)
        self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 1)
        self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 1)
    @patch('corehq.apps.data_dictionary.util._get_all_case_properties')
    def test_two_types(self, mock):
        mock.return_value = {'type': ['property'], 'type2': ['property']}
        generate_data_dictionary(self.domain)
        self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 2)
        self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 2)
    @patch('corehq.apps.data_dictionary.util._get_all_case_properties')
    def test_two_properties(self, mock):
        mock.return_value = {'type': ['property', 'property2']}
        generate_data_dictionary(self.domain)
        self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 1)
        self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 2)
| from django.test import TestCase
from mock import patch
from corehq.apps.data_dictionary.models import CaseType, CaseProperty
from corehq.apps.data_dictionary.util import generate_data_dictionary
class GenerateDictionaryTest(TestCase):
    domain = 'data-dictionary'
    def tearDown(self):
        CaseType.objects.filter(domain=self.domain).delete()
    @patch('corehq.apps.data_dictionary.util._get_all_case_properties')
    def test_no_types(self, mock):
        mock.return_value = {}
        generate_data_dictionary(self.domain)
        self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 0)
        self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 0)
    @patch('corehq.apps.data_dictionary.util._get_all_case_properties')
    def test_empty_type(self, mock):
        mock.return_value = {'': ['prop']}
        generate_data_dictionary(self.domain)
        self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 0)
        self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 0)
    @patch('corehq.apps.data_dictionary.util._get_all_case_properties')
    def test_no_properties(self, mock):
        mock.return_value = {'type': []}
        generate_data_dictionary(self.domain)
        self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 1)
        self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 0)
    @patch('corehq.apps.data_dictionary.util._get_all_case_properties')
    def test_one_type(self, mock):
        mock.return_value = {'type': ['property']}
        generate_data_dictionary(self.domain)
        self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 1)
        self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 1)
    @patch('corehq.apps.data_dictionary.util._get_all_case_properties')
    def test_two_types(self, mock):
        mock.return_value = {'type': ['property'], 'type2': ['property']}
        generate_data_dictionary(self.domain)
        self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 2)
        self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 2)
    @patch('corehq.apps.data_dictionary.util._get_all_case_properties')
    def test_two_properties(self, mock):
        mock.return_value = {'type': ['property', 'property2']}
        generate_data_dictionary(self.domain)
        self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 1)
        self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 2)
| bsd-3-clause | Python |
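A short sketch of the isolation idea behind domain = uuid.uuid4() above: a random identifier per test class avoids collisions when test runs share state (string formatting shown for when a str is required):

import uuid

domain = 'test-domain-{}'.format(uuid.uuid4())
print(domain)  # different on every run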
1ade506f5408cbbe099bb83bd701472137470618 | Add extra version of py-contextlib2 (#15322) | LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/py-contextlib2/package.py | var/spack/repos/builtin/packages/py-contextlib2/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyContextlib2(PythonPackage):
"""contextlib2 is a backport of the standard library's contextlib module to
earlier Python versions."""
homepage = "https://contextlib2.readthedocs.io/en/stable/"
url = "https://github.com/jazzband/contextlib2/archive/v0.6.0.tar.gz"
version('0.6.0', sha256='4f18e2f28bb642aae9447aacec93b1319c8ee838711553c0a2bd906753f2ad33')
version('0.5.5', sha256='613569263db0271f34c8484792360272a731f2185567c31c8118e9c994412170')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:2.8,3.4:', type=('build', 'run'))
| # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyContextlib2(PythonPackage):
"""contextlib2 is a backport of the standard library's contextlib module to
earlier Python versions."""
homepage = "https://contextlib2.readthedocs.io/en/stable/"
url = "https://github.com/jazzband/contextlib2/archive/v0.6.0.tar.gz"
version('0.6.0', sha256='4f18e2f28bb642aae9447aacec93b1319c8ee838711553c0a2bd906753f2ad33')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:2.8,3.4:', type=('build', 'run'))
| lgpl-2.1 | Python |
71615632defe37681d1257912ea03f6e1cdeffde | add v1.1-3 (#20923) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/r-fitdistrplus/package.py | var/spack/repos/builtin/packages/r-fitdistrplus/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RFitdistrplus(RPackage):
"""Help to Fit of a Parametric Distribution to Non-Censored or Censored Data
Extends the fitdistr() function (of the MASS package) with several
functions to help the fit of a parametric distribution to non-censored or
censored data. Censored data may contain left censored, right censored and
interval censored values, with several lower and upper bounds. In addition
to maximum likelihood estimation (MLE), the package provides moment
matching (MME), quantile matching (QME) and maximum goodness-of-fit
estimation (MGE) methods (available only for non-censored data). Weighted
versions of MLE, MME and QME are available. See e.g. Casella & Berger
(2002). Statistical inference. Pacific Grove."""
homepage = "https://lbbe.univ-lyon1.fr/fitdistrplus.html"
url = "https://cloud.r-project.org/src/contrib/fitdistrplus_1.0-14.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/fitdistrplus"
version('1.1-3', sha256='776d5456e14398e44b78b3d7db526559bb7a3537e546a29c88aa192141c756de')
version('1.0-14', sha256='85082590f62aa08d99048ea3414c5cc1e5b780d97b3779d2397c6cb435470083')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-mass', type=('build', 'run'))
depends_on('r-survival', type=('build', 'run'))
depends_on('r-npsurv', when='@:1.0-14', type=('build', 'run'))
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RFitdistrplus(RPackage):
"""Extends the fitdistr() function (of the MASS package) with several
functions to help the fit of a parametric distribution to non-censored or
censored data. Censored data may contain left censored, right censored and
interval censored values, with several lower and upper bounds. In addition
to maximum likelihood estimation (MLE), the package provides moment
matching (MME), quantile matching (QME) and maximum goodness-of-fit
estimation (MGE) methods (available only for non-censored data). Weighted
versions of MLE, MME and QME are available. See e.g. Casella & Berger
(2002). Statistical inference. Pacific Grove."""
homepage = "https://lbbe.univ-lyon1.fr/fitdistrplus.html"
url = "https://cloud.r-project.org/src/contrib/fitdistrplus_1.0-14.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/fitdistrplus"
version('1.0-14', sha256='85082590f62aa08d99048ea3414c5cc1e5b780d97b3779d2397c6cb435470083')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-mass', type=('build', 'run'))
depends_on('r-survival', type=('build', 'run'))
depends_on('r-npsurv', type=('build', 'run'))
| lgpl-2.1 | Python |
7ac27aa4d365d02d998c3f4c82bc740791a1b515 | Update script.py | TingPing/plugins,TingPing/plugins | HexChat/script.py | HexChat/script.py | from __future__ import print_function
import os
import sys
if sys.version_info[0] < 3:
    import urllib as request
else:
    import urllib.request as request
import hexchat
__module_name__ = 'Script'
__module_author__ = 'TingPing'
__module_version__ = '3'
__module_description__ = 'Download scripts'
script_help = 'Script: Valid commands are:\n \
INSTALL script\n \
EDIT script\n \
UPDATE script\n \
REMOVE script'
addon_dir = os.path.join(hexchat.get_info('configdir'), 'addons')
# Store as preference?
addon_types = ('py', 'pl', 'lua', 'js') # tcl has no way to unload a single script?
addon_sites = ('http://raw.github.com/TingPing/plugins/master/HexChat/',
               'http://raw.github.com/Arnavion/random/master/hexchat/',
               'http://raw.github.com/Farow/hexchat-scripts/master/',
               'http://orvp.net/xchat/')
def expand_script(script):
    return os.path.join(addon_dir, script)
def download(script):
    if script.partition('.')[2] not in addon_types:
        print('Script: Not a valid script file type.')
        return False
    for site in addon_sites:
        if request.urlopen(site + script).getcode() == 200:
            print('Script: Downloading {}...'.format(script))
            request.urlretrieve(site + script, expand_script(script))
            return True
    print('Script: Could not find {}'.format(script))
def script_cb(word, word_eol, userdata):
    if len(word) > 2:
        cmd = word[1].lower()
        arg = word[2]
    else:
        hexchat.command('help script')
        return hexchat.EAT_ALL
    if cmd == 'install':
        if os.path.exists(expand_script(arg)):
            print('Script: {} is already installed.'.format(arg))
            return hexchat.EAT_ALL
        if download(arg):
            hexchat.command('timer .5 load ' + expand_script(arg))
    elif cmd == 'update':
        if os.path.exists(expand_script(arg)) and download(arg):
            hexchat.command('timer .5 unload ' + arg)
            hexchat.command('timer 1 load ' + arg)
    elif cmd == 'edit':
        hexchat.command('url ' + expand_script(arg))
    elif cmd == 'remove':
        if arg == 'script.py':
            print('Script: I refuse.')
            return hexchat.EAT_ALL
        if os.path.exists(expand_script(arg)):
            hexchat.command('unload ' + expand_script(arg))
            os.remove(expand_script(arg))
        else:
            print('Script: {} is not installed.'.format(arg))
    else:
        hexchat.command('help script')
    return hexchat.EAT_ALL
def unload_callback(userdata):
    print(__module_name__, 'version', __module_version__, 'unloaded.')
hexchat.hook_command('script', script_cb, help=script_help)
hexchat.hook_unload(unload_callback)
print(__module_name__, 'version', __module_version__, 'loaded.')
| from __future__ import print_function
import os
import sys
if sys.version_info[0] < 3:
    import urllib as request
else:
    import urllib.request as request
import hexchat
__module_name__ = 'Script'
__module_author__ = 'TingPing'
__module_version__ = '3'
__module_description__ = 'Download scripts'
script_help = 'Script: Valid commands are:\n \
INSTALL script\n \
EDIT script\n \
UPDATE script\n \
REMOVE script'
addon_dir = os.path.join(hexchat.get_info('configdir'), 'addons')
# Store as preference?
addon_types = ('py', 'pl', 'lua', 'js') # tcl has no way to unload a single script?
addon_sites = ('http://raw.github.com/TingPing/plugins/master/HexChat/',
               'http://raw.github.com/Arnavion/random/master/hexchat/',
               'http://orvp.net/xchat/')
def expand_script(script):
    return os.path.join(addon_dir, script)
def download(script):
    if script.partition('.')[2] not in addon_types:
        print('Script: Not a valid script file type.')
        return False
    for site in addon_sites:
        if request.urlopen(site + script).getcode() == 200:
            print('Script: Downloading {}...'.format(script))
            request.urlretrieve(site + script, expand_script(script))
            return True
    print('Script: Could not find {}'.format(script))
def script_cb(word, word_eol, userdata):
    if len(word) > 2:
        cmd = word[1].lower()
        arg = word[2]
    else:
        hexchat.command('help script')
        return hexchat.EAT_ALL
    if cmd == 'install':
        if os.path.exists(expand_script(arg)):
            print('Script: {} is already installed.'.format(arg))
            return hexchat.EAT_ALL
        if download(arg):
            hexchat.command('timer .5 load ' + expand_script(arg))
    elif cmd == 'update':
        if os.path.exists(expand_script(arg)) and download(arg):
            hexchat.command('timer .5 unload ' + arg)
            hexchat.command('timer 1 load ' + arg)
    elif cmd == 'edit':
        hexchat.command('url ' + expand_script(arg))
    elif cmd == 'remove':
        if arg == 'script.py':
            print('Script: I refuse.')
            return hexchat.EAT_ALL
        if os.path.exists(expand_script(arg)):
            hexchat.command('unload ' + expand_script(arg))
            os.remove(expand_script(arg))
        else:
            print('Script: {} is not installed.'.format(arg))
    else:
        hexchat.command('help script')
    return hexchat.EAT_ALL
def unload_callback(userdata):
    print(__module_name__, 'version', __module_version__, 'unloaded.')
hexchat.hook_command('script', script_cb, help=script_help)
hexchat.hook_unload(unload_callback)
print(__module_name__, 'version', __module_version__, 'loaded.')
| mit | Python |
6a899eeb5be7a8b49b45ff0fc0f490a5cad151bd | Add SourceGroup model | ambitioninc/django-entity-event,ambitioninc/django-entity-event | entity_event/models.py | entity_event/models.py | from django.db import models
from django.contrib.contenttypes.models import ContentType  # missing in the original; assumed path
from entity.models import Entity  # missing in the original; assumed path
import jsonfield  # missing in the original; assumed package
class Medium(models.Model):
    name = models.CharField(max_length=64, unique=True)
    display_name = models.CharField(max_length=64)
    description = models.TextField()
    def __unicode__(self):
        return self.display_name
class Source(models.Model):
    name = models.CharField(max_length=64, unique=True)
    display_name = models.CharField(max_length=64)
    description = models.TextField()
    group = models.ForeignKey('SourceGroup')
    def __unicode__(self):
        return self.display_name
class SourceGroup(models.Model):
    name = models.CharField(max_length=64, unique=True)
    display_name = models.CharField(max_length=64)
    description = models.TextField()
    def __unicode__(self):
        return self.display_name
class Subscription(models.Model):
    medium = models.ForeignKey('Medium')
    source = models.ForeignKey('Source')
    entity = models.ForeignKey(Entity)
    subentity_type = models.ForeignKey(ContentType, null=True)
    def __unicode__(self):
        s = '{entity} to {source} by {medium}'
        entity = self.entity.__unicode__()
        source = self.source.__unicode__()
        medium = self.medium.__unicode__()
        return s.format(entity=entity, source=source, medium=medium)
class Unsubscription(models.Model):
    entity = models.ForeignKey(Entity)
    medium = models.ForeignKey('Medium')
    source = models.ForeignKey('Source')
    def __unicode__(self):
        s = '{entity} from {source} by {medium}'
        entity = self.entity.__unicode__()
        source = self.source.__unicode__()
        medium = self.medium.__unicode__()
        return s.format(entity=entity, source=source, medium=medium)
class Event(models.Model):
    entity = models.ForeignKey(Entity)
    subentity_type = models.ForeignKey(ContentType, null=True)
    source = models.ForeignKey(Source)
    context = jsonfield.JSONField()
    time = models.DateTimeField(auto_now_add=True)
    time_expires = models.DateTimeField(null=True, default=None)
    uuid = models.CharField(max_length=128, unique=True)
    def __unicode__(self):
        s = '{source} event at {time}'
        source = self.source.__unicode__()
        time = self.time.strftime('%Y-%m-%d::%H:%M:%S')
        return s.format(source=source, time=time)
class EventSeen(models.Model):
    event = models.ForeignKey('Event')
    medium = models.ForeignKey(Medium)
    time_seen = models.DateTimeField(null=True, default=None)
    def __unicode__(self):
        s = 'seen by {medium} at {time}'
        medium = self.medium.__unicode__()
        time = self.time_seen.strftime('%Y-%m-%d::%H:%M:%S')
        return s.format(medium=medium, time=time)
| from django.db import models
from django.contrib.contenttypes.models import ContentType  # missing in the original; assumed path
from entity.models import Entity  # missing in the original; assumed path
import jsonfield  # missing in the original; assumed package
class Medium(models.Model):
    name = models.CharField(max_length=64, unique=True)
    display_name = models.CharField(max_length=64)
    description = models.TextField()
    def __unicode__(self):
        return self.display_name
class Source(models.Model):
    name = models.CharField(max_length=64, unique=True)
    display_name = models.CharField(max_length=64)
    description = models.TextField()
    def __unicode__(self):
        return self.display_name
class Subscription(models.Model):
    medium = models.ForeignKey('Medium')
    source = models.ForeignKey('Source')
    entity = models.ForeignKey(Entity)
    subentity_type = models.ForeignKey(ContentType, null=True)
    def __unicode__(self):
        s = '{entity} to {source} by {medium}'
        entity = self.entity.__unicode__()
        source = self.source.__unicode__()
        medium = self.medium.__unicode__()
        return s.format(entity=entity, source=source, medium=medium)
class Unsubscription(models.Model):
    entity = models.ForeignKey(Entity)
    medium = models.ForeignKey('Medium')
    source = models.ForeignKey('Source')
    def __unicode__(self):
        s = '{entity} from {source} by {medium}'
        entity = self.entity.__unicode__()
        source = self.source.__unicode__()
        medium = self.medium.__unicode__()
        return s.format(entity=entity, source=source, medium=medium)
class Event(models.Model):
    entity = models.ForeignKey(Entity)
    subentity_type = models.ForeignKey(ContentType, null=True)
    source = models.ForeignKey(Source)
    context = jsonfield.JSONField()
    time = models.DateTimeField(auto_now_add=True)
    time_expires = models.DateTimeField(null=True, default=None)
    uuid = models.CharField(max_length=128, unique=True)
    def __unicode__(self):
        s = '{source} event at {time}'
        source = self.source.__unicode__()
        time = self.time.strftime('%Y-%m-%d::%H:%M:%S')
        return s.format(source=source, time=time)
class EventSeen(models.Model):
    event = models.ForeignKey('Event')
    medium = models.ForeignKey(Medium)
    time_seen = models.DateTimeField(null=True, default=None)
    def __unicode__(self):
        s = 'seen by {medium} at {time}'
        medium = self.medium.__unicode__()
        time = self.time_seen.strftime('%Y-%m-%d::%H:%M:%S')
        return s.format(medium=medium, time=time)
| mit | Python |
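A side sketch of the string-reference pattern used by models.ForeignKey('SourceGroup') above: naming the target model as a string lets Django resolve it lazily, so declaration order does not matter (on_delete is required in modern Django and is an assumption here):

from django.db import models

class Source(models.Model):
    group = models.ForeignKey('SourceGroup', on_delete=models.CASCADE)

class SourceGroup(models.Model):
    name = models.CharField(max_length=64)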
fe4c426fe6384b570bcc2a105bdf04f2f412a31f | Use Query.executQuery for filterCasts.py | mgalbier/Envision,dimitar-asenov/Envision,dimitar-asenov/Envision,mgalbier/Envision,dimitar-asenov/Envision,lukedirtwalker/Envision,Vaishal-shah/Envision,mgalbier/Envision,Vaishal-shah/Envision,dimitar-asenov/Envision,Vaishal-shah/Envision,Vaishal-shah/Envision,lukedirtwalker/Envision,mgalbier/Envision,Vaishal-shah/Envision,lukedirtwalker/Envision,lukedirtwalker/Envision,mgalbier/Envision,Vaishal-shah/Envision,lukedirtwalker/Envision,dimitar-asenov/Envision,mgalbier/Envision,dimitar-asenov/Envision,lukedirtwalker/Envision | InformationScripting/scripts/filterCasts.py | InformationScripting/scripts/filterCasts.py | # filterCasts
classUses = Query.executeQuery('ast -t=CastExpression|attribute -at=castType -input|uses -input -t=Class', [])
def hasTypeIdMethod( cl ):
    for method in cl.methods:
        if method.name == "typeIdStatic":
            return True
    return False
for tuple in classUses[0].tuples("uses"):
    if hasTypeIdMethod(tuple.used):
        values = [("ast", tuple.user)]
        Query.result.add(Tuple(values))
Query.result = Query.toParent(["-t=CastExpression", "-addAs=node"], [Query.result])[0]
| # filterCasts
casts = Query.ast(["-t=CastExpression"] + Query.args, [])
castTypeAttributes = Query.attribute(["-at=castType", "-s=of"], casts)
classUses = Query.uses(["-s=of", "-t=Class"], castTypeAttributes)
def hasTypeIdMethod( cl ):
    for method in cl.methods:
        if method.name == "typeIdStatic":
            return True
    return False
for tuple in classUses[0].tuples("uses"):
    if hasTypeIdMethod(tuple.used):
        values = [("ast", tuple.user)]
        Query.result.add(Tuple(values))
Query.result = Query.toParent(["-t=CastExpression", "-addAs=node"], [Query.result])[0]
| bsd-3-clause | Python |
f5c94105f6652186e05ebe201f127a1c8b7bd94c | add script to download and save articles | fhamborg/news-please,fhamborg/news-please | newsplease/tests/downloadarticles.py | newsplease/tests/downloadarticles.py | import json
import os
from newsplease import NewsPlease  # import missing in the original; assumed path
name = 'trump-in-saudi-arabia.txt'
basepath = '/Users/felix/Downloads/'
download_dir = basepath + 'dir' + name + '/'
os.makedirs(download_dir)
articles = NewsPlease.download_from_file(basepath + name)
for url in articles:
    article = articles[url]
    with open(download_dir + article['filename'], 'w') as outfile:
        json.dump(article, outfile)
| import json
import os
from newsplease import NewsPlease  # import missing in the original; assumed path
name = 'trump-in-saudi-arabia.txt'
basepath = '/Users/felix/Downloads/'
download_dir = basepath + 'dir' + name + '/'
os.makedirs(download_dir)
articles = NewsPlease.download_from_file(basepath + name)
for url in articles:
    article = articles[url]
    with open(download_dir + article['filename'], 'w') as outfile:
        json.dump(article, outfile)
| apache-2.0 | Python |
f6686169cf7344e0c75c6d060332d3692fc7df1c | Update curation table format | EBIvariation/eva-cttv-pipeline | bin/trait_mapping/create_table_for_manual_curation.py | bin/trait_mapping/create_table_for_manual_curation.py | #!/usr/bin/env python3
import argparse
from eva_cttv_pipeline.trait_mapping.ols import (
    get_ontology_label_from_ols, is_current_and_in_efo, is_in_efo,
)
def find_previous_mapping(trait_name, previous_mappings):
    if trait_name not in previous_mappings:
        return ''
    uri = previous_mappings[trait_name]
    label = get_ontology_label_from_ols(uri)
    uri_is_current_and_in_efo = is_current_and_in_efo(uri)
    uri_in_efo = is_in_efo(uri)
    if uri_in_efo:
        trait_status = 'EFO_CURRENT' if uri_is_current_and_in_efo else 'EFO_OBSOLETE'
    else:
        trait_status = 'NOT_CONTAINED'
    trait_string = '|'.join([uri, label, 'NOT_SPECIFIED', 'previously-used', trait_status])
    return trait_string
def find_exact_mapping(trait_name, mappings):
    for mapping in mappings:
        if mapping.lower().split('|')[1] == trait_name:
            return mapping
    return ''
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-t', '--traits-for-curation',
        help='Table with traits for which the pipeline failed to make a confident prediction')
    parser.add_argument(
        '-m', '--previous-mappings',
        help='Table with all mappings previously issued by EVA')
    parser.add_argument(
        '-o', '--output',
        help='Output TSV to be loaded in Google Sheets for manual curation')
    args = parser.parse_args()
    outfile = open(args.output, 'w')
    # Load all previous mappings
    previous_mappings = dict(l.rstrip().split('\t') for l in open(args.previous_mappings))
    # Process all mappings which require manual curation
    for line in open(args.traits_for_curation):
        fields = line.rstrip().split('\t')
        trait_name, trait_freq = fields[:2]
        mappings = fields[2:]
        previous_mapping = find_previous_mapping(trait_name, previous_mappings)
        exact_mapping = find_exact_mapping(trait_name, mappings)
        out_line = '\t'.join(
            [trait_name, trait_freq, previous_mapping, exact_mapping] + mappings
        ) + '\n'
        outfile.write(out_line)
    outfile.close()
| #!/usr/bin/env python3
import argparse
from eva_cttv_pipeline.trait_mapping.ols import (
get_ontology_label_from_ols, is_current_and_in_efo, is_in_efo,
)
def find_previous_mapping(trait_name, previous_mappings):
if trait_name not in previous_mappings:
return ''
uri = previous_mappings[trait_name]
label = get_ontology_label_from_ols(uri)
uri_is_current_and_in_efo = is_current_and_in_efo(uri)
uri_in_efo = is_in_efo(uri)
if uri_in_efo:
trait_status = 'EFO_CURRENT' if uri_is_current_and_in_efo else 'EFO_OBSOLETE'
else:
trait_status = 'NOT_CONTAINED'
trait_string = '|'.join([uri, label, 'NOT_SPECIFIED', 'previously-used', trait_status])
return trait_string
def find_exact_mapping(trait_name, mappings):
for mapping in mappings:
if mapping.lower().split('|')[1] == trait_name:
return mapping
return ''
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-t', '--traits-for-curation',
help='Table with traits for which the pipeline failed to make a confident prediction')
parser.add_argument(
'-m', '--previous-mappings',
help='Table with all mappings previously issued by EVA')
parser.add_argument(
'-o', '--output',
help='Output TSV to be loaded in Google Sheets for manual curation')
args = parser.parse_args()
outfile = open(args.output, 'w')
# Load all previous mappings
previous_mappings = dict(l.rstrip().split('\t') for l in open(args.previous_mappings))
# Process all mappings which require manual curation
for line in open(args.traits_for_curation):
fields = line.rstrip().split('\t')
trait_name, trait_freq = fields[:2]
mappings = fields[2:]
previous_mapping = find_previous_mapping(trait_name, previous_mappings)
exact_mapping = find_exact_mapping(trait_name, mappings)
out_line = '\t'.join(
[trait_name, trait_freq,
# Mapping to use, if ready, comment, mapping URI, mapping label, whether exact, in EFO
'', '', '', '', '', '', '',
previous_mapping, exact_mapping] + mappings
) + '\n'
outfile.write(out_line)
outfile.close()
| apache-2.0 | Python |
c5902af643d639ecefa756a0caaeeb58a7c6d151 | Update P4_textToExcel working solution | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/AutomateTheBoringStuffWithPython/Chapter12/PracticeProjects/P4_textToExcel.py | books/AutomateTheBoringStuffWithPython/Chapter12/PracticeProjects/P4_textToExcel.py | # Write a program to read in the contents of several text files (you can make
# the text files yourself) and insert those contents into a spreadsheet, with
# one line of text per row. The lines of the first text file will be in the
# cells of column A, the lines of the second text file will be in the cells of
# column B, and so on.
import openpyxl
import os
FOLDER = "./p4files/"
# Open workbook
wb = openpyxl.Workbook()
sheet = wb.active
# Get list of files
filelist = os.listdir(FOLDER)
filelist.sort()
# Open file
for file in filelist:
with open(FOLDER + file) as fileObj:
index = 1
for line in fileObj:
# Transpose line into relevant workbook column
sheet.cell(row=index, column=(filelist.index(file) + 1)).value = line.strip()
index += 1
# Save workbook
wb.save("textToExcel.xlsx")
| # Write a program to read in the contents of several text files (you can make
# the text files yourself) and insert those contents into a spreadsheet, with
# one line of text per row. The lines of the first text file will be in the
# cells of column A, the lines of the second text file will be in the cells of
# column B, and so on.
import openpyxl
# Open workbook
wb = openpyxl.Workbook()
sheet = wb.active
# Get list of files
# Open file
# Scan lines into list
# Transpose list into relevant workbook column
# Close file
# Save workbook
wb.save("textToExcel.xlsx")
| mit | Python |
7597497017053356cdfbebc38aa1468240df2e45 | fix the install to use ./install requirements.txt | rajpushkar83/cloudmesh,rajpushkar83/cloudmesh,rajpushkar83/cloudmesh,rajpushkar83/cloudmesh,rajpushkar83/cloudmesh,rajpushkar83/cloudmesh,rajpushkar83/cloudmesh | fabfile/build.py | fabfile/build.py | from fabric.api import task, local, execute
import clean
__all__ = ['sdist', 'install', 'sphinx']
@task
def sdist():
"""create the sdist"""
execute(clean.all)
local("python setup.py sdist --format=bztar,zip")
@task
def install():
"""install cloudmesh"""
local("./install requirements.txt")
local("python setup.py install")
@task
def sphinx():
local("rm -rf /tmp/sphinx-contrib")
local("cd /tmp; hg clone http://bitbucket.org/birkenfeld/sphinx-contrib/")
local("cd /tmp/sphinx-contrib/autorun/; python setup.py install")
| from fabric.api import task, local, execute
import clean
__all__ = ['req', 'sdist', 'install', 'sphinx']
@task
def req():
"""install the requirements"""
local("pip install -r requirements.txt")
@task
def sdist():
"""create the sdist"""
execute(clean.all)
local("python setup.py sdist --format=bztar,zip")
@task
def install():
"""install cloudmesh"""
local("pip install -r requirements.txt")
local("python setup.py install")
@task
def sphinx():
local("rm -rf /tmp/sphinx-contrib")
local("cd /tmp; hg clone http://bitbucket.org/birkenfeld/sphinx-contrib/")
local("cd /tmp/sphinx-contrib/autorun/; python setup.py install")
| apache-2.0 | Python |
9646fb2b7f7f441c6630e04fa1e1af358f9c7d10 | Set version to 0.20 final | emory-libraries/eulexistdb,emory-libraries/eulexistdb,emory-libraries/eulexistdb | eulexistdb/__init__.py | eulexistdb/__init__.py | # file eulexistdb/__init__.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interact with `eXist-db`_ XML databases.
This package provides classes to ease interaction with eXist XML databases.
It contains the following modules:
* :mod:`eulexistdb.db` -- Connect to the database and query
* :mod:`eulexistdb.query` -- Query :class:`~eulxml.xmlmap.XmlObject`
models from eXist with semantics like a Django_ QuerySet
.. _eXist-db: http://exist.sourceforge.net/
.. _Django: http://www.djangoproject.com/
"""
__version_info__ = (0, 20, 0, None)
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join([str(i) for i in __version_info__[:-1]])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
| # file eulexistdb/__init__.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interact with `eXist-db`_ XML databases.
This package provides classes to ease interaction with eXist XML databases.
It contains the following modules:
* :mod:`eulexistdb.db` -- Connect to the database and query
* :mod:`eulexistdb.query` -- Query :class:`~eulxml.xmlmap.XmlObject`
models from eXist with semantics like a Django_ QuerySet
.. _eXist-db: http://exist.sourceforge.net/
.. _Django: http://www.djangoproject.com/
"""
__version_info__ = (0, 20, 0, 'dev')
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join([str(i) for i in __version_info__[:-1]])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
| apache-2.0 | Python |
f633df6bb8e0e84699db2f47178f4b402ccc07a8 | Fix `OverflowError`. | ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/icekit-events,ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/icekit-events,ic-labs/icekit-events | eventkit/utils/time.py | eventkit/utils/time.py | from datetime import datetime, timedelta
from timezone import timezone
ROUND_DOWN = 'ROUND_DOWN'
ROUND_NEAREST = 'ROUND_NEAREST'
ROUND_UP = 'ROUND_UP'
WEEKDAYS = {
'MON': 0,
'TUE': 1,
'WED': 2,
'THU': 3,
'FRI': 4,
'SAT': 5,
'SUN': 6,
}
MON = 'MON'
TUE = 'TUE'
WED = 'WED'
THU = 'THU'
FRI = 'FRI'
SAT = 'SAT'
SUN = 'SUN'
def round_datetime(when=None, precision=60, rounding=ROUND_NEAREST):
"""
Round a datetime object to a time that matches the given precision.
when (datetime), default now
The datetime object to be rounded.
precision (int, timedelta, str), default 60
The number of seconds, weekday (MON, TUE, WED, etc.) or timedelta
object to which the datetime object should be rounded.
rounding (str), default ROUND_NEAREST
The rounding method to use (ROUND_DOWN, ROUND_NEAREST, ROUND_UP).
"""
when = when or timezone.now()
weekday = WEEKDAYS.get(precision, WEEKDAYS['MON'])
if precision in WEEKDAYS:
precision = int(timedelta(days=7).total_seconds())
elif isinstance(precision, timedelta):
precision = int(precision.total_seconds())
# Get delta between the beginning of time and the given datetime object.
# If precision is a weekday, the beginning of time must be that same day.
when_min = when.min + timedelta(days=weekday)
if timezone.is_aware(when):
# It doesn't seem to be possible to localise the `min` datetime without
# raising `OverflowError`, so create a timezone aware object manually.
when_min = datetime(tzinfo=when.tzinfo, *when_min.timetuple()[:3])
delta = when - when_min
remainder = int(delta.total_seconds()) % precision
# First round down and strip microseconds.
when -= timedelta(seconds=remainder, microseconds=when.microsecond)
# Then add precision to round up.
if rounding == ROUND_UP or (
rounding == ROUND_NEAREST and remainder >= precision / 2):
when += timedelta(seconds=precision)
return when
| from datetime import timedelta
from timezone import timezone
ROUND_DOWN = 'ROUND_DOWN'
ROUND_NEAREST = 'ROUND_NEAREST'
ROUND_UP = 'ROUND_UP'
WEEKDAYS = {
'MON': 0,
'TUE': 1,
'WED': 2,
'THU': 3,
'FRI': 4,
'SAT': 5,
'SUN': 6,
}
MON = 'MON'
TUE = 'TUE'
WED = 'WED'
THU = 'THU'
FRI = 'FRI'
SAT = 'SAT'
SUN = 'SUN'
def round_datetime(when=None, precision=60, rounding=ROUND_NEAREST):
"""
Round a datetime object to a time that matches the given precision.
when (datetime), default now
The datetime object to be rounded.
precision (int, timedelta, str), default 60
The number of seconds, weekday (MON, TUE, WED, etc.) or timedelta
object to which the datetime object should be rounded.
rounding (str), default ROUND_NEAREST
The rounding method to use (ROUND_DOWN, ROUND_NEAREST, ROUND_UP).
"""
when = when or timezone.now()
weekday = WEEKDAYS.get(precision, WEEKDAYS['MON'])
if precision in WEEKDAYS:
precision = int(timedelta(days=7).total_seconds())
elif isinstance(precision, timedelta):
precision = int(precision.total_seconds())
# Get delta between the beginning of time and the given datetime object.
# If precision is a weekday, the beginning of time must be that same day.
when_min = when.min + timedelta(days=weekday)
if timezone.is_aware(when):
when_min = \
timezone.datetime(tzinfo=when.tzinfo, *when_min.timetuple()[:3])
delta = when - when_min
remainder = int(delta.total_seconds()) % precision
# First round down and strip microseconds.
when -= timedelta(seconds=remainder, microseconds=when.microsecond)
# Then add precision to round up.
if rounding == ROUND_UP or (
rounding == ROUND_NEAREST and remainder >= precision / 2):
when += timedelta(seconds=precision)
return when
| mit | Python |
3b4de1be81c7951ca064ff46e1f3e1ed95436ae3 | fix XSS vulnerability | Zopieux/bootstrap-breadcrumbs,prymitive/bootstrap-breadcrumbs,prymitive/bootstrap-breadcrumbs,Zopieux/bootstrap-breadcrumbs | django_bootstrap_breadcrumbs/templatetags/django_bootstrap_breadcrumbs.py | django_bootstrap_breadcrumbs/templatetags/django_bootstrap_breadcrumbs.py | # -*- coding: utf-8 -*-
"""
:copyright: Copyright 2013 by Łukasz Mierzwa
:contact: [email protected]
"""
from inspect import ismethod
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.db.models import Model
from django import template
register = template.Library()
CONTEXT_KEY = 'DJANGO_BREADCRUMB_LINKS'
def breadcrumb(context, label, viewname, *args):
"""
Add link to list of breadcrumbs, usage:
{% load bubbles_breadcrumbs %}
{% breadcrumb "Home" "index" %}
Remember to use it inside {% block %} with {{ block.super }} to get all
parent breadcrumbs.
:param label: Breadcrumb link label.
:param viewname: Name of the view to link this breadcrumb to, or Model
instance with implemented get_absolute_url().
:param args: Any arguments to view function.
"""
context['request'].META[CONTEXT_KEY] = context['request'].META.get(
CONTEXT_KEY, []) + [(escape(label), viewname, args)]
return ''
def render_breadcrumbs(context):
"""
Render breadcrumbs html using twitter bootstrap css classes.
"""
links = []
for (label, viewname, args) in context['request'].META.get(
CONTEXT_KEY, []):
if isinstance(viewname, Model) and hasattr(
viewname, 'get_absolute_url') and ismethod(
viewname.get_absolute_url):
url = viewname.get_absolute_url()
else:
try:
url = reverse(viewname=viewname, args=args)
except NoReverseMatch:
url = viewname
links.append((url, _(unicode(label)) if label else label))
if not links:
return ''
ret = '<ul class="breadcrumb">'
total = len(links)
for (i, (url, label)) in enumerate(links, 1):
ret += '<li>'
if total > 1 and i < total:
ret += '<a href="%s">%s</a>' % (url, label)
ret += ' <span class="divider">/</span>'
else:
ret += label
ret += '</li>'
ret += '</ul>'
return mark_safe(ret)
register.simple_tag(takes_context=True)(breadcrumb)
register.simple_tag(takes_context=True)(render_breadcrumbs)
| # -*- coding: utf-8 -*-
"""
:copyright: Copyright 2013 by Łukasz Mierzwa
:contact: [email protected]
"""
from inspect import ismethod
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.db.models import Model
from django import template
register = template.Library()
CONTEXT_KEY = 'DJANGO_BREADCRUMB_LINKS'
def breadcrumb(context, label, viewname, *args):
"""
Add link to list of breadcrumbs, usage:
{% load bubbles_breadcrumbs %}
{% breadcrumb "Home" "index" %}
Remember to use it inside {% block %} with {{ block.super }} to get all
parent breadcrumbs.
:param label: Breadcrumb link label.
:param viewname: Name of the view to link this breadcrumb to, or Model
instance with implemented get_absolute_url().
:param args: Any arguments to view function.
"""
context['request'].META[CONTEXT_KEY] = context['request'].META.get(
CONTEXT_KEY, []) + [(label, viewname, args)]
return ''
def render_breadcrumbs(context):
"""
Render breadcrumbs html using twitter bootstrap css classes.
"""
links = []
for (label, viewname, args) in context['request'].META.get(
CONTEXT_KEY, []):
if isinstance(viewname, Model) and hasattr(
viewname, 'get_absolute_url') and ismethod(
viewname.get_absolute_url):
url = viewname.get_absolute_url()
else:
try:
url = reverse(viewname=viewname, args=args)
except NoReverseMatch:
url = viewname
links.append((url, _(unicode(label)) if label else label))
if not links:
return ''
ret = '<ul class="breadcrumb">'
total = len(links)
for (i, (url, label)) in enumerate(links, 1):
ret += '<li>'
if total > 1 and i < total:
ret += '<a href="%s">%s</a>' % (url, label)
ret += ' <span class="divider">/</span>'
else:
ret += label
ret += '</li>'
ret += '</ul>'
return mark_safe(ret)
register.simple_tag(takes_context=True)(breadcrumb)
register.simple_tag(takes_context=True)(render_breadcrumbs)
| mit | Python |
41ea0dd8c48ef8a336422482e9bbd1911bb7e168 | Make that it works in 90% of the cases. 3:30. | janraasch/sublimetext-commitment,janraasch/sublimetext-commitment | Commitment.py | Commitment.py | import sublime
import sublime_plugin
import HTMLParser
from commit import Commitment
whatthecommit = 'http://whatthecommit.com/'
randomMessages = Commitment()
class CommitmentToClipboardCommand(sublime_plugin.WindowCommand):
def run(self):
commit = randomMessages.get()
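# Strip raw newlines, turn <br/> tags into newlines, then unescape HTML entities.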
message = HTMLParser.HTMLParser().unescape(commit.get('message', '').replace('\n','').replace('<br/>', '\n'))
message_hash = commit.get('message_hash', '')
if message:
print 'Commitment: ' + '\n' + message + '\n' + 'Permalink: ' + whatthecommit + message_hash
sublime.set_clipboard(message)
class CommitmentToStatusBarCommand(sublime_plugin.WindowCommand):
def run(self):
commit = randomMessages.get()
message = HTMLParser.HTMLParser().unescape(commit.get('message', '').replace('\n','').replace('<br/>', '\n'))
message_hash = commit.get('message_hash', '')
if message:
print 'Commitment: ' + '\n' + message + '\n' + 'Permalink: ' + whatthecommit + message_hash
sublime.status_message(message) | import sublime
import sublime_plugin
from commit import Commitment
whatthecommit = 'http://whatthecommit.com/'
randomMessages = Commitment()
class CommitmentToClipboardCommand(sublime_plugin.WindowCommand):
def run(self):
commit = randomMessages.get()
message = commit.get('message', '')
message_hash = commit.get('message_hash', '')
if message:
print 'Commitment: ' + message + '\n' + 'Permalink: ' + whatthecommit + message_hash
sublime.set_clipboard(message)
class CommitmentToStatusBarCommand(sublime_plugin.WindowCommand):
def run(self):
commit = randomMessages.get()
message = commit.get('message', '')
message_hash = commit.get('message_hash', '')
if message:
print 'Commitment: ' + message + '\n' + 'Permalink: ' + whatthecommit + message_hash
sublime.status_message(message) | mit | Python |
81c32c9bc0868f7ccd764d8432fd46ccb7e6a8ef | Use get instead | andela-sjames/paystack-python | paystackapi/tests/test_transfer.py | paystackapi/tests/test_transfer.py | import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.transfer import Transfer
class TestTransfer(BaseTestCase):
@httpretty.activate
def test_initiate(self):
"""Method defined to test transfer initiation."""
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/transfer"),
content_type='text/json',
body='{"status": true, "message": "Transfer requires OTP to continue"}',
status=201,
)
response = Transfer.initiate(
source="balance",
reason="Calm down",
amount="3794800",
recipient="RCP_gx2wn530m0i3w3m",
)
self.assertTrue(response['status'])
@httpretty.activate
def test_list(self):
"""Method defined to test transfer list."""
httpretty.register_uri(
httpretty.GET,
self.endpoint_url("/transfer"),
content_type='text/json',
body='{"status": true, "message": "Transfers retrieved"}',
status=201,
)
response = Transfer.list(
perPage=3,
page=1
)
self.assertTrue(response['status'])
| import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.transfer import Transfer
class TestTransfer(BaseTestCase):
@httpretty.activate
def test_initiate(self):
"""Method defined to test transfer initiation."""
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/transfer"),
content_type='text/json',
body='{"status": true, "message": "Transfer requires OTP to continue"}',
status=201,
)
response = Transfer.initiate(
source="balance",
reason="Calm down",
amount="3794800",
recipient="RCP_gx2wn530m0i3w3m",
)
self.assertTrue(response['status'])
@httpretty.activate
def test_list(self):
"""Method defined to test transfer list."""
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/transfer"),
content_type='text/json',
body='{"status": true, "message": "Transfers retrieved"}',
status=201,
)
response = Transfer.list(
perPage=3,
page=1
)
self.assertTrue(response['status'])
| mit | Python |
8fd65190a2a68a7afeab91b0a02c83309f72ccd6 | Add tests to gen_test for generator, seems to work | virtuald/greenado,virtuald/greenado | tests/test_testing.py | tests/test_testing.py |
import greenado
from greenado.testing import gen_test
from tornado.testing import AsyncTestCase
from tornado import gen
@gen.coroutine
def coroutine():
raise gen.Return(1234)
class GreenadoTests(AsyncTestCase):
@gen_test
def test_without_timeout1(self):
assert greenado.gyield(coroutine()) == 1234
@gen_test
@greenado.generator
def test_without_timeout2(self):
assert (yield coroutine()) == 1234
@gen_test(timeout=5)
def test_with_timeout1(self):
assert greenado.gyield(coroutine()) == 1234
@gen_test(timeout=5)
@greenado.generator
def test_with_timeout2(self):
assert (yield coroutine()) == 1234
|
import greenado
from greenado.testing import gen_test
from tornado.testing import AsyncTestCase
from tornado import gen
@gen.coroutine
def coroutine():
raise gen.Return(1234)
class GreenadoTests(AsyncTestCase):
@gen_test
def test_without_timeout(self):
assert greenado.gyield(coroutine()) == 1234
@gen_test(timeout=5)
def test_with_timeout(self):
assert greenado.gyield(coroutine()) == 1234
| apache-2.0 | Python |
0d313502b8b5d850109b48cde8d3dea2dae0d802 | Clean up __init__.py. | graingert/vcrpy,poussik/vcrpy,gwillem/vcrpy,yarikoptic/vcrpy,ByteInternet/vcrpy,kevin1024/vcrpy,aclevy/vcrpy,IvanMalison/vcrpy,poussik/vcrpy,ByteInternet/vcrpy,mgeisler/vcrpy,kevin1024/vcrpy,bcen/vcrpy,agriffis/vcrpy,graingert/vcrpy | vcr/__init__.py | vcr/__init__.py | import logging
from .config import VCR
# Set default logging handler to avoid "No handler found" warnings.
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
default_vcr = VCR()
use_cassette = default_vcr.use_cassette
| import logging
from .config import VCR
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
default_vcr = VCR()
def use_cassette(path, **kwargs):
return default_vcr.use_cassette(path, **kwargs)
| mit | Python |
e353bae122c6e55da022d73c42d7eee09a558b44 | clean code | VisualDL/VisualDL,VisualDL/VisualDL,VisualDL/VisualDL,VisualDL/VisualDL,VisualDL/VisualDL | bin/visual_dl.py | bin/visual_dl.py | """ entry point of visual_dl
"""
import json
import os
import sys
from optparse import OptionParser
from flask import Flask, redirect
from flask import send_from_directory
from visualdl.log import logger
app = Flask(__name__, static_url_path="")
def option_parser():
"""
:return:
"""
parser = OptionParser(usage="usage: visual_dl visual_dl.py "\
"-p port [options]")
parser.add_option(
"-p",
"--port",
default=8040,
action="store",
dest="port",
help="rest api service port")
return parser.parse_args()
# return data
# status, msg, data
def gen_result(status, msg):
"""
:param status:
:param msg:
:return:
"""
result = dict()
result['status'] = status
result['msg'] = msg
result['data'] = {}
return result
server_path = os.path.abspath(os.path.dirname(sys.argv[0]))
static_file_path = "../visualdl/frontend/dist/"
@app.route('/static/<path:filename>')
def serve_static(filename):
return send_from_directory(os.path.join(server_path, static_file_path), filename)
@app.route("/")
def index():
return redirect('/static/index.html', code=302)
@app.route('/hello')
def hello():
result = gen_result(0, "Hello, this is VisualDL!")
return json.dumps(result)
if __name__ == '__main__':
options, args = option_parser()
logger.info(" port=" + str(options.port))
app.run(debug=False, host="0.0.0.0", port=options.port)
| """ entry point of visual_dl
"""
import json
import os
import sys
from optparse import OptionParser
from flask import Flask, redirect
from flask import send_from_directory
from visualdl.log import logger
app = Flask(__name__, static_url_path="")
def option_parser():
"""
:return:
"""
parser = OptionParser(usage="usage: visual_dl visual_dl.py "\
"-p port [options]")
parser.add_option(
"-p",
"--port",
default=8040,
action="store",
dest="port",
help="rest api service port")
return parser.parse_args()
# return data
# status, msg, data
def gen_result(status, msg):
"""
:param status:
:param msg:
:return:
"""
result = dict()
result['status'] = status
result['msg'] = msg
result['data'] = {}
return result
server_path = os.path.abspath(os.path.dirname(sys.argv[0]))
static_file_path = "../visualdl/frontend/dist/"
@app.route('/static/<path:filename>')
def serve_static(filename):
print("aaa")
return send_from_directory(os.path.join(server_path, static_file_path), filename)
@app.route("/")
def index():
return redirect('/static/index.html', code=302)
@app.route('/hello')
def hello():
result = gen_result(0, "Hello, this is VisualDL!")
return json.dumps(result)
if __name__ == '__main__':
options, args = option_parser()
logger.info(" port=" + str(options.port))
app.run(debug=False, host="0.0.0.0", port=options.port)
| apache-2.0 | Python |
3c72aa1266f1008552a3979ac057251bf2f93053 | Bump tensorflow in /training/xgboost/structured/base (#212) | GoogleCloudPlatform/ai-platform-samples,GoogleCloudPlatform/ai-platform-samples | training/xgboost/structured/base/setup.py | training/xgboost/structured/base/setup.py | #!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from setuptools import find_packages
from setuptools import setup
# While this is an xgboost sample, we will still require tensorflow and
# scikit-learn to be installed, since the sample uses certain functionalities
# available in those libraries:
# tensorflow: mainly to copy files seamlessly to GCS
# scikit-learn: the helper functions it provides, e.g. splitting datasets
REQUIRED_PACKAGES = [
'tensorflow==1.15.4',
'scikit-learn==0.20.2',
'pandas==0.24.2',
'xgboost==0.81',
'cloudml-hypertune',
]
setup(
name='trainer',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='AI Platform | Training | xgboost | Base'
)
| #!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from setuptools import find_packages
from setuptools import setup
# While this is an xgboost sample, we will still require tensorflow and
# scikit-learn to be installed, since the sample uses certain functionalities
# available in those libraries:
# tensorflow: mainly to copy files seamlessly to GCS
# scikit-learn: the helper functions it provides, e.g. splitting datasets
REQUIRED_PACKAGES = [
'tensorflow==1.15.2',
'scikit-learn==0.20.2',
'pandas==0.24.2',
'xgboost==0.81',
'cloudml-hypertune',
]
setup(
name='trainer',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='AI Platform | Training | xgboost | Base'
)
| apache-2.0 | Python |
2b850c2f20208e3813a6e85ccafbedf221bdbcbd | Speed up a test by mocking Ticket refreshing. | peplin/astral | astral/api/tests/test_ticket.py | astral/api/tests/test_ticket.py | from nose.tools import eq_, ok_
from tornado.httpclient import HTTPRequest
import json
import mockito
from astral.api.client import TicketsAPI
from astral.api.tests import BaseTest
from astral.models import Ticket, Stream, Node
from astral.models.tests.factories import TicketFactory
class TicketHandlerTest(BaseTest):
def test_delete(self):
node = Node.me()
ticket = TicketFactory(destination=node)
self.http_client.fetch(HTTPRequest(
self.get_url(ticket.absolute_url()), 'DELETE'), self.stop)
response = self.wait()
eq_(response.code, 200)
eq_(Ticket.get_by(id=ticket.id), None)
ok_(Stream.get_by(slug=ticket.stream.slug))
def test_get(self):
node = Node.me()
ticket = TicketFactory(destination=node)
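# Stub the remote TicketsAPI.create call so the test stays fast and offline.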
mockito.when(TicketsAPI).create(mockito.any(),
destination_uuid=mockito.any()).thenReturn(
{'source': ticket.destination.uuid,
'source_port': ticket.source_port,
'hops': ticket.hops})
response = self.fetch(ticket.absolute_url())
eq_(response.code, 200)
result = json.loads(response.body)
ok_('ticket' in result)
eq_(result['ticket']['stream'], ticket.stream.slug)
def test_confirm(self):
node = Node.me()
ticket = TicketFactory(destination=node, confirmed=False)
data = {'confirmed': True}
eq_(ticket.confirmed, False)
self.http_client.fetch(HTTPRequest(
self.get_url(ticket.absolute_url()), 'PUT', body=json.dumps(data)),
self.stop)
response = self.wait()
eq_(response.code, 200)
eq_(ticket.confirmed, True)
| from nose.tools import eq_, ok_
from tornado.httpclient import HTTPRequest
import json
from astral.api.tests import BaseTest
from astral.models import Ticket, Stream, Node
from astral.models.tests.factories import TicketFactory
class TicketHandlerTest(BaseTest):
def test_delete(self):
node = Node.me()
ticket = TicketFactory(destination=node)
self.http_client.fetch(HTTPRequest(
self.get_url(ticket.absolute_url()), 'DELETE'), self.stop)
response = self.wait()
eq_(response.code, 200)
eq_(Ticket.get_by(id=ticket.id), None)
ok_(Stream.get_by(slug=ticket.stream.slug))
def test_get(self):
node = Node.me()
ticket = TicketFactory(destination=node)
response = self.fetch(ticket.absolute_url())
eq_(response.code, 200)
result = json.loads(response.body)
ok_('ticket' in result)
eq_(result['ticket']['stream'], ticket.stream.slug)
def test_confirm(self):
node = Node.me()
ticket = TicketFactory(destination=node, confirmed=False)
data = {'confirmed': True}
eq_(ticket.confirmed, False)
self.http_client.fetch(HTTPRequest(
self.get_url(ticket.absolute_url()), 'PUT', body=json.dumps(data)),
self.stop)
response = self.wait()
eq_(response.code, 200)
eq_(ticket.confirmed, True)
| mit | Python |
0ec3bfbd91e6e967bb2baae0307e76aafbb5aa91 | Simplify the base types | blackjax-devs/blackjax | blackjax/base.py | blackjax/base.py | from typing import NamedTuple, Tuple
from typing_extensions import Protocol
from .types import PRNGKey, PyTree
Position = PyTree
State = NamedTuple
Info = NamedTuple
class InitFn(Protocol):
"""A `Callable` used to initialize the kernel state.
Sampling algorithms often need to carry over some information between
steps, often to avoid computing the same quantity twice. Therefore the
kernels do not operate on the chain positions themselves, but on states that
contain this position and other information.
The `InitFn` returns the state corresponding to a chain position. This state
can then be passed to the `update` function of the `SamplingAlgorithm`.
"""
def __call__(self, position: Position) -> State:
"""The initialization function.
Parameters
----------
position
A chain position.
Returns
-------
The kernel state that corresponds to the position.
"""
class Kernel:
"""A transition kernel used as the `update` of a `SamplingAlgorithms`.
Kernels are pure functions and are idempotent. They necessarily take a
random state `rng_key` and the current kernel state (which contains the
current position) as parameters, and return a new state and some information
about the transition.
"""
def __call__(self, rng_key: PRNGKey, state: State) -> Tuple[State, Info]:
"""The transition kernel.
Parameters
----------
rng_key:
The random state used by JAX's random numbers generator.
state:
The current kernel state. The kernel state contains the current
chain position as well as other information the kernel needs to
carry over from the previous step.
Returns
-------
A new state, as well as a NamedTuple that contains extra information
about the transition that does not need to be carried over to the next
step.
"""
class SamplingAlgorithm(NamedTuple):
"""A pair of functions that implement a sampling algorithm.
Blackjax sampling algorithms are implemented as a pair of pure functions: a
kernel, which draws a new sample starting from the current state, and an
initialization function that creates a kernel state from a chain position.
As they represent Markov kernels, the kernel functions are pure functions
and do not have internal state. To save computation time they also operate
on states which contain the chain state and additional information that
needs to be carried over for the next step.
Attributes
---------
init:
A pure function which when called with the initial position and the
target density probability function will return the kernel's initial
state.
step:
A pure function that takes a rng key, a state and possibly some
parameters and returns a new state and some information about the
transition.
"""
init: InitFn
step: Kernel
| from typing import Callable, NamedTuple, Tuple
from typing_extensions import Protocol
from .types import PRNGKey, PyTree
Position = PyTree
State = NamedTuple
Info = NamedTuple
class InitFn(Protocol):
"""A `Callable` used to initialize the kernel state.
Sampling algorithms often need to carry over some information between
steps, often to avoid computing the same quantity twice. Therefore the
kernels do not operate on the chain positions themselves, but on states that
contain this position and other information.
The `InitFn` returns the state corresponding to a chain position. This state
can then be passed to the `update` function of the `SamplingAlgorithm`.
"""
def __call__(self, position: Position) -> State:
"""The initialization function.
Parameters
----------
position
A chain position.
Returns
-------
The kernel state that corresponds to the position.
"""
class Kernel:
"""A transition kernel used as the `update` of a `SamplingAlgorithms`.
Kernels are pure functions and are idempotent. They necessarily take a
random state `rng_key` and the current kernel state (which contains the
current position) as parameters, and return a new state and some information
about the transition.
"""
def __call__(self, rng_key: PRNGKey, state: State) -> Tuple[State, Info]:
"""The transition kernel.
Parameters
----------
rng_key:
The random state used by JAX's random numbers generator.
state:
The current kernel state. The kernel state contains the current
chain position as well as other information the kernel needs to
carry over from the previous step.
Returns
-------
A new state, as well as a NamedTuple that contains extra information
about the transition that does not need to be carried over to the next
step.
"""
class SamplingAlgorithm(NamedTuple):
"""A pair of functions that implement a sampling algorithm.
Blackjax sampling algorithms are implemented as a pair of pure functions: a
kernel, which draws a new sample starting from the current state, and an
initialization function that creates a kernel state from a chain position.
As they represent Markov kernels, the kernel functions are pure functions
and do not have internal state. To save computation time they also operate
on states which contain the chain state and additional information that
needs to be carried over for the next step.
Attributes
---------
init:
A pure function which when called with the initial position and the
target density probability function will return the kernel's initial
state.
step:
A pure function that takes a rng key, a state and possibly some
parameters and returns a new state and some information about the
transition.
"""
init: InitFn
step: Kernel
class SamplingAlgorithmGenerator(NamedTuple):
"""A pair of functions that implement a kenel generator.
This is meant to be a quick fix until we can pass the values of parameters
directly to the step function.
"""
init: InitFn
kernel: Callable
| apache-2.0 | Python |
f1b22cfcca8470a59a7bab261bbd2a46a7c2a2ed | Fix unicode issues in url translation | socib/django-socib-cms,socib/django-socib-cms | socib_cms/cmsutils/utils.py | socib_cms/cmsutils/utils.py | # coding: utf-8
import re
from django.core.urlresolvers import reverse
from django.conf import settings
def reverse_no_i18n(viewname, *args, **kwargs):
result = reverse(viewname, *args, **kwargs)
m = re.match(r'(/[^/]*)(/.*$)', result)
return m.groups()[1]
def change_url_language(url, language):
if hasattr(settings, 'LANGUAGES'):
languages = [lang[0] for lang in settings.LANGUAGES]
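# Capture an existing leading /<lang> segment so it can be swapped for the new language.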
m = re.match(r'/([^/]*)(/.*$)', url)
if m and m.groups()[0] in languages:
return u"/{lang}{url}".format(
lang=language,
url=m.groups()[1])
return u"/{lang}{url}".format(
lang=language,
url=url)
return url
| # coding: utf-8
import re
from django.core.urlresolvers import reverse
from django.conf import settings
def reverse_no_i18n(viewname, *args, **kwargs):
result = reverse(viewname, *args, **kwargs)
m = re.match(r'(/[^/]*)(/.*$)', result)
return m.groups()[1]
def change_url_language(url, language):
if hasattr(settings, 'LANGUAGES'):
languages = [lang[0] for lang in settings.LANGUAGES]
m = re.match(r'/([^/]*)(/.*$)', url)
if m and m.groups()[0] in languages:
return "/{lang}{url}".format(
lang=language,
url=m.groups()[1])
return "/{lang}{url}".format(
lang=language,
url=url)
return url
| mit | Python |
19b77442ee3cc80d8c7eaee6bde6c87d6a9e9277 | Test a fix for the wheel test | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | tests/integration/modules/saltutil.py | tests/integration/modules/saltutil.py | # -*- coding: utf-8 -*-
'''
Integration tests for the saltutil module.
'''
# Import Python libs
from __future__ import absolute_import
import time
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt libs
import integration
class SaltUtilModuleTest(integration.ModuleCase):
'''
Testcase for the saltutil execution module
'''
def setUp(self):
self.run_function('saltutil.refresh_pillar')
# Tests for the wheel function
def test_wheel_just_function(self):
'''
Tests using the saltutil.wheel function when passing only a function.
'''
# Wait for the pillar refresh to kick in, so that grains are ready to go
time.sleep(3)
ret = self.run_function('saltutil.wheel', ['minions.connected'])
self.assertIn('minion', ret['return'])
self.assertIn('sub_minion', ret['return'])
def test_wheel_with_arg(self):
'''
Tests using the saltutil.wheel function when passing a function and an arg.
'''
ret = self.run_function('saltutil.wheel', ['key.list', 'minion'])
self.assertEqual(ret['return'], {})
def test_wheel_no_arg_raise_error(self):
'''
Tests using the saltutil.wheel function when passing a function that requires
an arg, but one isn't supplied.
'''
self.assertRaises(TypeError, 'saltutil.wheel', ['key.list'])
def test_wheel_with_kwarg(self):
'''
Tests using the saltutil.wheel function when passing a function and a kwarg.
This function just generates a key pair, but doesn't do anything with it. We
just need this for testing purposes.
'''
ret = self.run_function('saltutil.wheel', ['key.gen'], keysize=1024)
self.assertIn('pub', ret['return'])
self.assertIn('priv', ret['return'])
if __name__ == '__main__':
from integration import run_tests
run_tests(SaltUtilModuleTest)
| # -*- coding: utf-8 -*-
'''
Integration tests for the saltutil module.
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt libs
import integration
class SaltUtilModuleTest(integration.ModuleCase):
'''
Testcase for the saltutil execution module
'''
# Tests for the wheel function
def test_wheel_just_function(self):
'''
Tests using the saltutil.wheel function when passing only a function.
'''
ret = self.run_function('saltutil.wheel', ['minions.connected'])
self.assertIn('minion', ret['return'])
self.assertIn('sub_minion', ret['return'])
def test_wheel_with_arg(self):
'''
Tests using the saltutil.wheel function when passing a function and an arg.
'''
ret = self.run_function('saltutil.wheel', ['key.list', 'minion'])
self.assertEqual(ret['return'], {})
def test_wheel_no_arg_raise_error(self):
'''
Tests using the saltutil.wheel function when passing a function that requires
an arg, but one isn't supplied.
'''
self.assertRaises(TypeError, 'saltutil.wheel', ['key.list'])
def test_wheel_with_kwarg(self):
'''
Tests using the saltutil.wheel function when passing a function and a kwarg.
This function just generates a key pair, but doesn't do anything with it. We
just need this for testing purposes.
'''
ret = self.run_function('saltutil.wheel', ['key.gen'], keysize=1024)
self.assertIn('pub', ret['return'])
self.assertIn('priv', ret['return'])
if __name__ == '__main__':
from integration import run_tests
run_tests(SaltUtilModuleTest)
| apache-2.0 | Python |
4338b097f97bb03be27c81a810a5fc652f842c8a | change cnab processor selection to method" | OCA/l10n-brazil,OCA/l10n-brazil,OCA/l10n-brazil | l10n_br_account_payment_brcobranca/models/account_payment_mode.py | l10n_br_account_payment_brcobranca/models/account_payment_mode.py | # Copyright (C) 2012-Today - KMEE (<http://kmee.com.br>).
# @author Luis Felipe Miléo - [email protected]
# @author Renato Lima - [email protected]
# Copyright (C) 2021-Today - Akretion (<http://www.akretion.com>).
# @author Magno Costa <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, models
class AccountPaymentMode(models.Model):
"""
Override Account Payment Mode
"""
_inherit = "account.payment.mode"
@api.model
def _selection_cnab_processor(self):
selection = super()._selection_cnab_processor()
selection.append(("brcobranca", "BRCobrança"))
return selection
| # Copyright (C) 2012-Today - KMEE (<http://kmee.com.br>).
# @author Luis Felipe Miléo - [email protected]
# @author Renato Lima - [email protected]
# Copyright (C) 2021-Today - Akretion (<http://www.akretion.com>).
# @author Magno Costa <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class AccountPaymentMode(models.Model):
"""
Override Account Payment Mode
"""
_inherit = "account.payment.mode"
cnab_processor = fields.Selection(
selection_add=[("brcobranca", "BRCobrança")],
)
| agpl-3.0 | Python |
0bbd10058ff58ca5160e74374c0b34f99c429ad8 | Update docstrings | choderalab/openpathsampling,dwhswenson/openpathsampling,openpathsampling/openpathsampling,choderalab/openpathsampling,dwhswenson/openpathsampling,choderalab/openpathsampling,openpathsampling/openpathsampling,openpathsampling/openpathsampling,dwhswenson/openpathsampling,openpathsampling/openpathsampling,dwhswenson/openpathsampling | openpathsampling/high_level/part_in_b_tps.py | openpathsampling/high_level/part_in_b_tps.py | from openpathsampling.high_level.network import FixedLengthTPSNetwork
from openpathsampling.high_level.transition import FixedLengthTPSTransition
import openpathsampling as paths
class PartInBFixedLengthTPSTransition(FixedLengthTPSTransition):
"""Fixed length TPS transition accepting any frame in the final state.
Transition that builds an ensemble used to facilitate the rate
calculation in fixed-length TPS. [1]_ Details in
:class:`.PartInBFixedLengthTPSNetwork`.
See also
--------
PartInBFixedLengthTPSNetwork
References
----------
.. [1] C. Dellago, P.G. Bolhuis, and D. Chandler. J. Chem. Phys. 110,
6617 (1999). http://dx.doi.org/10.1063/1.478569
"""
def _tps_ensemble(self, stateA, stateB):
return paths.SequentialEnsemble([
paths.LengthEnsemble(1) & paths.AllInXEnsemble(stateA),
paths.LengthEnsemble(self.length - 1) \
& paths.PartInXEnsemble(stateB)
])
class PartInBFixedLengthTPSNetwork(FixedLengthTPSNetwork):
"""Network for fixed-length TPS accepting any frame in the final state
This network samples a single path ensemble where the paths must begin
in an initial state, run for a fixed total number of frames, and must
have at least one frame in a final state. This was used to assist in
the flux part of the TPS rate calculation. [1]_ This version is
generalized to multiple states.
Parameters
----------
initial_states : (list of) :class:`.Volume`
acceptable initial states
final_states : (list of) :class:`.Volume`
acceptable final states
length : int
length of paths in the path ensemble, in frames
allow_self_transitions : bool
whether self-transitions (A->A) are allowed; default is False. For
this network, A->B->A transitions are *always* allowed.
References
----------
.. [1] C. Dellago, P.G. Bolhuis, and D. Chandler. J. Chem. Phys. 110,
6617 (1999). http://dx.doi.org/10.1063/1.478569
"""
TransitionType = PartInBFixedLengthTPSTransition
| from openpathsampling.high_level.network import FixedLengthTPSNetwork
from openpathsampling.high_level.transition import FixedLengthTPSTransition
import openpathsampling as paths
class PartInBFixedLengthTPSTransition(FixedLengthTPSTransition):
"""Fixed length TPS transition accepting any frame in the final state.
Implements the ensemble in [1]_. Details in :class:`.PartInBNetwork`.
See also
--------
PartInBNetwork
References
----------
.. [1] C. Dellago, P.G. Bolhuis, and D. Chandler. J. Chem. Phys. 110,
6617 (1999). http://dx.doi.org/10.1063/1.478569
"""
def _tps_ensemble(self, stateA, stateB):
return paths.SequentialEnsemble([
paths.LengthEnsemble(1) & paths.AllInXEnsemble(stateA),
paths.LengthEnsemble(self.length - 1) \
& paths.PartInXEnsemble(stateB)
])
class PartInBFixedLengthTPSNetwork(FixedLengthTPSNetwork):
"""Network for fixed-length TPS accepting any frame in the final state
This network samples a single path ensemble where the paths must begin
in an initial state, run for a fixed total number of frames, and must
have at least one frame in a final state. This was used to assist in
the flux part of the rate calculation in Ref. [1]_. This version is
generalized to multiple states.
Parameters
----------
initial_states : (list of) :class:`.Volume`
acceptable initial states
final_states : (list of) :class:`.Volume`
acceptable final states
length : int
length of paths in the path ensemble, in frames
allow_self_transitions : bool
whether self-transitions (A->A) are allowed; default is False. For
this network, A->B->A transitions are *always* allowed.
References
----------
.. [1] C. Dellago, P.G. Bolhuis, and D. Chandler. J. Chem. Phys. 110,
6617 (1999). http://dx.doi.org/10.1063/1.478569
"""
TransitionType = PartInBFixedLengthTPSTransition
| mit | Python |
5c0a19386894e36898a48e7f10f01008e284e0c9 | Update dependency bazelbuild/bazel to latest version | google/copybara,google/copybara,google/copybara | third_party/bazel.bzl | third_party/bazel.bzl | # Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is autogenerated by copybara, please do not edit.
bazel_version = "f259b8abfd575f544635f57f3bb6678d566ef309"
bazel_sha256 = "7e262ca5f5595a74d75953dfdcb75b271c2561a292972da7f3be449a3e8b28f6"
| # Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is autogenerated by copybara, please do not edit.
bazel_version = "03719362d021a241ef9af04f33db6efcfd18590a"
bazel_sha256 = "eff6cd1c44a7c3ec63163b415383a4fb7db6c99dfcda1288a586df9671346512"
| apache-2.0 | Python |
24f5afff6b8e65c633521189f4ac6bf4fbacbdb7 | Fix datapusher.wsgi to work with ckan-service-provider 0.0.2 | ESRC-CDRC/ckan-datapusher-service,governmentbg/ckan-datapusher,datawagovau/datapusher,tanmaythakur/datapusher,ckan/datapusher,OCHA-DAP/hdx-datapusher | deployment/datapusher.wsgi | deployment/datapusher.wsgi | import os
import sys
import hashlib
activate_this = os.path.join('/usr/lib/ckan/datapusher/bin/activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
import ckanserviceprovider.web as web
import datapusher.jobs as jobs
os.environ['JOB_CONFIG'] = '/etc/ckan/datapusher_settings.py'
web.init()
application = web.app
| import os
import sys
import hashlib
activate_this = os.path.join('/usr/lib/ckan/datapusher/bin/activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
import ckanserviceprovider.web as web
import datapusher.jobs as jobs
os.environ['JOB_CONFIG'] = '/etc/ckan/datapusher_settings.py'
web.configure()
application = web.app
| agpl-3.0 | Python |
efb420ddc6aa0052ecea6da84613da6e4cf1afc8 | Update Bazel to latest version | google/copybara,google/copybara,google/copybara | third_party/bazel.bzl | third_party/bazel.bzl | # Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bazel_version = "b017468d07da1e45282b9d153a4308fdace11eeb"
bazel_sha256 = "ce8dc5936238b6b7e27cdcdc13d481c94f20526fabfe20cbbceff17da83503e7"
| # Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bazel_version = "6fe70c2fef70b8a3da3aa3cbea26c6bf60f17e13"
bazel_sha256 = "ad525027ecc7056feb23fe96cfe8b28257a6c47a9d908e0bc4e0e0988bf61d28"
| apache-2.0 | Python |
8959d982ddc810f9c226ce36884521cf979a61f1 | add destroy cb | cr33dog/pyxfce,cr33dog/pyxfce,cr33dog/pyxfce | gui/tests/testicontheme.py | gui/tests/testicontheme.py | #!/usr/bin/env python
# doesn't work. segfault.
# TODO: other screens?
import pygtk
pygtk.require("2.0")
import gtk
import xfce4
widget = xfce4.gui.IconTheme(gtk.gdk.screen_get_default())
ic = widget.load("folder", 24)
print ic
icname = widget.lookup("folder", 24)
print icname
image = gtk.Image()
image.set_from_pixbuf(ic)
image.show()
w = gtk.Window()
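# Exit the GTK main loop when the window is closed.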
w.connect("destroy", lambda x: gtk.main_quit())
w.add(image)
w.show()
gtk.main()
| #!/usr/bin/env python
# doesn't work. segfault.
# TODO: other screens?
import pygtk
pygtk.require("2.0")
import gtk
import xfce4
widget = xfce4.gui.IconTheme(gtk.gdk.screen_get_default())
ic = widget.load("folder", 24)
print ic
icname = widget.lookup("folder", 24)
print icname
image = gtk.Image()
image.set_from_pixbuf(ic)
image.show()
w = gtk.Window()
w.add(image)
w.show()
gtk.main()
| bsd-3-clause | Python |
23c8044b84557dea940d527213022bfa19d28293 | test that Human is in Ensembl species | Proteogenomics/trackhub-creator,Proteogenomics/trackhub-creator | tests/test_ensembl_species_service.py | tests/test_ensembl_species_service.py | #
# Author : Manuel Bernal Llinares
# Project : trackhub-creator
# Timestamp : 04-07-2017 09:14
# ---
# © 2017 Manuel Bernal Llinares <[email protected]>
# All rights reserved.
#
"""
Unit Tests for Ensembl Species Service
"""
import unittest
# App modules
import ensembl.service
class TestEnsemblSpeciesService(unittest.TestCase):
__NCB_TAXONOMY_HUMAN = '9606'
def setUp(self):
self.ensembl_service = ensembl.service.get_service()
def test_get_species_data(self):
species_data_service = self.ensembl_service.get_species_data_service()
self.assertIsNotNone(species_data_service.get_species_data(),
"Requested RAW species data from Ensembl IS NOT None")
def test_count_of_species(self):
self.assertNotEqual(self.ensembl_service.get_species_data_service().count_ensembl_species(),
0,
"Ensembl has a non-zero number of species")
def test_human_species_is_present(self):
"""
Test that Human taxonomy is present, this unit test is also testing the indexing mechanism
:return: no returned value
"""
self.assertIsNotNone(
self.ensembl_service.get_species_data_service().get_species_entry_for_taxonomy_id(
self.__NCB_TAXONOMY_HUMAN), "Human NCBI taxonomy is in species data from Ensembl")
if __name__ == '__main__':
print("ERROR: This script is part of a pipeline collection and it is not meant to be run in stand alone mode")
| #
# Author : Manuel Bernal Llinares
# Project : trackhub-creator
# Timestamp : 04-07-2017 09:14
# ---
# © 2017 Manuel Bernal Llinares <[email protected]>
# All rights reserved.
#
"""
Unit Tests for Ensembl Species Service
"""
import unittest
# App modules
import ensembl.service
class TestEnsemblSpeciesService(unittest.TestCase):
__NCB_TAXONOMY_HUMAN = 9606
def setUp(self):
self.ensembl_service = ensembl.service.get_service()
def test_get_species_data(self):
species_data_service = self.ensembl_service.get_species_data_service()
self.assertIsNotNone(species_data_service.get_species_data(),
"Requested RAW species data from Ensembl IS NOT None")
def test_count_of_species(self):
self.assertNotEqual(self.ensembl_service.get_species_data_service().count_ensembl_species(),
0,
"Ensembl has a non-zero number of species")
def test_human_species_is_present(self):
"""
Test that Human taxonomy is present, this unit test is also testing the indexing mechanism
:return: no returned value
"""
#TODO
pass
if __name__ == '__main__':
print("ERROR: This script is part of a pipeline collection and it is not meant to be run in stand alone mode")
| apache-2.0 | Python |
d977a9ee9814264bd1d3080cadcd7e43b7c1d27e | Revert changes | Schevo/kiwi,Schevo/kiwi,Schevo/kiwi | examples/News/news2.py | examples/News/news2.py | #!/usr/bin/env python
from Kiwi2 import Delegates
from Kiwi2.Widgets.List import List, Column
from Kiwi2.initgtk import gtk
class NewsItem:
"""An instance that holds information about a news article."""
def __init__(self, title, author, url):
self.title, self.author, self.url = title, author, url
# Assemble friendly Pigdog.org news into NewsItem instances so they can
# be used in the CListDelegate
news = [
NewsItem("Smallpox Vaccinations for EVERYONE", "JRoyale",
"http://www.pigdog.org/auto/Power_Corrupts/link/2700.html"),
NewsItem("Is that uranium in your pocket or are you just happy to see me?",
"Baron Earl",
"http://www.pigdog.org/auto/bad_people/link/2699.html"),
NewsItem("Cut 'n Paste", "Baron Earl",
"http://www.pigdog.org/auto/ArtFux/link/2690.html"),
NewsItem("A Slippery Exit", "Reverend CyberSatan",
"http://www.pigdog.org/auto/TheCorporateFuck/link/2683.html"),
NewsItem("Those Crazy Dutch Have Resurrected Elvis", "Miss Conduct",
"http://www.pigdog.org/auto/viva_la_musica/link/2678.html")
]
# Specify the columns: one for each attribute of NewsItem, the URL
# column invisible by default
my_columns = [ Column("title", sorted=True),
Column("author"),
Column("url", title="URL", visible=False) ]
kiwilist = List(my_columns, news)
slave = Delegates.SlaveDelegate(toplevel=kiwilist)
slave.show_all()
gtk.main()
| #!/usr/bin/env python
from Kiwi2 import Delegates
from Kiwi2.Widgets.List import List, Column
from Kiwi2.initgtk import gtk
class NewsItem:
"""An instance that holds information about a news article."""
def __init__(self, title, author, url):
self.title, self.author, self.url = title, author, url
# Assemble friendly Pigdog.org news into NewsItem instances so they can
# be used in the CListDelegate
news = [
NewsItem("Smallpox Vaccinations for EVERYONE", "JRoyale",
"http://www.pigdog.org/auto/Power_Corrupts/link/2700.html"),
NewsItem("Is that uranium in your pocket or are you just happy to see me?",
"Baron Earl",
"http://www.pigdog.org/auto/bad_people/link/2699.html"),
NewsItem("Cut 'n Paste", "Baron Earl",
"http://www.pigdog.org/auto/ArtFux/link/2690.html"),
NewsItem("A Slippery Exit", "Reverend CyberSatan",
"http://www.pigdog.org/auto/TheCorporateFuck/link/2683.html"),
NewsItem("Those Crazy Dutch Have Resurrected Elvis", "Miss Conduct",
"http://www.pigdog.org/auto/viva_la_musica/link/2678.html")
]
# Specify the columns: one for each attribute of NewsItem, the URL
# column invisible by default
my_columns = [ Column("title", sorted=True),
Column("author", justify=gtk.JUSTIFY_RIGHT),
Column("url", title="URL", visible=False) ]
kiwilist = List(my_columns, news)
w = gtk.Window()
w.set_size_request(600, 250)
w.add(kiwilist)
w.show_all()
gtk.main()
| lgpl-2.1 | Python |
7c91d556220088ea5286611f3674aaa88f3a6340 | Add failing test for "Crash if session was flushed before commit (with validity strategy)" | kvesteri/sqlalchemy-continuum,rmoorman/sqlalchemy-continuum,piotr-dobrogost/sqlalchemy-continuum,avilaton/sqlalchemy-continuum | tests/test_exotic_operation_combos.py | tests/test_exotic_operation_combos.py | from six import PY3
from tests import TestCase
class TestExoticOperationCombos(TestCase):
def test_insert_deleted_object(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
self.session.delete(article)
article2 = self.Article(id=article.id, name=u'Some article')
self.session.add(article2)
self.session.commit()
assert article2.versions.count() == 2
assert article2.versions[0].operation_type == 0
assert article2.versions[1].operation_type == 0
def test_insert_deleted_and_flushed_object(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
self.session.delete(article)
self.session.flush()
article2 = self.Article(id=article.id, name=u'Some article')
self.session.add(article2)
self.session.commit()
assert article2.versions.count() == 2
assert article2.versions[0].operation_type == 0
assert article2.versions[1].operation_type == 1
def test_insert_flushed_object(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.flush()
self.session.commit()
assert article.versions.count() == 1
assert article.versions[0].operation_type == 0
def test_replace_deleted_object_with_update(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
article2 = self.Article()
article2.name = u'Another article'
article2.content = u'Some other content'
self.session.add(article)
self.session.add(article2)
self.session.commit()
self.session.delete(article)
self.session.flush()
article2.id = article.id
self.session.commit()
assert article2.versions.count() == 2
assert article2.versions[0].operation_type == 0
assert article2.versions[1].operation_type == 1
class TestExoticOperationCombosWithValidityStrategy(TestExoticOperationCombos):
versioning_strategy = 'validity'
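# Re-runs every combination above with the 'validity' strategy, where the
# flush-before-commit crash in the subject was reported.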
| from six import PY3
from tests import TestCase
class TestExoticOperationCombos(TestCase):
def test_insert_deleted_object(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
self.session.delete(article)
article2 = self.Article(id=article.id, name=u'Some article')
self.session.add(article2)
self.session.commit()
assert article2.versions.count() == 2
assert article2.versions[0].operation_type == 0
assert article2.versions[1].operation_type == 0
def test_insert_deleted_and_flushed_object(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
self.session.delete(article)
self.session.flush()
article2 = self.Article(id=article.id, name=u'Some article')
self.session.add(article2)
self.session.commit()
assert article2.versions.count() == 2
assert article2.versions[0].operation_type == 0
assert article2.versions[1].operation_type == 1
def test_insert_flushed_object(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.flush()
self.session.commit()
assert article.versions.count() == 1
assert article.versions[0].operation_type == 0
def test_replace_deleted_object_with_update(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
article2 = self.Article()
article2.name = u'Another article'
article2.content = u'Some other content'
self.session.add(article)
self.session.add(article2)
self.session.commit()
self.session.delete(article)
self.session.flush()
article2.id = article.id
self.session.commit()
assert article2.versions.count() == 2
assert article2.versions[0].operation_type == 0
assert article2.versions[1].operation_type == 1
| bsd-3-clause | Python |
e816b1f63c299141c6ad907c860d2c5411829405 | Simplify aggregator code | alephdata/aleph,alephdata/aleph,pudo/aleph,pudo/aleph,pudo/aleph,alephdata/aleph,alephdata/aleph,alephdata/aleph | aleph/analysis/aggregate.py | aleph/analysis/aggregate.py | import logging
from collections import defaultdict
from followthemoney.types import registry
from aleph.analysis.util import tag_key
from aleph.analysis.util import TAG_COUNTRY, TAG_PHONE
from aleph.analysis.util import TAG_PERSON, TAG_COMPANY
log = logging.getLogger(__name__)
class TagAggregator(object):
MAX_TAGS = 10000
CUTOFFS = {
TAG_COUNTRY: .3,
TAG_PERSON: .003,
TAG_COMPANY: .003,
TAG_PHONE: .05,
}
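# Minimum share of a property's total mentions that a tag must reach before
# it is emitted (see prop_cutoff below).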
def __init__(self):
self.values = defaultdict(list)
self.types = defaultdict(int)
def add(self, prop, value):
key = tag_key(value)
if key is None:
return
if (key, prop) not in self.values:
if len(self.values) > self.MAX_TAGS:
return
self.values[(key, prop)].append(value)
self.types[prop] += 1
def prop_cutoff(self, prop):
freq = self.CUTOFFS.get(prop, 0)
return self.types.get(prop, 0) * freq
@property
def entities(self):
for (_, prop), tags in self.values.items():
# skip entities that do not meet a threshold of relevance:
cutoff = self.prop_cutoff(prop)
if len(tags) < cutoff:
continue
label = tags[0]
if prop in (TAG_COMPANY, TAG_PERSON):
label = registry.name.pick(tags)
yield label, prop
def __len__(self):
return len(self.values)
| import logging
from Levenshtein import setmedian
from aleph.analysis.util import tag_key
from aleph.analysis.util import TAG_COUNTRY, TAG_LANGUAGE, TAG_PHONE
from aleph.analysis.util import TAG_PERSON, TAG_COMPANY
log = logging.getLogger(__name__)
class TagAggregator(object):
MAX_TAGS = 10000
CUTOFFS = {
TAG_COUNTRY: .2,
TAG_LANGUAGE: .3,
TAG_PERSON: .003,
TAG_COMPANY: .003,
TAG_PHONE: .05,
}
def __init__(self):
self.tags = {}
self.types = {}
def add(self, type_, tag):
key = tag_key(tag)
if key is None:
return
if (key, type_) not in self.tags:
self.tags[(key, type_)] = []
self.tags[(key, type_)].append(tag)
if type_ not in self.types:
if len(self.types) > self.MAX_TAGS:
return
self.types[type_] = 0
self.types[type_] += 1
def type_cutoff(self, type_):
freq = self.CUTOFFS.get(type_, 0)
return self.types.get(type_, 0) * freq
@property
def entities(self):
for (key, type_), tags in self.tags.items():
# skip entities that do not meet a threshold of relevance:
cutoff = self.type_cutoff(type_)
if len(tags) < cutoff:
continue
label = tags[0]
if type_ in (TAG_COMPANY, TAG_PERSON) and len(set(tags)) > 0:
label = setmedian(tags)
yield label, type_
def __len__(self):
return len(self.tags)
| mit | Python |
ca06a55d096eb4c67bf70c479107128b73087ab9 | integrate update | cyruscyliu/diffentropy | w1_integrate.py | w1_integrate.py | from sympy import integrate, symbols, log
# if 0 <= x < 0.25:
# return float(0)
# elif 0.25 <= x < 0.5:
# return 16.0 * (x - 0.25)
# elif 0.5 <= x < 0.75:
# return -16.0 * (x - 0.75)
# elif 0.75 < x <= 1:
# return float(0)
# h(f) = integrate(-f(x)lnf(x), (x, 0, 1))
x = symbols('x')
left = integrate(-16.0 * (x - 0.25) * log(16.0 * (x - 0.25)), (x, 0.25, 0.5))
right = integrate(16.0 * (x - 0.75) * log(-16.0 * (x - 0.75)), (x, 0.5, 0.75))
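# Both integrals are in nats; the 1.44 factor below (~log2 e) converts the
# entropy to bits.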
with open('w1_integrate_result.txt', 'w') as f:
f.write('left:{0} bit\n'.format(left * 1.44))
f.flush()
f.write('right:{0} bit\n'.format(right * 1.44))
f.flush()
f.write('all:{0} bit\n'.format((left + right) * 1.44))
f.flush()
# (no explicit close needed: the with-block closes the file on exit)
| from sympy import integrate, symbols, log
# if 0 <= x < 0.25:
# return float(0)
# elif 0.25 <= x < 0.5:
# return 16.0 * (x - 0.25)
# elif 0.5 <= x < 0.75:
# return -16.0 * (x - 0.75)
# elif 0.75 < x <= 1:
# return float(0)
# h(f) = integrate(-f(x)lnf(x), (x, 0, 1))
x = symbols('x')
left = integrate(-16.0 * (x - 0.25) * log(16.0 * (x - 0.25)), (x, 0.25, 0.5))
right = integrate(16.0 * (x - 0.75) * log(-16.0 * (x - 0.75)), (x, 0.5, 0.75))
print 'left {0}'.format(left)
print 'right {0}'.format(right)
print 'all {0}'.format(left + right)
| mit | Python |
f2bcbddab48eff06df78faff1ebb47c28adb4e0d | fix schema test | altair-viz/altair,jakevdp/altair,ellisonbg/altair | altair/tests/test_schema.py | altair/tests/test_schema.py | from altair.schema import load_schema
def test_schema():
schema = load_schema()
assert schema["$schema"]=="http://json-schema.org/draft-04/schema#"
| from altair.schema import SCHEMA
def test_schema():
assert SCHEMA["$schema"]=="http://json-schema.org/draft-04/schema#"
| bsd-3-clause | Python |
48f4c8dba40cb2fe03a74a7a4d7d979892601ddc | use __file__ to determine library path | avihoo/samplemod,azafred/skeletor,azafred/samplemod,Cyclid/example-python-project,introini/ourlist,introini/ourlist,johicks/twitterbias,introini/ourlist,introini/ourlist,tilt-silvie/samplemod,azafred/samplemod,azafred/skeletor,kennethreitz/samplemod | tests/context.py | tests/context.py | # -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
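# Prepend the repository root (one level above tests/) so `sample` can be
# imported without installing the package.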
import sample
| # -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
import sample | bsd-2-clause | Python |
3c3013b8e7de5e1f8ae57e1d4a8b672cab8f6c47 | Test helpers : Message box, click yes vs enter | ucoin-io/cutecoin,ucoin-io/cutecoin,ucoin-io/cutecoin | tests/helpers.py | tests/helpers.py | from PyQt5.QtWidgets import QApplication, QMessageBox, QDialog, QFileDialog
from PyQt5.QtCore import Qt
from PyQt5.QtTest import QTest
def click_on_top_message_box():
topWidgets = QApplication.topLevelWidgets()
for w in topWidgets:
if isinstance(w, QMessageBox):
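# Click the Yes button directly rather than sending an Enter key press.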
QTest.mouseClick(w.button(QMessageBox.Yes), Qt.LeftButton)
elif isinstance(w, QDialog) and w.windowTitle() == "Registration":
QTest.keyClick(w, Qt.Key_Enter)
def select_file_dialog(filename):
topWidgets = QApplication.topLevelWidgets()
for w in topWidgets:
if isinstance(w, QFileDialog) and w.isVisible():
w.hide()
w.selectFile(filename)
w.show()
w.accept()
| from PyQt5.QtWidgets import QApplication, QMessageBox, QDialog, QFileDialog
from PyQt5.QtCore import Qt
from PyQt5.QtTest import QTest
def click_on_top_message_box():
topWidgets = QApplication.topLevelWidgets()
for w in topWidgets:
if isinstance(w, QMessageBox):
QTest.keyClick(w, Qt.Key_Enter)
elif isinstance(w, QDialog) and w.windowTitle() == "Registration":
QTest.keyClick(w, Qt.Key_Enter)
def select_file_dialog(filename):
topWidgets = QApplication.topLevelWidgets()
for w in topWidgets:
if isinstance(w, QFileDialog) and w.isVisible():
w.hide()
w.selectFile(filename)
w.show()
w.accept()
| mit | Python |
1ab939ed7da45e7f6ff113b7e71017b28ee877a2 | Use 'with' keyword while opening file in tests/helpers.py | razorpay/razorpay-python | tests/helpers.py | tests/helpers.py | import razorpay
import os
import unittest
def mock_file(filename):
if not filename:
return ''
file_dir = os.path.dirname(__file__)
file_path = "{}/mocks/{}.json".format(file_dir, filename)
with open(file_path) as f:
mock_file_data = f.read()
return mock_file_data
class ClientTestCase(unittest.TestCase):
def setUp(self):
self.base_url = 'https://api.razorpay.com/v1'
self.secondary_url = 'https://test-api.razorpay.com/v1'
self.payment_id = 'fake_payment_id'
self.refund_id = 'fake_refund_id'
self.card_id = 'fake_card_id'
self.customer_id = 'fake_customer_id'
self.token_id = 'fake_token_id'
self.addon_id = 'fake_addon_id'
self.subscription_id = 'fake_subscription_id'
self.plan_id = 'fake_plan_id'
self.settlement_id = 'fake_settlement_id'
self.client = razorpay.Client(auth=('key_id', 'key_secret'))
self.secondary_client = razorpay.Client(auth=('key_id', 'key_secret'),
base_url=self.secondary_url)
| import razorpay
import os
import unittest
def mock_file(filename):
if not filename:
return ''
file_dir = os.path.dirname(__file__)
file_path = "{}/mocks/{}.json".format(file_dir, filename)
return open(file_path).read()
class ClientTestCase(unittest.TestCase):
def setUp(self):
self.base_url = 'https://api.razorpay.com/v1'
self.secondary_url = 'https://test-api.razorpay.com/v1'
self.payment_id = 'fake_payment_id'
self.refund_id = 'fake_refund_id'
self.card_id = 'fake_card_id'
self.customer_id = 'fake_customer_id'
self.token_id = 'fake_token_id'
self.addon_id = 'fake_addon_id'
self.subscription_id = 'fake_subscription_id'
self.plan_id = 'fake_plan_id'
self.settlement_id = 'fake_settlement_id'
self.client = razorpay.Client(auth=('key_id', 'key_secret'))
self.secondary_client = razorpay.Client(auth=('key_id', 'key_secret'),
base_url=self.secondary_url)
| mit | Python |
9f069cf4fe634f34ccda29c18c03c63db04fe199 | Update Funcaptcha example | ad-m/python-anticaptcha | examples/funcaptcha.py | examples/funcaptcha.py | from urllib.parse import urlparse
import requests
from os import environ
import re
from random import choice
from python_anticaptcha import AnticaptchaClient, FunCaptchaTask
api_key = environ['KEY']
site_key_pattern = 'data-pkey="(.+?)"'
url = 'https://www.funcaptcha.com/demo/'
client = AnticaptchaClient(api_key)
session = requests.Session()
UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 ' \
'(KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
session.headers = {'User-Agent': UA}
proxy_urls = environ['PROXY_URL'].split(',')
def parse_url(url):
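# Split a proxy URL (scheme://user:pass@host:port) into the keyword
# arguments FunCaptchaTask expects for its proxy.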
parsed = urlparse(url)
return dict(
proxy_type=parsed.scheme,
proxy_address=parsed.hostname,
proxy_port=parsed.port,
proxy_login=parsed.username,
proxy_password=parsed.password
)
def get_form_html():
return session.get(url).text
def get_token(form_html):
proxy_url = choice(proxy_urls)
proxy = parse_url(proxy_url)
site_key = re.search(site_key_pattern, form_html).group(1)
task = FunCaptchaTask(url, site_key, proxy=proxy, user_agent=UA)
job = client.createTask(task)
job.join(maximum_time=10**4)
return job.get_token_response()
def process():
html = get_form_html()
return get_token(html)
if __name__ == '__main__':
print(process())
| import requests
from os import environ
import re
from random import choice
from python_anticaptcha import AnticaptchaClient, FunCaptchaTask, Proxy
api_key = environ['KEY']
site_key_pattern = 'data-pkey="(.+?)"'
url = 'https://www.funcaptcha.com/demo/'
client = AnticaptchaClient(api_key)
session = requests.Session()
UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 ' \
'(KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
session.headers = {'User-Agent': UA}
proxy_urls = environ['PROXY_URL'].split(',')
def get_form_html():
return session.get(url).text
def get_token(form_html):
proxy_url = choice(proxy_urls)
proxy = Proxy.parse_url(proxy_url)
site_key = re.search(site_key_pattern, form_html).group(1)
task = FunCaptchaTask(url, site_key, proxy=proxy, user_agent=UA)
job = client.createTask(task)
job.join(maximum_time=10**4)
return job.get_token_response()
def process():
html = get_form_html()
return get_token(html)
if __name__ == '__main__':
print(process())
| mit | Python |
d2fb1f22be6c6434873f2bcafb6b8a9b714acde9 | Use fail signal in fail_archive_on_error decorator | amyshi188/osf.io,caneruguz/osf.io,TomHeatwole/osf.io,SSJohns/osf.io,mluke93/osf.io,DanielSBrown/osf.io,Nesiehr/osf.io,jeffreyliu3230/osf.io,chrisseto/osf.io,acshi/osf.io,mattclark/osf.io,billyhunt/osf.io,caneruguz/osf.io,cosenal/osf.io,SSJohns/osf.io,njantrania/osf.io,mattclark/osf.io,alexschiller/osf.io,samchrisinger/osf.io,HarryRybacki/osf.io,MerlinZhang/osf.io,mluo613/osf.io,TomBaxter/osf.io,mattclark/osf.io,kch8qx/osf.io,baylee-d/osf.io,chennan47/osf.io,asanfilippo7/osf.io,asanfilippo7/osf.io,amyshi188/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,brandonPurvis/osf.io,acshi/osf.io,bdyetton/prettychart,danielneis/osf.io,brianjgeiger/osf.io,kch8qx/osf.io,emetsger/osf.io,reinaH/osf.io,ticklemepierce/osf.io,felliott/osf.io,hmoco/osf.io,SSJohns/osf.io,danielneis/osf.io,wearpants/osf.io,HalcyonChimera/osf.io,jmcarp/osf.io,TomHeatwole/osf.io,ZobairAlijan/osf.io,mfraezz/osf.io,cldershem/osf.io,adlius/osf.io,laurenrevere/osf.io,jolene-esposito/osf.io,sloria/osf.io,Johnetordoff/osf.io,doublebits/osf.io,MerlinZhang/osf.io,caneruguz/osf.io,monikagrabowska/osf.io,Nesiehr/osf.io,billyhunt/osf.io,crcresearch/osf.io,njantrania/osf.io,brianjgeiger/osf.io,TomBaxter/osf.io,mfraezz/osf.io,hmoco/osf.io,jinluyuan/osf.io,monikagrabowska/osf.io,danielneis/osf.io,aaxelb/osf.io,Nesiehr/osf.io,caseyrygt/osf.io,kwierman/osf.io,cldershem/osf.io,brandonPurvis/osf.io,cwisecarver/osf.io,fabianvf/osf.io,amyshi188/osf.io,petermalcolm/osf.io,adlius/osf.io,rdhyee/osf.io,brandonPurvis/osf.io,laurenrevere/osf.io,rdhyee/osf.io,samanehsan/osf.io,haoyuchen1992/osf.io,asanfilippo7/osf.io,dplorimer/osf,leb2dg/osf.io,mfraezz/osf.io,abought/osf.io,amyshi188/osf.io,doublebits/osf.io,sbt9uc/osf.io,lyndsysimon/osf.io,dplorimer/osf,caneruguz/osf.io,laurenrevere/osf.io,ticklemepierce/osf.io,lyndsysimon/osf.io,DanielSBrown/osf.io,jmcarp/osf.io,baylee-d/osf.io,GageGaskins/osf.io,chennan47/osf.io,fabianvf/osf.io,cldershem/osf.io,jmcarp/osf.io,jnayak1/osf.io,binoculars/osf.io,zamattiac/osf.io,acshi/osf.io,crcresearch/osf.io,jinluyuan/osf.io,jnayak1/osf.io,binoculars/osf.io,Ghalko/osf.io,jinluyuan/osf.io,cosenal/osf.io,RomanZWang/osf.io,wearpants/osf.io,cslzchen/osf.io,ticklemepierce/osf.io,wearpants/osf.io,samchrisinger/osf.io,SSJohns/osf.io,jeffreyliu3230/osf.io,abought/osf.io,zachjanicki/osf.io,rdhyee/osf.io,DanielSBrown/osf.io,bdyetton/prettychart,MerlinZhang/osf.io,pattisdr/osf.io,chennan47/osf.io,bdyetton/prettychart,caseyrygt/osf.io,samanehsan/osf.io,pattisdr/osf.io,reinaH/osf.io,sloria/osf.io,caseyrollins/osf.io,zamattiac/osf.io,bdyetton/prettychart,caseyrollins/osf.io,TomHeatwole/osf.io,jeffreyliu3230/osf.io,cldershem/osf.io,mluo613/osf.io,KAsante95/osf.io,lyndsysimon/osf.io,zamattiac/osf.io,ZobairAlijan/osf.io,petermalcolm/osf.io,billyhunt/osf.io,chrisseto/osf.io,GageGaskins/osf.io,RomanZWang/osf.io,Ghalko/osf.io,petermalcolm/osf.io,zachjanicki/osf.io,TomHeatwole/osf.io,ckc6cz/osf.io,njantrania/osf.io,billyhunt/osf.io,CenterForOpenScience/osf.io,erinspace/osf.io,ckc6cz/osf.io,alexschiller/osf.io,DanielSBrown/osf.io,leb2dg/osf.io,cwisecarver/osf.io,billyhunt/osf.io,GageGaskins/osf.io,dplorimer/osf,arpitar/osf.io,dplorimer/osf,baylee-d/osf.io,adlius/osf.io,monikagrabowska/osf.io,HalcyonChimera/osf.io,doublebits/osf.io,kwierman/osf.io,adlius/osf.io,aaxelb/osf.io,jnayak1/osf.io,haoyuchen1992/osf.io,KAsante95/osf.io,cwisecarver/osf.io,hmoco/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,MerlinZhang/osf.io,RomanZWang/osf.io,RomanZWang/
osf.io,ticklemepierce/osf.io,pattisdr/osf.io,erinspace/osf.io,arpitar/osf.io,icereval/osf.io,felliott/osf.io,KAsante95/osf.io,danielneis/osf.io,leb2dg/osf.io,caseyrygt/osf.io,GageGaskins/osf.io,petermalcolm/osf.io,mluo613/osf.io,KAsante95/osf.io,HalcyonChimera/osf.io,jeffreyliu3230/osf.io,zachjanicki/osf.io,zamattiac/osf.io,HarryRybacki/osf.io,ZobairAlijan/osf.io,cwisecarver/osf.io,njantrania/osf.io,chrisseto/osf.io,monikagrabowska/osf.io,CenterForOpenScience/osf.io,emetsger/osf.io,cosenal/osf.io,sbt9uc/osf.io,RomanZWang/osf.io,hmoco/osf.io,reinaH/osf.io,Ghalko/osf.io,icereval/osf.io,cslzchen/osf.io,arpitar/osf.io,reinaH/osf.io,zachjanicki/osf.io,jolene-esposito/osf.io,fabianvf/osf.io,alexschiller/osf.io,GageGaskins/osf.io,cslzchen/osf.io,brandonPurvis/osf.io,samchrisinger/osf.io,rdhyee/osf.io,Ghalko/osf.io,Johnetordoff/osf.io,mluo613/osf.io,brandonPurvis/osf.io,haoyuchen1992/osf.io,brianjgeiger/osf.io,samchrisinger/osf.io,caseyrygt/osf.io,erinspace/osf.io,kwierman/osf.io,monikagrabowska/osf.io,aaxelb/osf.io,HarryRybacki/osf.io,KAsante95/osf.io,leb2dg/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,kwierman/osf.io,caseyrollins/osf.io,sbt9uc/osf.io,samanehsan/osf.io,wearpants/osf.io,abought/osf.io,ckc6cz/osf.io,crcresearch/osf.io,chrisseto/osf.io,lyndsysimon/osf.io,jolene-esposito/osf.io,fabianvf/osf.io,binoculars/osf.io,kch8qx/osf.io,icereval/osf.io,mluke93/osf.io,Johnetordoff/osf.io,jmcarp/osf.io,mluo613/osf.io,acshi/osf.io,asanfilippo7/osf.io,saradbowman/osf.io,Nesiehr/osf.io,kch8qx/osf.io,mluke93/osf.io,mfraezz/osf.io,TomBaxter/osf.io,samanehsan/osf.io,mluke93/osf.io,arpitar/osf.io,jolene-esposito/osf.io,alexschiller/osf.io,cslzchen/osf.io,sbt9uc/osf.io,ZobairAlijan/osf.io,haoyuchen1992/osf.io,jinluyuan/osf.io,alexschiller/osf.io,jnayak1/osf.io,cosenal/osf.io,sloria/osf.io,HarryRybacki/osf.io,ckc6cz/osf.io,doublebits/osf.io,saradbowman/osf.io,abought/osf.io,doublebits/osf.io,kch8qx/osf.io,Johnetordoff/osf.io,emetsger/osf.io,emetsger/osf.io,acshi/osf.io,aaxelb/osf.io | website/archiver/decorators.py | website/archiver/decorators.py | import functools
from framework.exceptions import HTTPError
from website.project.decorators import _inject_nodes
from website.archiver import ARCHIVER_UNCAUGHT_ERROR
from website.archiver import signals
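# On HTTPError from the wrapped view, broadcast the archive_fail signal for
# the registration (uncaught-error status) instead of re-raising.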
def fail_archive_on_error(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as e:
_inject_nodes(kwargs)
registration = kwargs['node']
signals.send.archive_fail(
registration,
ARCHIVER_UNCAUGHT_ERROR,
[str(e)]
)
return wrapped
| import functools
from framework.exceptions import HTTPError
from website.project.decorators import _inject_nodes
from website.archiver import ARCHIVER_UNCAUGHT_ERROR
from website.archiver import utils
def fail_archive_on_error(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as e:
_inject_nodes(kwargs)
registration = kwargs['node']
utils.handle_archive_fail(
ARCHIVER_UNCAUGHT_ERROR,
registration.registered_from,
registration,
registration.registered_user,
str(e)
)
return wrapped
| apache-2.0 | Python |
3caa77b0f4b43e274eba21a8d759335f7833b99d | Change OSF_COOKIE_DOMAIN to None in local-dist.py | zachjanicki/osf.io,DanielSBrown/osf.io,DanielSBrown/osf.io,rdhyee/osf.io,TomBaxter/osf.io,adlius/osf.io,mfraezz/osf.io,danielneis/osf.io,cosenal/osf.io,jmcarp/osf.io,cwisecarver/osf.io,caseyrygt/osf.io,danielneis/osf.io,haoyuchen1992/osf.io,crcresearch/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,icereval/osf.io,Nesiehr/osf.io,felliott/osf.io,billyhunt/osf.io,mfraezz/osf.io,njantrania/osf.io,kwierman/osf.io,ticklemepierce/osf.io,MerlinZhang/osf.io,TomHeatwole/osf.io,mluke93/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,acshi/osf.io,cwisecarver/osf.io,erinspace/osf.io,Nesiehr/osf.io,njantrania/osf.io,mfraezz/osf.io,caseyrollins/osf.io,alexschiller/osf.io,doublebits/osf.io,monikagrabowska/osf.io,caneruguz/osf.io,abought/osf.io,SSJohns/osf.io,ckc6cz/osf.io,billyhunt/osf.io,wearpants/osf.io,samanehsan/osf.io,petermalcolm/osf.io,cwisecarver/osf.io,cwisecarver/osf.io,hmoco/osf.io,felliott/osf.io,asanfilippo7/osf.io,TomHeatwole/osf.io,arpitar/osf.io,adlius/osf.io,saradbowman/osf.io,zamattiac/osf.io,brianjgeiger/osf.io,hmoco/osf.io,TomBaxter/osf.io,HalcyonChimera/osf.io,asanfilippo7/osf.io,amyshi188/osf.io,chrisseto/osf.io,ZobairAlijan/osf.io,acshi/osf.io,leb2dg/osf.io,GageGaskins/osf.io,RomanZWang/osf.io,wearpants/osf.io,monikagrabowska/osf.io,hmoco/osf.io,SSJohns/osf.io,hmoco/osf.io,KAsante95/osf.io,jolene-esposito/osf.io,ckc6cz/osf.io,baylee-d/osf.io,cosenal/osf.io,caseyrygt/osf.io,ZobairAlijan/osf.io,asanfilippo7/osf.io,chennan47/osf.io,aaxelb/osf.io,lyndsysimon/osf.io,HalcyonChimera/osf.io,brandonPurvis/osf.io,zamattiac/osf.io,amyshi188/osf.io,SSJohns/osf.io,pattisdr/osf.io,alexschiller/osf.io,saradbowman/osf.io,mluo613/osf.io,emetsger/osf.io,RomanZWang/osf.io,doublebits/osf.io,Johnetordoff/osf.io,Johnetordoff/osf.io,Ghalko/osf.io,kch8qx/osf.io,adlius/osf.io,jmcarp/osf.io,mluke93/osf.io,binoculars/osf.io,emetsger/osf.io,sbt9uc/osf.io,lyndsysimon/osf.io,alexschiller/osf.io,GageGaskins/osf.io,kwierman/osf.io,brianjgeiger/osf.io,jnayak1/osf.io,haoyuchen1992/osf.io,TomHeatwole/osf.io,wearpants/osf.io,sloria/osf.io,samchrisinger/osf.io,caseyrollins/osf.io,MerlinZhang/osf.io,doublebits/osf.io,Ghalko/osf.io,ZobairAlijan/osf.io,HalcyonChimera/osf.io,samchrisinger/osf.io,icereval/osf.io,rdhyee/osf.io,acshi/osf.io,sbt9uc/osf.io,mattclark/osf.io,erinspace/osf.io,cosenal/osf.io,aaxelb/osf.io,monikagrabowska/osf.io,monikagrabowska/osf.io,leb2dg/osf.io,mattclark/osf.io,ZobairAlijan/osf.io,KAsante95/osf.io,binoculars/osf.io,haoyuchen1992/osf.io,alexschiller/osf.io,petermalcolm/osf.io,mluo613/osf.io,abought/osf.io,MerlinZhang/osf.io,adlius/osf.io,aaxelb/osf.io,laurenrevere/osf.io,laurenrevere/osf.io,lyndsysimon/osf.io,mluo613/osf.io,brandonPurvis/osf.io,ticklemepierce/osf.io,sloria/osf.io,doublebits/osf.io,samchrisinger/osf.io,mfraezz/osf.io,jmcarp/osf.io,caneruguz/osf.io,doublebits/osf.io,arpitar/osf.io,billyhunt/osf.io,felliott/osf.io,kch8qx/osf.io,emetsger/osf.io,samanehsan/osf.io,samchrisinger/osf.io,ticklemepierce/osf.io,ticklemepierce/osf.io,zamattiac/osf.io,RomanZWang/osf.io,chennan47/osf.io,caseyrygt/osf.io,crcresearch/osf.io,jnayak1/osf.io,Nesiehr/osf.io,cslzchen/osf.io,leb2dg/osf.io,abought/osf.io,njantrania/osf.io,RomanZWang/osf.io,GageGaskins/osf.io,Nesiehr/osf.io,jolene-esposito/osf.io,kch8qx/osf.io,cslzchen/osf.io,baylee-d/osf.io,CenterForOpenScience/osf.io,amyshi188/osf.io,Ghalko/osf.io,kwierman/osf.io,CenterForOpenScience/osf.io,laurenrevere/osf.io,KAsante95/osf.io,chrisseto/osf.io,TomBaxter/o
sf.io,Ghalko/osf.io,cosenal/osf.io,danielneis/osf.io,zachjanicki/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,TomHeatwole/osf.io,acshi/osf.io,emetsger/osf.io,chrisseto/osf.io,abought/osf.io,jolene-esposito/osf.io,cslzchen/osf.io,kwierman/osf.io,sloria/osf.io,danielneis/osf.io,KAsante95/osf.io,CenterForOpenScience/osf.io,zachjanicki/osf.io,samanehsan/osf.io,Johnetordoff/osf.io,petermalcolm/osf.io,brandonPurvis/osf.io,jnayak1/osf.io,erinspace/osf.io,GageGaskins/osf.io,KAsante95/osf.io,brandonPurvis/osf.io,mluke93/osf.io,petermalcolm/osf.io,pattisdr/osf.io,caneruguz/osf.io,brandonPurvis/osf.io,caneruguz/osf.io,billyhunt/osf.io,jmcarp/osf.io,arpitar/osf.io,kch8qx/osf.io,mluo613/osf.io,SSJohns/osf.io,ckc6cz/osf.io,DanielSBrown/osf.io,kch8qx/osf.io,baylee-d/osf.io,zachjanicki/osf.io,billyhunt/osf.io,mluo613/osf.io,njantrania/osf.io,wearpants/osf.io,binoculars/osf.io,monikagrabowska/osf.io,MerlinZhang/osf.io,alexschiller/osf.io,HalcyonChimera/osf.io,ckc6cz/osf.io,rdhyee/osf.io,aaxelb/osf.io,caseyrollins/osf.io,haoyuchen1992/osf.io,rdhyee/osf.io,mattclark/osf.io,crcresearch/osf.io,asanfilippo7/osf.io,chennan47/osf.io,mluke93/osf.io,DanielSBrown/osf.io,caseyrygt/osf.io,sbt9uc/osf.io,amyshi188/osf.io,arpitar/osf.io,pattisdr/osf.io,jolene-esposito/osf.io,acshi/osf.io,leb2dg/osf.io,zamattiac/osf.io,jnayak1/osf.io,samanehsan/osf.io,GageGaskins/osf.io,RomanZWang/osf.io,icereval/osf.io,sbt9uc/osf.io,lyndsysimon/osf.io,chrisseto/osf.io | website/settings/local-dist.py | website/settings/local-dist.py | # -*- coding: utf-8 -*-
'''Example settings/local.py file.
These settings override what's in website/settings/defaults.py
NOTE: local.py will not be added to source control.
'''
from . import defaults
DEV_MODE = True
DEBUG_MODE = True # Sets app to debug mode, turns off template caching, etc.
SEARCH_ENGINE = 'elastic'
ELASTIC_TIMEOUT = 10
# Comment out to use SHARE in development
USE_SHARE = False
# Comment out to use celery in development
USE_CELERY = False
# Comment out to use GnuPG in development
USE_GNUPG = False # Changing this may require you to re-enter encrypted fields
# Email
USE_EMAIL = False
MAIL_SERVER = 'localhost:1025' # For local testing
MAIL_USERNAME = 'osf-smtp'
MAIL_PASSWORD = 'CHANGEME'
# Mailchimp email subscriptions
ENABLE_EMAIL_SUBSCRIPTIONS = False
# Session
OSF_COOKIE_DOMAIN = None
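# None yields a host-only session cookie, which suits local development.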
COOKIE_NAME = 'osf'
SECRET_KEY = "CHANGEME"
# Uncomment if GPG was installed with homebrew
# GNUPG_BINARY = '/usr/local/bin/gpg'
##### Celery #####
## Default RabbitMQ broker
BROKER_URL = 'amqp://'
# Default RabbitMQ backend
CELERY_RESULT_BACKEND = 'amqp://'
USE_CDN_FOR_CLIENT_LIBS = False
# Example of extending default settings
# defaults.IMG_FMTS += ["pdf"]
| # -*- coding: utf-8 -*-
'''Example settings/local.py file.
These settings override what's in website/settings/defaults.py
NOTE: local.py will not be added to source control.
'''
from . import defaults
DEV_MODE = True
DEBUG_MODE = True # Sets app to debug mode, turns off template caching, etc.
SEARCH_ENGINE = 'elastic'
ELASTIC_TIMEOUT = 10
# Comment out to use SHARE in development
USE_SHARE = False
# Comment out to use celery in development
USE_CELERY = False
# Comment out to use GnuPG in development
USE_GNUPG = False # Changing this may require you to re-enter encrypted fields
# Email
USE_EMAIL = False
MAIL_SERVER = 'localhost:1025' # For local testing
MAIL_USERNAME = 'osf-smtp'
MAIL_PASSWORD = 'CHANGEME'
# Mailchimp email subscriptions
ENABLE_EMAIL_SUBSCRIPTIONS = False
# Session
OSF_COOKIE_DOMAIN = '.localhost'
COOKIE_NAME = 'osf'
SECRET_KEY = "CHANGEME"
# Uncomment if GPG was installed with homebrew
# GNUPG_BINARY = '/usr/local/bin/gpg'
##### Celery #####
## Default RabbitMQ broker
BROKER_URL = 'amqp://'
# Default RabbitMQ backend
CELERY_RESULT_BACKEND = 'amqp://'
USE_CDN_FOR_CLIENT_LIBS = False
# Example of extending default settings
# defaults.IMG_FMTS += ["pdf"]
| apache-2.0 | Python |
22ae3a2e9a236de61c078d234d920a3e6bc62d7b | Add a bit of docs | steffann/pylisp | pylisp/application/lispd/address_tree/ddt_container_node.py | pylisp/application/lispd/address_tree/ddt_container_node.py | '''
Created on 1 jun. 2013
@author: sander
'''
from .container_node import ContainerNode
class DDTContainerNode(ContainerNode):
'''
A ContainerNode that indicates that we are responsible for this part of
the DDT tree.
'''
| '''
Created on 1 jun. 2013
@author: sander
'''
from .container_node import ContainerNode
class DDTContainerNode(ContainerNode):
pass
| bsd-3-clause | Python |
8acaec546de0311f5f33c2e8fb9e1828a1cbc44b | Fix memory leak caused by using rabbit as the result backend for celery | felliott/scrapi,ostwald/scrapi,fabianvf/scrapi,mehanig/scrapi,erinspace/scrapi,mehanig/scrapi,jeffreyliu3230/scrapi,erinspace/scrapi,felliott/scrapi,CenterForOpenScience/scrapi,fabianvf/scrapi,icereval/scrapi,alexgarciac/scrapi,CenterForOpenScience/scrapi | worker_manager/celeryconfig.py | worker_manager/celeryconfig.py | """
Configuration file for celerybeat/worker.
Dynamically adds consumers from all manifest files in worker_manager/manifests/
to the celerybeat schedule. Also adds a heartbeat function to the schedule,
which adds every 30 seconds, and a monthly task to normalize all non-normalized
documents.
"""
from celery.schedules import crontab
from datetime import timedelta
import os
import yaml
BROKER_URL = 'amqp://guest@localhost'
# CELERY_RESULT_BACKEND = 'amqp://guest@localhost'
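# Left commented out on purpose: an AMQP result backend creates one queue
# per task result, which is what leaked memory here.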
CELERY_TASK_SERIALIZER = 'pickle'
CELERY_RESULT_SERIALIZER = 'pickle'
CELERY_ACCEPT_CONTENT = ['pickle']
CELERY_ENABLE_UTC = True
CELERY_TIMEZONE = 'UTC'
CELERY_IMPORTS = ('worker_manager.celerytasks',)
# Programmatically generate celery beat schedule
SCHED = {}
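# One celerybeat entry per manifest; each YAML file supplies the cron
# fields (days, hour, minute) used to schedule its consumer.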
for manifest in os.listdir('worker_manager/manifests/'):
filepath = 'worker_manager/manifests/' + manifest
with open(filepath) as f:
info = yaml.load(f)
SCHED['run ' + manifest] = {
'task': 'worker_manager.celerytasks.run_consumer',
'schedule': crontab(day_of_week=info['days'], hour=info['hour'], minute=info['minute']),
'args': [filepath],
}
# Deprecated
SCHED['request normalization of recent documents'] = {
'task': 'worker_manager.celerytasks.request_normalized',
'schedule': crontab(minute='*/1')
}
SCHED['check_archive'] = {
'task': 'worker_manager.celerytasks.check_archive',
'schedule': crontab(day_of_month='1', hour='23', minute='59'),
}
SCHED['heartbeat'] = {
'task': 'worker_manager.celerytasks.heartbeat',
'schedule': timedelta(seconds=30),
'args': (16, 16)
}
CELERYBEAT_SCHEDULE = SCHED
| """
Configuration file for celerybeat/worker.
Dynamically adds consumers from all manifest files in worker_manager/manifests/
to the celerybeat schedule. Also adds a heartbeat function to the schedule,
which adds every 30 seconds, and a monthly task to normalize all non-normalized
documents.
"""
from celery.schedules import crontab
from datetime import timedelta
import os
import yaml
BROKER_URL = 'amqp://guest@localhost'
CELERY_RESULT_BACKEND = 'amqp://guest@localhost'
CELERY_TASK_SERIALIZER = 'pickle'
CELERY_RESULT_SERIALIZER = 'pickle'
CELERY_ACCEPT_CONTENT = ['pickle']
CELERY_ENABLE_UTC = True
CELERY_TIMEZONE = 'UTC'
CELERY_IMPORTS = ('worker_manager.celerytasks',)
# Programmatically generate celery beat schedule
SCHED = {}
for manifest in os.listdir('worker_manager/manifests/'):
filepath = 'worker_manager/manifests/' + manifest
with open(filepath) as f:
info = yaml.load(f)
SCHED['run ' + manifest] = {
'task': 'worker_manager.celerytasks.run_consumer',
'schedule': crontab(day_of_week=info['days'], hour=info['hour'], minute=info['minute']),
'args': [filepath],
}
# Deprecated
SCHED['request normalization of recent documents'] = {
'task': 'worker_manager.celerytasks.request_normalized',
'schedule': crontab(minute='*/1')
}
SCHED['check_archive'] = {
'task': 'worker_manager.celerytasks.check_archive',
'schedule': crontab(day_of_month='1', hour='23', minute='59'),
}
SCHED['add'] = {
'task': 'worker_manager.celerytasks.heartbeat',
'schedule': timedelta(seconds=30),
'args': (16, 16)
}
CELERYBEAT_SCHEDULE = SCHED
| apache-2.0 | Python |
16806f7a620ddaba727fc6c7d6387eaa1c17f103 | Update p4-test-tool.py | dbeyer/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,dbeyer/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec | benchexec/tools/p4-test-tool.py | benchexec/tools/p4-test-tool.py | # This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool2):
# Needed for benchexec to run, but irrelevant for the P4 extension
def executable(self, tool):
return "/"
def name(self):
return "P4 Test"
def determine_result(self, run):
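# Both branches return on the first loop iteration, so only the first output
# line is inspected; run.cmdline[3] is assumed to hold the test name.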
for line in run.output:
if run.cmdline[3] + " ... ok" in line:
return benchexec.result.RESULT_CLASS_TRUE
else:
return benchexec.result.RESULT_CLASS_FALSE
| # This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool2):
# Needed for benchexec to run, but irrelevant for the P4 extension
def executable(self, tool):
return "/"
def name(self):
return "P4 Test"
def determine_result(self, run):
for line in run.output:
if run.cmdline[3] + " ... ok" in line:
return benchexec.result.RESULT_CLASS_TRUE
else:
return benchexec.result.RESULT_CLASS_FALSE
| apache-2.0 | Python |
b9d30a39f31862af607af44e97878a287f9361c5 | bump to v0.5.3 | ValvePython/steam | steam/__init__.py | steam/__init__.py | __version__ = "0.5.3"
__author__ = "Rossen Georgiev"
from steam.steamid import SteamID
from steam.webapi import WebAPI
| __version__ = "0.5.2"
__author__ = "Rossen Georgiev"
from steam.steamid import SteamID
from steam.webapi import WebAPI
| mit | Python |
026ba5fa78cb9916bffc23cf7dda1d1deb81b24c | Bump version 1.0.3 | pyschool/story | story/__init__.py | story/__init__.py | """
Story - PySchool
"""
__author__ = 'PySchool'
__version__ = '1.0.3'
__licence__ = 'MIT'
| """
Story - PySchool
"""
__author__ = 'PySchool'
__version__ = '1.0.2'
__licence__ = 'MIT'
| mit | Python |
8d0e3ae1f80e8b19292b18a20a338cbfd00364c7 | Bump to version number 1.6.0 | cartoonist/pystream-protobuf | stream/release.py | stream/release.py | # coding=utf-8
"""
stream.release
~~~~~~~~~~~~~~
Include release information of the package.
:copyright: (c) 2016 by Ali Ghaffaari.
:license: MIT, see LICENSE for more details.
"""
# CONSTANTS ###################################################################
# Development statuses:
DS_PLANNING = 1
DS_PREALPHA = 2
DS_ALPHA = 3
DS_BETA = 4
DS_STABLE = 5
DS_MATURE = 6
DS_INACTIVE = 7
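# Human-readable PyPI trove classifier for each status code above.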
DS_STRING = {
DS_PLANNING: 'Development Status :: 1 - Planning',
DS_PREALPHA: 'Development Status :: 2 - Pre-Alpha',
DS_ALPHA: 'Development Status :: 3 - Alpha',
DS_BETA: 'Development Status :: 4 - Beta',
DS_STABLE: 'Development Status :: 5 - Production/Stable',
DS_MATURE: 'Development Status :: 6 - Mature',
DS_INACTIVE: 'Development Status :: 7 - Inactive'
}
###############################################################################
# Package release information.
__title__ = 'stream'
__description__ = 'Python implementation of stream library'
__author__ = 'Ali Ghaffaari'
__email__ = '[email protected]'
__license__ = 'MIT'
# Release
__version__ = '1.6.0'
__status__ = DS_STABLE
# PyPI-related information
__keywords__ = 'stream protocol buffer protobuf'
__classifiers__ = [
# Development status
DS_STRING[__status__],
# License
'License :: OSI Approved :: MIT License',
# Supported Python versions.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
# Intended Audience and Topic
'Intended Audience :: Developers',
]
__requires__ = ['protobuf>=3.4.0', 'async_generator>=1.10', 'click>=6.0.0', 'future']
__tests_require__ = []
__extras_require__ = {
'test': ['nose>=1.0', 'coverage'],
}
__setup_requires__ = ['nose>=1.0', 'coverage']
__entry_points__ = '''
[console_scripts]
varint=stream.varint:cli
'''
| # coding=utf-8
"""
stream.release
~~~~~~~~~~~~~~
Include release information of the package.
:copyright: (c) 2016 by Ali Ghaffaari.
:license: MIT, see LICENSE for more details.
"""
# CONSTANTS ###################################################################
# Development statuses:
DS_PLANNING = 1
DS_PREALPHA = 2
DS_ALPHA = 3
DS_BETA = 4
DS_STABLE = 5
DS_MATURE = 6
DS_INACTIVE = 7
DS_STRING = {
DS_PLANNING: 'Development Status :: 1 - Planning',
DS_PREALPHA: 'Development Status :: 2 - Pre-Alpha',
DS_ALPHA: 'Development Status :: 3 - Alpha',
DS_BETA: 'Development Status :: 4 - Beta',
DS_STABLE: 'Development Status :: 5 - Production/Stable',
DS_MATURE: 'Development Status :: 6 - Mature',
DS_INACTIVE: 'Development Status :: 7 - Inactive'
}
###############################################################################
# Package release information.
__title__ = 'stream'
__description__ = 'Python implementation of stream library'
__author__ = 'Ali Ghaffaari'
__email__ = '[email protected]'
__license__ = 'MIT'
# Release
__version__ = '1.5.2'
__status__ = DS_BETA
# PyPI-related information
__keywords__ = 'stream protocol buffer protobuf'
__classifiers__ = [
# Development status
DS_STRING[__status__],
# License
'License :: OSI Approved :: MIT License',
# Supported Python versions.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
# Intended Audience and Topic
'Intended Audience :: Developers',
]
__requires__ = ['protobuf>=3.4.0', 'async_generator>=1.10', 'click>=6.0.0', 'future']
__tests_require__ = []
__extras_require__ = {
'test': ['nose>=1.0', 'coverage'],
}
__setup_requires__ = ['nose>=1.0', 'coverage']
__entry_points__ = '''
[console_scripts]
varint=stream.varint:cli
'''
| mit | Python |
c8069fff1941d0739bca8716a5e26f5c02ccffe3 | Add South field tuple. | playfire/django-enumfield | django_enumfield/fields.py | django_enumfield/fields.py | from django.db import models
class EnumField(models.Field):
__metaclass__ = models.SubfieldBase
def __init__(self, enumeration, *args, **kwargs):
self.enumeration = enumeration
kwargs.setdefault('choices', enumeration.get_choices())
super(EnumField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return 'IntegerField'
def to_python(self, value):
return self.enumeration.to_item(value)
def get_db_prep_save(self, value, connection=None):
if value is None:
return value
return self.to_python(value).value
def get_db_prep_lookup(self, lookup_type, value, connection=None, prepared=False):
def prepare(value):
v = self.to_python(value)
return self.get_db_prep_save(v, connection=connection)
if lookup_type == 'exact':
return [prepare(value)]
elif lookup_type == 'in':
return [prepare(v) for v in value]
elif lookup_type == 'isnull':
return []
raise TypeError("Lookup type %r not supported." % lookup_type)
def south_field_triple(self):
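# Tell South how to freeze this field for migrations: reuse the introspected
# args/kwargs but describe the column as a plain IntegerField.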
from south.modelsinspector import introspector
args, kwargs = introspector(self)
return ('django.db.models.fields.IntegerField', args, kwargs)
| from django.db import models
class EnumField(models.Field):
__metaclass__ = models.SubfieldBase
def __init__(self, enumeration, *args, **kwargs):
self.enumeration = enumeration
kwargs.setdefault('choices', enumeration.get_choices())
super(EnumField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return 'IntegerField'
def to_python(self, value):
return self.enumeration.to_item(value)
def get_db_prep_save(self, value, connection=None):
if value is None:
return value
return self.to_python(value).value
def get_db_prep_lookup(self, lookup_type, value, connection=None, prepared=False):
def prepare(value):
v = self.to_python(value)
return self.get_db_prep_save(v, connection=connection)
if lookup_type == 'exact':
return [prepare(value)]
elif lookup_type == 'in':
return [prepare(v) for v in value]
elif lookup_type == 'isnull':
return []
raise TypeError("Lookup type %r not supported." % lookup_type)
| bsd-3-clause | Python |
2c73fee5b0a3a527d0ee3c51291c7b4c01c9f688 | Revert "Create a group modification script" | HowAU/python-training,HowAU/python-training | fixture/group.py | fixture/group.py | class GroupHelper:
def __init__(self, app):
self.app = app
def open_groups_page(self):
wd = self.app.wd
wd.find_element_by_link_text("groups").click()
def create(self, group):
wd = self.app.wd
self.open_groups_page()
# create a new group
wd.find_element_by_name("new").click()
# fill group form
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys(group.name)
if not wd.find_element_by_xpath("//div[@id='content']/form/select//option[1]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select//option[1]").click()
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys(group.header)
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys(group.footer)
# submit group creation
wd.find_element_by_name("submit").click()
self.return_to_groups_page()
def return_to_groups_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
def delete_first_group(self):
wd = self.app.wd
self.open_groups_page()
wd.find_element_by_name("selected[]").click()  # select the first group
wd.find_element_by_name("delete").click()  # delete the selected group
self.return_to_groups_page()
def change_group_properties(self):
wd = self.app.wd
| class GroupHelper:
def __init__(self, app):
self.app = app
def open_groups_page(self):
wd = self.app.wd
wd.find_element_by_link_text("groups").click()
def create(self, group):
wd = self.app.wd
self.open_groups_page()
# create a new group
wd.find_element_by_name("new").click()
# fill group form
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys(group.name)
if not wd.find_element_by_xpath("//div[@id='content']/form/select//option[1]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select//option[1]").click()
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys(group.header)
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys(group.footer)
# submit group creation
wd.find_element_by_name("submit").click()
self.return_to_groups_page()
def return_to_groups_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
def delete_first_group(self):
wd = self.app.wd
self.open_groups_page()
wd.find_element_by_name("selected[]").click()  # select the first group
wd.find_element_by_name("delete").click()  # delete the selected group
self.return_to_groups_page()
def change_group_properties(self):
wd = self.app.wd
self.open_groups_page()
wd.find_element_by_name("selected[]").click()  # select the first group
wd.find_element_by_name("edit").click()  # open the group for editing
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("Best group")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("Header")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("Footer")
wd.find_element_by_name("update").click()
self.return_to_groups_page()
| apache-2.0 | Python |
4e8177bca4335c34950adb54c0bca4bca59ef0c0 | fix error: has no attribute __subclass__ | zhoukaigo/Blog,zhoukaigo/Blog | app/auth/oauth.py | app/auth/oauth.py | from rauth import OAuth2Service
from flask import current_app, url_for, redirect, request, session
class OAuthSignIn(object):
providers = None
def __init__(self, provider_name):
self.provider_name = provider_name
credentials = current_app.config['OAUTH_CREDENTIALS'][provider_name]
self.consumer_id = credentials['id']
self.consumer_secret = credentials['secret']
def authorize(self):
pass
def callback(self):
pass
def get_callback_url(self):
return url_for('oauth_callback', provider=self.provider_name, _external=True)
@classmethod
def get_provider(self, provider_name):
if self.providers is None:
self.providers = {}
for provider_class in self.__subclasses__():
provider = provider_class()
self.providers[provider.provider_name] = provider
return self.providers[provider_name]
class FacebookSignIn(OAuthSignIn):
def __init__(self):
super(FacebookSignIn, self).__init__('facebook')
self.service = OAuth2Service(
name = 'facebook',
client_id = self.consumer_id,
client_secret = self.consumer_secret,
authorize_url = 'https://graph.facebook.com/oauth/authorize',
access_token_url = 'https://graph.facebook.com/oauth/access_token',
base_url = 'https://graph.facebook.com/'
)
def authorize(self):
return redirect(self.service.get_authorize_url(
scope='email',
response_type='code',
redirect_uri= self.get_callback_url())
)
def callback(self):
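# Facebook redirects back with a ?code parameter; exchange it for an access
# token, then fetch the user's id and email from the Graph API.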
if 'code' not in request.args:
return None, None, None
oauth_session = self.service.get_auth_session(
data={'code': request.args['code'],
'grant_type': 'authorization_code',
'redirect_uri': self.get_callback_url()}
)
me = oauth_session.get('me?fields=id,email').json()
return (
'facebook$' + me['id'],
me.get('email').split('@')[0], # Facebook does not provide
# username, so the email's user
# is used instead
me.get('email')
)
| from rauth import OAuth2Service
from flask import current_app, url_for, redirect, request, session
class OAuthSignIn(object):
providers = None
def __init__(self, provider_name):
self.provider_name = provider_name
credentials = current_app.config['OAUTH_CREDENTIALS'][provider_name]
self.consumer_id = credentials['id']
self.consumer_secret = credentials['secret']
def authorize(self):
pass
def callback(self):
pass
def get_callback_url(self):
return url_for('oauth_callback', provider=self.provider_name, _external=True)
@classmethod
def get_provider(self, provider_name):
if self.providers is None:
self.providers = {}
for provider_class in self.__subclass__():
provider = proveder_class()
self.providers[provider.provider_name] = provider
return self.providers[provider_name]
class FacebookSignIn(OAuthSignIn):
def __init__(self):
super(FacebookSignIn, self).__init__('facebook')
self.service = OAuth2Service(
name = 'facebook',
client_id = self.consumer_id,
client_secret = self.consumer_secret,
authorize_url = 'https://graph.facebook.com/oauth/authorize',
access_token_url = 'https://graph.facebook.com/oauth/access_token',
base_url = 'https://graph.facebook.com/'
)
def authorize(self):
return redirect(self.service.get_authorize_url(
scope='email',
response_type='code',
redirect_uri= self.get_callback_url())
)
def callback(self):
if 'code' not in request.args:
return None, None, None
oauth_session = self.service.get_auth_session(
data={'code': request.args['code'],
'grant_type': 'authorization_code',
'redirect_uri': self.get_callback_url()}
)
me = oauth_session.get('me?fields=id,email').json()
return (
'facebook$' + me['id'],
me.get('email').split('@')[0], # Facebook does not provide
# username, so the email's user
# is used instead
me.get('email')
)
| mit | Python |
0781b47512cbab5fc1a090ff68b5f9d434a864af | Update examples/API_v2/lookup_users_using_user_ids.py | svven/tweepy,tweepy/tweepy | examples/API_v2/lookup_users_using_user_ids.py | examples/API_v2/lookup_users_using_user_ids.py | import tweepy
# Replace bearer token value with your own
bearer_token = ""
# Initializing the Tweepy client
client = tweepy.Client(bearer_token)
# Replace User IDs
ids = [2244994945, 6253282]
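# These sample IDs correspond to the @TwitterDev and @TwitterAPI accounts.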
# By default the user ID, name and username are returned. user_fields can be
# used to specify the additional user data that you want returned for each user
# e.g. profile_image_url
users = client.get_users(ids=ids, user_fields=["profile_image_url"])
# Print the username and the user's profile image url
for user in users.data:
print(user.username)
print(user.profile_image_url)
| import tweepy
# Replace bearer token value with your own
bearer_token = ""
# Initializing the Tweepy client
client = tweepy.Client(bearer_token)
# Replace User IDs
ids = [2244994945, 6253282]
# By default the user ID, name and username are returned. user_fields can be
# used to specify the additional user data that you want returned for each user
# e.g. profile_image_url
users = client.get_users(ids, user_fields=["profile_image_url"])
# Print the username and the user's profile image url
for user in users.data:
print(user.username)
print(user.profile_image_url)
| mit | Python |
54c81494cbbe9a20db50596e68c57e1caa624043 | Add a User post_save hook for creating user profiles | SanaMobile/sana.protocol_builder,SanaMobile/sana.protocol_builder,SanaMobile/sana.protocol_builder,SanaMobile/sana.protocol_builder,SanaMobile/sana.protocol_builder | src-django/authentication/signals/user_post_save.py | src-django/authentication/signals/user_post_save.py | from authentication.models import UserProfile
from django.contrib.auth.models import User, Group
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.conf import settings
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=User)
def on_user_post_save(sender, instance=None, created=False, **kwargs):
# Normally, users automatically get a Token created for them (if they do not
# already have one) when they hit
#
# rest_framework.authtoken.views.obtain_auth_token view
#
# This will create an authentication token for newly created users so the
# user registration endpoint can return a token back to Ember
# (thus avoiding the need to hit login endpoint)
if created:
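# Every new account also gets a profile; email confirmation starts out False.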
user_profile = UserProfile.objects.create(user=instance, is_email_confirmed=False)
user_profile.save()
Token.objects.create(user=instance)
# Add new user to the proper user group
normal_users_group, created = Group.objects.get_or_create(name=settings.NORMAL_USER_GROUP)
instance.groups.add(normal_users_group)
| from django.contrib.auth.models import User, Group
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.conf import settings
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=User)
def on_user_post_save(sender, instance=None, created=False, **kwargs):
# Normally, users automatically get a Token created for them (if they do not
# already have one) when they hit
#
# rest_framework.authtoken.views.obtain_auth_token view
#
# This will create an authentication token for newly created users so the
# user registration endpoint can return a token back to Ember
# (thus avoiding the need to hit login endpoint)
if created:
Token.objects.create(user=instance)
# Add new user to the proper user group
normal_users_group, created = Group.objects.get_or_create(name=settings.NORMAL_USER_GROUP)
instance.groups.add(normal_users_group)
| bsd-3-clause | Python |
9b678e184a568baea857ca68fcacb5070db6792d | update modulation.py | Koheron/lase | examples/modulation.py | examples/modulation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import initExample
from lase.core import KClient
# Driver to use
from lase.drivers import Oscillo
# Modules to import
import numpy as np
import matplotlib.pyplot as plt
import time
# Connect to Lase
host = '192.168.1.4' # Lase IP address
client = KClient(host)
driver = Oscillo(client) # Replace with appropriate driver
# Enable laser
driver.start_laser()
# Set laser current
current = 15 # mA
driver.set_laser_current(current)
# Modulation on DAC
amp_mod = 0.2
freq_mod = 1e6
driver.dac[1, :] = amp_mod*np.sin(2 * np.pi * freq_mod * driver.sampling.t)
driver.set_dac()
# Signal on ADC
driver.get_adc()
signal = driver.adc[0,:]
# Plot
plt.plot(driver.sampling.t, signal)
plt.show()
# Plot
psd_signal = np.abs(np.fft.fft(signal)) ** 2
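# Power spectrum of the ADC trace; fftshift centers DC for the log plot.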
plt.semilogy(1e-6 * np.fft.fftshift(driver.sampling.f_fft), np.fft.fftshift(psd_signal))
plt.xlabel('Frequency (MHz)')
plt.show()
# Disable laser
driver.stop_laser()
driver.close() | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import initExample
from lase.core import KClient
# Driver to use
from lase.drivers import Oscillo
# Modules to import
import numpy as np
import matplotlib.pyplot as plt
import time
# Connect to Lase
host = '192.168.1.4' # Lase IP address
client = KClient(host)
driver = Oscillo(client) # Replace with appropriate driver
# Enable laser
driver.start_laser()
# Set laser current
current = 15 #mA
driver.set_laser_current(current)
# Modulation on DAC
amp_mod = 0.2
freq_mod = 1e6
driver.dac[1,:] = amp_mod*np.sin(2*np.pi*freq_mod*driver.sampling.t)
driver.set_dac()
# Signal on ADC
driver.get_adc()
signal = driver.adc[0,:]
# Plot
plt.plot(driver.sampling.t, signal)
plt.show()
# Plot
psd_signal = np.abs(np.fft.fft(signal))**2
plt.semilogy(1e-6 * np.fft.fftshift(driver.sampling.f_fft), np.fft.fftshift(psd_signal))
plt.xlabel('Frequency (MHz)')
plt.show()
# Disable laser
driver.stop_laser()
driver.close()
| mit | Python |
1014c809638157da85794223c4990b5ae20512fa | Add crawled_at field back | mdsrosa/hackernews_scrapy | hackernews_scrapy/items.py | hackernews_scrapy/items.py | # -*- coding: utf-8 -*-
import scrapy
class HackernewsScrapyItem(scrapy.Item):
title = scrapy.Field()
url = scrapy.Field()
crawled_at = scrapy.Field(serializer=str)
| # -*- coding: utf-8 -*-
import scrapy
class HackernewsScrapyItem(scrapy.Item):
title = scrapy.Field()
url = scrapy.Field()
| mit | Python |
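The commit above only re-adds the crawled_at field; one plausible way it gets populated is an item pipeline (the pipeline name and UTC timestamp are assumptions, not from the repo):

import datetime

class CrawlTimestampPipeline(object):
    # hypothetical Scrapy item pipeline: stamps each item as it is scraped
    def process_item(self, item, spider):
        item['crawled_at'] = datetime.datetime.utcnow()
        return item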
d8cb4384f32f4d0e20f3212a36cc01915260f7a8 | Support custom actions in search router | genialis/resolwe-bio,genialis/resolwe-bio,genialis/resolwe-bio,genialis/resolwe-bio | tests/routers.py | tests/routers.py | """Search router."""
from rest_framework.routers import DefaultRouter, DynamicRoute, Route
class SearchRouter(DefaultRouter):
"""Custom router for search endpoints.
Search endpoints don't follow REST principles and thus don't need
    routes that the default router provides.
"""
routes = [
Route(
url=r"^{prefix}{trailing_slash}$",
mapping={"get": "list", "post": "list_with_post"},
name="{basename}",
initkwargs={},
detail=False,
),
# Dynamically generated list routes. Generated using
# @action(detail=False) decorator on methods of the viewset.
DynamicRoute(
url=r'^{prefix}/{url_path}{trailing_slash}$',
name='{basename}-{url_name}',
detail=False,
initkwargs={}
),
Route(
url=r'^{prefix}/{lookup}{trailing_slash}$',
mapping={
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
},
name='{basename}-detail',
detail=True,
initkwargs={'suffix': 'Instance'}
),
# Dynamically generated detail routes. Generated using
# @action(detail=True) decorator on methods of the viewset.
DynamicRoute(
url=r'^{prefix}/{lookup}/{url_path}{trailing_slash}$',
name='{basename}-{url_name}',
detail=True,
initkwargs={}
),
]
| """Search router."""
from rest_framework.routers import DefaultRouter, Route
class SearchRouter(DefaultRouter):
"""Custom router for search endpoints.
Search endpoints don't follow REST principles and thus don't need
    routes that the default router provides.
"""
routes = [
Route(
url=r"^{prefix}{trailing_slash}$",
mapping={"get": "list", "post": "list_with_post"},
name="{basename}",
initkwargs={},
detail=False,
)
]
| apache-2.0 | Python |
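For context, a minimal sketch of wiring such a router into a URLconf on DRF >= 3.9, where register() accepts basename (the viewset here is a hypothetical stand-in, not from the repo):

from rest_framework import viewsets
from rest_framework.response import Response

class DataSearchViewSet(viewsets.ViewSet):
    # hypothetical stand-in; a real search viewset would query an index
    def list(self, request):
        return Response([])

search_router = SearchRouter(trailing_slash=False)
search_router.register(r'data_search', DataSearchViewSet, basename='search-data')
urlpatterns = search_router.urls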
43922bb7cf5015cbf3538195d3d4f93ff8c9ec18 | Bump version | TombProject/tomb_cli,tomborine/tomb_cli | tomb_cli/__about__.py | tomb_cli/__about__.py | __title__ = 'tomb_cli'
__summary__ = 'Top level CLI command for tomb'
__uri__ = 'http://github.com/tomborine/tomb_cli'
__version__ = '0.0.2'
__author__ = 'John Anderson'
__email__ = '[email protected]'
__license__ = 'MIT'
__copyright__ = '2015 John Anderson (sontek)'
| __title__ = 'tomb_cli'
__summary__ = 'Top level CLI command for tomb'
__uri__ = 'http://github.com/tomborine/tomb_cli'
__version__ = '0.0.1'
__author__ = 'John Anderson'
__email__ = '[email protected]'
__license__ = 'MIT'
__copyright__ = '2015 John Anderson (sontek)'
| mit | Python |
18f373ffc1e49b33708ae2303b61ccf76ffa686e | Use pylab.load to read in data. | matplotlib/basemap,guziy/basemap,matplotlib/basemap,guziy/basemap | examples/ortho_demo.py | examples/ortho_demo.py | from matplotlib.toolkits.basemap import Basemap
from pylab import *
# read in topo data from gzipped ascii files (on a regular lat/lon grid)
etopo = array(load('etopo20data.gz'),'f')
lons = array(load('etopo20lons.gz'),'f')
lats = array(load('etopo20lats.gz'),'f')
# create Basemap instance for Orthographic (satellite view) projection.
lon_0 = float(raw_input('enter reference longitude (lon_0):'))
lat_0 = float(raw_input('enter reference latitude (lat_0):'))
fillcont = int(raw_input('fill continents? (1 for yes, 0 for no):'))
m = Basemap(projection='ortho',lon_0=lon_0,lat_0=lat_0)
# compute native map projection coordinates for lat/lon grid.
lons, lats = meshgrid(lons, lats)
x,y = m(lons,lats)
# create figure with same aspect ratio as map.
fig=m.createfigure().add_axes([0.05,0.05,0.9,0.9])
# make filled contour plot.
cs = m.contourf(x,y,etopo,30,cmap=cm.jet)
# draw coastlines.
m.drawcoastlines()
# draw a line around the map region.
m.drawmapboundary()
if fillcont:
m.fillcontinents()
# draw parallels and meridians.
m.drawparallels(arange(-90.,120.,30.))
m.drawmeridians(arange(0.,420.,60.))
title('Orthographic Map Centered on Lon=%s, Lat=%s' % (lon_0,lat_0))
show()
| from matplotlib import rcParams, use
rcParams['numerix'] = 'Numeric' # make sure Numeric is used (to read pickle)
from matplotlib.toolkits.basemap import Basemap
import cPickle
from pylab import *
# read in topo data from pickle (on a regular lat/lon grid)
topodict = cPickle.load(open('etopo20.pickle','rb'))
etopo = topodict['data']; lons = topodict['lons']; lats = topodict['lats']
# create Basemap instance for Orthographic (satellite view) projection.
lon_0 = float(raw_input('enter reference longitude (lon_0):'))
lat_0 = float(raw_input('enter reference latitude (lat_0):'))
fillcont = int(raw_input('fill continents? (1 for yes, 0 for no):'))
m = Basemap(projection='ortho',lon_0=lon_0,lat_0=lat_0)
# compute native map projection coordinates for lat/lon grid.
lons, lats = meshgrid(lons,lats)
x,y = m(lons,lats)
# create figure with same aspect ratio as map.
fig=m.createfigure().add_axes([0.05,0.05,0.9,0.9])
# make filled contour plot.
cs = m.contourf(x,y,etopo,30,cmap=cm.jet)
# draw coastlines.
m.drawcoastlines()
# draw a line around the map region.
m.drawmapboundary()
if fillcont:
m.fillcontinents()
# draw parallels and meridians.
m.drawparallels(arange(-90.,120.,30.))
m.drawmeridians(arange(0.,420.,60.))
title('Orthographic Map Centered on Lon=%s, Lat=%s' % (lon_0,lat_0))
show()
| mit | Python |
2ab2927b2ee4f821fd75050da19a7f1f81aaeca8 | FIX divide mnist features by 255 in mlp example (#11961) | TomDLT/scikit-learn,scikit-learn/scikit-learn,vinayak-mehta/scikit-learn,xuewei4d/scikit-learn,chrsrds/scikit-learn,kevin-intel/scikit-learn,ivannz/scikit-learn,bnaul/scikit-learn,lesteve/scikit-learn,ogrisel/scikit-learn,chrsrds/scikit-learn,AlexandreAbraham/scikit-learn,sergeyf/scikit-learn,saiwing-yeung/scikit-learn,TomDLT/scikit-learn,ndingwall/scikit-learn,betatim/scikit-learn,ndingwall/scikit-learn,xuewei4d/scikit-learn,glemaitre/scikit-learn,glemaitre/scikit-learn,jakirkham/scikit-learn,AlexandreAbraham/scikit-learn,huzq/scikit-learn,ogrisel/scikit-learn,glemaitre/scikit-learn,jakirkham/scikit-learn,huzq/scikit-learn,AlexandreAbraham/scikit-learn,anntzer/scikit-learn,manhhomienbienthuy/scikit-learn,vinayak-mehta/scikit-learn,vortex-ape/scikit-learn,espg/scikit-learn,huzq/scikit-learn,vortex-ape/scikit-learn,kevin-intel/scikit-learn,amueller/scikit-learn,xuewei4d/scikit-learn,betatim/scikit-learn,amueller/scikit-learn,manhhomienbienthuy/scikit-learn,bnaul/scikit-learn,lesteve/scikit-learn,ndingwall/scikit-learn,betatim/scikit-learn,shyamalschandra/scikit-learn,chrsrds/scikit-learn,ogrisel/scikit-learn,vortex-ape/scikit-learn,kevin-intel/scikit-learn,saiwing-yeung/scikit-learn,vinayak-mehta/scikit-learn,AlexandreAbraham/scikit-learn,vinayak-mehta/scikit-learn,anntzer/scikit-learn,lesteve/scikit-learn,espg/scikit-learn,espg/scikit-learn,bnaul/scikit-learn,bnaul/scikit-learn,scikit-learn/scikit-learn,lesteve/scikit-learn,anntzer/scikit-learn,xuewei4d/scikit-learn,scikit-learn/scikit-learn,sergeyf/scikit-learn,ndingwall/scikit-learn,ivannz/scikit-learn,espg/scikit-learn,betatim/scikit-learn,anntzer/scikit-learn,chrsrds/scikit-learn,TomDLT/scikit-learn,manhhomienbienthuy/scikit-learn,ogrisel/scikit-learn,shyamalschandra/scikit-learn,ivannz/scikit-learn,glemaitre/scikit-learn,saiwing-yeung/scikit-learn,manhhomienbienthuy/scikit-learn,amueller/scikit-learn,amueller/scikit-learn,scikit-learn/scikit-learn,shyamalschandra/scikit-learn,saiwing-yeung/scikit-learn,huzq/scikit-learn,kevin-intel/scikit-learn,sergeyf/scikit-learn,sergeyf/scikit-learn,shyamalschandra/scikit-learn,ivannz/scikit-learn,jakirkham/scikit-learn,vortex-ape/scikit-learn,jakirkham/scikit-learn,TomDLT/scikit-learn | examples/neural_networks/plot_mnist_filters.py | examples/neural_networks/plot_mnist_filters.py | """
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in a
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix have the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.neural_network import MLPClassifier
print(__doc__)
# Load data from https://www.openml.org/d/554
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
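# pixel values arrive as 0-255; rescaling to [0, 1] keeps the SGD solver well-behaved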
X = X / 255.
# rescale the data, use the traditional train/test split
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
# solver='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
solver='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| """
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in a
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix have the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.neural_network import MLPClassifier
print(__doc__)
# Load data from https://www.openml.org/d/554
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
# rescale the data, use the traditional train/test split
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
# solver='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
solver='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause | Python |
e41145a0812d43833d43abf335820c90628bbe62 | select all mail folder for crisping console | EthanBlackburn/sync-engine,EthanBlackburn/sync-engine,wakermahmud/sync-engine,jobscore/sync-engine,rmasters/inbox,PriviPK/privipk-sync-engine,jobscore/sync-engine,rmasters/inbox,gale320/sync-engine,nylas/sync-engine,EthanBlackburn/sync-engine,closeio/nylas,ErinCall/sync-engine,nylas/sync-engine,nylas/sync-engine,PriviPK/privipk-sync-engine,Eagles2F/sync-engine,EthanBlackburn/sync-engine,gale320/sync-engine,ErinCall/sync-engine,ErinCall/sync-engine,gale320/sync-engine,rmasters/inbox,ErinCall/sync-engine,Eagles2F/sync-engine,closeio/nylas,closeio/nylas,PriviPK/privipk-sync-engine,gale320/sync-engine,wakermahmud/sync-engine,closeio/nylas,wakermahmud/sync-engine,nylas/sync-engine,Eagles2F/sync-engine,ErinCall/sync-engine,PriviPK/privipk-sync-engine,EthanBlackburn/sync-engine,Eagles2F/sync-engine,PriviPK/privipk-sync-engine,wakermahmud/sync-engine,jobscore/sync-engine,jobscore/sync-engine,Eagles2F/sync-engine,rmasters/inbox,wakermahmud/sync-engine,gale320/sync-engine | tools/crispinshell.py | tools/crispinshell.py | #!/usr/bin/env python
import sys, os; sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'server')))
import sessionmanager
import IPython
def start_console(user_email_address):
# You can also do this with
# $ python -m imapclient.interact -H <host> -u <user> ...
# but we want to use our sessionmanager and crispin so we're not.
def refresh_crispin():
return sessionmanager.get_crispin_from_email(user_email_address)
c = refresh_crispin()
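    # IMAP UIDs are scoped to the selected mailbox, so select All Mail before searching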
c.select_folder(c.all_mail_folder_name())
server_uids = [unicode(s) for s in c.imap_server.search(['NOT DELETED'])]
banner = """
You can access the crispin instance with the 'c' variable.
AllMail message UIDs are in 'server_uids'.
You can refresh the session with 'refresh_crispin()'.
IMAPClient docs are at:
http://imapclient.readthedocs.org/en/latest/#imapclient-class-reference
"""
IPython.embed(banner1=banner)
# XXX Any cleanup?
| #!/usr/bin/env python
import sys, os; sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'server')))
import sessionmanager
import IPython
def start_console(user_email_address):
# You can also do this with
# $ python -m imapclient.interact -H <host> -u <user> ...
# but we want to use our sessionmanager and crispin so we're not.
c = None
def refresh_crispin():
global c
c = sessionmanager.get_crispin_from_email(user_email_address)
refresh_crispin()
server_uids = [unicode(s) for s in c.imap_server.search(['NOT DELETED'])]
banner = """
You can access the crispin instance with the 'c' variable.
AllMail message UIDs are in 'server_uids'.
You can refresh the session with 'refresh_crispin()'.
IMAPClient docs are at:
http://imapclient.readthedocs.org/en/latest/#imapclient-class-reference
"""
IPython.embed(banner1=banner)
# XXX Any cleanup?
| agpl-3.0 | Python |
45c67e0b9bc168549fdd1eb2cde3599aae921567 | Update base.py | raiderrobert/django-webhook | webhook/base.py | webhook/base.py | """
Base webhook implementation
"""
import json
from django.http import HttpResponse
from django.views.generic import View
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
class WebhookBase(View):
"""
Simple Webhook base class to handle the most standard case.
"""
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(WebhookBase, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
data = json.loads(request.body.decode('utf-8'))
self.process_webhook(data)
return HttpResponse(status=200)
def process_webhook(self, data):
"""
Unimplemented method
"""
raise NotImplementedError
| """
Base webhook implementation
"""
import json
from django.http import HttpResponse
from django.views.generic import View
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
class WebhookBase(View):
"""
Simple Webhook base class to handle the most standard case.
"""
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(WebhookBase, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
data = json.loads(request.body.decode('utf-8'))
self.process_webhook(data)
return HttpResponse(status=200)
def process_webhook(self, data=None):
"""
Unimplemented method
"""
raise NotImplementedError
| mit | Python |
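A short sketch of the intended use of the base class above — subclass and implement the hook (the class name and payload key are assumptions):

class PushWebhook(WebhookBase):
    def process_webhook(self, data):
        # 'data' is the decoded JSON request body; keys depend on the sender
        print(data.get('ref'))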
a751e7f51412581e14cc822f1e443ed97746055a | Update structures example | stonebig/numba,seibert/numba,gmarkall/numba,jriehl/numba,ssarangi/numba,ssarangi/numba,pombredanne/numba,GaZ3ll3/numba,gdementen/numba,gmarkall/numba,pitrou/numba,gdementen/numba,stonebig/numba,sklam/numba,pitrou/numba,stuartarchibald/numba,jriehl/numba,IntelLabs/numba,pombredanne/numba,ssarangi/numba,cpcloud/numba,pombredanne/numba,gmarkall/numba,IntelLabs/numba,sklam/numba,cpcloud/numba,pombredanne/numba,pombredanne/numba,stuartarchibald/numba,GaZ3ll3/numba,IntelLabs/numba,gmarkall/numba,gmarkall/numba,stefanseefeld/numba,shiquanwang/numba,jriehl/numba,pitrou/numba,numba/numba,numba/numba,IntelLabs/numba,shiquanwang/numba,stefanseefeld/numba,pitrou/numba,stuartarchibald/numba,jriehl/numba,numba/numba,stonebig/numba,numba/numba,seibert/numba,gdementen/numba,seibert/numba,jriehl/numba,ssarangi/numba,stefanseefeld/numba,seibert/numba,ssarangi/numba,stuartarchibald/numba,IntelLabs/numba,stuartarchibald/numba,pitrou/numba,stefanseefeld/numba,stonebig/numba,seibert/numba,GaZ3ll3/numba,gdementen/numba,sklam/numba,stefanseefeld/numba,sklam/numba,shiquanwang/numba,gdementen/numba,GaZ3ll3/numba,GaZ3ll3/numba,cpcloud/numba,cpcloud/numba,sklam/numba,cpcloud/numba,numba/numba,stonebig/numba | examples/structures.py | examples/structures.py | from numba import struct, jit, double
import numpy as np
record_type = struct([('x', double), ('y', double)])
record_dtype = record_type.get_dtype()
a = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=record_dtype)
@jit(argtypes=[record_type[:]])
def hypot(data):
# return types of numpy functions are inferred
result = np.empty_like(data, dtype=np.float64)
# notice access to structure elements 'x' and 'y' via attribute access
# You can also index by field name or field index:
# data[i].x == data[i]['x'] == data[i][0]
for i in range(data.shape[0]):
result[i] = np.sqrt(data[i].x * data[i].x + data[i].y * data[i].y)
return result
print hypot(a)
# Notice inferred return type
print hypot.signature
# Notice native sqrt calls and for.body direct access to memory...
print hypot.lfunc
| from numba import struct, jit, double
import numpy as np
record_type = struct([('x', double), ('y', double)])
record_dtype = record_type.get_dtype()
a = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=record_dtype)
@jit(argtypes=[record_type[:]])
def hypot(data):
# return types of numpy functions are inferred
result = np.empty_like(data, dtype=np.float64)
# notice access to structure elements 'x' and 'y' via attribute access
for i in range(data.shape[0]):
result[i] = np.sqrt(data[i].x * data[i].x + data[i].y * data[i].y)
return result
print hypot(a)
# Notice inferred return type
print hypot.signature
# Notice native sqrt calls and for.body direct access to memory...
print hypot.lfunc
| bsd-2-clause | Python |
f6045517b27bf6f878ab2906aa6b793cfd640786 | upgrade anymail | mcallistersean/b2-issue-tracker,mcallistersean/b2-issue-tracker,mcallistersean/b2-issue-tracker | toucan_conf/settings/prod/__init__.py | toucan_conf/settings/prod/__init__.py | import os
from .. import *
try:
from ..secrets import ALLOWED_HOSTS
except ImportError:
raise ImportError('Please set ALLOWED_HOSTS in the secrets file when using production config.')
try:
from ..secrets import ANYMAIL
except ImportError:
raise ImportError('Please set ANYMAIL settings in the secrets file when using production config.')
INSTALLED_APPS += [
'anymail'
]
DEBUG = False
DEFAULT_FROM_EMAIL = '[email protected]'
STATIC_ROOT = os.path.join(BASE_DIR, '_static')
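# newer Anymail releases renamed the ESP backends: EmailBackend replaces MailgunBackend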
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
# install raven handler if configured
try:
import raven
from ..secrets import RAVEN_DSN
except ImportError:
pass
else:
RAVEN_CONFIG = {
'dsn': RAVEN_DSN,
# If you are using git, you can also automatically configure the
# release based on the git info.
'release': raven.fetch_git_sha(BASE_DIR),
}
| import os
from .. import *
try:
from ..secrets import ALLOWED_HOSTS
except ImportError:
raise ImportError('Please set ALLOWED_HOSTS in the secrets file when using production config.')
try:
from ..secrets import ANYMAIL
except ImportError:
raise ImportError('Please set ANYMAIL settings in the secrets file when using production config.')
INSTALLED_APPS += [
'anymail'
]
DEBUG = False
DEFAULT_FROM_EMAIL = '[email protected]'
STATIC_ROOT = os.path.join(BASE_DIR, '_static')
EMAIL_BACKEND = "anymail.backends.mailgun.MailgunBackend"
# install raven handler if configured
try:
import raven
from ..secrets import RAVEN_DSN
except ImportError:
pass
else:
RAVEN_CONFIG = {
'dsn': RAVEN_DSN,
# If you are using git, you can also automatically configure the
# release based on the git info.
'release': raven.fetch_git_sha(BASE_DIR),
}
| mit | Python |
193d911536799751c9ec29571cb8091bcd187087 | fix uraseuranta py | CSCfi/antero,CSCfi/antero,CSCfi/antero,CSCfi/antero,CSCfi/antero,CSCfi/antero | pdi_integrations/arvo/python_scripts/get_arvo_uraseuranta.py | pdi_integrations/arvo/python_scripts/get_arvo_uraseuranta.py | #import json
import requests
#import os
from pandas.io.json import json_normalize
#import datetime
import base64
import os
try:
api_key = os.environ['AUTH_API_KEY']
except KeyError:
print("API-key is missing")
try:
api_user = os.environ['AUTH_API_USER']
except KeyError:
print("API-user is missing")
result = []
good_result=[]
filtered_result=[]
urls = []
url = 'https://arvo.csc.fi/api/vipunen/uraseuranta'
reqheaders = {'Content-Type': 'application/json'}
reqheaders['Accept'] = 'application/json'
### encode the API user and key into a Basic auth Authorization header
tmp = "%s:%s" % (api_user, api_key)
reqheaders['Authorization'] = "Basic %s" % base64.b64encode(tmp.encode('utf-8')).decode('utf-8')
#response = requests.get(url, headers=reqheaders).json()
## Not checking the status just downloading
## GET STATUS
##
while url != None: ## The url is not null
response = requests.get(url, headers=reqheaders).json()
for uraseuranta in response['data']:
result.append(uraseuranta)
# taustatiedot.append(uraseuranta['taustatiedot'])
url = response['pagination']['next_url']
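    # next_url comes back as JSON null (None) on the last page, which ends the loop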
urls.append(url)
## split result into two sets (with&without taustatiedot)
## test first 301 result
## for item in result[0:300]:
for item in result:
if item.get('taustatiedot') == None:
filtered_result.append(item)
else:
good_result.append(item)
## normalize data from result sets
### if you want to check column names use row below
### data.dtypes.index
data = json_normalize(good_result)
filtered_data = json_normalize(filtered_result)
# print(data[12])
# data['vastaajaid'].head(10)
## data.dtypes
## Export data to csv's
print("Exporting data to csv file")
filtered_data.to_csv(path_or_buf='D:/pdi_integrations/data/arvo/uraseuranta_vajaadata.csv', sep='|', na_rep='',
header=True, index=False, mode='w', encoding='utf-8-sig', quoting=2,
quotechar='"', line_terminator='\n', escapechar='$')
data.to_csv(path_or_buf='D:/pdi_integrations/data/arvo/uraseuranta.csv', sep='|', na_rep='',
header=True, index=False, mode='w', encoding='utf-8-sig', quoting=2,
quotechar='"', line_terminator='\n' , escapechar='$')
#now = datetime.datetime.now()
#print
#print("Current date and time using str method of datetime object:")
#print(str(now))
## data.vastaajaid.nunique()
| #import json
import requests
#import os
from pandas.io.json import json_normalize
#import datetime
import os
try:
api_key = os.environ['AUTH_API_KEY']
except KeyError:
print("API-key missing")
result = []
good_result=[]
filtered_result=[]
urls = []
url = 'https://arvo.csc.fi/api/vipunen/uraseuranta'
reqheaders = {'Content-Type': 'application/json'}
reqheaders['Authorization'] = api_key
#response = requests.get(url, headers=reqheaders).json()
## Not checking the status just downloading
## GET STATUS
##
while url != 'null': ## The url is not null
response = requests.get(url, headers=reqheaders).json()
for uraseuranta in response['data']:
result.append(uraseuranta)
# taustatiedot.append(uraseuranta['taustatiedot'])
url = response['pagination']['next_url']
urls.append(url)
## split result into two sets (with&without taustatiedot)
## test first 301 result
## for item in result[0:300]:
for item in result:
if item.get('taustatiedot') == None:
filtered_result.append(item)
else:
good_result.append(item)
## normalize data from result sets
### if you want to check column names use row below
### data.dtypes.index
data = json_normalize(good_result)
filtered_data = json_normalize(filtered_result)
# print(data[12])
# data['vastaajaid'].head(10)
## data.dtypes
## Export data to csv's
filtered_data.to_csv(path_or_buf='D:/pdi_integrations/data/arvo/uraseuranta_vajaadata.csv', sep='|', na_rep='',
header=True, index=False, mode='w', encoding='utf-8-sig', quoting=2,
quotechar='"', line_terminator='\n', escapechar='$')
data.to_csv(path_or_buf='D:/pdi_integrations/data/arvo/uraseuranta.csv', sep='|', na_rep='',
header=True, index=False, mode='w', encoding='utf-8-sig', quoting=2,
quotechar='"', line_terminator='\n' , escapechar='$')
#now = datetime.datetime.now()
#print
#print("Current date and time using str method of datetime object:")
#print(str(now))
## data.vastaajaid.nunique()
| mit | Python |
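The core of the pagination fix above, isolated as a reusable sketch (the function name is illustrative, not from the repo):

import requests

def fetch_all_pages(start_url, headers):
    items = []
    url = start_url
    while url is not None:  # the API sends next_url as JSON null (None) on the last page
        page = requests.get(url, headers=headers).json()
        items.extend(page['data'])
        url = page['pagination']['next_url']
    return items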