max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
mhw_armor_edit/ftypes/lbm_skill.py
|
nicolas43000/MHWorldData
| 129 |
123333
|
# coding: utf-8
from mhw_armor_edit import ftypes as ft
from mhw_armor_edit.ftypes import StructFile, Struct
class LbmSkillEntry(Struct):
STRUCT_SIZE = 10
unk1: ft.ubyte()
unk2: ft.ubyte()
item_id: ft.ushort()
item_qty: ft.ushort()
unk3: ft.pad(4)
class LbmSkill(StructFile):
EntryFactory = LbmSkillEntry
MAGIC = 0x0046
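# Field layout note: assuming ft.pad(n) reserves n bytes, the fields sum to
# 2 x ubyte (1) + 2 x ushort (2) + pad(4) = 10 bytes, matching STRUCT_SIZE.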
|
pypykatz/smb/dcsync.py
|
wisdark/pypykatz
| 1,861 |
123337
|
<reponame>wisdark/pypykatz
import asyncio
from pypykatz import logging
async def dcsync(url, username = None):
from aiosmb.commons.connection.url import SMBConnectionURL
from aiosmb.commons.interfaces.machine import SMBMachine
smburl = SMBConnectionURL(url)
connection = smburl.get_connection()
users = []
if username is not None:
users.append(username)
async with connection:
logging.debug('[DCSYNC] Connecting to server...')
_, err = await connection.login()
if err is not None:
raise err
logging.debug('[DCSYNC] Connected to server!')
logging.debug('[DCSYNC] Running...')
i = 0
async with SMBMachine(connection) as machine:
async for secret, err in machine.dcsync(target_users=users):
if err is not None:
raise err
i += 1
if i % 1000 == 0:
logging.debug('[DCSYNC] Running... %s' % i)
await asyncio.sleep(0)
yield secret
logging.debug('[DCSYNC] Finished!')
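# Usage sketch (illustrative, not part of the original module). dcsync() is an
# async generator, so it is consumed with "async for"; the SMB URL below is a
# hypothetical example of an aiosmb connection URL:
#
#   async def main():
#       async for secret in dcsync('smb2+ntlm-password://TEST\\victim:Passw0rd@10.10.10.2'):
#           print(str(secret))
#
#   asyncio.run(main())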
|
claf/machine/__init__.py
|
GMDennis/claf
| 225 |
123350
|
<reponame>GMDennis/claf
from claf.machine.open_qa import OpenQA
from claf.machine.nlu import NLU
# fmt: off
__all__ = [
"OpenQA",
"NLU",
]
# fmt: on
|
examples/custom/parallel_plot/parallel_reset.py
|
goncaloperes/bokeh
| 15,193 |
123357
|
from bokeh.models import ActionTool
class ParallelResetTool(ActionTool):
""" Tool to reset only plot axes and not selections
"""
__implementation__ = 'parallel_reset.ts'
|
sarpy/io/general/nitf_elements/tres/unclass/PIAPEA.py
|
spacefan/sarpy
| 119 |
123433
|
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "<NAME>"
class PIAPEAType(TREElement):
def __init__(self, value):
super(PIAPEAType, self).__init__()
self.add_field('LASTNME', 's', 28, value)
self.add_field('FIRSTNME', 's', 28, value)
self.add_field('MIDNME', 's', 28, value)
self.add_field('DOB', 's', 6, value)
self.add_field('ASSOCTRY', 's', 2, value)
class PIAPEA(TREExtension):
_tag_value = 'PIAPEA'
_data_type = PIAPEAType
|
DQMOffline/Trigger/python/HMesonGammaMonitor_Client_cff.py
|
ckamtsikis/cmssw
| 852 |
123441
|
<filename>DQMOffline/Trigger/python/HMesonGammaMonitor_Client_cff.py<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
hmesongammaEfficiency = DQMEDHarvester("DQMGenericClient",
# subDirs = cms.untracked.vstring("HLT/Higgs/HMesonGamma/*"),
subDirs = cms.untracked.vstring("HLT/HIG/HMesonGamma/*"),
verbose = cms.untracked.uint32(0), # Set to 2 for all messages
resolution = cms.vstring(),
efficiency = cms.vstring(
"effic_gammapt 'gamma pT turnON; PFGamma pT [GeV]; efficiency' gammapt_numerator gammapt_denominator",
"effic_mesonpt 'meson pT turnON; PFMeson pT [GeV]; efficiency' mesonpt_numerator mesonpt_denominator",
"effic_gammaeta 'gamma #eta turnON; gamma #eta; efficiency' gammaeta_numerator gammaeta_denominator",
"effic_mesoneta 'meson #eta turnON; meson #eta; efficiency' mesoneta_numerator mesoneta_denominator",
),
efficiencyProfile = cms.untracked.vstring(
"effic_gammaetaVsLS 'Gamma #eta efficiency vs LS; LS; gamma #eta efficiency' gammaetaVsLS_numerator gammaetaVsLS_denominator",
),
)
hmesongammaClient = cms.Sequence(
hmesongammaEfficiency
)
|
bookwyrm/tests/views/landing/test_landing.py
|
mouse-reeve/fedireads
| 270 |
123452
|
<reponame>mouse-reeve/fedireads<filename>bookwyrm/tests/views/landing/test_landing.py
""" test for app action functionality """
from unittest.mock import patch
from django.contrib.auth.models import AnonymousUser
from django.template.response import TemplateResponse
from django.test import TestCase
from django.test.client import RequestFactory
from bookwyrm import models
from bookwyrm import views
from bookwyrm.tests.validate_html import validate_html
class LandingViews(TestCase):
"""pages you land on without really trying"""
def setUp(self):
"""we need basic test data and mocks"""
self.factory = RequestFactory()
with patch("bookwyrm.suggested_users.rerank_suggestions_task.delay"), patch(
"bookwyrm.activitystreams.populate_stream_task.delay"
), patch("bookwyrm.lists_stream.populate_lists_task.delay"):
self.local_user = models.User.objects.create_user(
"<EMAIL>",
"<EMAIL>",
"password",
local=True,
localname="mouse",
)
self.anonymous_user = AnonymousUser
self.anonymous_user.is_authenticated = False
models.SiteSettings.objects.create()
@patch("bookwyrm.suggested_users.SuggestedUsers.get_suggestions")
def test_home_page(self, _):
"""there are so many views, this just makes sure it LOADS"""
view = views.Home.as_view()
request = self.factory.get("")
request.user = self.local_user
with patch("bookwyrm.activitystreams.ActivityStream.get_activity_stream"):
result = view(request)
self.assertEqual(result.status_code, 200)
validate_html(result.render())
request.user = self.anonymous_user
result = view(request)
self.assertIsInstance(result, TemplateResponse)
self.assertEqual(result.status_code, 200)
validate_html(result.render())
def test_about_page(self):
"""there are so many views, this just makes sure it LOADS"""
view = views.about
request = self.factory.get("")
request.user = self.local_user
result = view(request)
self.assertIsInstance(result, TemplateResponse)
validate_html(result.render())
self.assertEqual(result.status_code, 200)
def test_conduct_page(self):
"""there are so many views, this just makes sure it LOADS"""
view = views.conduct
request = self.factory.get("")
request.user = self.local_user
result = view(request)
self.assertIsInstance(result, TemplateResponse)
validate_html(result.render())
self.assertEqual(result.status_code, 200)
def test_privacy_page(self):
"""there are so many views, this just makes sure it LOADS"""
view = views.privacy
request = self.factory.get("")
request.user = self.local_user
result = view(request)
self.assertIsInstance(result, TemplateResponse)
validate_html(result.render())
self.assertEqual(result.status_code, 200)
def test_landing(self):
"""there are so many views, this just makes sure it LOADS"""
view = views.Landing.as_view()
request = self.factory.get("")
result = view(request)
self.assertIsInstance(result, TemplateResponse)
|
targets/arty/etherbone.py
|
skiphansen/litex-buildenv
| 198 |
123494
|
from litex.soc.integration.soc_core import mem_decoder
from litex.soc.integration.soc_sdram import *
from liteeth.common import convert_ip
from liteeth.core import LiteEthUDPIPCore
from liteeth.frontend.etherbone import LiteEthEtherbone
from liteeth.mac import LiteEthMAC
from liteeth.phy import LiteEthPHY
from targets.arty.base import SoC as BaseSoC
class EtherboneSoC(BaseSoC):
def __init__(self, platform, *args, **kwargs):
        # Need a larger integrated ROM on non-lm32 CPUs (e.g. or1k) to fit the BIOS with TFTP support.
if 'integrated_rom_size' not in kwargs and kwargs.get('cpu_type', 'lm32') != 'lm32':
kwargs['integrated_rom_size'] = 0x10000
BaseSoC.__init__(self, platform, *args, **kwargs)
# Ethernet ---------------------------------------------------------------------------------
# Ethernet Phy
self.submodules.ethphy = LiteEthPHY(
clock_pads = self.platform.request("eth_clocks"),
pads = self.platform.request("eth"))
self.add_csr("ethphy")
# Ethernet Core
etherbone_mac_address = 0x10e2d5000000
etherbone_ip_address = "192.168.100.50"
self.submodules.ethcore = LiteEthUDPIPCore(
phy = self.ethphy,
mac_address = etherbone_mac_address,
ip_address = etherbone_ip_address,
clk_freq = self.clk_freq)
# Etherbone Core
self.submodules.etherbone = LiteEthEtherbone(self.ethcore.udp, 1234)
self.add_wb_master(self.etherbone.wishbone.bus)
# timing constraints
self.platform.add_period_constraint(self.ethphy.crg.cd_eth_rx.clk, 1e9/25e6)
self.platform.add_period_constraint(self.ethphy.crg.cd_eth_tx.clk, 1e9/25e6)
self.platform.add_false_path_constraints(
self.crg.cd_sys.clk,
self.ethphy.crg.cd_eth_rx.clk,
self.ethphy.crg.cd_eth_tx.clk)
# Analyzer ---------------------------------------------------------------------------------
#analyzer_signals = [
# # FIXME: find interesting signals to probe
# self.cpu.ibus,
# self.cpu.dbus
#]
#self.submodules.analyzer = LiteScopeAnalyzer(analyzer_signals, 512)
#self.add_csr("analyzer")
def configure_iprange(self, iprange):
iprange = [int(x) for x in iprange.split(".")]
while len(iprange) < 4:
iprange.append(0)
# Our IP address
self._configure_ip("LOCALIP", iprange[:-1]+[50])
# IP address of tftp host
self._configure_ip("REMOTEIP", iprange[:-1]+[100])
def _configure_ip(self, ip_type, ip):
for i, e in enumerate(ip):
s = ip_type + str(i + 1)
s = s.upper()
self.add_constant(s, e)
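    # Worked example of the helpers above (the prefix is arbitrary, the resulting
    # values follow from the code): configure_iprange("192.168.100") pads the
    # prefix to [192, 168, 100, 0] and then registers the BIOS constants
    #   LOCALIP1..LOCALIP4   = 192, 168, 100, 50   (our IP address)
    #   REMOTEIP1..REMOTEIP4 = 192, 168, 100, 100  (IP address of the TFTP host)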
SoC = EtherboneSoC
|
vergeml/glossary.py
|
cclauss/vergeml
| 324 |
123509
|
<reponame>cclauss/vergeml<gh_stars>100-1000
PARAM_DESCR = {
'epochs': 'How many epochs to train.',
'learning rate': 'Optimizer learning rate.',
'batch size': 'The number of samples to use in one training batch.',
'decay': 'Learning rate decay.',
'early stopping': 'Early stopping delta and patience.',
'dropout': 'Dropout rate.',
'layers': 'The number of hidden layers.',
'optimizer': 'Which optimizer to use.'
}
LONG_DESCR = {
'learning rate': 'A hyperparameter which determines how quickly new learnings override old ones during training. In general, find a learning rate that is low enough that the network will converge to something useful, but high enough that the training does not take too much time.',
'batch size': 'Defines the number of samples to be propagated through the network at once in a batch. The higher the batch size, the more memory you will need.',
'epochs': 'The number of epochs define how often the network will see the complete set of samples during training.',
'decay': 'When using learning rate decay, the learning rate is gradually reduced during training which may result in getting closer to the optimal performance.',
'early stopping': 'Early Stopping is a form of regularization used to avoid overfitting when training. It is controlled via two parameters: Delta defines the minimum change in the monitored quantity to qualify as an improvement. Patience sets the number of epochs with no improvement after which training will be stopped.',
    'dropout': 'Dropout is a regularization technique for reducing overfitting in neural networks. The term "dropout" refers to dropping out units (both hidden and visible) in a neural network during training.',
'layers': 'Deep learning models consist of a number of layers which contain one or more neurons. Typically, the neurons in one layer are connected to the next. Models with a higher number of layers can learn more complex representations, but are also more prone to overfitting.',
'optimizer': 'The algorithm which updates model parameters such as the weight and bias values when training the network.',
'sgd': 'Stochastic gradient descent, also known as incremental gradient descent, is a method for optimizing a neural network. It is called stochastic because samples are selected randomly instead of as a single group, as in standard gradient descent.',
'adam': 'The Adam optimization algorithm is an extension to stochastic gradient descent which computes adaptive learning rates for each parameter. Adam is well suited for many practical deep learning problems.',
'environment variables': """\
The following environment variables are available in VergeML:
- VERGEML_PERSISTENCE The number of persistent instances
- VERGEML_THEME The theme of the command line application
- VERGEML_FUNKY_ROBOTS Funky robots""",
"overfitting": "When a model performs well on the training samples, but is not able to generalize well to unseen data. During training, a typical sign for overfitting is when your validation loss goes up while your training loss goes down.",
"underfitting": "When a model can neither model the training data nor generalize to new data.",
"hyperparameters": "Hyperparameters are the parameters of a model that can be set from outside, i.e. are not learned during training. (e.g. learning rate, number of layers, kernel size).",
"random seed": "An integer value that seeds the random generator to generate random values. It is used to repeatably reproduce tasks and experiments.",
"project": "A VergeML project is just a directory. Typically it contains a vergeml.yaml file, a trainings directory and a samples directory.",
    'project file': "A YAML file you can use to configure models, device usage, data processing and task options.",
'checkpoint': 'A checkpoint is a static image of a trained AI. It can be used to restore the AI after training and make predictions.',
'stats': 'Stats are used to measure the performance of a model (e.g. accuracy).',
    'samples': 'Samples are pieces of data (e.g. images, texts) that are used to train models to create AIs.',
'val split': "Samples different from training samples that are used to evaluate the performance of model hyperparameters. You can set it via the --val-split option. See 'ml help split'.",
'test split': "Samples different from training samples that are used to evaluate the final performance of the model. You can set it via the --test-split option. See 'ml help split'.",
    'split': 'A split is a part of the sample data reserved for validation and testing (--val-split and --test-split options). It can be configured as either a percentage value (e.g. --val-split=10%) to reserve a fraction of training samples, a number to reserve a fixed number of samples, or a directory where the samples of the split are stored.',
'cache dir': 'A directory which contains the processed data.',
}
SYNONYMS = {
'stochastic gradient descent': 'sgd',
'hyperparameter': 'hyperparameters',
'project dir': 'project',
'training samples': 'samples',
'overfit': 'overfitting',
'underfit': 'underfitting'
}
def long_descr(key):
key = key.replace("-", " ")
key = SYNONYMS.get(key, key)
return LONG_DESCR.get(key, "").strip()
def short_param_descr(key):
key = key.replace("-", " ")
key = SYNONYMS.get(key, key)
return PARAM_DESCR.get(key, "").strip()
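# Lookup example (illustrative): long_descr("stochastic-gradient-descent") first
# rewrites the key to "stochastic gradient descent", maps it through SYNONYMS to
# "sgd", and returns the stripped LONG_DESCR entry for that key, i.e.:
#
#   long_descr("stochastic-gradient-descent") == LONG_DESCR["sgd"].strip()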
|
python/tests/test_auxdata.py
|
clayne/gtirb
| 230 |
123523
|
<gh_stars>100-1000
import unittest
from unittest import mock
import gtirb
class AuxDataTest(unittest.TestCase):
def setUp(self):
self.fake_ir = mock.MagicMock()
self.fake_ir.get_by_uuid = None
def test_lazy(self):
ad1 = gtirb.AuxData("test1", "string")
self.assertEqual(ad1.data, "test1")
serialized = ad1._to_protobuf()
ad2 = gtirb.AuxData._from_protobuf(serialized, self.fake_ir)
# Peek inside: the data is not yet deserialized
self.assertTrue(ad2._data is None)
# Accessing the data should deserialize
self.assertEqual(ad1.data, ad2.data)
self.assertTrue(ad2._data is not None)
# Just exercise repr
self.assertEqual(
repr(ad2), "AuxData(type_name='string', data='test1', )"
)
def test_lazy_never_deserialized(self):
serialized = gtirb.AuxData("testing 123", "string")._to_protobuf()
ad1 = gtirb.AuxData._from_protobuf(serialized, self.fake_ir)
# Peek inside: the data is not yet deserialized
self.assertTrue(ad1._data is None)
serialized2 = ad1._to_protobuf()
self.assertTrue(ad1._data is None)
self.assertEqual(serialized, serialized2)
if __name__ == "__main__":
unittest.main()
|
bups/scheduler/__init__.py
|
emersion/bups
| 106 |
123583
|
<filename>bups/scheduler/__init__.py
import anacron
import systemd
import systemd_user
schedulers = {
"anacron": anacron,
"systemd": systemd,
"systemd-user": systemd_user
}
|
upvote/gae/modules/upvote_app/api/web/events.py
|
iwikmai/upvote
| 453 |
123595
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers related to Events."""
import httplib
import logging
import webapp2
from webapp2_extras import routes
from google.appengine.ext import ndb
from upvote.gae.datastore import utils as datastore_utils
from upvote.gae.datastore.models import binary as binary_models
from upvote.gae.datastore.models import cert as cert_models
from upvote.gae.datastore.models import event as event_models
from upvote.gae.datastore.models import host as host_models
from upvote.gae.datastore.models import package as package_models
from upvote.gae.datastore.models import user as user_models
from upvote.gae.datastore.models import vote as vote_models
from upvote.gae.modules.upvote_app.api.web import monitoring
from upvote.gae.utils import handler_utils
from upvote.gae.utils import user_utils
from upvote.shared import constants
def _GetEventContext(events):
"""Adds relevant entities corresponding to the listed Events.
  The entities included (if present) are the Blockable that was run, the
  Certificate associated with the run, the Host on which it was run, and the
  Vote cast by the user.
Args:
events: list of Events, The events for which context should be fetched.
Returns:
A list of dicts where each dict is of the form:
{'event': Event, 'blockable': Blockable, 'cert': Certificate,
'host': Host, 'vote': Vote}
If any of the entities are not found (e.g. the user hasn't voted on a
Blockable), that dict entry is present but set to None.
"""
host_futures = ndb.get_multi_async(
ndb.Key(host_models.Host, event.host_id) for event in events)
# Fetch the entities associated with Event.blockable_key.
blockable_futures = ndb.get_multi_async(
event.blockable_key for event in events)
vote_futures = ndb.get_multi_async(
vote_models.Vote.GetKey(event.blockable_key, event.user_key)
for event in events)
# Fetch the entities associated with SantaEvent.bundle_key.
has_bundle = (
lambda e: isinstance(e, event_models.SantaEvent) and e.bundle_key)
bundle_futures = [
(event.bundle_key.get_async()
if has_bundle(event) else datastore_utils.GetNoOpFuture())
for event in events]
bundle_vote_futures = [
(vote_models.Vote.GetKey(event.bundle_key, event.user_key).get_async()
if has_bundle(event) else datastore_utils.GetNoOpFuture())
for event in events]
# Fetch the Certificate associated with the Event.
cert_futures = []
for event in events:
if event.cert_key:
cert_future = event.cert_key.get_async()
elif isinstance(event, event_models.SantaEvent) and event.cert_sha256:
cert_future = ndb.Key(
cert_models.SantaCertificate, event.cert_sha256).get_async()
else:
cert_future = datastore_utils.GetNoOpFuture()
cert_futures.append(cert_future)
# Merge all Event context entities into their associated dicts.
events_with_context = []
for i, event in enumerate(events):
context_dict = {
'event': event,
'host': host_futures[i].get_result(),
}
bundle = bundle_futures[i].get_result()
if bundle is None:
context_dict.update({
'blockable': blockable_futures[i].get_result(),
'cert': cert_futures[i].get_result(),
'vote': vote_futures[i].get_result(),
})
else:
context_dict.update({
'blockable': bundle,
'cert': bundle.main_cert_key,
'vote': bundle_vote_futures[i].get_result(),
})
events_with_context.append(context_dict)
return events_with_context
class EventQueryHandler(handler_utils.UserFacingQueryHandler):
"""Handler for querying events."""
MODEL_CLASS = event_models.Event
@property
def RequestCounter(self):
return monitoring.event_requests
@handler_utils.RecordRequest
def get(self):
# Determine whether Event should be returned with context.
with_context = self.request.get('withContext').lower() == 'true'
context_callback = _GetEventContext if with_context else None
self._Query(context_callback)
def _QueryModel(self, search_dict):
# Add search keys provided as query params.
urlsafe_key = self.request.get('blockableKey')
if urlsafe_key:
search_dict['blockableKey'] = urlsafe_key
host_id = self.request.get('hostId')
if host_id:
search_dict['hostId'] = host_id
# Determine scope of query and enforce ACL if queried as admin.
if self.request.get('asAdmin').lower() == 'true':
logging.info('Getting all events as Admin.')
self.RequirePermission(constants.PERMISSIONS.VIEW_OTHER_EVENTS)
ancestor = None
else:
logging.info('Getting events for user: %s', self.user.nickname)
ancestor = self.user.key
query = super(EventQueryHandler, self)._QueryModel(
search_dict, ancestor=ancestor)
return query.order(-self.MODEL_CLASS.last_blocked_dt)
class Bit9EventQueryHandler(EventQueryHandler):
MODEL_CLASS = event_models.Bit9Event
class SantaEventQueryHandler(EventQueryHandler):
MODEL_CLASS = event_models.SantaEvent
class EventHandler(handler_utils.UserFacingHandler):
"""Handler for interacting with individual events."""
def get(self, event_key): # pylint: disable=g-bad-name
try:
key = ndb.Key(urlsafe=event_key)
# NOTE: There is an open bug related to the inconsistent errors
# raised by the ndb.Key urlsafe constructor.
# See https://github.com/googlecloudplatform/datastore-ndb-python/issues/143
except: # pylint: disable=bare-except
self.abort(
httplib.BAD_REQUEST,
explanation='Event key %s could not be parsed' % event_key)
else:
event = key.get()
if event:
with_context = (self.request.get('withContext').lower() == 'true')
response_data = _GetEventContext([event])[0] if with_context else event
if event.executing_user != self.user.nickname:
self.RequirePermission(constants.PERMISSIONS.VIEW_OTHER_EVENTS)
self.respond_json(response_data)
else:
self.abort(httplib.NOT_FOUND, explanation='Event not found')
class RecentEventHandler(handler_utils.UserFacingHandler):
"""Handler for getting the most recent Event for a blockable, for a user."""
def get(self, blockable_id): # pylint: disable=g-bad-name
blockable = binary_models.Blockable.get_by_id(blockable_id)
if not blockable:
self.abort(httplib.NOT_FOUND, explanation='Blockable not found')
username = self.request.get('asUser')
if username:
self.RequirePermission(constants.PERMISSIONS.VIEW_OTHER_EVENTS)
user = user_models.User.GetById(
user_utils.UsernameToEmail(username))
else:
user = self.user
# If the blockable is a bundle, search by the 'bundle_key' property instead
# of 'blockable_key'.
blockable_filter = (
event_models.SantaEvent.bundle_key == blockable.key
if isinstance(blockable, package_models.SantaBundle) else
event_models.Event.blockable_key == blockable.key)
event_query = (event_models.Event
.query(ancestor=user.key)
.filter(blockable_filter)
.order(-event_models.Event.last_blocked_dt))
event = event_query.get()
response_data = event
if event:
with_context = (self.request.get('withContext').lower() == 'true')
response_data = _GetEventContext([event])[0] if with_context else event
self.respond_json(response_data)
# The Webapp2 routes defined for these handlers.
ROUTES = routes.PathPrefixRoute('/events', [
webapp2.Route(
'/most-recent/<blockable_id>',
handler=RecentEventHandler),
webapp2.Route(
'/query/bit9',
handler=Bit9EventQueryHandler),
webapp2.Route(
'/query/santa',
handler=SantaEventQueryHandler),
webapp2.Route(
'/query',
handler=EventQueryHandler),
webapp2.Route(
'/<event_key>',
handler=EventHandler),
])
|
meter/connector/master.py
|
anbo225/docklet
| 273 |
123606
|
<reponame>anbo225/docklet<gh_stars>100-1000
#!/usr/bin/python3
import socket, select, errno, threading, os
class master_connector:
tcp_port = 1727
max_minions = 256
conn = {}
epoll_fd = select.epoll()
def establish_vswitch(ovsname):
os.system('ovs-vsctl del-br ovs-%s >/dev/null 2>&1' % ovsname)
os.system('ovs-vsctl add-br ovs-%s' % ovsname)
os.system('brctl addif ovs-bridge ovs-%s >/dev/null 2>&1' % ovsname)
os.system('ip link set ovs-system up')
os.system('ip link set ovs-%s up' % ovsname)
def build_gre_conn(ovsname, ipaddr):
name = ipaddr.replace('.','_')
os.system('ovs-vsctl add-port ovs-%s gre-%s -- set interface gre-%s type=gre options:remote_ip=%s 2>/dev/null' % (ovsname, name, name, ipaddr))
def break_gre_conn(ovsname, ipaddr):
name = ipaddr.replace('.','_')
os.system('ovs-vsctl del-port ovs-%s gre-%s 2>/dev/null' % (ovsname, name))
def close_connection(fd):
master_connector.epoll_fd.unregister(fd)
master_connector.conn[fd][0].close()
addr = master_connector.conn[fd][1]
master_connector.conn.pop(fd)
master_connector.break_gre_conn('master', addr)
def do_message_response(input_buffer):
assert(input_buffer == b'ack')
return b'ack'
def start():
thread = threading.Thread(target = master_connector.run_forever, args = [])
thread.setDaemon(True)
thread.start()
return thread
def run_forever():
listen_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
listen_fd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_fd.bind(('', master_connector.tcp_port))
listen_fd.listen(master_connector.max_minions)
master_connector.epoll_fd.register(listen_fd.fileno(), select.EPOLLIN)
datalist = {}
master_connector.establish_vswitch('master')
try:
while True:
epoll_list = master_connector.epoll_fd.poll()
for fd, events in epoll_list:
if fd == listen_fd.fileno():
fileno, addr = listen_fd.accept()
fileno.setblocking(0)
master_connector.epoll_fd.register(fileno.fileno(), select.EPOLLIN | select.EPOLLET)
master_connector.conn[fileno.fileno()] = (fileno, addr[0])
master_connector.build_gre_conn('master', addr[0])
elif select.EPOLLIN & events:
datas = b''
while True:
try:
data = master_connector.conn[fd][0].recv(10)
if not data and not datas:
master_connector.close_connection(fd)
break
else:
datas += data
except socket.error as msg:
if msg.errno == errno.EAGAIN:
try:
datalist[fd] = master_connector.do_message_response(datas)
master_connector.epoll_fd.modify(fd, select.EPOLLET | select.EPOLLOUT)
except:
master_connector.close_connection(fd)
else:
master_connector.close_connection(fd)
break
elif select.EPOLLOUT & events:
sendLen = 0
while True:
sendLen += master_connector.conn[fd][0].send(datalist[fd][sendLen:])
if sendLen == len(datalist[fd]):
break
master_connector.epoll_fd.modify(fd, select.EPOLLIN | select.EPOLLET)
elif select.EPOLLHUP & events:
master_connector.close_connection(fd)
else:
continue
finally:
os.system('ovs-vsctl del-br ovs-master >/dev/null 2>&1')
|
Python/CanWeSort/Code.py
|
cs-mshah/AlgoCode
| 151 |
123651
|
<filename>Python/CanWeSort/Code.py
# Function for finding if it is possible
# to obtain a sorted array or not
def fun(arr, n, k):
v = []
# Iterate over all elements until K
for i in range(k):
        # Collect the elements at indices i, i+k, i+2k, ...
        for j in range(i, n, k):
            v.append(arr[j])
        # Sort the collected elements
        v.sort()
x = 0
# Put elements in their required position
for j in range(i, n, k):
            arr[j] = v[x]
x += 1
v = []
# Check if the array becomes sorted or not
for i in range(n - 1):
if (arr[i] > arr[i + 1]):
return False
return True
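# Worked example for fun() (illustrative): n=4, k=2, arr=[3, 2, 1, 4].
# Positions {0, 2} hold [3, 1] and sort to [1, 3]; positions {1, 3} hold [2, 4]
# and stay [2, 4]. The array becomes [1, 2, 3, 4], so fun(arr, 4, 2) returns True.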
# Driver code
nk = input().split()
K = int(nk[1])
n = int(nk[0])
arr= list(map(int,input().split()))
if (fun(arr, n, K)):
print("True")
else:
print("False")
|
pil_pillow__examples/gray/main.py
|
DazEB2/SimplePyScripts
| 117 |
123661
|
<reponame>DazEB2/SimplePyScripts
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install Pillow
from PIL import Image, ImageOps
image_file = "input.jpg"
image = Image.open(image_file)
image_gray = ImageOps.grayscale(image)
image_gray.save('image_gray.png')
image_gray.show()
|
setup.py
|
mitsuhiko/snaek
| 272 |
123681
|
<gh_stars>100-1000
from setuptools import setup
from setuptools.dist import Distribution
# Dogfood ourselves here. However, since at this point we might not be
# installed yet, we cannot use snaek_rust_modules directly. Additionally,
# we might not be able to import ourselves yet because the setup
# requirements are not installed yet. In that case do nothing.
extra = {}
try:
from snaek import setuptools_ext
except ImportError:
pass
else:
class SneakDistribution(Distribution):
def __init__(self, *args, **kwargs):
Distribution.__init__(self, *args, **kwargs)
setuptools_ext.snaek_rust_modules(self, 'snaek_rust_modules', [
('snaek._bindgen', 'rust/'),
])
extra['distclass'] = SneakDistribution
setup(
name='snaek',
version='0.2.0',
author='<NAME>',
author_email='<EMAIL>',
packages=['snaek'],
package_data={
'snaek': ['empty.c'],
},
description='A python library for distributing Rust modules.',
zip_safe=False,
platforms='any',
install_requires=[
'cffi>=1.6.0',
],
setup_requires=[
'cffi>=1.6.0',
],
entry_points={
'distutils.setup_keywords': [
'snaek_rust_modules = snaek.setuptools_ext:snaek_rust_modules',
'snaek_universal = snaek.setuptools_ext:snaek_universal',
],
},
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
],
**extra
)
|
native/libcst/tests/fixtures/global_nonlocal.py
|
jschavesr/LibCST
| 880 |
123709
|
<reponame>jschavesr/LibCST
global a
global b , c, d
nonlocal a
nonlocal a , b
|
test/unit/agent/common/context.py
|
dp92987/nginx-amplify-agent
| 308 |
123716
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
from hamcrest import *
from amplify.agent.common.context import context
from test.base import BaseTestCase
from test.fixtures.defaults import *
__author__ = "<NAME>"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class ContextTestCase(BaseTestCase):
def test_freeze_api_url(self):
# check that if api_url is not set it will not prevent agent from setting api_url from cloud
context.app_config['cloud']['api_url'] = ''
context.setup(app='test', app_config=context.app_config.default)
assert_that(context.freeze_api_url, equal_to(False))
# check that an api_url from our receiver's domain will not prevent agent from setting api_url from cloud
context.app_config['cloud']['api_url'] = 'https://receiver.amplify.nginx.com:443/1.1'
context.setup(app='test', app_config=context.app_config.default)
assert_that(context.freeze_api_url, equal_to(False))
# check that a custom api_url will prevent agent from setting api_url from cloud
context.app_config['cloud']['api_url'] = 'http://some.other.domain/endpoint/'
context.setup(app='test', app_config=context.app_config.default)
assert_that(context.freeze_api_url, equal_to(True))
def test_uuid(self):
assert_that(context.app_config['credentials'], has_entry('imagename', ''))
assert_that(context.app_config['credentials'], has_entry('hostname', DEFAULT_HOST))
assert_that(context.app_config['credentials'], has_entry('api_key', DEFAULT_API_KEY))
assert_that(context.app_config['credentials'], has_entry('uuid', DEFAULT_UUID))
assert_that(context.uuid, equal_to(DEFAULT_UUID))
class ContextContainerTestCase(BaseTestCase):
def setup_method(self, method):
super(ContextContainerTestCase, self).setup_method(method)
context.app_config['credentials']['imagename'] = 'DockerTest'
context.setup(app='test', app_config=context.app_config.default)
def teardown_method(self, method):
context.app_config['credentials']['imagename'] = None
context.app_config['credentials']['uuid'] = DEFAULT_UUID
context.setup(app='test', app_config=context.app_config.default)
def test_uuid(self):
assert_that(context.app_config['credentials'], has_entry('imagename', 'DockerTest'))
assert_that(context.app_config['credentials'], has_entry('api_key', DEFAULT_API_KEY))
assert_that(context.app_config['credentials'], has_entry('uuid', 'container-DockerTest'))
assert_that(context.uuid, equal_to('container-DockerTest'))
|
fcos_core/layers/mask_prob.py
|
Davidnet/EmbedMask
| 181 |
123724
|
<reponame>Davidnet/EmbedMask
# import torch
# from torch import nn
# from torch.autograd import Function
from fcos_core import _C
mask_prob_cuda = _C.maskprob_forward
|
t/umash.py
|
warmchang/umash
| 108 |
123761
|
<gh_stars>100-1000
import cffi
import faulthandler
import os
import sys
from cffi_util import read_stripped_header
SELF_DIR = os.path.dirname(os.path.abspath(__file__))
TOPLEVEL = os.path.abspath(SELF_DIR + "/../") + "/"
# The reference implementation is at the top level.
sys.path.append(TOPLEVEL)
HEADERS = [
"umash.h",
"t/umash_test_only.h",
]
FFI = cffi.FFI()
for header in HEADERS:
FFI.cdef(
read_stripped_header(TOPLEVEL + header, {r'^extern "C" {\n': "", r"}\n": ""})
)
C = FFI.dlopen(os.getenv("UMASH_TEST_LIB", TOPLEVEL + "umash_test_only.so"))
# Pass in a copy of stderr in case anyone plays redirection tricks.
faulthandler.enable(os.dup(2))
|
differential_privacy/run_stackoverflow_with_secrets_test.py
|
garyxcheng/federated
| 330 |
123807
|
<filename>differential_privacy/run_stackoverflow_with_secrets_test.py
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for secret_sharer."""
import string
from absl import flags
from absl.testing import absltest
from differential_privacy import run_stackoverflow_with_secrets
FLAGS = flags.FLAGS
class RunStackoverflowWithSecretsTest(absltest.TestCase):
def test_permute_and_batch(self):
def to_histogram(l):
hist = {}
for i in l:
c = hist.get(i, 0)
hist[i] = c + 1
return hist
num_values = 8
batch_size = 3
for seed in range(10):
values = list(string.ascii_lowercase)[:num_values]
permute_and_batch = run_stackoverflow_with_secrets.PermuteAndBatch(
values, seed, batch_size)
data_so_far = []
for i in range(10):
batch = permute_and_batch(i)
self.assertLen(batch, batch_size)
data_so_far.extend(batch)
hist = to_histogram(data_so_far)
hist_hist = to_histogram(hist.values())
data_len = len(data_so_far)
if data_len < num_values or data_len % num_values == 0:
self.assertLen(hist_hist, 1)
else:
self.assertLen(hist_hist, 2)
hist_count = sum(k * v for k, v in hist_hist.items())
self.assertEqual(hist_count, data_len)
if i > 0:
recomputed_last_batch = permute_and_batch(i - 1)
self.assertSequenceEqual(recomputed_last_batch, last_batch)
last_batch = batch
if __name__ == '__main__':
absltest.main()
|
dialogue-engine/test/programytest/parser/template/graph_tests/test_rand.py
|
cotobadesign/cotoba-agent-oss
| 104 |
123825
|
<gh_stars>100-1000
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import xml.etree.ElementTree as ET
from programy.parser.exceptions import ParserException
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.rand import TemplateRandomNode
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphRandomTests(TemplateGraphTestClient):
def test_random_template_no_li(self):
template = ET.fromstring("""
<template>
<random>
</random>
</template>
""")
with self.assertRaises(ParserException):
self._graph.parse_template_expression(template)
def test_random_template_none_li(self):
template = ET.fromstring("""
<template>
<random>
<lowercase>FAIL</lowercase>
</random>
</template>
""")
with self.assertRaises(ParserException):
self._graph.parse_template_expression(template)
def test_random_template(self):
template = ET.fromstring("""
<template>
<random>
<li>1</li>
<li>2</li>
<li>3</li>
</random>
</template>
""")
ast = self._graph.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertIsNotNone(ast.children[0])
self.assertIsInstance(ast.children[0], TemplateRandomNode)
self.assertEqual(3, len(ast.children[0].children))
self.assertIsInstance(ast.children[0].children[0], TemplateNode)
self.assertIsInstance(ast.children[0].children[1], TemplateNode)
self.assertIsInstance(ast.children[0].children[2], TemplateNode)
selection = ast.children[0].resolve(self._client_context)
self.assertIsNotNone(selection)
self.assertIn(selection, ['1', '2', '3'])
def test_random_nested_template(self):
template = ET.fromstring("""
<template>
<random>
<li>
<random>
<li>Say something</li>
<li>Say the other</li>
</random>
</li>
<li>
<random>
<li>Hello world!</li>
<li>Goodbye cruel world</li>
</random>
</li>
</random>
</template>
""")
ast = self._graph.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertIsNotNone(ast.children[0])
self.assertIsInstance(ast.children[0], TemplateRandomNode)
self.assertEqual(2, len(ast.children[0].children))
self.assertIsInstance(ast.children[0].children[0], TemplateNode)
self.assertEqual(1, len(ast.children[0].children[0].children))
self.assertIsInstance(ast.children[0].children[0].children[0], TemplateRandomNode)
self.assertEqual(2, len(ast.children[0].children[0].children[0].children))
self.assertIsInstance(ast.children[0].children[1], TemplateNode)
self.assertEqual(1, len(ast.children[0].children[1].children))
self.assertIsInstance(ast.children[0].children[1].children[0], TemplateRandomNode)
self.assertEqual(2, len(ast.children[0].children[1].children[0].children))
selection = ast.children[0].resolve(self._client_context)
self.assertIsNotNone(selection)
self.assertIn(selection, ['Say something', 'Say the other', 'Hello world!', 'Goodbye cruel world'])
|
fixit/common/tests/test_comments.py
|
sk-/Fixit
| 313 |
123826
|
<filename>fixit/common/tests/test_comments.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import tokenize
from io import BytesIO
from libcst.testing.utils import UnitTest
from fixit.common.comments import CommentInfo
from fixit.common.utils import dedent_with_lstrip
class CommentInfoTest(UnitTest):
def test_comment_info(self) -> None:
# A comment on a line with no other leading tokens is a "comment on own line".
# In contrast, trailing comments come after other tokens on the same line.
code = dedent_with_lstrip(
"""
# comment on own line
# this is a
# multiline comment
def fn():
# comment on own line
fn2()
fn3() # trailing comment
fn4()
# comment on own line
"""
)
tokens = tokenize.tokenize(BytesIO(code.encode("utf-8")).readline)
result = CommentInfo.compute(tokens=tokens)
# The set of all comments includes both comments on their own line and trailing
# comments.
self.assertEqual([tok.start[0] for tok in result.comments], [1, 2, 3, 5, 7, 9])
# `comments_on_own_line` is a subset of all comments
self.assertEqual(
[tok.start[0] for tok in result.comments_on_own_line], [1, 2, 3, 5, 9]
)
|
koalixcrm/crm/migrations/0049_auto_20181014_2258.py
|
Cataldir/koalixcrm
| 290 |
123873
|
<reponame>Cataldir/koalixcrm<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-10-12 20:56
from __future__ import unicode_literals
from django.db import migrations
def reverse_func(apps, schema_editor):
return 1
def backup_identifiers(apps, schema_editor):
Position = apps.get_model("crm", "Position")
CustomerGroupTransform = apps.get_model("crm", "CustomerGroupTransform")
Price = apps.get_model("crm", "Price")
UnitTransform = apps.get_model("crm", "UnitTransform")
db_alias = schema_editor.connection.alias
all_positions = Position.objects.using(db_alias).all()
for position in all_positions:
position.product_backup = position.product.id
position.save()
all_customer_group_transforms = CustomerGroupTransform.objects.using(db_alias).all()
for customer_group_transform in all_customer_group_transforms:
customer_group_transform.product_backup = customer_group_transform.product.id
customer_group_transform.save()
all_prices = Price.objects.using(db_alias).all()
for price in all_prices:
price.product_backup = price.product.id
price.save()
all_unit_transforms = UnitTransform.objects.using(db_alias).all()
for unit_transform in all_unit_transforms:
unit_transform.product_backup = unit_transform.product.id
unit_transform.save()
class Migration(migrations.Migration):
dependencies = [
('crm', '0048_auto_20181012_2056'),
]
operations = [
migrations.RunPython(backup_identifiers, reverse_func),
]
|
verisure/__init__.py
|
gablin/python-verisure
| 131 |
123877
|
<reponame>gablin/python-verisure
"""
A python module for reading and changing the status of verisure devices through
the verisure app API.
"""
__all__ = [
'Error',
'LoginError',
'ResponseError',
'Session'
]
from .session import ( # NOQA
Error,
LoginError,
ResponseError,
Session
)
ALARM_ARMED_HOME = 'ARMED_HOME'
ALARM_ARMED_AWAY = 'ARMED_AWAY'
ALARM_DISARMED = 'DISARMED'
LOCK_LOCKED = 'LOCKED'
LOCK_UNLOCKED = 'UNLOCKED'
SMARTPLUG_ON = 'on'
SMARTPLUG_OFF = 'off'
|
pyjswidgets/pyjamas/ui/GlassWidget.ie6.py
|
takipsizad/pyjs
| 739 |
123953
|
# Copyright 2006 <NAME> and contributors
# Copyright (C) 2009 <NAME> <<EMAIL>>
# Copyright (C) 2010 <NAME> <<EMAIL>>
# Copyright (C) 2010 <NAME> (IE override) <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This IE-specific override is required because IE doesn't allow
# empty elements to generate events. Therefore, when mouse moves
# (or clicks) happen over *only* the GlassWidget (which is empty),
# they stop flowing. However, IE does provide setCapture/releaseCapture
# methods on elements, which can be used to the same effect as a regular
# GlassWidget.
# This file implements the IE version of GlassWidget simply by mapping
# the GlassWidget API to the use of setCapture/releaseCapture.
# We re-use the global 'mousecapturer' to prevent GlassWidget.hide()
# from releasing someone else's capture.
def show(mousetarget, **kwargs):
global mousecapturer
# get the element that wants events
target_element = mousetarget.getElement()
    # ensure the element can capture events
if hasattr(target_element,"setCapture"):
# remember it
mousecapturer = target_element
# start capturing
DOM.setCapture(target_element)
def hide():
global mousecapturer
if hasattr(mousecapturer,"releaseCapture"):
DOM.releaseCapture(mousecapturer)
mousecapturer = None
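# Usage sketch (illustrative; the widget name is a hypothetical example). Callers
# use the same API as the regular GlassWidget:
#
#   show(dragged_widget)   # route mouse events to dragged_widget via setCapture
#   ...                    # handle onMouseMove / onMouseUp as usual
#   hide()                 # release the capture when the interaction ends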
|
examples/test_ns.py
|
q0w/snug
| 123 |
123962
|
import json
from pathlib import Path
import aiohttp
import pytest
from gentools import sendreturn
import ns
import snug
live = pytest.config.getoption('--live')
CRED_PATH = Path('~/.snug/ns.json').expanduser()
auth = json.loads(CRED_PATH.read_bytes())
@pytest.fixture(scope='module')
async def exec():
async with aiohttp.ClientSession() as client:
yield ns.async_executor(auth=auth, client=client)
@pytest.mark.asyncio
async def test_all_stations(exec):
all_stations = ns.stations()
if live:
stations = await exec(all_stations)
assert isinstance(stations, list)
amsterdam_stations = [s for s in stations
if s.full_name.startswith('Amsterdam')]
assert len(amsterdam_stations) == 11
den_bosch = stations[0]
assert den_bosch.synonyms == ["Hertogenbosch ('s)", 'Den Bosch']
# offline test
query = iter(all_stations)
assert next(query).url.endswith('stations-v2')
result = sendreturn(query, snug.Response(200, content=STATIONS_SAMPLE))
assert len(result) == 4
assert result[3].full_name == '<NAME>'
@pytest.mark.asyncio
async def test_departures(exec):
departures = ns.departures(station='Amsterdam')
if live:
deps = await exec(departures)
assert len(deps) >= 10
departure = deps[0]
assert isinstance(departure, ns.Departure)
# offline test
query = iter(departures)
req = next(query)
assert req.url.endswith('avt')
assert req.params == {'station': 'Amsterdam'}
result = sendreturn(query, snug.Response(200, content=DEPARTURES_SAMPLE))
assert len(result)
assert result[1].platform_changed
@pytest.mark.asyncio
async def test_journey_options(exec):
travel_options = ns.journey_options(origin='Breda',
destination='Amsterdam')
travel_options_no_hsl = travel_options.replace(hsl='false')
if live:
options = await exec(travel_options)
assert len(options) >= 10
assert isinstance(options[0], ns.Journey)
# offline test
query = iter(travel_options)
assert next(query).params == {'fromStation': 'Breda',
'toStation': 'Amsterdam'}
result = sendreturn(query, snug.Response(200, content=JOURNEYS_SAMPLE))
assert len(result) == 3
assert result[0].components[1].stops[-1].platform == '8a'
assert next(iter(travel_options_no_hsl)).params == {
'fromStation': 'Breda',
'toStation': 'Amsterdam',
'hslAllowed': 'false'}
STATIONS_SAMPLE = b'''\
<Stations>
<Station>
<Code>HT</Code>
<Type>knooppuntIntercitystation</Type>
<Namen>
<Kort>Den Bosch</Kort>
<Middel>'s-Hertogenbosch</Middel>
<Lang>'s-Hertogenbosch</Lang>
</Namen>
<Land>NL</Land>
<UICCode>8400319</UICCode>
<Lat>51.69048</Lat>
<Lon>5.29362</Lon>
<Synoniemen>
<Synoniem>Hertogenbosch ('s)</Synoniem>
<Synoniem>Den Bosch</Synoniem>
</Synoniemen>
</Station>
<Station>
<Code>HTO</Code>
<Type>stoptreinstation</Type>
<Namen>
<Kort>Dn Bosch O</Kort>
<Middel>Hertogenb. Oost</Middel>
<Lang>'s-Hertogenbosch Oost</Lang>
</Namen>
<Land>NL</Land>
<UICCode>8400320</UICCode>
<Lat>51.700553894043</Lat>
<Lon>5.3183331489563</Lon>
<Synoniemen>
<Synoniem>Hertogenbosch Oost ('s)</Synoniem>
<Synoniem>Den Bosch Oost</Synoniem>
</Synoniemen>
</Station>
<Station>
<Code>HDE</Code>
<Type>stoptreinstation</Type>
<Namen>
<Kort>'t Harde</Kort>
<Middel>'t Harde</Middel>
<Lang>'t Harde</Lang>
</Namen>
<Land>NL</Land>
<UICCode>8400388</UICCode>
<Lat>52.4091682</Lat>
<Lon>5.893611</Lon>
<Synoniemen>
<Synoniem>Harde ('t)</Synoniem>
</Synoniemen>
</Station>
<Station>
<Code>AHBF</Code>
<Type>knooppuntIntercitystation</Type>
<Namen>
<Kort>Aachen</Kort>
<Middel>Aachen Hbf</Middel>
<Lang>Aachen Hbf</Lang>
</Namen>
<Land>D</Land>
<UICCode>8015345</UICCode>
<Lat>50.7678</Lat>
<Lon>6.091499</Lon>
<Synoniemen>
</Synoniemen>
</Station>
</Stations>
'''
DEPARTURES_SAMPLE = b'''\
<ActueleVertrekTijden>
<VertrekkendeTrein>
<RitNummer>2187</RitNummer>
<VertrekTijd>2018-01-22T21:49:00+0100</VertrekTijd>
<EindBestemming>Den Haag Centraal</EindBestemming>
<TreinSoort>Intercity</TreinSoort>
<RouteTekst>A'dam Sloterdijk, Haarlem, Leiden C.</RouteTekst>
<Vervoerder>NS</Vervoerder>
<VertrekSpoor wijziging="false">2a</VertrekSpoor>
</VertrekkendeTrein>
<VertrekkendeTrein>
<RitNummer>4083</RitNummer>
<VertrekTijd>2018-01-22T21:49:00+0100</VertrekTijd>
<EindBestemming>Rotterdam Centraal</EindBestemming>
<TreinSoort>Sprinter</TreinSoort>
<RouteTekst>Duivendrecht, Bijlmer ArenA, Breukelen</RouteTekst>
<Vervoerder>NS</Vervoerder>
<VertrekSpoor wijziging="true">4b</VertrekSpoor>
</VertrekkendeTrein>
<VertrekkendeTrein>
<RitNummer>2974</RitNummer>
<VertrekTijd>2018-01-22T21:53:00+0100</VertrekTijd>
<EindBestemming>Enkhuizen</EindBestemming>
<TreinSoort>Intercity</TreinSoort>
<RouteTekst>A'dam Sloterdijk, Hoorn</RouteTekst>
<Vervoerder>NS</Vervoerder>
<VertrekSpoor wijziging="false">8a</VertrekSpoor>
</VertrekkendeTrein>
<VertrekkendeTrein>
<RitNummer>14681</RitNummer>
<VertrekTijd>2018-01-22T21:53:00+0100</VertrekTijd>
<EindBestemming>Zwolle</EindBestemming>
<TreinSoort>Sprinter</TreinSoort>
<RouteTekst>Weesp, Lelystad C.</RouteTekst>
<Vervoerder>NS</Vervoerder>
<VertrekSpoor wijziging="false">10b</VertrekSpoor>
</VertrekkendeTrein>
</ActueleVertrekTijden>
'''
JOURNEYS_SAMPLE = b'''\
<ReisMogelijkheden>
<ReisMogelijkheid>
<AantalOverstappen>1</AantalOverstappen>
<GeplandeReisTijd>1:29</GeplandeReisTijd>
<ActueleReisTijd>1:29</ActueleReisTijd>
<GeplandeVertrekTijd>2018-01-22T20:20:00+0100</GeplandeVertrekTijd>
<ActueleVertrekTijd>2018-01-22T20:20:00+0100</ActueleVertrekTijd>
<GeplandeAankomstTijd>2018-01-22T21:49:00+0100</GeplandeAankomstTijd>
<ActueleAankomstTijd>2018-01-22T21:49:00+0100</ActueleAankomstTijd>
<Status>NIEUW</Status>
<ReisDeel reisSoort="TRAIN">
<Vervoerder>NS</Vervoerder>
<VervoerType>Intercity</VervoerType>
<RitNummer>3674</RitNummer>
<Status>VOLGENS-PLAN</Status>
<ReisStop>
<Naam>Breda</Naam>
<Tijd>2018-01-22T20:20:00+0100</Tijd>
<Spoor wijziging="false">3</Spoor>
</ReisStop>
<ReisStop>
<Naam>Tilburg</Naam>
<Tijd>2018-01-22T20:34:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>'s-Hertogenbosch</Naam>
<Tijd>2018-01-22T20:49:00+0100</Tijd>
<Spoor wijziging="false">1</Spoor>
</ReisStop>
</ReisDeel>
<ReisDeel reisSoort="TRAIN">
<Vervoerder>NS</Vervoerder>
<VervoerType>Intercity</VervoerType>
<RitNummer>2974</RitNummer>
<Status>VOLGENS-PLAN</Status>
<ReisStop>
<Naam>'s-Hertogenbosch</Naam>
<Tijd>2018-01-22T20:54:00+0100</Tijd>
<Spoor wijziging="false">3</Spoor>
</ReisStop>
<ReisStop>
<Naam>Utrecht Centraal</Naam>
<Tijd>2018-01-22T21:23:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>Amsterdam Amstel</Naam>
<Tijd>2018-01-22T21:41:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>Amsterdam Centraal</Naam>
<Tijd>2018-01-22T21:49:00+0100</Tijd>
<Spoor wijziging="false">8a</Spoor>
</ReisStop>
</ReisDeel>
</ReisMogelijkheid>
<ReisMogelijkheid>
<Melding>
<Id></Id>
<Ernstig>true</Ernstig>
<Text>Dit reisadvies vervalt</Text>
</Melding>
<AantalOverstappen>1</AantalOverstappen>
<GeplandeReisTijd>1:14</GeplandeReisTijd>
<ActueleReisTijd>1:14</ActueleReisTijd>
<Optimaal>false</Optimaal>
<GeplandeVertrekTijd>2018-01-22T20:23:00+0100</GeplandeVertrekTijd>
<ActueleVertrekTijd>2018-01-22T20:23:00+0100</ActueleVertrekTijd>
<GeplandeAankomstTijd>2018-01-22T21:37:00+0100</GeplandeAankomstTijd>
<ActueleAankomstTijd>2018-01-22T21:37:00+0100</ActueleAankomstTijd>
<Status>NIET-MOGELIJK</Status>
<ReisDeel reisSoort="TRAIN">
<Vervoerder>NS</Vervoerder>
<VervoerType>Intercity</VervoerType>
<RitNummer>1170</RitNummer>
<Status>VOLGENS-PLAN</Status>
<ReisStop>
<Naam>Breda</Naam>
<Tijd>2018-01-22T20:23:00+0100</Tijd>
<Spoor wijziging="false">7</Spoor>
</ReisStop>
<ReisStop>
<Naam>Rotterdam Centraal</Naam>
<Tijd>2018-01-22T20:47:00+0100</Tijd>
<Spoor wijziging="false">9</Spoor>
</ReisStop>
</ReisDeel>
<ReisDeel reisSoort="TRAIN">
<Vervoerder>NS</Vervoerder>
<VervoerType>Intercity direct</VervoerType>
<RitNummer>1061</RitNummer>
<Status>GEANNULEERD</Status>
<Reisdetails>
<Reisdetail>Toeslag Schiphol-Rotterdam vv</Reisdetail>
</Reisdetails>
<ReisStop>
<Naam>Rotterdam Centraal</Naam>
<Tijd>2018-01-22T20:57:00+0100</Tijd>
<Spoor wijziging="false">12</Spoor>
</ReisStop>
<ReisStop>
<Naam>Schiphol Airport</Naam>
<Tijd>2018-01-22T21:23:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>Amsterdam Centraal</Naam>
<Tijd>2018-01-22T21:37:00+0100</Tijd>
<Spoor wijziging="false">14a</Spoor>
</ReisStop>
</ReisDeel>
</ReisMogelijkheid>
<ReisMogelijkheid>
<Melding>
<Id></Id>
<Ernstig>false</Ernstig>
<Text>Dit is een aangepast reisadvies</Text>
</Melding>
<AantalOverstappen>1</AantalOverstappen>
<GeplandeReisTijd>1:47</GeplandeReisTijd>
<ActueleReisTijd>1:47</ActueleReisTijd>
<Optimaal>false</Optimaal>
<GeplandeVertrekTijd>2018-01-22T20:23:00+0100</GeplandeVertrekTijd>
<ActueleVertrekTijd>2018-01-22T20:23:00+0100</ActueleVertrekTijd>
<GeplandeAankomstTijd>2018-01-22T22:10:00+0100</GeplandeAankomstTijd>
<ActueleAankomstTijd>2018-01-22T22:10:00+0100</ActueleAankomstTijd>
<Status>GEWIJZIGD</Status>
<ReisDeel reisSoort="TRAIN">
<Vervoerder>NS</Vervoerder>
<VervoerType>Intercity</VervoerType>
<RitNummer>1170</RitNummer>
<Status>VOLGENS-PLAN</Status>
<ReisStop>
<Naam>Breda</Naam>
<Tijd>2018-01-22T20:23:00+0100</Tijd>
<Spoor wijziging="false">7</Spoor>
</ReisStop>
<ReisStop>
<Naam>Rotterdam Centraal</Naam>
<Tijd>2018-01-22T20:48:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>Delft</Naam>
<Tijd>2018-01-22T21:00:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam><NAME> HS</Naam>
<Tijd>2018-01-22T21:08:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>Den Haag Centraal</Naam>
<Tijd>2018-01-22T21:12:00+0100</Tijd>
<Spoor wijziging="false">1</Spoor>
</ReisStop>
</ReisDeel>
<ReisDeel reisSoort="TRAIN">
<Vervoerder>NS</Vervoerder>
<VervoerType>Intercity</VervoerType>
<RitNummer>2170</RitNummer>
<Status>VOLGENS-PLAN</Status>
<ReisStop>
<Naam>Den Haag Centraal</Naam>
<Tijd>2018-01-22T21:18:00+0100</Tijd>
<Spoor wijziging="false">10</Spoor>
</ReisStop>
<ReisStop>
<Naam>Leiden Centraal</Naam>
<Tijd>2018-01-22T21:35:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>Heemstede-Aerdenhout</Naam>
<Tijd>2018-01-22T21:49:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>Haarlem</Naam>
<Tijd>2018-01-22T21:55:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam><NAME></Naam>
<Tijd>2018-01-22T22:04:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>Amsterdam Centraal</Naam>
<Tijd>2018-01-22T22:10:00+0100</Tijd>
<Spoor wijziging="false">7a</Spoor>
</ReisStop>
</ReisDeel>
</ReisMogelijkheid>
</ReisMogelijkheden>
'''
|
flows/transitions.py
|
sergioisidoro/django-flows
| 104 |
123986
|
# -*- coding: UTF-8 -*-
import random
from flows.components import COMPLETE
class Linear(object):
"""
The `Linear` transition assumes that once an `Action` has completed,
the flow should transition to the next `Action` in the relevant
`Scaffold`'s `action_set`. If there are no more actions left, then
`FlowComponent.COMPLETE` is returned and it is assumed that the next
`Scaffold` up the tree will deal with where to go next.
"""
def choose_next(self, scaffold):
# first figure out where we are in the current position
# so that we know which of our action_set is in the current
# path
position = scaffold._flow_position_instance._position
idx = -1
for idx, ffc in enumerate(position.flow_component_classes):
if scaffold.__class__ == ffc:
# we have found where we are
break
# idx == -1 implies we didn't find ourselves, which should be
# impossible
if idx == -1:
raise ValueError
# now work out which of our children is in the active path
if idx+1 >= len(position.flow_component_classes):
# this means that we are trying to choose the next item
# from an action, which is impossible as they don't have
# action sets!
raise ValueError
active_child = position.flow_component_classes[idx+1]
action_set = scaffold.action_set
child_idx = action_set.index(active_child)
# so, we know where we are in our action set, and we are linear,
# so next is simply the next one in our action set, or COMPLETE
# if there are no more options
if child_idx+1 >= len(action_set):
return COMPLETE
return action_set[child_idx+1]
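    # Illustrative walk-through of the linear rule above (hypothetical action
    # names): with scaffold.action_set == [EnterEmail, ConfirmEmail, Done],
    # completing ConfirmEmail transitions to Done, and completing Done returns
    # COMPLETE so the parent scaffold decides where to go next.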
class Chaos(object):
"""
Don't use this transition.
"""
def choose_next(self, scaffold):
return random.choice(scaffold.action_set)
|
node_launcher/node_set/bitcoind/bitcoind_rpc_client.py
|
ryan-lingle/node-launcher
| 249 |
123988
|
import base64
import binascii
import decimal
import json
import os
import platform
import sys
import urllib.parse as urlparse
from http.client import HTTP_PORT, HTTPConnection
DEFAULT_USER_AGENT = "AuthServiceProxy/0.1"
DEFAULT_HTTP_TIMEOUT = 30
# (un)hexlify to/from unicode, needed for Python3
unhexlify = binascii.unhexlify
hexlify = binascii.hexlify
if sys.version > '3':
unhexlify = lambda h: binascii.unhexlify(h.encode('utf8'))
hexlify = lambda b: binascii.hexlify(b).decode('utf8')
class JSONRPCError(Exception):
"""JSON-RPC protocol error base class
Subclasses of this class also exist for specific types of errors; the set
of all subclasses is by no means complete.
"""
SUBCLS_BY_CODE = {}
@classmethod
def _register_subcls(cls, subcls):
cls.SUBCLS_BY_CODE[subcls.RPC_ERROR_CODE] = subcls
return subcls
def __new__(cls, rpc_error):
assert cls is JSONRPCError
cls = JSONRPCError.SUBCLS_BY_CODE.get(rpc_error['code'], cls)
self = Exception.__new__(cls)
super(JSONRPCError, self).__init__(
'msg: %r code: %r' %
(rpc_error['message'], rpc_error['code']))
self.error = rpc_error
return self
class BaseProxy(object):
"""Base JSON-RPC proxy class. Contains only private methods; do not use
directly."""
def __init__(self,
service_url=None,
service_port=None,
btc_conf_file=None,
timeout=DEFAULT_HTTP_TIMEOUT):
# Create a dummy connection early on so if __init__() fails prior to
# __conn being created __del__() can detect the condition and handle it
# correctly.
self.__conn = None
if service_url is None:
# Figure out the path to the bitcoin.conf file
if btc_conf_file is None:
if platform.system() == 'Darwin':
btc_conf_file = os.path.expanduser('~/Library/Application Support/Bitcoin/')
elif platform.system() == 'Windows':
btc_conf_file = os.path.join(os.environ['APPDATA'], 'Bitcoin')
else:
btc_conf_file = os.path.expanduser('~/.bitcoin')
btc_conf_file = os.path.join(btc_conf_file, 'bitcoin.conf')
# Bitcoin Core accepts empty rpcuser, not specified in btc_conf_file
conf = {'rpcuser': ""}
# Extract contents of bitcoin.conf to build service_url
try:
with open(btc_conf_file, 'r') as fd:
for line in fd.readlines():
if '#' in line:
line = line[:line.index('#')]
if '=' not in line:
continue
k, v = line.split('=', 1)
conf[k.strip()] = v.strip()
# Treat a missing bitcoin.conf as though it were empty
except FileNotFoundError:
pass
conf['rpcport'] = int(conf.get('rpcport', service_port))
conf['rpchost'] = conf.get('rpcconnect', 'localhost')
service_url = ('%s://%s:%d' %
('http', conf['rpchost'], conf['rpcport']))
cookie_dir = conf.get('datadir', os.path.dirname(btc_conf_file))
cookie_file = os.path.join(cookie_dir, ".cookie")
try:
with open(cookie_file, 'r') as fd:
authpair = fd.read()
except IOError as err:
if 'rpcpassword' in conf:
authpair = "%s:%s" % (conf['rpcuser'], conf['rpcpassword'])
else:
raise ValueError(
'Cookie file unusable (%s) and rpcpassword not specified in the configuration file: %r' % (
err, btc_conf_file))
else:
url = urlparse.urlparse(service_url)
authpair = "%s:%s" % (url.username, url.password)
self.__service_url = service_url
self.__url = urlparse.urlparse(service_url)
if self.__url.scheme not in ('http',):
raise ValueError('Unsupported URL scheme %r' % self.__url.scheme)
if self.__url.port is None:
port = HTTP_PORT
else:
port = self.__url.port
self.__id_count = 0
if authpair is None:
self.__auth_header = None
else:
authpair = authpair.encode('utf8')
self.__auth_header = b"Basic " + base64.b64encode(authpair)
self.__conn = HTTPConnection(self.__url.hostname, port=port,
timeout=timeout)
def _call(self, service_name, *args):
self.__id_count += 1
postdata = json.dumps({'version': '1.1',
'method': service_name,
'params': args,
'id': self.__id_count})
headers = {
'Host': self.__url.hostname,
'User-Agent': DEFAULT_USER_AGENT,
'Content-type': 'application/json',
}
if self.__auth_header is not None:
headers['Authorization'] = self.__auth_header
self.__conn.request('POST', self.__url.path, postdata, headers)
response = self._get_response()
if response['error'] is not None:
raise JSONRPCError(response['error'])
elif 'result' not in response:
raise JSONRPCError({
'code': -343, 'message': 'missing JSON-RPC result'})
else:
return response['result']
def _batch(self, rpc_call_list):
postdata = json.dumps(list(rpc_call_list))
headers = {
'Host': self.__url.hostname,
'User-Agent': DEFAULT_USER_AGENT,
'Content-type': 'application/json',
}
if self.__auth_header is not None:
headers['Authorization'] = self.__auth_header
self.__conn.request('POST', self.__url.path, postdata, headers)
return self._get_response()
def _get_response(self):
http_response = self.__conn.getresponse()
if http_response is None:
raise JSONRPCError({
'code': -342, 'message': 'missing HTTP response from server'})
return json.loads(http_response.read().decode('utf8'),
parse_float=decimal.Decimal)
def close(self):
if self.__conn is not None:
self.__conn.close()
def __del__(self):
if self.__conn is not None:
self.__conn.close()
class RawProxy(BaseProxy):
"""Low-level proxy to a bitcoin JSON-RPC service
Unlike ``Proxy``, no conversion is done besides parsing JSON. As far as
Python is concerned, you can call any method; ``JSONRPCError`` will be
raised if the server does not recognize it.
"""
def __init__(self,
service_url=None,
service_port=None,
btc_conf_file=None,
timeout=DEFAULT_HTTP_TIMEOUT):
super(RawProxy, self).__init__(service_url=service_url,
service_port=service_port,
btc_conf_file=btc_conf_file,
timeout=timeout)
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
f = lambda *args: self._call(name, *args)
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
class Proxy(BaseProxy):
"""Proxy to a bitcoin RPC service
Unlike ``RawProxy``, data is passed as ``bitcoin.core`` objects or packed
bytes, rather than JSON or hex strings. Not all methods are implemented
yet; you can use ``call`` to access missing ones in a forward-compatible
way. Assumes Bitcoin Core version >= v0.16.0; older versions mostly work,
but there are a few incompatibilities.
"""
def __init__(self,
service_url=None,
service_port=None,
btc_conf_file=None,
timeout=DEFAULT_HTTP_TIMEOUT):
"""Create a proxy object
If ``service_url`` is not specified, the username and password are read
out of the file ``btc_conf_file``. If ``btc_conf_file`` is not
specified, ``~/.bitcoin/bitcoin.conf`` or equivalent is used by
default. The default port is set according to the chain parameters in
use: mainnet, testnet, or regtest.
Usually no arguments to ``Proxy()`` are needed; the local bitcoind will
be used.
``timeout`` - timeout in seconds before the HTTP interface times out
"""
super(Proxy, self).__init__(service_url=service_url,
service_port=service_port,
btc_conf_file=btc_conf_file,
timeout=timeout)
def call(self, service_name, *args):
"""Call an RPC method by name and raw (JSON encodable) arguments"""
return self._call(service_name, *args)
def get_raw_mempool(self):
results = self.call('getrawmempool', True)
new_results = []
for result_key in results:
del results[result_key]['fees']
del results[result_key]['depends']
del results[result_key]['spentby']
results[result_key]['txid'] = result_key
new_results.append(results[result_key])
return new_results
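# --- Usage sketch (added; not part of the original module) ---
# Assumes a locally running, already-configured bitcoind whose credentials
# can be read from bitcoin.conf or the .cookie file; 'getblockcount' is a
# standard Bitcoin Core RPC. Nothing below runs on import.
if __name__ == '__main__':
    proxy = Proxy(service_port=8332)
    try:
        print('block count:', proxy.call('getblockcount'))
        print('mempool size:', len(proxy.get_raw_mempool()))
    finally:
        proxy.close()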
|
chapter6/name_mangling.py
|
sharad16j/Expert-Python-Programming-Third-Edition
| 112 |
124022
|
class Base(object):
def __secret(self):
print("don't tell")
def public(self):
self.__secret()
class Derived(Base):
def __secret(self):
print("never ever")
if __name__ == "__main__":
print("Base class members:", dir(Base))
print("Derived class members:", dir(Derived))
print("Base.public() result:")
Base().public()
print("Derived.public() result:")
Derived().public()
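    # Added demonstration: name mangling rewrites __secret to _Base__secret
    # and _Derived__secret, so both "private" methods coexist and remain
    # reachable through their mangled names.
    print("Mangled access:")
    Derived()._Base__secret()
    Derived()._Derived__secret()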
|
protos/gen/python/protos/public/modeldb/versioning/Enums_pb2.py
|
stefan-petrov-toptal/modeldb
| 835 |
124023
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modeldb/versioning/Enums.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='modeldb/versioning/Enums.proto',
package='ai.verta.modeldb.versioning',
syntax='proto3',
serialized_options=b'P\001ZIgithub.com/VertaAI/modeldb/protos/gen/go/protos/public/modeldb/versioning',
serialized_pb=b'\n\x1emodeldb/versioning/Enums.proto\x12\x1b\x61i.verta.modeldb.versioning\"a\n\x0e\x44iffStatusEnum\"O\n\nDiffStatus\x12\x0b\n\x07UNKNOWN\x10\x00\x12\t\n\x05\x41\x44\x44\x45\x44\x10\x01\x12\x0b\n\x07\x44\x45LETED\x10\x02\x12\x0c\n\x08MODIFIED\x10\x03\x12\x0e\n\nCONFLICTED\x10\x04\x42MP\x01ZIgithub.com/VertaAI/modeldb/protos/gen/go/protos/public/modeldb/versioningb\x06proto3'
)
_DIFFSTATUSENUM_DIFFSTATUS = _descriptor.EnumDescriptor(
name='DiffStatus',
full_name='ai.verta.modeldb.versioning.DiffStatusEnum.DiffStatus',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ADDED', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DELETED', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODIFIED', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONFLICTED', index=4, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=81,
serialized_end=160,
)
_sym_db.RegisterEnumDescriptor(_DIFFSTATUSENUM_DIFFSTATUS)
_DIFFSTATUSENUM = _descriptor.Descriptor(
name='DiffStatusEnum',
full_name='ai.verta.modeldb.versioning.DiffStatusEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_DIFFSTATUSENUM_DIFFSTATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=63,
serialized_end=160,
)
_DIFFSTATUSENUM_DIFFSTATUS.containing_type = _DIFFSTATUSENUM
DESCRIPTOR.message_types_by_name['DiffStatusEnum'] = _DIFFSTATUSENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DiffStatusEnum = _reflection.GeneratedProtocolMessageType('DiffStatusEnum', (_message.Message,), {
'DESCRIPTOR' : _DIFFSTATUSENUM,
'__module__' : 'modeldb.versioning.Enums_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.versioning.DiffStatusEnum)
})
_sym_db.RegisterMessage(DiffStatusEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
saleor/shipping/migrations/0026_shippingzone_description.py
|
fairhopeweb/saleor
| 15,337 |
124051
|
# Generated by Django 3.1.4 on 2020-12-29 12:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("shipping", "0025_auto_20201130_1122"),
]
operations = [
migrations.AddField(
model_name="shippingzone",
name="description",
field=models.TextField(blank=True),
),
]
|
mmgen/models/translation_models/pix2pix.py
|
plutoyuxie/mmgeneration
| 718 |
124100
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.nn.parallel.distributed import _find_tensors
from mmgen.models.builder import MODELS
from ..common import set_requires_grad
from .static_translation_gan import StaticTranslationGAN
@MODELS.register_module()
class Pix2Pix(StaticTranslationGAN):
"""Pix2Pix model for paired image-to-image translation.
Ref:
Image-to-Image Translation with Conditional Adversarial Networks
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.use_ema = False
def forward_test(self, img, target_domain, **kwargs):
"""Forward function for testing.
Args:
img (tensor): Input image tensor.
target_domain (str): Target domain of output image.
kwargs (dict): Other arguments.
Returns:
dict: Forward results.
"""
# This is a trick for Pix2Pix
# ref: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/e1bdf46198662b0f4d0b318e24568205ec4d7aee/test.py#L54 # noqa
self.train()
target = self.translation(img, target_domain=target_domain, **kwargs)
results = dict(source=img.cpu(), target=target.cpu())
return results
def _get_disc_loss(self, outputs):
# GAN loss for the discriminator
losses = dict()
discriminators = self.get_module(self.discriminators)
target_domain = self._default_domain
source_domain = self.get_other_domains(target_domain)[0]
fake_ab = torch.cat((outputs[f'real_{source_domain}'],
outputs[f'fake_{target_domain}']), 1)
fake_pred = discriminators[target_domain](fake_ab.detach())
losses['loss_gan_d_fake'] = self.gan_loss(
fake_pred, target_is_real=False, is_disc=True)
real_ab = torch.cat((outputs[f'real_{source_domain}'],
outputs[f'real_{target_domain}']), 1)
real_pred = discriminators[target_domain](real_ab)
losses['loss_gan_d_real'] = self.gan_loss(
real_pred, target_is_real=True, is_disc=True)
loss_d, log_vars_d = self._parse_losses(losses)
loss_d *= 0.5
return loss_d, log_vars_d
def _get_gen_loss(self, outputs):
target_domain = self._default_domain
source_domain = self.get_other_domains(target_domain)[0]
losses = dict()
discriminators = self.get_module(self.discriminators)
# GAN loss for the generator
fake_ab = torch.cat((outputs[f'real_{source_domain}'],
outputs[f'fake_{target_domain}']), 1)
fake_pred = discriminators[target_domain](fake_ab)
losses['loss_gan_g'] = self.gan_loss(
fake_pred, target_is_real=True, is_disc=False)
# gen auxiliary loss
if self.with_gen_auxiliary_loss:
for loss_module in self.gen_auxiliary_losses:
loss_ = loss_module(outputs)
if loss_ is None:
continue
# the `loss_name()` function return name as 'loss_xxx'
if loss_module.loss_name() in losses:
losses[loss_module.loss_name(
)] = losses[loss_module.loss_name()] + loss_
else:
losses[loss_module.loss_name()] = loss_
loss_g, log_vars_g = self._parse_losses(losses)
return loss_g, log_vars_g
def train_step(self,
data_batch,
optimizer,
ddp_reducer=None,
running_status=None):
"""Training step function.
Args:
data_batch (dict): Dict of the input data batch.
optimizer (dict[torch.optim.Optimizer]): Dict of optimizers for
the generator and discriminator.
ddp_reducer (:obj:`Reducer` | None, optional): Reducer from ddp.
It is used to prepare for ``backward()`` in ddp. Defaults to
None.
running_status (dict | None, optional): Contains necessary basic
information for training, e.g., iteration number. Defaults to
None.
Returns:
dict: Dict of loss, information for logger, the number of samples\
and results for visualization.
"""
# data
target_domain = self._default_domain
source_domain = self.get_other_domains(self._default_domain)[0]
source_image = data_batch[f'img_{source_domain}']
target_image = data_batch[f'img_{target_domain}']
# get running status
if running_status is not None:
curr_iter = running_status['iteration']
else:
            # dirty workaround for when running_status is not provided
if not hasattr(self, 'iteration'):
self.iteration = 0
curr_iter = self.iteration
# forward generator
outputs = dict()
results = self(
source_image, target_domain=self._default_domain, test_mode=False)
outputs[f'real_{source_domain}'] = results['source']
outputs[f'fake_{target_domain}'] = results['target']
outputs[f'real_{target_domain}'] = target_image
log_vars = dict()
# discriminator
set_requires_grad(self.discriminators, True)
# optimize
optimizer['discriminators'].zero_grad()
loss_d, log_vars_d = self._get_disc_loss(outputs)
log_vars.update(log_vars_d)
# prepare for backward in ddp. If you do not call this function before
# back propagation, the ddp will not dynamically find the used params
# in current computation.
if ddp_reducer is not None:
ddp_reducer.prepare_for_backward(_find_tensors(loss_d))
loss_d.backward()
optimizer['discriminators'].step()
# generator, no updates to discriminator parameters.
if (curr_iter % self.disc_steps == 0
and curr_iter >= self.disc_init_steps):
set_requires_grad(self.discriminators, False)
# optimize
optimizer['generators'].zero_grad()
loss_g, log_vars_g = self._get_gen_loss(outputs)
log_vars.update(log_vars_g)
# prepare for backward in ddp. If you do not call this function
# before back propagation, the ddp will not dynamically find the
# used params in current computation.
if ddp_reducer is not None:
ddp_reducer.prepare_for_backward(_find_tensors(loss_g))
loss_g.backward()
optimizer['generators'].step()
if hasattr(self, 'iteration'):
self.iteration += 1
image_results = dict()
image_results[f'real_{source_domain}'] = outputs[
f'real_{source_domain}'].cpu()
image_results[f'fake_{target_domain}'] = outputs[
f'fake_{target_domain}'].cpu()
image_results[f'real_{target_domain}'] = outputs[
f'real_{target_domain}'].cpu()
results = dict(
log_vars=log_vars,
num_samples=len(outputs[f'real_{source_domain}']),
results=image_results)
return results
|
querybuilder/tests/__init__.py
|
wesokes/django-query-builder
| 110 |
124112
|
default_app_config = 'querybuilder.tests.apps.QuerybuilderTestConfig'
|
scripts/03_glove_build_counts.py
|
svlandeg/sense2vec
| 1,140 |
124120
|
#!/usr/bin/env python
import os
from pathlib import Path
from wasabi import msg
import typer
def main(
# fmt: off
glove_dir: str = typer.Argument(..., help="Directory containing the GloVe build"),
in_dir: str = typer.Argument(..., help="Directory with preprocessed .s2v files"),
out_dir: str = typer.Argument(..., help="Path to output directory"),
min_count: int = typer.Option(5, "--min-count", "-c", help="Minimum count for inclusion in vocab"),
memory: float = typer.Option(4.0, "--memory", "-m", help="Soft limit for memory consumption, in GB"),
window_size: int = typer.Option(15, "--window-size", "-w", help="Number of context words on either side"),
verbose: int = typer.Option(2, "--verbose", "-v", help="Set verbosity: 0, 1, or 2"),
# fmt: on
):
"""
Step 3: Build vocabulary and frequency counts
Expects a directory of preprocessed .s2v input files and will use GloVe to
collect unigram counts and construct and shuffle cooccurrence data. See here
for installation instructions: https://github.com/stanfordnlp/GloVe
Note that this script will call into GloVe and expects you to pass in the
GloVe build directory (/build if you run the Makefile). The commands will
also be printed if you want to run them separately.
"""
input_path = Path(in_dir)
output_path = Path(out_dir)
if not Path(glove_dir).exists():
msg.fail("Can't find GloVe build directory", glove_dir, exits=1)
if not input_path.exists() or not input_path.is_dir():
msg.fail("Not a valid input directory", in_dir, exits=1)
input_files = [str(fp) for fp in input_path.iterdir() if fp.suffix == ".s2v"]
if not input_files:
msg.fail("No .s2v files found in input directory", in_dir, exits=1)
msg.info(f"Using {len(input_files)} input files")
if not output_path.exists():
output_path.mkdir(parents=True)
msg.good(f"Created output directory {out_dir}")
vocab_file = output_path / f"vocab.txt"
cooc_file = output_path / f"cooccurrence.bin"
cooc_shuffle_file = output_path / f"cooccurrence.shuf.bin"
msg.info("Creating vocabulary counts")
cmd = (
f"cat {' '.join(input_files)} | {glove_dir}/vocab_count "
f"-min-count {min_count} -verbose {verbose} > {vocab_file}"
)
print(cmd)
vocab_cmd = os.system(cmd)
if vocab_cmd != 0 or not Path(vocab_file).exists():
msg.fail("Failed creating vocab counts", exits=1)
msg.good("Created vocab counts", vocab_file)
msg.info("Creating cooccurrence statistics")
cmd = (
f"cat {' '.join(input_files)} | {glove_dir}/cooccur -memory {memory} "
f"-vocab-file {vocab_file} -verbose {verbose} "
f"-window-size {window_size} > {cooc_file}"
)
print(cmd)
cooccur_cmd = os.system(cmd)
if cooccur_cmd != 0 or not Path(cooc_file).exists():
msg.fail("Failed creating cooccurrence statistics", exits=1)
msg.good("Created cooccurrence statistics", cooc_file)
msg.info("Shuffling cooccurrence file")
cmd = (
f"{glove_dir}/shuffle -memory {memory} -verbose {verbose} "
f"< {cooc_file} > {cooc_shuffle_file}"
)
print(cmd)
shuffle_cmd = os.system(cmd)
if shuffle_cmd != 0 or not Path(cooc_shuffle_file).exists():
msg.fail("Failed to shuffle cooccurrence file", exits=1)
msg.good("Shuffled cooccurrence file", cooc_shuffle_file)
if __name__ == "__main__":
typer.run(main)
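# --- Usage sketch (added; not part of the original script) ---
# Example invocation, assuming GloVe was compiled with its Makefile (so the
# build/ directory holds vocab_count, cooccur and shuffle) and the
# preprocessed .s2v files live in ./corpus; all paths are placeholders.
#
#   python 03_glove_build_counts.py /path/to/GloVe/build ./corpus ./counts \
#       --min-count 10 --memory 8.0 --window-size 15 --verbose 2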
|
prerender/cache/disk.py
|
bosondata/chrome-prerender
| 169 |
124122
|
import os
import lzma
import asyncio
import functools
from typing import Optional
from aiofiles.os import stat
import diskcache
from .base import CacheBackend
CACHE_ROOT_DIR: str = os.environ.get('CACHE_ROOT_DIR', '/tmp/prerender')
class DiskCache(CacheBackend):
def __init__(self) -> None:
self._cache = diskcache.Cache(CACHE_ROOT_DIR)
async def get(self, key: str, format: str = 'html') -> Optional[bytes]:
loop = asyncio.get_event_loop()
cache_get = self._cache.get
data = await loop.run_in_executor(None, cache_get, key + format)
if data is not None:
res = await loop.run_in_executor(None, lzma.decompress, data)
return res
def set(self, key: str, payload: bytes, ttl: int = None, format: str = 'html') -> None:
compressed = lzma.compress(payload)
self._cache.set(key + format, compressed, expire=ttl)
async def modified_since(self, key: str, format: str = 'html') -> Optional[float]:
loop = asyncio.get_event_loop()
cache_read = functools.partial(self._cache.get, read=True)
file = await loop.run_in_executor(None, cache_read, key + format)
if not file:
return
filename = file.name
file.close()
stats = await stat(filename)
return stats.st_mtime
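# --- Usage sketch (added; not part of the original module) ---
# Minimal driver for the DiskCache above; the key and payload are made up
# and CACHE_ROOT_DIR must be writable. Nothing below runs on import.
async def _demo() -> None:
    cache = DiskCache()
    cache.set('example-key', b'<html>hello</html>', ttl=60, format='html')
    assert await cache.get('example-key', format='html') == b'<html>hello</html>'
if __name__ == '__main__':
    asyncio.run(_demo())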
|
EventFilter/RPCRawToDigi/python/rpcUnpacker_cfi.py
|
ckamtsikis/cmssw
| 852 |
124148
|
import FWCore.ParameterSet.Config as cms
import EventFilter.RPCRawToDigi.rpcUnpackingModule_cfi
rpcunpacker = EventFilter.RPCRawToDigi.rpcUnpackingModule_cfi.rpcUnpackingModule.clone()
rpcunpacker.InputLabel = cms.InputTag("rawDataCollector")
rpcunpacker.doSynchro = cms.bool(True)
|
server/security.py
|
s-bauer/yang-explorer
| 437 |
124153
|
import logging
from django.shortcuts import render_to_response
from django.template import RequestContext
def policy_handler(request):
return render_to_response('crossdomain.xml', {}, RequestContext(request))
|
anuga/file_conversion/tests/test_2pts.py
|
samcom12/anuga_core
| 136 |
124193
|
# external modules
import unittest
import tempfile
import shutil
import numpy as num
# ANUGA modules
from anuga.shallow_water.shallow_water_domain import Domain
from anuga.coordinate_transforms.geo_reference import Geo_reference
from anuga.file.sww import Write_sww, SWW_file
from anuga.abstract_2d_finite_volumes.generic_boundary_conditions \
import Transmissive_boundary
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a, \
netcdf_float
from anuga.geospatial_data.geospatial_data import Geospatial_data
# local modules
from anuga.file_conversion.sdf2pts import sdf2pts
from anuga.file_conversion.sww2pts import sww2pts
from pprint import pprint
class Test_2Pts(unittest.TestCase):
""" Test files that convert to pts format. """
def test_hecras_cross_sections2pts(self):
"""Test conversion from HECRAS cross sections in ascii format
to native NetCDF pts format
"""
import time, os
from anuga.file.netcdf import NetCDFFile
#Write test asc file
root = 'hecrastest'
filename = root+'.sdf'
fid = open(filename, 'w')
fid.write("""
# RAS export file created on Mon 15Aug2005 11:42
# by HEC-RAS Version 3.1.1
BEGIN HEADER:
UNITS: METRIC
DTM TYPE: TIN
DTM: v:\\1\\cit\\perth_topo\\river_tin
STREAM LAYER: c:\\x_local\\hecras\\21_02_03\\up_canning_cent3d.shp
CROSS-SECTION LAYER: c:\\x_local\\hecras\\21_02_03\\up_can_xs3d.shp
MAP PROJECTION: UTM
PROJECTION ZONE: 50
DATUM: AGD66
VERTICAL DATUM:
NUMBER OF REACHES: 19
NUMBER OF CROSS-SECTIONS: 2
END HEADER:
BEGIN CROSS-SECTIONS:
CROSS-SECTION:
STREAM ID:Southern-Wungong
REACH ID:Southern-Wungong
STATION:21410
CUT LINE:
407546.08 , 6437277.542
407329.32 , 6437489.482
407283.11 , 6437541.232
SURFACE LINE:
407546.08, 6437277.54, 52.14
407538.88, 6437284.58, 51.07
407531.68, 6437291.62, 50.56
407524.48, 6437298.66, 49.58
407517.28, 6437305.70, 49.09
407510.08, 6437312.74, 48.76
END:
CROSS-SECTION:
STREAM ID:Swan River
REACH ID:Swan Mouth
STATION:840.*
CUT LINE:
381178.0855 , 6452559.0685
380485.4755 , 6453169.272
SURFACE LINE:
381178.09, 6452559.07, 4.17
381169.49, 6452566.64, 4.26
381157.78, 6452576.96, 4.34
381155.97, 6452578.56, 4.35
381143.72, 6452589.35, 4.43
381136.69, 6452595.54, 4.58
381114.74, 6452614.88, 4.41
381075.53, 6452649.43, 4.17
381071.47, 6452653.00, 3.99
381063.46, 6452660.06, 3.67
381054.41, 6452668.03, 3.67
END:
END CROSS-SECTIONS:
""")
fid.close()
#Convert to NetCDF pts
sdf2pts(root+'.sdf')
#Check contents
#Get NetCDF
fid = NetCDFFile(root+'.pts', netcdf_mode_r)
# Get the variables
#print fid.variables.keys()
points = fid.variables['points']
elevation = fid.variables['elevation']
#Check values
ref_points = [[407546.08, 6437277.54],
[407538.88, 6437284.58],
[407531.68, 6437291.62],
[407524.48, 6437298.66],
[407517.28, 6437305.70],
[407510.08, 6437312.74]]
ref_points += [[381178.09, 6452559.07],
[381169.49, 6452566.64],
[381157.78, 6452576.96],
[381155.97, 6452578.56],
[381143.72, 6452589.35],
[381136.69, 6452595.54],
[381114.74, 6452614.88],
[381075.53, 6452649.43],
[381071.47, 6452653.00],
[381063.46, 6452660.06],
[381054.41, 6452668.03]]
ref_elevation = [52.14, 51.07, 50.56, 49.58, 49.09, 48.76]
ref_elevation += [4.17, 4.26, 4.34, 4.35, 4.43, 4.58, 4.41, 4.17, 3.99, 3.67, 3.67]
#print points[:]
#print ref_points
assert num.allclose(points, ref_points)
#print attributes[:]
#print ref_elevation
assert num.allclose(elevation, ref_elevation)
#Cleanup
fid.close()
os.remove(root + '.sdf')
os.remove(root + '.pts')
def test_sww2pts_centroids_1_5(self):
"""Test that sww information can be converted correctly to pts data at specified coordinates
- in this case, the centroids.
"""
import time, os
from anuga.file.netcdf import NetCDFFile
# Used for points that lie outside mesh
NODATA_value = 1758323
# Setup
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular
# Create shallow water domain
domain = Domain(*rectangular(2, 2))
domain.set_flow_algorithm('1_5')
B = Transmissive_boundary(domain)
domain.set_boundary( {'left': B, 'right': B, 'top': B, 'bottom': B})
domain.set_name('datatest_1_5')
ptsfile = domain.get_name() + '_elevation.pts'
swwfile = domain.get_name() + '.sww'
domain.set_datadir('.')
domain.format = 'sww'
domain.set_quantity('elevation', lambda x,y: -x-y)
domain.geo_reference = Geo_reference(56,308500,6189000)
sww = SWW_file(domain)
sww.store_connectivity()
sww.store_timestep()
#self.domain.tight_slope_limiters = 1
domain.evolve_to_end(finaltime = 0.01)
sww.store_timestep()
# Check contents in NetCDF
fid = NetCDFFile(sww.filename, netcdf_mode_r)
# Get the variables
x = fid.variables['x'][:]
y = fid.variables['y'][:]
elevation = fid.variables['elevation'][:]
time = fid.variables['time'][:]
stage = fid.variables['stage'][:]
volumes = fid.variables['volumes'][:]
# Invoke interpolation for vertex points
points = num.concatenate( (x[:,num.newaxis],y[:,num.newaxis]), axis=1 )
points = num.ascontiguousarray(points)
sww2pts(domain.get_name() + '.sww',
quantity = 'elevation',
data_points = points,
NODATA_value = NODATA_value)
ref_point_values = elevation
point_values = Geospatial_data(ptsfile).get_attributes()
#print 'P', point_values
#print 'Ref', ref_point_values
assert num.allclose(point_values, ref_point_values)
# Invoke interpolation for centroids
points = domain.get_centroid_coordinates()
#print points
sww2pts(domain.get_name() + '.sww',
quantity = 'elevation',
data_points = points,
NODATA_value = NODATA_value)
ref_point_values = [-0.5, -0.5, -1, -1, -1, -1, -1.5, -1.5] #At centroids
point_values = Geospatial_data(ptsfile).get_attributes()
#print 'P', point_values
#print 'Ref', ref_point_values
assert num.allclose(point_values, ref_point_values)
fid.close()
#Cleanup
os.remove(sww.filename)
os.remove(ptsfile)
def test_sww2pts_centroids_de0(self):
"""Test that sww information can be converted correctly to pts data at specified coordinates
- in this case, the centroids.
"""
import time, os
from anuga.file.netcdf import NetCDFFile
# Used for points that lie outside mesh
NODATA_value = 1758323
# Setup
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular
# Create shallow water domain
domain = Domain(*rectangular(2, 2))
B = Transmissive_boundary(domain)
domain.set_boundary( {'left': B, 'right': B, 'top': B, 'bottom': B})
domain.set_name('datatest_de0')
ptsfile = domain.get_name() + '_elevation.pts'
swwfile = domain.get_name() + '.sww'
domain.set_datadir('.')
domain.format = 'sww'
domain.set_quantity('elevation', lambda x,y: -x-y)
domain.geo_reference = Geo_reference(56,308500,6189000)
sww = SWW_file(domain)
sww.store_connectivity()
sww.store_timestep()
#self.domain.tight_slope_limiters = 1
domain.evolve_to_end(finaltime = 0.01)
sww.store_timestep()
# Check contents in NetCDF
fid = NetCDFFile(sww.filename, netcdf_mode_r)
# Get the variables
x = fid.variables['x'][:]
y = fid.variables['y'][:]
elevation = fid.variables['elevation'][:]
time = fid.variables['time'][:]
stage = fid.variables['stage'][:]
volumes = fid.variables['volumes'][:]
# Invoke interpolation for vertex points
points = num.concatenate( (x[:,num.newaxis],y[:,num.newaxis]), axis=1 )
points = num.ascontiguousarray(points)
sww2pts(domain.get_name() + '.sww',
quantity = 'elevation',
data_points = points,
NODATA_value = NODATA_value)
ref_point_values = elevation
point_values = Geospatial_data(ptsfile).get_attributes()
#print 'P', point_values
#print 'Ref', ref_point_values
assert num.allclose(point_values, ref_point_values)
# Invoke interpolation for centroids
points = domain.get_centroid_coordinates()
#print points
sww2pts(domain.get_name() + '.sww',
quantity = 'elevation',
data_points = points,
NODATA_value = NODATA_value)
#ref_point_values = [-0.5, -0.5, -1, -1, -1, -1, -1.5, -1.5] #At centroids
ref_point_values = [-0.77777777, -0.77777777, -0.99999998, -0.99999998,
-0.99999998, -0.99999998, -1.22222221, -1.22222221]
point_values = Geospatial_data(ptsfile).get_attributes()
#print 'P', point_values
#print 'Ref', ref_point_values
assert num.allclose(point_values, ref_point_values)
fid.close()
#Cleanup
os.remove(sww.filename)
os.remove(ptsfile)
#-------------------------------------------------------------
if __name__ == "__main__":
suite = unittest.makeSuite(Test_2Pts, 'test_')
runner = unittest.TextTestRunner() #verbosity=2)
runner.run(suite)
|
core/preprocess_utils.py
|
saramsv/wss
| 116 |
124241
|
# Lint as: python2, python3
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions related to preprocessing inputs."""
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow as tf
def flip_dim(tensor_list, prob=0.5, dim=1):
"""Randomly flips a dimension of the given tensor.
  The decision to randomly flip the `Tensors` is made together. In other
  words, all or none of the images passed in are flipped.
  Note that tf.random_flip_left_right and tf.random_flip_up_down aren't used
  so that we can control the probability as well as ensure the same decision
  is applied across the images.
Args:
tensor_list: A list of `Tensors` with the same number of dimensions.
prob: The probability of a left-right flip.
dim: The dimension to flip, 0, 1, ..
Returns:
outputs: A list of the possibly flipped `Tensors` as well as an indicator
`Tensor` at the end whose value is `True` if the inputs were flipped and
`False` otherwise.
Raises:
ValueError: If dim is negative or greater than the dimension of a `Tensor`.
"""
random_value = tf.random.uniform([])
def flip():
flipped = []
for tensor in tensor_list:
if dim < 0 or dim >= len(tensor.get_shape().as_list()):
raise ValueError('dim must represent a valid dimension.')
flipped.append(tf.compat.v1.reverse_v2(tensor, [dim]))
return flipped
is_flipped = tf.less_equal(random_value, prob)
outputs = tf.cond(is_flipped, flip, lambda: tensor_list)
if not isinstance(outputs, (list, tuple)):
outputs = [outputs]
outputs.append(is_flipped)
return outputs
def _image_dimensions(image, rank):
"""Returns the dimensions of an image tensor.
Args:
image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`.
rank: The expected rank of the image
Returns:
    A list corresponding to the dimensions of the input image. Dimensions
that are statically known are python integers, otherwise they are integer
scalar tensors.
"""
if image.get_shape().is_fully_defined():
return image.get_shape().as_list()
else:
static_shape = image.get_shape().with_rank(rank).as_list()
dynamic_shape = tf.unstack(tf.shape(image), rank)
return [
s if s is not None else d for s, d in zip(static_shape, dynamic_shape)
]
def get_label_resize_method(label):
"""Returns the resize method of labels depending on label dtype.
Args:
label: Groundtruth label tensor.
Returns:
tf.image.ResizeMethod.BILINEAR, if label dtype is floating.
tf.image.ResizeMethod.NEAREST_NEIGHBOR, if label dtype is integer.
Raises:
ValueError: If label is neither floating nor integer.
"""
if label.dtype.is_floating:
return tf.image.ResizeMethod.BILINEAR
elif label.dtype.is_integer:
return tf.image.ResizeMethod.NEAREST_NEIGHBOR
else:
raise ValueError('Label type must be either floating or integer.')
def pad_to_bounding_box(image, offset_height, offset_width, target_height,
target_width, pad_value):
"""Pads the given image with the given pad_value.
Works like tf.image.pad_to_bounding_box, except it can pad the image
with any given arbitrary pad value and also handle images whose sizes are not
known during graph construction.
Args:
image: 3-D tensor with shape [height, width, channels]
offset_height: Number of rows of zeros to add on top.
offset_width: Number of columns of zeros to add on the left.
target_height: Height of output image.
target_width: Width of output image.
pad_value: Value to pad the image tensor with.
Returns:
3-D tensor of shape [target_height, target_width, channels].
Raises:
ValueError: If the shape of image is incompatible with the offset_* or
target_* arguments.
"""
with tf.compat.v1.name_scope(None, 'pad_to_bounding_box', [image]):
image = tf.convert_to_tensor(image, name='image')
original_dtype = image.dtype
if original_dtype != tf.float32 and original_dtype != tf.float64:
# If image dtype is not float, we convert it to int32 to avoid overflow.
image = tf.cast(image, tf.int32)
image_rank_assert = tf.Assert(
tf.logical_or(
tf.equal(tf.rank(image), 3),
tf.equal(tf.rank(image), 4)),
['Wrong image tensor rank.'])
with tf.control_dependencies([image_rank_assert]):
image -= pad_value
image_shape = image.get_shape()
is_batch = True
if image_shape.ndims == 3:
is_batch = False
image = tf.expand_dims(image, 0)
elif image_shape.ndims is None:
is_batch = False
image = tf.expand_dims(image, 0)
image.set_shape([None] * 4)
elif image.get_shape().ndims != 4:
raise ValueError('Input image must have either 3 or 4 dimensions.')
_, height, width, _ = _image_dimensions(image, rank=4)
target_width_assert = tf.Assert(
tf.greater_equal(
target_width, width),
['target_width must be >= width'])
target_height_assert = tf.Assert(
tf.greater_equal(target_height, height),
['target_height must be >= height'])
with tf.control_dependencies([target_width_assert]):
after_padding_width = target_width - offset_width - width
with tf.control_dependencies([target_height_assert]):
after_padding_height = target_height - offset_height - height
offset_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(after_padding_width, 0),
tf.greater_equal(after_padding_height, 0)),
['target size not possible with the given target offsets'])
batch_params = tf.stack([0, 0])
height_params = tf.stack([offset_height, after_padding_height])
width_params = tf.stack([offset_width, after_padding_width])
channel_params = tf.stack([0, 0])
with tf.control_dependencies([offset_assert]):
paddings = tf.stack([batch_params, height_params, width_params,
channel_params])
padded = tf.pad(image, paddings)
if not is_batch:
padded = tf.squeeze(padded, axis=[0])
outputs = padded + pad_value
if outputs.dtype != original_dtype:
outputs = tf.cast(outputs, original_dtype)
return outputs
def _crop(image, offset_height, offset_width, crop_height, crop_width):
"""Crops the given image using the provided offsets and sizes.
Note that the method doesn't assume we know the input image size but it does
assume we know the input image rank.
Args:
image: an image of shape [height, width, channels].
offset_height: a scalar tensor indicating the height offset.
offset_width: a scalar tensor indicating the width offset.
crop_height: the height of the cropped image.
crop_width: the width of the cropped image.
Returns:
    The cropped image.
Raises:
ValueError: if `image` doesn't have rank of 3.
InvalidArgumentError: if the rank is not 3 or if the image dimensions are
less than the crop size.
"""
original_shape = tf.shape(image)
if len(image.get_shape().as_list()) != 3:
raise ValueError('input must have rank of 3')
original_channels = image.get_shape().as_list()[2]
rank_assertion = tf.Assert(
tf.equal(tf.rank(image), 3),
['Rank of image must be equal to 3.'])
with tf.control_dependencies([rank_assertion]):
cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
size_assertion = tf.Assert(
tf.logical_and(
tf.greater_equal(original_shape[0], crop_height),
tf.greater_equal(original_shape[1], crop_width)),
['Crop size greater than the image size.'])
offsets = tf.cast(tf.stack([offset_height, offset_width, 0]), tf.int32)
# Use tf.slice instead of crop_to_bounding box as it accepts tensors to
# define the crop size.
with tf.control_dependencies([size_assertion]):
image = tf.slice(image, offsets, cropped_shape)
image = tf.reshape(image, cropped_shape)
image.set_shape([crop_height, crop_width, original_channels])
return image
def random_crop(image_list, crop_height, crop_width):
"""Crops the given list of images.
The function applies the same crop to each image in the list. This can be
effectively applied when there are multiple image inputs of the same
dimension such as:
image, depths, normals = random_crop([image, depths, normals], 120, 150)
Args:
image_list: a list of image tensors of the same dimension but possibly
varying channel.
crop_height: the new height.
crop_width: the new width.
Returns:
the image_list with cropped images.
Raises:
    ValueError: if there are multiple image inputs provided with different
      sizes or the images are smaller than the crop dimensions.
"""
if not image_list:
raise ValueError('Empty image_list.')
# Compute the rank assertions.
rank_assertions = []
for i in range(len(image_list)):
image_rank = tf.rank(image_list[i])
rank_assert = tf.Assert(
tf.equal(image_rank, 3), [
'Wrong rank for tensor %d in image_list [expected] [actual]', i, 3,
image_rank
])
rank_assertions.append(rank_assert)
with tf.control_dependencies([rank_assertions[0]]):
image_shape = tf.shape(image_list[0])
image_height = image_shape[0]
image_width = image_shape[1]
crop_size_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(image_height, crop_height),
tf.greater_equal(image_width, crop_width)),
['Crop size greater than the image size.'])
asserts = [rank_assertions[0], crop_size_assert]
for i in range(1, len(image_list)):
image = image_list[i]
asserts.append(rank_assertions[i])
with tf.control_dependencies([rank_assertions[i]]):
shape = tf.shape(image)
height = shape[0]
width = shape[1]
height_assert = tf.Assert(
tf.equal(height, image_height), [
'Wrong height for tensor %d in image_list [expected][actual]', i,
height, image_height
])
width_assert = tf.Assert(
tf.equal(width, image_width), [
'Wrong width for tensor %d in image_list [expected][actual]', i,
width, image_width
])
asserts.extend([height_assert, width_assert])
# Create a random bounding box.
#
# Use tf.random.uniform and not numpy.random.rand as doing the former would
# generate random numbers at graph eval time, unlike the latter which
# generates random numbers at graph definition time.
with tf.control_dependencies(asserts):
max_offset_height = tf.reshape(image_height - crop_height + 1, [])
max_offset_width = tf.reshape(image_width - crop_width + 1, [])
offset_height = tf.random.uniform([],
maxval=max_offset_height,
dtype=tf.int32)
offset_width = tf.random.uniform([], maxval=max_offset_width, dtype=tf.int32)
return [_crop(image, offset_height, offset_width,
crop_height, crop_width) for image in image_list]
def get_random_scale(min_scale_factor, max_scale_factor, step_size):
"""Gets a random scale value.
Args:
min_scale_factor: Minimum scale value.
max_scale_factor: Maximum scale value.
step_size: The step size from minimum to maximum value.
Returns:
A tensor with random scale value selected between minimum and maximum value.
If `min_scale_factor` and `max_scale_factor` are the same, a number is
returned instead.
Raises:
ValueError: min_scale_factor has unexpected value.
"""
if min_scale_factor < 0 or min_scale_factor > max_scale_factor:
raise ValueError('Unexpected value of min_scale_factor.')
if min_scale_factor == max_scale_factor:
return np.float32(min_scale_factor)
# When step_size = 0, we sample the value uniformly from [min, max).
if step_size == 0:
return tf.random.uniform([1],
minval=min_scale_factor,
maxval=max_scale_factor)
# When step_size != 0, we randomly select one discrete value from [min, max].
num_steps = int((max_scale_factor - min_scale_factor) / step_size + 1)
scale_factors = tf.lin_space(min_scale_factor, max_scale_factor,
num_steps)
shuffled_scale_factors = tf.compat.v1.random_shuffle(scale_factors)
return shuffled_scale_factors[0]
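# Added example: get_random_scale(0.5, 2.0, 0.25) draws uniformly from the
# discrete set {0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0}, while
# get_random_scale(0.5, 2.0, 0) samples a continuous value in [0.5, 2.0).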
def randomly_scale_image_and_label(image, label=None, scale=1.0):
"""Randomly scales image and label.
Args:
image: Image with shape [height, width, 3].
label: Label with shape [height, width, 1].
scale: The value to scale image and label.
Returns:
Scaled image and label.
"""
# No random scaling if scale == 1.
if scale == 1.0:
return image, label
image_shape = tf.shape(image)
new_dim = tf.cast(
tf.cast([image_shape[0], image_shape[1]], tf.float32) * scale,
tf.int32)
# Need squeeze and expand_dims because image interpolation takes
# 4D tensors as input.
## tf1 op without anti-aliasing
# image = tf.squeeze(
# tf.compat.v1.image.resize_bilinear(
# tf.expand_dims(image, 0), new_dim, align_corners=True), [0])
## tf2 op with anti-aliasing
image = tf.compat.v2.image.resize(
image, new_dim, method='bilinear', antialias=True)
if label is not None:
label = tf.compat.v1.image.resize(
label,
new_dim,
method=get_label_resize_method(label),
align_corners=True)
return image, label
def resolve_shape(tensor, rank=None, scope=None):
"""Fully resolves the shape of a Tensor.
Use as much as possible the shape components already known during graph
creation and resolve the remaining ones during runtime.
Args:
tensor: Input tensor whose shape we query.
rank: The rank of the tensor, provided that we know it.
scope: Optional name scope.
Returns:
shape: The full shape of the tensor.
"""
with tf.compat.v1.name_scope(scope, 'resolve_shape', [tensor]):
if rank is not None:
shape = tensor.get_shape().with_rank(rank).as_list()
else:
shape = tensor.get_shape().as_list()
if None in shape:
shape_dynamic = tf.shape(tensor)
for i in range(len(shape)):
if shape[i] is None:
shape[i] = shape_dynamic[i]
return shape
def resize_to_range_helper(input_shape, min_size, max_size=None, factor=None,
keep_aspect_ratio=True):
"""Determines output size in specified range.
Adapted from //image/understanding/object_detection/core/preprocessor.py
The output size can be described by two cases:
1. If the image can be rescaled so its minimum size is equal to min_size
without the other side exceeding max_size, then do so.
2. Otherwise, resize so the largest side is equal to max_size.
An integer in `range(factor)` is added to the computed sides so that the
final dimensions are multiples of `factor` plus one.
Args:
input_shape: A 2-element list with the [height, width] of the input image.
min_size: (scalar) desired size of the smaller image side.
max_size: (optional) (scalar) maximum allowed size of the larger image
side.
factor: Make output size multiple of factor plus one.
keep_aspect_ratio: Boolean, keep aspect ratio or not. If True, the input
will be resized while keeping the original aspect ratio. If False, the
input will be resized to [max_resize_value, max_resize_value] without
keeping the original aspect ratio.
Returns:
A 1-D tensor containing the [new_height, new_width].
"""
input_height, input_width = input_shape
input_height = tf.cast(input_height, tf.float32)
input_width = tf.cast(input_width, tf.float32)
input_min_size = tf.minimum(input_height, input_width)
# Calculate the larger of the possible sizes
min_size = tf.cast(min_size, tf.float32)
large_scale_factor = min_size / input_min_size
large_height = tf.cast(tf.floor(input_height * large_scale_factor), tf.int32)
large_width = tf.cast(tf.floor(input_width * large_scale_factor), tf.int32)
large_size = tf.stack([large_height, large_width])
if max_size is not None:
# Calculate the smaller of the possible sizes, use that if the larger
# is too big.
input_max_size = tf.maximum(input_height, input_width)
max_size = tf.cast(max_size, tf.float32)
small_scale_factor = max_size / input_max_size
small_height = tf.cast(
tf.floor(input_height * small_scale_factor), tf.int32)
small_width = tf.cast(tf.floor(input_width * small_scale_factor), tf.int32)
small_size = tf.stack([small_height, small_width])
output_shape = tf.cond(
tf.cast(tf.reduce_max(large_size), tf.float32) > max_size,
lambda: small_size,
lambda: large_size)
else:
output_shape = large_size
# Ensure that both output sides are multiples of factor plus one.
if factor is not None:
output_shape += (factor - (output_shape - 1) % factor) % factor
if not keep_aspect_ratio:
# If not keep the aspect ratio, we resize everything to max_size, allowing
# us to do pre-processing without extra padding.
output_shape = [tf.reduce_max(output_shape), tf.reduce_max(output_shape)]
return output_shape
def resize_to_range(image,
label=None,
min_size=None,
max_size=None,
factor=None,
keep_aspect_ratio=True,
align_corners=True,
label_layout_is_chw=False,
scope=None,
method=tf.image.ResizeMethod.BILINEAR):
"""Resizes image or label so their sides are within the provided range.
The output size can be described by two cases:
1. If the image can be rescaled so its minimum size is equal to min_size
without the other side exceeding max_size, then do so.
2. Otherwise, resize so the largest side is equal to max_size.
An integer in `range(factor)` is added to the computed sides so that the
final dimensions are multiples of `factor` plus one.
Args:
image: A 3D tensor of shape [height, width, channels].
label: (optional) A 3D tensor of shape [height, width, channels] (default)
or [channels, height, width] when label_layout_is_chw = True.
min_size: (scalar) desired size of the smaller image side.
max_size: (scalar) maximum allowed size of the larger image side. Note
that the output dimension is no larger than max_size and may be slightly
smaller than max_size when factor is not None.
factor: Make output size multiple of factor plus one.
keep_aspect_ratio: Boolean, keep aspect ratio or not. If True, the input
will be resized while keeping the original aspect ratio. If False, the
input will be resized to [max_resize_value, max_resize_value] without
keeping the original aspect ratio.
align_corners: If True, exactly align all 4 corners of input and output.
label_layout_is_chw: If true, the label has shape [channel, height, width].
We support this case because for some instance segmentation dataset, the
instance segmentation is saved as [num_instances, height, width].
scope: Optional name scope.
method: Image resize method. Defaults to tf.image.ResizeMethod.BILINEAR.
Returns:
A 3-D tensor of shape [new_height, new_width, channels], where the image
has been resized (with the specified method) so that
min(new_height, new_width) == ceil(min_size) or
max(new_height, new_width) == ceil(max_size).
Raises:
ValueError: If the image is not a 3D tensor.
"""
with tf.compat.v1.name_scope(scope, 'resize_to_range', [image]):
new_tensor_list = []
min_size = tf.cast(min_size, tf.float32)
if max_size is not None:
max_size = tf.cast(max_size, tf.float32)
# Modify the max_size to be a multiple of factor plus 1 and make sure the
# max dimension after resizing is no larger than max_size.
if factor is not None:
max_size = (max_size - (max_size - 1) % factor)
[orig_height, orig_width, _] = resolve_shape(image, rank=3)
new_size = resize_to_range_helper(input_shape=[orig_height, orig_width],
min_size=min_size,
max_size=max_size,
factor=factor,
keep_aspect_ratio=keep_aspect_ratio)
new_tensor_list.append(tf.image.resize(
image, new_size, method=method, align_corners=align_corners))
if label is not None:
if label_layout_is_chw:
# Input label has shape [channel, height, width].
resized_label = tf.expand_dims(label, 3)
resized_label = tf.image.resize(
resized_label,
new_size,
method=get_label_resize_method(label),
align_corners=align_corners)
resized_label = tf.squeeze(resized_label, 3)
else:
# Input label has shape [height, width, channel].
resized_label = tf.image.resize(
label,
new_size,
method=get_label_resize_method(label),
align_corners=align_corners)
new_tensor_list.append(resized_label)
else:
new_tensor_list.append(None)
return new_tensor_list
def gaussian_blur(image, kernel_size, sigma, padding='SAME'):
"""Blurs the image with separable convolution.
Args:
image: Tensor of shape [height, width, channels], dtype float
kernel_size: kernel size of the filter
sigma: Sigma value for the Gaussian (std)
padding: Padding mode for the convolution. 'SAME' or 'VALID'
Returns:
    blurred: The blurred image `Tensor`, with the same shape and dtype as
      `image`.
"""
radius = tf.to_int32(kernel_size / 2)
kernel_size = radius * 2 + 1
x = tf.to_float(tf.range(-radius, radius + 1))
blur_filter = tf.exp(
-tf.pow(x, 2.0) / (2.0 * tf.pow(tf.to_float(sigma), 2.0)))
blur_filter /= tf.reduce_sum(blur_filter)
# One vertical and one horizontal filter.
blur_v = tf.reshape(blur_filter, [kernel_size, 1, 1, 1])
blur_h = tf.reshape(blur_filter, [1, kernel_size, 1, 1])
num_channels = tf.shape(image)[-1]
blur_h = tf.tile(blur_h, [1, 1, num_channels, 1])
blur_v = tf.tile(blur_v, [1, 1, num_channels, 1])
expand_batch_dim = image.shape.ndims == 3
if expand_batch_dim:
# Tensorflow requires batched input to convolutions, which we can fake with
# an extra dimension.
image = tf.expand_dims(image, axis=0)
blurred = tf.nn.depthwise_conv2d(
image, blur_h, strides=[1, 1, 1, 1], padding=padding)
blurred = tf.nn.depthwise_conv2d(
blurred, blur_v, strides=[1, 1, 1, 1], padding=padding)
if expand_batch_dim:
blurred = tf.squeeze(blurred, axis=0)
return blurred
def random_gaussian_blur(image, prob=0.5):
"""Randomly blur an image.
Args:
image: Tensor
prob: probability to apply Gaussian blur
Returns:
output: blurred image
"""
random_value = tf.random.uniform([])
is_blurred = tf.less_equal(random_value, prob)
## EfficientSeg style
sigma = tf.random.uniform([]) * 1.15 + 0.15
radius = tf.cast(sigma * 4.0 + 0.5, tf.int32)
kernel_size = radius * 2 + 1
blurred = gaussian_blur(image, kernel_size, sigma)
output = tf.cond(is_blurred, lambda: blurred, lambda: image)
return output
def color_jitter(image, brightness=0, contrast=0, saturation=0, hue=0):
"""Distorts the color of the image (jittering order is random).
Args:
image: The input image tensor. Must be in [0, 1]!
brightness: A float, specifying the brightness for color jitter.
contrast: A float, specifying the contrast for color jitter.
saturation: A float, specifying the saturation for color jitter.
hue: A float, specifying the hue for color jitter.
Returns:
The distorted image tensor.
"""
with tf.name_scope('distort_color'):
def apply_transform(i, x):
"""Apply the i-th transformation."""
def brightness_foo():
if brightness == 0:
return x
else:
return tf.image.random_brightness(x, max_delta=brightness)
def contrast_foo():
if contrast == 0:
return x
else:
return tf.image.random_contrast(x, lower=1-contrast, upper=1+contrast)
def saturation_foo():
if saturation == 0:
return x
else:
return tf.image.random_saturation(
x, lower=1-saturation, upper=1+saturation)
def hue_foo():
if hue == 0:
return x
else:
return tf.image.random_hue(x, max_delta=hue)
x = tf.cond(tf.less(i, 2),
lambda: tf.cond(tf.less(i, 1), brightness_foo, contrast_foo),
lambda: tf.cond(tf.less(i, 3), saturation_foo, hue_foo))
return x
perm = tf.random_shuffle(tf.range(4))
for i in range(4):
image = apply_transform(perm[i], image)
image = tf.clip_by_value(image, 0., 1.)
return image
def to_grayscale(image, keep_channels=True):
image = tf.image.rgb_to_grayscale(image)
if keep_channels:
image = tf.tile(image, [1, 1, 3])
return image
def random_color_jitter(image, prob=1.0):
"""Randomly do color jittering on the given image.
Args:
image: Tensor
prob: probability to apply color jittering
Returns:
    output: color-jittered image
"""
brightness = 0.5
contrast = 0.5
saturation = 0.5
hue = 0.25
random_value = tf.random.uniform([])
is_jittered = tf.less_equal(random_value, prob)
jittered = color_jitter(image, brightness, contrast, saturation, hue)
output = tf.cond(is_jittered, lambda: jittered, lambda: image)
return output
def cutout_with_mask(image,
label,
pad_size,
mean_pixel,
ignore_label=255,
valid=None):
"""Apply cutout (https://arxiv.org/abs/1708.04552) to image.
  This operation applies a (2*pad_size x 2*pad_size) mask to a random
  location within `image`. Image pixels inside the mask are filled with
  `mean_pixel` and the corresponding label pixels with `ignore_label`. The
  location where the mask is applied is chosen uniformly at random over the
  whole image.
Args:
image: An image Tensor of type float32.
label: An image Tensor of type int32.
pad_size: Specifies how big the zero mask that will be generated is that
is applied to the image. The mask will be of size
(2*pad_size x 2*pad_size).
mean_pixel: What pixel value to fill in the image in the area that has
the cutout mask applied to it.
ignore_label: What value to fill in the label in the area that has the
      cutout mask applied to it.
    valid: Optional validity mask `Tensor`; pixels inside the cutout region
      are zeroed out in it when provided.
  Returns:
    A tuple of the cutout-augmented image and label Tensors, plus the
    updated `valid` mask when `valid` is not None.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=0, maxval=image_height,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=0, maxval=image_width,
dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, -1)
label = tf.where(
tf.equal(mask, 0),
tf.ones_like(label, dtype=label.dtype) * ignore_label,
label)
im_mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(im_mask, 0),
tf.ones_like(image, dtype=image.dtype) * mean_pixel,
image)
if valid is not None:
valid = tf.where(
tf.equal(mask, 0),
tf.zeros_like(valid, dtype=valid.dtype),
valid)
return image, label, valid
return image, label
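# --- Usage sketch (added; not part of the original module) ---
# Small illustration of two of the helpers above on a dummy image/label
# pair; shapes and probabilities are arbitrary and the snippet assumes an
# eager-style TF environment where these ops can be evaluated directly.
def _demo_flip_and_crop():
  image = tf.random.uniform([64, 80, 3])
  label = tf.zeros([64, 80, 1], dtype=tf.float32)
  # Flip image and label together so they stay aligned.
  flipped_image, flipped_label, is_flipped = flip_dim([image, label], prob=0.5)
  # Apply the same random 32x32 crop to both tensors.
  cropped_image, cropped_label = random_crop([flipped_image, flipped_label], 32, 32)
  return cropped_image, cropped_label, is_flipped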
|
bookwyrm/views/interaction.py
|
mouse-reeve/fedireads
| 270 |
124258
|
""" boosts and favs """
from django.contrib.auth.decorators import login_required
from django.core.cache import cache
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.views import View
from bookwyrm import models
from .helpers import is_api_request
# pylint: disable= no-self-use
@method_decorator(login_required, name="dispatch")
class Favorite(View):
"""like a status"""
def post(self, request, status_id):
"""create a like"""
cache.delete(f"fav-{request.user.id}-{status_id}")
status = models.Status.objects.get(id=status_id)
try:
models.Favorite.objects.create(status=status, user=request.user)
except IntegrityError:
# you already fav'ed that
return HttpResponseBadRequest()
if is_api_request(request):
return HttpResponse()
return redirect(request.headers.get("Referer", "/"))
@method_decorator(login_required, name="dispatch")
class Unfavorite(View):
"""take back a fav"""
def post(self, request, status_id):
"""unlike a status"""
cache.delete(f"fav-{request.user.id}-{status_id}")
status = models.Status.objects.get(id=status_id)
try:
favorite = models.Favorite.objects.get(status=status, user=request.user)
except models.Favorite.DoesNotExist:
# can't find that status, idk
return HttpResponseNotFound()
favorite.delete()
if is_api_request(request):
return HttpResponse()
return redirect(request.headers.get("Referer", "/"))
@method_decorator(login_required, name="dispatch")
class Boost(View):
"""boost a status"""
def post(self, request, status_id):
"""boost a status"""
cache.delete(f"boost-{request.user.id}-{status_id}")
status = models.Status.objects.get(id=status_id)
# is it boostable?
if not status.boostable:
return HttpResponseBadRequest()
if models.Boost.objects.filter(
boosted_status=status, user=request.user
).exists():
# you already boosted that.
return redirect(request.headers.get("Referer", "/"))
models.Boost.objects.create(
boosted_status=status,
privacy=status.privacy,
user=request.user,
)
if is_api_request(request):
return HttpResponse()
return redirect(request.headers.get("Referer", "/"))
@method_decorator(login_required, name="dispatch")
class Unboost(View):
"""boost a status"""
def post(self, request, status_id):
"""boost a status"""
cache.delete(f"boost-{request.user.id}-{status_id}")
status = models.Status.objects.get(id=status_id)
boost = models.Boost.objects.filter(
boosted_status=status, user=request.user
).first()
boost.delete()
if is_api_request(request):
return HttpResponse()
return redirect(request.headers.get("Referer", "/"))
|
tests/test_setup_dirs.py
|
flying-sheep/goatools
| 477 |
124293
|
#!/usr/bin/env python3
"""Test that all GOATOOLS package dirs are in the setup.py file"""
# pylint: disable=wrong-import-position
from os import walk
from os.path import join
from os.path import abspath
import sys
sys.argv = [abspath(__file__), '--help']
from setup import NAME # goatools
from setup import PACKAGES # modules in goatools
from tests.utils import REPO
def test_setup_dirs():
"""Test that all GOATOOLS package dirs are in the setup.py file"""
pkgs_setup = set(m for m in PACKAGES if 'test_' not in m)
pkgs_dirs = _get_pkgmods()
assert pkgs_dirs.issubset(pkgs_setup), _errmsg(pkgs_setup, pkgs_dirs)
print('**NOTE: TEST PASSED')
def _errmsg(pkgs_setup, pkgs_dirs):
"""Print the packages which are not found in setup.py"""
len_name = len(NAME) + 1
missing = set(m[len_name:] for m in pkgs_dirs.difference(pkgs_setup))
return '**FATAL: MISSING PACKAGES in setup.py:\n NAME + ".{P}",'.format(
P='",\n NAME + ".'.join(sorted(missing)))
def _get_pkgmods():
"""Get the GOATOOLS package modules by walking the package dirs"""
pkgs = set()
len_repo = len(REPO) + 1
for root, _, _ in walk(join(REPO, NAME)):
if root[-11:] != '__pycache__':
pkg = root[len_repo:].replace('/', '.')
if 'test_' not in pkg:
pkgs.add(pkg)
return pkgs
if __name__ == '__main__':
test_setup_dirs()
|
tests/forte/data/serialization_tests.py
|
J007X/forte
| 163 |
124330
|
# Copyright 2021 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from typing import Dict
from ddt import data, ddt
from forte.data.caster import MultiPackBoxer
from forte.data.data_pack import DataPack
from forte.data.multi_pack import MultiPack
from forte.data.readers import (
OntonotesReader,
DirPackReader,
MultiPackDirectoryReader,
)
from forte.pipeline import Pipeline
from forte.processors.base import (
MultiPackProcessor,
)
from forte.processors.writers import (
PackIdJsonPackWriter,
PackIdMultiPackWriter,
AutoNamePackWriter,
)
from ft.onto.base_ontology import (
Sentence,
EntityMention,
CrossDocEntityRelation,
)
class CopySentence(MultiPackProcessor):
"""
Copy the content from existing pack to a new pack.
"""
def _process(self, input_pack: MultiPack):
from_pack: DataPack = input_pack.get_pack(self.configs.copy_from)
copy_pack: DataPack = input_pack.add_pack(self.configs.copy_to)
copy_pack.set_text(from_pack.text)
if from_pack.pack_name is not None:
copy_pack.pack_name = from_pack.pack_name + "_copy"
else:
copy_pack.pack_name = "copy"
s: Sentence
for s in from_pack.get(Sentence):
Sentence(copy_pack, s.begin, s.end)
e: EntityMention
for e in from_pack.get(EntityMention):
EntityMention(copy_pack, e.begin, e.end)
@classmethod
def default_configs(cls) -> Dict[str, str]:
return {"copy_from": "default", "copy_to": "duplicate"}
class NaiveCoref(MultiPackProcessor):
def _process(self, input_pack: MultiPack):
fp = input_pack.get_pack_at(0)
sp = input_pack.get_pack_at(1)
nes1 = list(fp.get(EntityMention))
nes2 = list(sp.get(EntityMention))
for ne1 in nes1:
for ne2 in nes2:
if ne1.text == ne2.text:
CrossDocEntityRelation(input_pack, ne1, ne2)
@ddt
class SerializationTest(unittest.TestCase):
def setUp(self):
file_dir_path = os.path.dirname(__file__)
self.data_path = os.path.join(
file_dir_path, "../../../", "data_samples", "ontonotes", "00"
)
@data(
(True, "pickle"),
(False, "pickle"),
(True, "jsonpickle"),
(False, "jsonpickle"),
)
def testMultiPackWriting(self, config_data):
zip_pack, method = config_data
# Use different sub-directory to avoid conflicting.
subdir = f"{zip_pack}_{method}"
with tempfile.TemporaryDirectory() as main_output:
# Prepare input data.
prepared_input: str = os.path.join(
main_output, subdir, "input_packs"
)
data_output: str = os.path.join(main_output, subdir, "output")
suffix = ".pickle" if method == "pickle" else ".json"
if zip_pack:
suffix = suffix + ".gz"
nlp = Pipeline[DataPack]()
nlp.set_reader(OntonotesReader())
nlp.add(
PackIdJsonPackWriter(),
{
"output_dir": prepared_input,
"overwrite": True,
"serialize_method": method,
"zip_pack": zip_pack,
},
)
nlp.run(self.data_path)
# Convert to multi pack.
coref_pl = Pipeline()
coref_pl.set_reader(
DirPackReader(),
{
"serialize_method": method,
"zip_pack": zip_pack,
"suffix": suffix,
},
)
coref_pl.add(MultiPackBoxer())
coref_pl.add(CopySentence())
coref_pl.add(NaiveCoref())
coref_pl.add(
PackIdMultiPackWriter(),
config={
"output_dir": data_output,
"overwrite": True,
"serialize_method": method,
"zip_pack": zip_pack,
},
)
coref_pl.run(prepared_input)
self.assertTrue(
os.path.exists(os.path.join(data_output, "multi.idx"))
)
self.assertTrue(
os.path.exists(os.path.join(data_output, "pack.idx"))
)
self.assertTrue(os.path.exists(os.path.join(data_output, "packs")))
self.assertTrue(os.path.exists(os.path.join(data_output, "multi")))
# Read the multi pack again.
mp_pipeline = Pipeline()
mp_pipeline.set_reader(
MultiPackDirectoryReader(),
config={
"suffix": suffix,
"zip_pack": zip_pack,
"serialize_method": method,
"data_pack_dir": os.path.join(data_output, "packs"),
"multi_pack_dir": os.path.join(data_output, "multi"),
},
).initialize()
re: CrossDocEntityRelation
for mp in mp_pipeline.process_dataset():
for re in mp.get(CrossDocEntityRelation):
self.assertEqual(re.get_parent().text, re.get_child().text)
@data(
(True, "pickle"),
(False, "pickle"),
(True, "jsonpickle"),
(False, "jsonpickle"),
)
def testPackWriting(self, config_data):
zip_pack, method = config_data
with tempfile.TemporaryDirectory() as main_output:
write_pipeline = Pipeline[DataPack]()
write_pipeline.set_reader(OntonotesReader())
write_pipeline.add(
AutoNamePackWriter(),
{
"output_dir": os.path.join(main_output, "packs"),
"overwrite": True,
"zip_pack": zip_pack,
"serialize_method": method,
},
)
write_pipeline.run(self.data_path)
read_pipeline = Pipeline[DataPack]()
read_pipeline.set_reader(DirPackReader())
|
Drake-Z/0009/0009.py
|
saurabh896/python-1
| 3,976 |
124383
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'Problem 0009: given an HTML file, find the links inside it.'
__author__ = 'Drake-Z'
import os, re
from html.parser import HTMLParser
from html.entities import name2codepoint
class MyHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
if tag == 'a':
for (variables, value) in attrs:
if variables == 'href':
if re.match(r'http(.*?)', value):
print(value)
if __name__ == '__main__':
with open('test.html', encoding='utf-8') as html:
parser = MyHTMLParser()
parser.feed(html.read())
|
stanford-augmented-image-classification/i_commandline_arguments.py
|
meghanaravikumar/sigopt-examples
| 213 |
124392
|
from a_resnet_training_common_cli import Hyperparameters
from stanford_cars_augmentation_cli import AugmentHyperparameters, AugmentCLI
def generate_cli_hpo(parser):
"""Adding Hyperparameters to CLI arguments"""
parser.add_argument("--" + Hyperparameters.SCEDULER_RATE.value, dest=Hyperparameters.SCEDULER_RATE.value,
type=float,
help="number of epochs to wait before annealing learning rate", required=True)
parser.add_argument("--" + Hyperparameters.LEARNING_RATE.value, dest=Hyperparameters.LEARNING_RATE.value,
type=float,
help="learning rate to use", required=True)
parser.add_argument("--" + Hyperparameters.BATCH_SIZE.value, dest=Hyperparameters.BATCH_SIZE.value, type=int,
help="batch size to use", required=True)
parser.add_argument("--" + Hyperparameters.LEARNING_RATE_SCHEDULER.value,
dest=Hyperparameters.LEARNING_RATE_SCHEDULER.value, type=float,
help="annealing schedule rate to use. multiplied to learning rate", required=True)
parser.add_argument("--" + Hyperparameters.WEIGHT_DECAY.value, dest=Hyperparameters.WEIGHT_DECAY.value, type=float,
help="weight decay to use", required=True)
parser.add_argument("--" + Hyperparameters.MOMENTUM.value, dest=Hyperparameters.MOMENTUM.value, type=float,
help="momentum to use", required=True)
parser.add_argument("--" + Hyperparameters.NESTEROV.value, dest=Hyperparameters.NESTEROV.value, action='store_true',
help="use Nesterov")
parser.add_argument("--" + "no-" + Hyperparameters.NESTEROV.value, dest=Hyperparameters.NESTEROV.value,
action='store_false',
help="do not use Nesterov")
return parser
def generate_cli_hpo_augment(parser):
parser.add_argument("--" + AugmentHyperparameters.BRIGHTNESS.value, dest=AugmentHyperparameters.BRIGHTNESS.value,
type=float,
help="brightness factor. recommended range 0 - 9", required=True, default=3.2907)
parser.add_argument("--" + AugmentHyperparameters.CONTRAST.value, dest=AugmentHyperparameters.CONTRAST.value,
type=float,
help="contrast factor. recommended range 0-100", required=True, default=56.793)
parser.add_argument("--" + AugmentHyperparameters.HUE.value, dest=AugmentHyperparameters.HUE.value,
type=float,
help="hue factor. recommend range -0.5 - 0.5", required=True, default=-0.01286)
parser.add_argument("--" + AugmentHyperparameters.SATURATION.value, dest=AugmentHyperparameters.SATURATION.value,
type=float,
help="saturation factor. recommended range 0-100", required=True, default=2.36640)
return parser
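# Hedged usage sketch (added for illustration, not from the original module):
# these helpers only attach arguments, so the caller is assumed to build the
# argparse parser itself, roughly like this:
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   parser = generate_cli_hpo(parser)
#   parser = generate_cli_hpo_augment(parser)
#   args = parser.parse_args()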
|
train_deep_ensemble.py
|
omegafragger/deterministic-uncertainty-quantification
| 180 |
124395
|
<reponame>omegafragger/deterministic-uncertainty-quantification
import argparse
import pathlib
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from utils.datasets import all_datasets
from utils.cnn_duq import SoftmaxModel as CNN
from torchvision.models import resnet18
class ResNet(nn.Module):
def __init__(self, input_size, num_classes):
super().__init__()
self.resnet = resnet18(pretrained=False, num_classes=num_classes)
# Adapted resnet from:
# https://github.com/kuangliu/pytorch-cifar/blob/master/models/resnet.py
self.resnet.conv1 = nn.Conv2d(
3, 64, kernel_size=3, stride=1, padding=1, bias=False
)
self.resnet.maxpool = nn.Identity()
def forward(self, x):
x = self.resnet(x)
x = F.log_softmax(x, dim=1)
return x
def train(model, train_loader, optimizer, epoch, loss_fn):
model.train()
total_loss = []
for batch_idx, (data, target) in enumerate(tqdm(train_loader)):
data = data.cuda()
target = target.cuda()
optimizer.zero_grad()
prediction = model(data)
loss = loss_fn(prediction, target)
loss.backward()
optimizer.step()
total_loss.append(loss.item())
avg_loss = torch.tensor(total_loss).mean()
print(f"Epoch: {epoch}:")
print(f"Train Set: Average Loss: {avg_loss:.2f}")
def test(models, test_loader, loss_fn):
models.eval()
loss = 0
correct = 0
for data, target in test_loader:
with torch.no_grad():
data = data.cuda()
target = target.cuda()
losses = torch.empty(len(models), data.shape[0])
predictions = []
for i, model in enumerate(models):
predictions.append(model(data))
losses[i, :] = loss_fn(predictions[i], target, reduction="sum")
predictions = torch.stack(predictions)
loss += torch.mean(losses)
avg_prediction = predictions.exp().mean(0)
# get the index of the max log-probability
class_prediction = avg_prediction.max(1)[1]
correct += (
class_prediction.eq(target.view_as(class_prediction)).sum().item()
)
loss /= len(test_loader.dataset)
percentage_correct = 100.0 * correct / len(test_loader.dataset)
print(
"Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)".format(
loss, correct, len(test_loader.dataset), percentage_correct
)
)
return loss, percentage_correct
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--epochs", type=int, default=75, help="number of epochs to train (default: 75)"
)
parser.add_argument(
"--lr", type=float, default=0.05, help="learning rate (default: 0.05)"
)
parser.add_argument(
"--ensemble", type=int, default=5, help="Ensemble size (default: 5)"
)
parser.add_argument(
"--dataset",
required=True,
choices=["FashionMNIST", "CIFAR10"],
help="Select a dataset",
)
parser.add_argument("--seed", type=int, default=1, help="random seed (default: 1)")
args = parser.parse_args()
print(args)
torch.manual_seed(args.seed)
loss_fn = F.nll_loss
ds = all_datasets[args.dataset]()
input_size, num_classes, train_dataset, test_dataset = ds
kwargs = {"num_workers": 4, "pin_memory": True}
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=128, shuffle=True, **kwargs
)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=5000, shuffle=False, **kwargs
)
if args.dataset == "FashionMNIST":
milestones = [10, 20]
ensemble = [CNN(input_size, num_classes).cuda() for _ in range(args.ensemble)]
else:
# CIFAR-10
milestones = [25, 50]
ensemble = [
ResNet(input_size, num_classes).cuda() for _ in range(args.ensemble)
]
ensemble = torch.nn.ModuleList(ensemble)
optimizers = []
schedulers = []
for model in ensemble:
# Need different optimisers to apply weight decay and momentum properly
# when only optimising one element of the ensemble
optimizers.append(
torch.optim.SGD(
model.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4
)
)
schedulers.append(
torch.optim.lr_scheduler.MultiStepLR(
optimizers[-1], milestones=milestones, gamma=0.1
)
)
for epoch in range(1, args.epochs + 1):
for i, model in enumerate(ensemble):
train(model, train_loader, optimizers[i], epoch, loss_fn)
schedulers[i].step()
test(ensemble, test_loader, loss_fn)
pathlib.Path("saved_models").mkdir(exist_ok=True)
path = f"saved_models/{args.dataset}_{len(ensemble)}"
torch.save(ensemble.state_dict(), path + "_ensemble.pt")
if __name__ == "__main__":
main()
|
pennylane/gradients/general_shift_rules.py
|
therooler/pennylane
| 539 |
124400
|
<reponame>therooler/pennylane
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a function for generating generalized parameter shift rules and
helper methods for processing shift rules as well as for creating tapes with
shifted parameters."""
import functools
import itertools
import warnings
import numpy as np
import pennylane as qml
def process_shifts(rule, tol=1e-10, batch_duplicates=True):
"""Utility function to process gradient rules.
Args:
rule (array): a ``(M, N)`` array corresponding to ``M`` terms
with parameter shifts. ``N`` has to be either ``2`` or ``3``.
The first column corresponds to the linear combination coefficients;
the last column contains the shift values.
If ``N=3``, the middle column contains the multipliers.
tol (float): floating point tolerance used when comparing shifts/coefficients
Terms with coefficients below ``tol`` will be removed.
batch_duplicates (bool): whether to check the input ``rule`` for duplicate
shift values in its second column.
Returns:
array: The processed shift rule with small entries rounded to 0, sorted
with respect to the absolute value of the shifts, and groups of shift
terms with identical (multiplier and) shift fused into one term each,
if ``batch_duplicates=True``.
This utility function accepts coefficients and shift values as well as optionally
multipliers, and performs the following processing:
- Set all small (within absolute tolerance ``tol``) coefficients and shifts to 0
- Remove terms where the coefficients are 0 (including the ones set to 0 in the previous step)
- Terms with the same shift value (and multiplier) are combined into a single term.
    - Finally, the terms are sorted according to the absolute value of ``shift``.
This ensures that a zero-shift term, if it exists, is returned first.
"""
# set all small coefficients, multipliers if present, and shifts to zero.
rule[np.abs(rule) < tol] = 0
# remove columns where the coefficients are 0
rule = rule[~(rule[:, 0] == 0)]
if batch_duplicates:
round_decimals = int(-np.log10(tol))
rounded_rule = np.round(rule[:, 1:], round_decimals)
# determine unique shifts or (multiplier, shift) combinations
unique_mods = np.unique(rounded_rule, axis=0)
if rule.shape[0] != unique_mods.shape[0]:
matches = np.all(rounded_rule[:, np.newaxis] == unique_mods[np.newaxis, :], axis=-1)
# TODO: The following line probably can be done in numpy
coeffs = [np.sum(rule[slc, 0]) for slc in matches.T]
rule = np.hstack([np.stack(coeffs)[:, np.newaxis], unique_mods])
# sort columns according to abs(shift)
return rule[np.argsort(np.abs(rule[:, -1]))]
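# Illustrative sketch (added for this edit, not part of the library source):
# two terms sharing the shift value pi/2 are fused and a near-zero
# coefficient is dropped.
#
# >>> rule = np.array([[0.3, np.pi / 2], [0.2, np.pi / 2], [1e-12, -np.pi / 2]])
# >>> process_shifts(rule)
# array([[0.5       , 1.57079633]])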
@functools.lru_cache(maxsize=None)
def eigvals_to_frequencies(eigvals):
r"""Convert an eigenvalue spectrum to frequency values, defined
    as the set of positive, unique differences of the eigenvalues in the spectrum.
Args:
eigvals (tuple[int, float]): eigenvalue spectra
Returns:
tuple[int, float]: frequencies
**Example**
>>> eigvals = (-0.5, 0, 0, 0.5)
>>> eigvals_to_frequencies(eigvals)
(0.5, 1.0)
"""
unique_eigvals = sorted(set(eigvals))
return tuple({j - i for i, j in itertools.combinations(unique_eigvals, 2)})
@functools.lru_cache(maxsize=None)
def frequencies_to_period(frequencies, decimals=5):
r"""Returns the period of a Fourier series as defined
by a set of frequencies.
The period is simply :math:`2\pi/gcd(frequencies)`,
where :math:`\text{gcd}` is the greatest common divisor.
Args:
        frequencies (tuple[int, float]): frequency spectrum
decimals (int): Number of decimal places to round to
if there are non-integral frequencies.
Returns:
        float: the period of the Fourier series
**Example**
>>> frequencies = (0.5, 1.0)
>>> frequencies_to_period(frequencies)
12.566370614359172
"""
try:
gcd = np.gcd.reduce(frequencies)
except TypeError:
        # np.gcd only supports integer frequencies
exponent = 10**decimals
frequencies = np.round(frequencies, decimals) * exponent
gcd = np.gcd.reduce(np.int64(frequencies)) / exponent
return 2 * np.pi / gcd
@functools.lru_cache(maxsize=None)
def _get_shift_rule(frequencies, shifts=None):
n_freqs = len(frequencies)
frequencies = qml.math.sort(qml.math.stack(frequencies))
freq_min = frequencies[0]
if len(set(frequencies)) != n_freqs or freq_min <= 0:
raise ValueError(
f"Expected frequencies to be a list of unique positive values, instead got {frequencies}."
)
mu = np.arange(1, n_freqs + 1)
if shifts is None: # assume equidistant shifts
shifts = (2 * mu - 1) * np.pi / (2 * n_freqs * freq_min)
equ_shifts = True
else:
shifts = qml.math.sort(qml.math.stack(shifts))
if len(shifts) != n_freqs:
raise ValueError(
f"Expected number of shifts to equal the number of frequencies ({n_freqs}), instead got {shifts}."
)
if len(set(shifts)) != n_freqs:
raise ValueError(f"Shift values must be unique, instead got {shifts}")
equ_shifts = np.allclose(shifts, (2 * mu - 1) * np.pi / (2 * n_freqs * freq_min))
if len(set(np.round(np.diff(frequencies), 10))) <= 1 and equ_shifts: # equidistant case
coeffs = (
freq_min
* (-1) ** (mu - 1)
/ (4 * n_freqs * np.sin(np.pi * (2 * mu - 1) / (4 * n_freqs)) ** 2)
)
else: # non-equidistant case
sin_matrix = -4 * np.sin(np.outer(shifts, frequencies))
det_sin_matrix = np.linalg.det(sin_matrix)
if abs(det_sin_matrix) < 1e-6:
warnings.warn(
f"Solving linear problem with near zero determinant ({det_sin_matrix}) "
"may give unstable results for the parameter shift rules."
)
coeffs = -2 * np.linalg.solve(sin_matrix.T, frequencies)
coeffs = np.concatenate((coeffs, -coeffs))
shifts = np.concatenate((shifts, -shifts)) # pylint: disable=invalid-unary-operand-type
return np.stack([coeffs, shifts]).T
def _iterate_shift_rule_with_multipliers(rule, order, period):
r"""Helper method to repeat a shift rule that includes multipliers multiple
times along the same parameter axis for higher-order derivatives."""
combined_rules = []
for partial_rules in itertools.product(rule, repeat=order):
c, m, s = np.stack(partial_rules).T
cumul_shift = 0.0
for _m, _s in zip(m, s):
cumul_shift *= _m
cumul_shift += _s
if period is not None:
cumul_shift = np.mod(cumul_shift + 0.5 * period, period) - 0.5 * period
combined_rules.append(np.stack([np.prod(c), np.prod(m), cumul_shift]))
# combine all terms in the linear combination into a single
# array, with column order (coefficients, multipliers, shifts)
return qml.math.stack(combined_rules)
def _iterate_shift_rule(rule, order, period=None):
r"""Helper method to repeat a shift rule multiple times along the same
parameter axis for higher-order derivatives."""
if len(rule[0]) == 3:
return _iterate_shift_rule_with_multipliers(rule, order, period)
# TODO: optimization: Without multipliers, the order of shifts does not matter,
# so that we can only iterate over the symmetric part of the combined_rules tensor.
# This requires the corresponding multinomial prefactors to be included in the coeffs.
combined_rules = np.array(list(itertools.product(rule, repeat=order)))
# multiply the coefficients of each rule
coeffs = np.prod(combined_rules[..., 0], axis=1)
# sum the shifts of each rule
shifts = np.sum(combined_rules[..., 1], axis=1)
if period is not None:
# if a period is provided, make sure the shift value is within [-period/2, period/2)
shifts = np.mod(shifts + 0.5 * period, period) - 0.5 * period
return qml.math.stack([coeffs, shifts]).T
def _combine_shift_rules(rules):
r"""Helper method to combine shift rules for multiple parameters into
simultaneous multivariate shift rules."""
combined_rules = []
for partial_rules in itertools.product(*rules):
c, *m, s = np.stack(partial_rules).T
combined = np.concatenate([[np.prod(c)], *m, s])
combined_rules.append(np.stack(combined))
return np.stack(combined_rules)
@functools.lru_cache()
def generate_shift_rule(frequencies, shifts=None, order=1):
r"""Computes the parameter shift rule for a unitary based on its generator's eigenvalue
frequency spectrum.
To compute gradients of circuit parameters in variational quantum algorithms, expressions for
cost function first derivatives with respect to the variational parameters can be cast into
linear combinations of expectation values at shifted parameter values. The coefficients and
shifts defining the linear combination can be obtained from the unitary generator's eigenvalue
frequency spectrum. Details can be found in
`Wierichs et al. (2022) <https://doi.org/10.22331/q-2022-03-30-677>`__.
Args:
frequencies (tuple[int or float]): The tuple of eigenvalue frequencies. Eigenvalue
frequencies are defined as the unique positive differences obtained from a set of
eigenvalues.
shifts (tuple[int or float]): the tuple of shift values. If unspecified,
equidistant shifts are assumed. If supplied, the length of this tuple should match the
number of given frequencies.
order (int): the order of differentiation to compute the shift rule for
Returns:
tuple: a tuple of coefficients and shifts describing the gradient rule for the
parameter-shift method. For parameter :math:`\phi`, the coefficients :math:`c_i` and the
shifts :math:`s_i` combine to give a gradient rule of the following form:
.. math:: \frac{\partial}{\partial\phi}f = \sum_{i} c_i f(\phi + s_i).
where :math:`f(\phi) = \langle 0|U(\phi)^\dagger \hat{O} U(\phi)|0\rangle`
for some observable :math:`\hat{O}` and the unitary :math:`U(\phi)=e^{iH\phi}`.
Raises:
ValueError: if ``frequencies`` is not a list of unique positive values, or if ``shifts``
(if specified) is not a list of unique values the same length as ``frequencies``.
**Examples**
An example of obtaining the frequencies from generator eigenvalues, and obtaining the parameter
shift rule:
>>> eigvals = (-0.5, 0, 0, 0.5)
>>> frequencies = eigvals_to_frequencies(eigvals)
>>> generate_shift_rule(frequencies)
array([[ 0.4267767 , 1.57079633],
[-0.4267767 , -1.57079633],
[-0.0732233 , 4.71238898],
[ 0.0732233 , -4.71238898]])
An example with explicitly specified shift values:
>>> frequencies = (1, 2, 4)
>>> shifts = (np.pi / 3, 2 * np.pi / 3, np.pi / 4)
>>> generate_shift_rule(frequencies, shifts)
array([[ 3. , 0.78539816],
[-3. , -0.78539816],
[-2.09077028, 1.04719755],
[ 2.09077028, -1.04719755],
[ 0.2186308 , 2.0943951 ],
[-0.2186308 , -2.0943951 ]])
Higher order shift rules (corresponding to the :math:`n`-th derivative of the parameter) can be
requested via the ``order`` argument. For example, to extract the second order shift rule for a
gate with generator :math:`X/2`:
>>> eigvals = (0.5, -0.5)
>>> frequencies = eigvals_to_frequencies(eigvals)
>>> generate_shift_rule(frequencies, order=2)
array([[-0.5 , 0. ],
[ 0.5 , -3.14159265]])
This corresponds to the shift rule
    :math:`\frac{\partial^2 f}{\partial \phi^2} = \frac{1}{2} \left[f(\phi) - f(\phi-\pi)\right]`.
"""
frequencies = tuple(f for f in frequencies if f > 0)
rule = _get_shift_rule(frequencies, shifts=shifts)
if order > 1:
T = frequencies_to_period(frequencies)
rule = _iterate_shift_rule(rule, order, period=T)
return process_shifts(rule, tol=1e-10)
def generate_multi_shift_rule(frequencies, shifts=None, orders=None):
r"""Computes the parameter shift rule with respect to two parametrized unitaries,
given their generator's eigenvalue frequency spectrum. This corresponds to a
shift rule that computes off-diagonal elements of higher order derivative tensors.
For the second order, this corresponds to the Hessian.
Args:
frequencies (list[tuple[int or float]]): List of eigenvalue frequencies corresponding
to the each parametrized unitary.
shifts (list[tuple[int or float]]): List of shift values corresponding to each parametrized
unitary. If unspecified, equidistant shifts are assumed. If supplied, the length
of each tuple in the list must be the same as the length of the corresponding tuple in
``frequencies``.
orders (list[int]): the order of differentiation for each parametrized unitary.
If unspecified, the first order derivative shift rule is computed for each parametrized
unitary.
Returns:
tuple: a tuple of coefficients, shifts for the first parameter, and shifts for the
second parameter, describing the gradient rule
for the parameter-shift method.
For parameters :math:`\phi_a` and :math:`\phi_b`, the
coefficients :math:`c_i` and the shifts :math:`s^{(a)}_i`, :math:`s^{(b)}_i`,
combine to give a gradient rule of the following form:
.. math::
\frac{\partial^2}{\partial\phi_a \partial\phi_b}f
= \sum_{i} c_i f(\phi_a + s^{(a)}_i, \phi_b + s^{(b)}_i).
where :math:`f(\phi_a, \phi_b) = \langle 0|U(\phi_a)^\dagger V(\phi_b)^\dagger \hat{O} V(\phi_b) U(\phi_a)|0\rangle`
for some observable :math:`\hat{O}` and unitaries :math:`U(\phi_a)=e^{iH_a\phi_a}` and :math:`V(\phi_b)=e^{iH_b\phi_b}`.
**Example**
>>> generate_multi_shift_rule([(1,), (1,)])
array([[ 0.25 , 1.57079633, 1.57079633],
[-0.25 , 1.57079633, -1.57079633],
[-0.25 , -1.57079633, 1.57079633],
[ 0.25 , -1.57079633, -1.57079633]])
This corresponds to the gradient rule
.. math::
\frac{\partial^2 f}{\partial x\partial y} &= \frac{1}{4}
\left[f(x+\pi/2, y+\pi/2) - f(x+\pi/2, y-\pi/2)\\
&~~~- f(x-\pi/2, y+\pi/2) + f(x-\pi/2, y-\pi/2) \right].
"""
rules = []
shifts = shifts or [None] * len(frequencies)
orders = orders or [1] * len(frequencies)
for f, s, o in zip(frequencies, shifts, orders):
rule = generate_shift_rule(f, shifts=s, order=o)
rules.append(process_shifts(rule))
return _combine_shift_rules(rules)
def generate_shifted_tapes(tape, index, shifts, multipliers=None):
r"""Generate a list of tapes where one marked trainable parameter has
been shifted by the provided shift values.
Args:
tape (.QuantumTape): input quantum tape
index (int): index of the trainable parameter to shift
shifts (Sequence[float or int]): sequence of shift values.
The length determines how many parameter-shifted tapes are created.
multipliers (Sequence[float or int]): sequence of multiplier values.
The length should match the one of ``shifts``. Each multiplier scales the
corresponding gate parameter before the shift is applied. If not provided, the
parameters will not be scaled.
Returns:
list[QuantumTape]: List of quantum tapes. In each tape the parameter indicated
by ``index`` has been shifted by the values in ``shifts``. The number of tapes
        matches the length of ``shifts`` and ``multipliers`` (if provided).
"""
params = list(tape.get_parameters())
if multipliers is None:
multipliers = np.ones_like(shifts)
tapes = []
for shift, multiplier in zip(shifts, multipliers):
new_params = params.copy()
shifted_tape = tape.copy(copy_operations=True)
new_params[index] = new_params[index] * qml.math.convert_like(multiplier, new_params[index])
new_params[index] = new_params[index] + qml.math.convert_like(shift, new_params[index])
shifted_tape.set_parameters(new_params)
tapes.append(shifted_tape)
return tapes
def generate_multishifted_tapes(tape, indices, shifts, multipliers=None):
r"""Generate a list of tapes where multiple marked trainable
parameters have been shifted by the provided shift values.
Args:
tape (.QuantumTape): input quantum tape
indices (Sequence[int]): indices of the trainable parameters to shift
shifts (Sequence[Sequence[float or int]]): Nested sequence of shift values.
The length of the outer Sequence determines how many parameter-shifted
            tapes are created. Each inner sequence should have the same length
            as ``indices``.
multipliers (Sequence[Sequence[float or int]]): Nested sequence
            of multiplier values of the same format as ``shifts``. Each multiplier
scales the corresponding gate parameter before the shift is applied.
If not provided, the parameters will not be scaled.
Returns:
list[QuantumTape]: List of quantum tapes. Each tape has the marked parameters
indicated by ``indices`` shifted by the values of ``shifts``. The number
            of tapes will match the length of the outer sequence in ``shifts``
            and ``multipliers`` (if provided).
"""
params = list(tape.get_parameters())
if multipliers is None:
multipliers = np.ones_like(shifts)
tapes = []
for _shifts, _multipliers in zip(shifts, multipliers):
new_params = params.copy()
shifted_tape = tape.copy(copy_operations=True)
for idx, shift, multiplier in zip(indices, _shifts, _multipliers):
dtype = getattr(new_params[idx], "dtype", float)
new_params[idx] = new_params[idx] * qml.math.convert_like(multiplier, new_params[idx])
new_params[idx] = new_params[idx] + qml.math.convert_like(shift, new_params[idx])
new_params[idx] = qml.math.cast(new_params[idx], dtype)
shifted_tape.set_parameters(new_params)
tapes.append(shifted_tape)
return tapes
|
test/issues/160/main.py
|
InsertAReallyCreativeNameHere/tinyexr
| 479 |
124406
|
<gh_stars>100-1000
import OpenEXR
import Imath
import numpy as np
import simpleimageio as sio
width = 420
height = 32
border_left = 0
border_right = 420 - 80
num_splats = 10000
red = np.zeros((height, width), dtype=np.float32)
green = np.zeros((height, width), dtype=np.float32)
blue = np.zeros((height, width), dtype=np.float32)
# splat random color values
rng = np.random.default_rng()
row = rng.integers(low=0, high=height, size=num_splats)
col = rng.integers(low=border_left, high=border_right, size=num_splats)
# if any of the three channels has a fixed value, the problem goes away!
red[row, col] = rng.random(num_splats)
green[row, col] = rng.random(num_splats)
blue[row, col] = rng.random(num_splats)
# add a bunch of test pixels
red[-8, -10] = 1
green[-8, -10] = 1
blue[-8, -10] = 1
red[-4, -8] = 1
green[-4, -8] = 1
blue[-4, -8] = 1
red[-4, -2] = 1
green[-4, -2] = 1
blue[-4, -2] = 1
red[-2, -3] = 0 # setting this to anything other than 0 fixes the problem
green[-2, -3] = 1
blue[-2, -3] = 1
# fill in all of the black region with 0-red color
# red[:,border_right:] = 0
# green[:,border_right:] = 1
# blue[:,border_right:] = 1
# write PIZ compressed via OpenEXR
header = OpenEXR.Header(width, height)
header['compression'] = Imath.Compression(Imath.Compression.PIZ_COMPRESSION)
exr = OpenEXR.OutputFile("gen.exr", header)
exr.writePixels({'R': red.tobytes(), 'G': green.tobytes(), 'B': blue.tobytes()})
exr.close()
# read back in via tinyexr (used internally by simpleimageio)
tinyresult = sio.read("gen.exr")
sio.write("test2.exr", tinyresult)
|
facility/test/facility_grader_test.py
|
mike715/assignment
| 101 |
124420
|
<reponame>mike715/assignment
import sys, os, pytest
sys.path.append(os.getcwd())
from do_grader_lib import PartQuality
from facility import grader
with open('facility/data/fl_3_1', 'r') as input_data_file:
input_data = input_data_file.read()
quality = PartQuality('test', 4052, 2546)
greedy_submission = '2550.013 0\n1 1 0 2\n123\n'
opt_submission = '2545.771 0\n0 0 1 2\n123\n'
# Score Test
def test_full_credit():
result = grader.grade(input_data, quality, opt_submission)
assert(result['score'] == 1.0)
def test_full_credit_opt():
result = grader.grade(input_data, quality, '2545.771 1\n0 0 1 2\n123\n')
assert(result['score'] == 1.0)
def test_full_credit_opt_big():
result = grader.grade(input_data, quality, '2545.771 99\n0 0 1 2\n123\n')
assert(result['score'] == 1.0)
def test_full_credit_opt_neg():
result = grader.grade(input_data, quality, '2545.771 -99\n0 0 1 2\n123\n')
assert(result['score'] == 1.0)
def test_partial_credit():
result = grader.grade(input_data, quality, greedy_submission)
assert(result['score'] == 0.7)
def test_feasible_credit():
result = grader.grade(input_data, quality, '6859.940 0\n2 2 2 2\n123\n')
assert(result['score'] == 0.3)
def test_partial_credit_timelimit():
result = grader.grade(input_data, quality, '2545.771 0\n0 0 1 2\n99999\n')
assert(result['score'] == 0.7)
assert('runtime exceeded' in result['feedback'])
def test_objective_value_warning():
result = grader.grade(input_data, quality, '2000 0\n0 0 1 2\n123\n')
assert(result['score'] == 1.0)
assert('Warning' in result['feedback'])
# Not implementable without the leader board
# passed += testGrade(grade, metadata, db, 'Opt Flag Warning (7/10): ' , '2545.771 1\n1 1 0 2\n123\n', 7)
# Constraint Tests
def test_capacity_one_violated():
result = grader.grade(input_data, quality, '4.0 0\n0 0 0 0\n123\n')
assert(result['score'] == 0.0)
def test_capacity_two_violated():
result = grader.grade(input_data, quality, '4.0 0\n0 1 0 1\n123\n')
assert(result['score'] == 0.0)
# I/O Tests
def test_objective_line_long():
result = grader.grade(input_data, quality, '2545.771 0 0\n0 0 1 2\n123\n')
assert(result['score'] == 0.0)
def test_objective_line_short():
result = grader.grade(input_data, quality, '0\n0 0 1 2\n123\n')
assert(result['score'] == 0.0)
def test_solution_line_long():
result = grader.grade(input_data, quality, '2545.771 0\n0 0 1 2 2\n123\n')
assert(result['score'] == 0.0)
def test_solution_line_short():
    result = grader.grade(input_data, quality, '2545.771 0\n0 0 1\n123\n')
assert(result['score'] == 0.0)
def test_line_count_long():
result = grader.grade(input_data, quality, '0\n702.788 0\n0 0 1 2\n123\n')
assert(result['score'] == 0.0)
def test_line_count_short():
result = grader.grade(input_data, quality, '0 0 1 2\n123\n')
assert(result['score'] == 0.0)
# Type Tests
def test_nan_objective():
result = grader.grade(input_data, quality, 'NaN 0\n0 0 1 2\n123\n')
assert(result['score'] == 0.0)
def test_inf_objective():
result = grader.grade(input_data, quality, 'Inf 0\n0 0 1 2\n123\n')
assert(result['score'] == 0.0)
def test_alpha_objective():
result = grader.grade(input_data, quality, 'a 0\n0 0 1 2\n123\n')
assert(result['score'] == 0.0)
def test_alpha_optflag():
result = grader.grade(input_data, quality, '2545.771 b\n0 0 1 2\n123\n')
assert(result['score'] == 0.0)
def test_alpha_solution():
result = grader.grade(input_data, quality, '2545.771 0\n0 c 1 2\n123\n')
assert(result['score'] == 0.0)
def test_alpha_time():
result = grader.grade(input_data, quality, '19 0\n0 0 1 1\n123d\n')
assert(result['score'] == 0.0)
def test_range_solution():
result = grader.grade(input_data, quality, '2545.771 0\n0 0 1 4\n123d\n')
assert(result['score'] == 0.0)
|
coding_interviews/cracking_the_coding_interview/string/001.05.py
|
LeandroTk/Algorithms
| 205 |
124427
|
'''
One Away: There are three types of edits that can be performed on strings:
insert a character, remove a character, or replace a character.
Given two strings, write a function to check if they are one edit (or zero edits) away.
Example:
pale, ple -> true
pales, pale -> true
pale, bale -> true
pale, bake -> false
bla, bleble -> false
'''
def is_two_chars_away(str1, str2):
return (len(str1) - len(str2) >= 2) or (len(str2) - len(str1) >= 2)
def number_of_needed_changes(bigger_str, smaller_str):
str_counter = {}
for char in bigger_str:
if char in str_counter:
str_counter[char] += 1
else:
str_counter[char] = 1
for char in smaller_str:
if char in str_counter:
str_counter[char] -= 1
needed_changes = 0
for char, counter in str_counter.items():
needed_changes += counter
return needed_changes
def one_away(str1, str2):
if is_two_chars_away(str1, str2):
return False
needed_changes = 0
if len(str1) >= len(str2):
needed_changes = number_of_needed_changes(str1, str2)
else:
needed_changes = number_of_needed_changes(str2, str1)
return needed_changes <= 1
data = [
('pale', 'ple', True),
('pales', 'pale', True),
('pale', 'bale', True),
('paleabc', 'pleabc', True),
('pale', 'ble', False),
('a', 'b', True),
('', 'd', True),
('d', 'de', True),
('pale', 'pale', True),
('pale', 'ple', True),
('ple', 'pale', True),
('pale', 'bale', True),
('pale', 'bake', False),
('pale', 'pse', False),
('ples', 'pales', True),
('pale', 'pas', False),
('pas', 'pale', False),
('pale', 'pkle', True),
('pkle', 'pable', False),
('pal', 'palks', False),
('palks', 'pal', False),
('bla', 'bleble', False)
]
for [test_s1, test_s2, expected] in data:
actual = one_away(test_s1, test_s2)
print(actual == expected)
|
apps/accounts/social_auth_pipeline.py
|
goztrk/django-htk
| 206 |
124450
|
<reponame>goztrk/django-htk<filename>apps/accounts/social_auth_pipeline.py<gh_stars>100-1000
# Third Party / PIP Imports
# Django Imports
from django.shortcuts import redirect
# Django Extensions Imports
from social_core.pipeline.partial import partial
# HTK Imports
from htk.apps.accounts.emails import welcome_email
from htk.apps.accounts.session_keys import *
from htk.apps.accounts.utils import associate_user_email
from htk.apps.accounts.utils import get_incomplete_signup_user_by_email
from htk.apps.accounts.utils import get_user_by_email
from htk.apps.accounts.view_helpers import redirect_to_social_auth_complete
from htk.utils import htk_setting
# Custom Pipeline Functions
# https://django-social-auth.readthedocs.org/en/v0.7.22/pipeline.html
#
# available in kwargs
#
# backend - current social authentication backend (`backend.name`)
# uid - given by authentication provider
# details - user details given by authentication provider
# user - already logged in user or newly created user
# is_new - if `user` is newly created
# 1. If there is no email, have the user enter an email
# 2. Check association. If there is an account with that email:
# a. "An account with this email address already exists. Please log in to link your {{ SOCIAL }} account."
# b. "An account with this email address is already linked to {{ SOCIAL }}. Please create a new account using a different email address."
# 3. Create the account with the username and email
def python_social_auth_shim(pipeline_func):
"""Shim layer decorator for django-social-auth to python-social auth migration
pipeline complete wasn't passing the request object, but the strategy object instead
"""
def wrapped(strategy, *args, **kwargs):
if not kwargs.get('request'):
request = strategy.request
kwargs['request'] = request
return pipeline_func(*args, **kwargs)
return wrapped
def reset_session_keys(strategy, *args, **kwargs):
"""Reset a bunch of keys used as part of the social auth flow
This is to prevent partially-completed values from a previous flow from affecting a new social auth flow
"""
for key in SOCIAL_AUTH_FLOW_KEYS:
if strategy.request.session.get(key):
del strategy.request.session[key]
return None
@partial
def check_email(strategy, details, user=None, *args, **kwargs):
"""Ask the user to enter the email if we don't have one yet
The pipeline process was cut prior to this custom pipeline function, and will resume to this same function after completing
"""
response = None
if user is None:
strategy.request.session['backend'] = kwargs.get('current_partial').backend
social_email = details.get('email')
collected_email = strategy.request.session.get(SOCIAL_REGISTRATION_SETTING_EMAIL)
if social_email:
# email available from social auth
user = get_user_by_email(social_email)
if user and user.is_active:
# a user is already associated with this email
# TODO: there is an error with linking accounts...
strategy.request.session[SOCIAL_REGISTRATION_SETTING_EMAIL] = social_email
if user.has_usable_password():
# user should log into the existing account with a password
url_name = htk_setting('HTK_ACCOUNTS_REGISTER_SOCIAL_LOGIN_URL_NAME')
else:
# no password was set, so user must log in with another social auth account
url_name = htk_setting('HTK_ACCOUNTS_REGISTER_SOCIAL_ALREADY_LINKED_URL_NAME')
response = redirect(url_name)
elif collected_email:
# email provided by user
details['email'] = collected_email
response = { 'details' : details }
else:
# no email provided from social auth
strategy.request.session[SOCIAL_REGISTRATION_SETTING_MISSING_EMAIL] = True
url_name = htk_setting('HTK_ACCOUNTS_REGISTER_SOCIAL_EMAIL_URL_NAME')
response = redirect(url_name)
return response
@partial
def check_terms_agreement(strategy, details, user=None, *args, **kwargs):
"""
Ask the user to agree to Privacy Policy and Terms of Service
"""
response = None
if user is None:
agreed_to_terms = strategy.request.session.get(SOCIAL_REGISTRATION_SETTING_AGREED_TO_TERMS, False)
if not agreed_to_terms:
email = details.get('email')
strategy.request.session[SOCIAL_REGISTRATION_SETTING_EMAIL] = email
url_name = htk_setting('HTK_ACCOUNTS_REGISTER_SOCIAL_EMAIL_AND_TERMS_URL_NAME')
response = redirect(url_name)
else:
pass
else:
pass
return response
def check_incomplete_signup(strategy, details, user=None, *args, **kwargs):
"""Checks for an incomplete signup, and sets that User instead
"""
response = None
if user is None:
social_email = details.get('email')
user = get_incomplete_signup_user_by_email(social_email)
response = {
'user' : user,
'is_new' : user is None,
}
return response
def set_username(strategy, details, user, social, *args, **kwargs):
"""This pipeline function can be used to set UserProfile.has_username_set = True
Normally not used if the auto-generated username is ugly
"""
if not user:
return None
response = None
if hasattr(user, 'profile'):
user_profile = user.profile
if hasattr(user_profile, 'has_username_set'):
user_profile.has_username_set = True
user_profile.save()
return response
def associate_email(strategy, details, user, social, *args, **kwargs):
"""Associate email with the user
"""
if not user or not social:
return None
response = None
email = details.get('email')
domain = strategy.request.get_host()
# Should confirm if the email was provided by the social auth provider, not the user
# i.e. SOCIAL_REGISTRATION_SETTING_MISSING_EMAIL was False
confirmed = not(strategy.request.session.get(SOCIAL_REGISTRATION_SETTING_MISSING_EMAIL, False))
user_email = associate_user_email(user, email, domain=domain, confirmed=confirmed)
if user_email:
# need to update the User with the activated one, so that it doesn't get overwritten later on
response = {
'user': user_email.user,
}
return response
def handle_new_user(user, is_new, *args, **kwargs):
"""Do stuff if the account was newly created
"""
if not user:
return None
if is_new:
# send a welcome email to the user, regardless of email confirmation status
welcome_email(user)
def post_connect(user, social, *args, **kwargs):
response = None
return response
|
src/api/views/bp_api_v1.py
|
LeslieLeung/2c
| 236 |
124458
|
<filename>src/api/views/bp_api_v1.py
#!/usr/bin/env python
"""
Created by howie.hu at 2021/4/10.
Description: v1 API endpoint functions
Changelog: all notable changes to this file will be documented
"""
from flask import Blueprint, current_app, request
bp_api = Blueprint("v1", __name__, url_prefix="/v1")
@bp_api.route("/ping", methods=["GET"], strict_slashes=False)
def ping():
"""
    Example endpoint
:return:
"""
    # Get basic configuration if needed
return "pong"
|
setup.py
|
TomKingsfordUoA/ResidualMaskingNetwork
| 242 |
124463
|
<gh_stars>100-1000
import os
from setuptools import find_packages, setup
version = None
with open("README.md") as ref:
data = ref.readlines()[2]
version = data[data.find("version-v")+9:data.find("-blue")]
assert version is not None, data
cwd = os.path.dirname(os.path.abspath(__file__))
def write_version_file():
version_path = os.path.join(cwd, "rmn", "version.py")
with open(version_path, "w") as f:
f.write(f"__version__ = '{version}'\n")
write_version_file()
with open("README.md", encoding="utf-8") as f:
long_description = f.read()
setup(
name="rmn",
description="Facial Expression Recognition using Residual Masking Network",
long_description=long_description,
long_description_content_type='text/markdown',
version=version,
author="<NAME>",
author_email="<EMAIL>",
packages=find_packages(
exclude=["docs", "tests", "env", "script", "trainers", "utils", "pretrained_ckpt"]
),
include_package_data=True,
install_requires=[
"numpy",
"opencv-python",
"torch",
"torchvision",
"requests",
"pytorchcv",
"tqdm",
],
)
|
src/0054.spiral-matrix/spiral-matrix.py
|
lyphui/Just-Code
| 782 |
124465
|
<reponame>lyphui/Just-Code
from typing import List

class Solution:
def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
res = []
if not matrix: return res
x = y = i = 0
delta = ((0, 1), (1, 0), (0, -1), (-1, 0))
        pos = [0, 0, len(matrix[0])-1, len(matrix)-1]  # left, top, right, bottom boundaries, derived up front so we can skip extra bounds checks later.
while pos[0] <= pos[2] and pos[1] <= pos[3]:
while pos[0] <= y <= pos[2] and pos[1] <= x <= pos[3]:
res.append(matrix[x][y])
x, y = x+delta[i][0], y+delta[i][1]
x, y = x-delta[i][0], y-delta[i][1]
i = (i+1) % 4
pos[i] += sum(delta[i])
x, y = x+delta[i][0], y+delta[i][1]
return res
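# Hedged usage sketch (added for illustration, not part of the original solution):
if __name__ == "__main__":
    expected = [1, 2, 3, 6, 9, 8, 7, 4, 5]
    print(Solution().spiralOrder([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) == expected)  # True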
|
causalml/inference/tf/__init__.py
|
rainfireliang/causalml
| 2,919 |
124483
|
from .dragonnet import DragonNet
|
scripts/visualization.py
|
hehefan/PointRNN
| 136 |
124490
|
"""
3D Point Cloud Visualization
Original Author: https://github.com/argoai/argoverse-api
Modified by <NAME>
Date October 2019
"""
import os
import numpy as np
import argparse
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
from mpl_toolkits.mplot3d import Axes3D
from mayavi import mlab
from typing import Any, Iterable, List, Optional, Tuple, Union, cast
#: A stub representing mayavi_wrapper.mlab figure types
Figure = Any
#: A 3D Point
Point = np.ndarray
#: An array of 3D points
PointCloud = np.ndarray
#: Any numeric type
Number = Union[int, float]
#: RGB color created from 0.0 to 1.0 values
Color = Tuple[float, float, float]
FigSize = Tuple[float, float]
Coordinate = Tuple[float, float, float]
def plot_points_3D_mayavi(
points: np.ndarray,
bird: bool,
fig: Figure,
per_pt_color_strengths: np.ndarray = None,
fixed_color: Optional[Color] = (1, 0, 0),
colormap: str = "spectral",
) -> Figure:
"""Visualize points with Mayavi. Scale factor has no influence on point size rendering
when calling `points3d()` with the mode="point" argument, so we ignore it altogether.
The parameter "line_width" also has no effect on points, so we ignore it also.
Args:
points: The points to visualize
fig: A Mayavi figure
per_pt_color_strengths: An array of scalar values the same size as `points`
fixed_color: Use a fixed color instead of a colormap
colormap: different green to red jet for 'spectral' or 'gnuplot'
Returns:
Updated Mayavi figure
"""
if len(points) == 0:
return None
if per_pt_color_strengths is None or len(per_pt_color_strengths) != len(points):
# Height data used for shading
if bird:
per_pt_color_strengths = points[:, 2]
else:
per_pt_color_strengths = points[:, 0]
mlab.points3d(
points[:, 0], # x
points[:, 1], # y
points[:, 2], # z
per_pt_color_strengths,
mode="point", # Render each point as a 'point', not as a 'sphere' or 'cube'
colormap=colormap,
color=fixed_color, # Used a fixed (r,g,b) color instead of colormap
figure=fig,
)
return fig
def draw_coordinate_frame_at_origin(fig: Figure) -> Figure:
"""
Draw the origin and 3 vectors representing standard basis vectors to express
a coordinate reference frame.
Args:
fig: Mayavi figure
Returns:
Updated Mayavi figure
Based on
--------
https://github.com/hengck23/didi-udacity-2017/blob/master/baseline-04/kitti_data/draw.py
https://github.com/charlesq34/frustum-pointnets/blob/master/mayavi/viz_util.py
"""
# draw origin
mlab.points3d(0, 0, 0, color=(1, 1, 1), mode="sphere", scale_factor=0.2)
# Form standard basis vectors e_1, e_2, e_3
axes = np.array([[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]], dtype=np.float64)
# e_1 in red
mlab.plot3d(
[0, axes[0, 0]], [0, axes[0, 1]], [0, axes[0, 2]], color=(1, 0, 0), tube_radius=None, figure=fig
)
# e_2 in green
mlab.plot3d(
[0, axes[1, 0]], [0, axes[1, 1]], [0, axes[1, 2]], color=(0, 1, 0), tube_radius=None, figure=fig
)
# e_3 in blue
mlab.plot3d(
[0, axes[2, 0]], [0, axes[2, 1]], [0, axes[2, 2]], color=(0, 0, 1), tube_radius=None, figure=fig
)
return fig
def draw_lidar(
point_cloud: np.ndarray, bird: bool = True, colormap: str = "jet", fig: Optional[Figure] = None, bgcolor: Color = (0, 0, 0), fig_size: FigSize = (200, 200), focalpoint: Coordinate = (0, 0, 0), elevation: int = 0, distance: float = 62.0
) -> Figure:
"""Render a :ref:`PointCloud` with a 45 degree viewing frustum from worm-vehicle.
Creates a Mayavi figure, draws a point cloud. Since the majority of interesting objects and
scenarios are found closeby to the ground, we want to see the objects near the ground expressed
in the full range of the colormap. Since returns on power lines, trees, and buildings
will dominate and dilute the colormap otherwise, we clip the colors so that all points
beyond a certain z-elevation (height) share the same color at the edge of the colormap.
We choose anything beyond the 90th percentile as a height outlier.
Args:
point_cloud: The pointcloud to render
fig: A pre-existing Mayavi figure to render to
bgcolor: The background color
colormap: "spectral" or "gnuplot" or "jet" are best
Returns:
Updated or created Mayavi figure
"""
if fig is None:
fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=fig_size)
'''
z_thresh = np.percentile(point_cloud[:, 2], 90)
thresholded_heights = point_cloud[:, 2].copy()
# Colors of highest points will be clipped to all lie at edge of colormap
thresholded_heights[thresholded_heights > z_thresh] = 5
'''
tmp = [point_cloud]
for i in range(1,201):
tmp.append(point_cloud+0.0001*i)
tmp.append(point_cloud-0.0001*i)
point_cloud = np.concatenate(tmp, 0)
# draw points
fig = plot_points_3D_mayavi(
#points=point_cloud, fig=fig, per_pt_color_strengths=thresholded_heights, fixed_color=None, colormap=colormap
points=point_cloud, bird=bird, fig=fig, per_pt_color_strengths=None, fixed_color=None, colormap=colormap
)
fig = draw_coordinate_frame_at_origin(fig)
mlab.view(
azimuth=180, elevation=elevation, focalpoint=focalpoint, distance=distance, figure=fig
)
return fig
def mkdirs(name):
if not os.path.exists(name):
os.makedirs(name)
if __name__ == '__main__':
R = 5
gths = np.load('test-argo-5m-1024point-10step.npy')
frames = np.load('test-predicted-frames.npy')
bird_dir = 'bird'
worm_dir = 'worm'
mkdirs(bird_dir)
mkdirs(worm_dir)
distance = 2*np.sqrt(3*R*R)
point_size = 5
axes_limits = [[-R, R], [-R, R], [-R, R]] # X axis range # Y axis range # Z axis range
axes_str = ["X", "Y", "Z"]
axes = [1, 0, 2]
for i in range(gths.shape[0]):
        gth = gths[i]
        frame = frames[i]
# bird’s-eye view
curr_bird = os.path.join(bird_dir, '%04d'%(i+1))
mkdirs(curr_bird)
for j in range(5):
fig = draw_lidar(gth[j], bird=True, focalpoint=(0, 0, 0), elevation=0, distance=distance)
mlab.savefig(os.path.join(curr_bird, 'ctx-%02d.png'%(j+1)))
mlab.close()
fig = draw_lidar(gth[j+5], bird=True, focalpoint=(0, 0, 0), elevation=0, distance=distance)
mlab.savefig(os.path.join(curr_bird, 'gth-%02d.png'%(j+1)))
mlab.close()
fig = draw_lidar(frame[j], bird=True, focalpoint=(0, 0, 0), elevation=0, distance=distance)
mlab.savefig(os.path.join(curr_bird, 'prd-%02d.png'%(j+1)))
mlab.close()
os.system('convert -delay 20 -loop 0 %s/ctx-*.png %s/ctx.gif'%(curr_bird, curr_bird))
os.system('convert -delay 20 -loop 0 %s/gth-*.png %s/gth.gif'%(curr_bird, curr_bird))
os.system('convert -delay 20 -loop 0 %s/prd-*.png %s/prd.gif'%(curr_bird, curr_bird))
# worm’s-eye view
curr_worm = os.path.join(worm_dir, '%04d'%(i+1))
mkdirs(curr_worm)
for j in range(5):
fig = draw_lidar(gth[j], bird=False, focalpoint=(0, 0, 0), elevation=90, distance=distance)
mlab.savefig(os.path.join(curr_worm, 'ctx-%02d.png'%(j+1)))
mlab.close()
fig = draw_lidar(gth[j+5], bird=False, focalpoint=(0, 0, 0), elevation=90, distance=distance)
mlab.savefig(os.path.join(curr_worm, 'gth-%02d.png'%(j+1)))
mlab.close()
fig = draw_lidar(frame[j], bird=False, focalpoint=(0, 0, 0), elevation=90, distance=distance)
mlab.savefig(os.path.join(curr_worm, 'prd-%02d.png'%(j+1)))
mlab.close()
os.system('convert -delay 20 -loop 0 %s/ctx-*.png %s/ctx.gif'%(curr_worm, curr_worm))
os.system('convert -delay 20 -loop 0 %s/gth-*.png %s/gth.gif'%(curr_worm, curr_worm))
os.system('convert -delay 20 -loop 0 %s/prd-*.png %s/prd.gif'%(curr_worm, curr_worm))
|
notebook/numpy_block.py
|
vhn0912/python-snippets
| 174 |
124505
|
import numpy as np
a1 = np.ones((2, 3), int)
print(a1)
# [[1 1 1]
# [1 1 1]]
a2 = np.full((2, 3), 2)
print(a2)
# [[2 2 2]
# [2 2 2]]
print(np.block([a1, a2]))
# [[1 1 1 2 2 2]
# [1 1 1 2 2 2]]
print(np.block([[a1], [a2]]))
# [[1 1 1]
# [1 1 1]
# [2 2 2]
# [2 2 2]]
print(np.block([[a1, a2], [a2, a1]]))
# [[1 1 1 2 2 2]
# [1 1 1 2 2 2]
# [2 2 2 1 1 1]
# [2 2 2 1 1 1]]
print(np.block([[[a1]], [[a2]]]))
# [[[1 1 1]
# [1 1 1]]
#
# [[2 2 2]
# [2 2 2]]]
print(np.block([[[a1]], [[a2]]]).shape)
# (2, 2, 3)
a3 = np.full(6, 3)
print(a3)
# [3 3 3 3 3 3]
print(np.block([[a1, a2], [a3]]))
# [[1 1 1 2 2 2]
# [1 1 1 2 2 2]
# [3 3 3 3 3 3]]
# print(np.block([[a1, a2], a3]))
# ValueError: List depths are mismatched. First element was at depth 2, but there is an element at depth 1 (arrays[1])
# print(np.block([[a1, a2, a3]]))
# ValueError: all the input array dimensions except for the concatenation axis must match exactly
|
splink/iterate.py
|
riddhi150390/splink
| 176 |
124514
|
<reponame>riddhi150390/splink
from typing import Callable
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.session import SparkSession
from .expectation_step import run_expectation_step
from .maximisation_step import run_maximisation_step
from .model import Model
from typeguard import typechecked
import logging
logger = logging.getLogger(__name__)
@typechecked
def iterate(
df_gammas: DataFrame,
model: Model,
spark: SparkSession,
compute_ll: bool = False,
save_state_fn: Callable = None,
):
"""Repeatedly run expectation and maximisation step until convergence or max itations is reached.
Args:
df_gammas (DataFrame): Spark dataframe including gamma columns (i.e. after blocking and add_gammas has been applied)
model (Model): The `splink` model object
spark (SparkSession): The SparkSession object
        compute_ll (bool, optional): Whether to compute the log likelihood. This is not necessary and significantly degrades performance. Defaults to False.
        save_state_fn (function, optional): A function provided by the user that takes one argument, the model, and is executed each iteration. This is a hook that allows the user to save the state between iterations, which is mostly useful for very large jobs that may need to be restarted from where they left off if they fail.
Returns:
DataFrame: A spark dataframe including a match probability column
"""
settings = model.current_settings_obj.settings_dict
num_iterations = settings["max_iterations"]
for i in range(num_iterations):
df_e = run_expectation_step(df_gammas, model, spark, compute_ll=compute_ll)
run_maximisation_step(df_e, model, spark)
logger.info(f"Iteration {i} complete")
if save_state_fn:
save_state_fn(model)
if model.is_converged():
logger.info("EM algorithm has converged")
break
# The final version of df_e should align to the current parameters - i.e. those computed in the last max step
df_e = run_expectation_step(df_gammas, model, spark, compute_ll=compute_ll)
# The expectation step adds the current params to history, so this is needed to output a final
# version of charts/params.
if save_state_fn:
save_state_fn(model)
return df_e
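# Usage sketch (assumption: illustrative only, not part of splink). `df_gammas`
# is a Spark DataFrame that already contains the gamma comparison columns and
# `model` is an initialised splink Model whose settings include "max_iterations":
#
#   df_e = iterate(
#       df_gammas,
#       model,
#       spark,
#       compute_ll=False,
#       save_state_fn=my_checkpoint_fn,  # any callable taking the model
#   )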
|
tools/Sikuli/OpenDialogClickFolderSelect.sikuli/OpenDialogClickFolderSelect.py
|
marmyshev/vanessa-automation
| 296 |
124538
|
<gh_stars>100-1000
click("Bb60pnanxm.png")
exit(0)
|
petridish/data/openml.py
|
Bhaskers-Blu-Org2/petridishnn
| 121 |
124543
|
<reponame>Bhaskers-Blu-Org2/petridishnn
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
import scipy.io.arff as arff
import bisect
import json
import os, sys
import subprocess
import tensorflow as tf
from tensorpack.dataflow import RNGDataFlow, BatchData, PrefetchData
from tensorpack.callbacks import Inferencer
from tensorpack.dataflow import DataFlow, PrefetchDataZMQ, \
PrefetchData, \
MapDataComponent, AugmentImageComponent, BatchData
from tensorpack.dataflow import imgaug
from tensorpack.utils import logger
def maybe_download_dataset(dataset_idx, json_dir=None, data_dir=None,
force_download=False, disable_download=True):
json_fn = os.path.join(json_dir, str(dataset_idx) + '.json')
data_fn = os.path.join(data_dir, str(dataset_idx) + '.arff')
if os.path.exists(json_fn) and not force_download:
print("Json info and data already exists.")
else:
if disable_download:
raise ValueError("{} should exist but not".format(json_fn))
import wget, glob
url = "https://www.openml.org/d/{dataset_idx}/json".format(dataset_idx=dataset_idx)
print("Downloading JSON file from url {}".format(url))
json_fn = wget.download(url, json_fn)
fns = glob.glob('{}*tmp'.format(json_fn))
for fn in fns:
cmd = 'rm {}'.format(fn)
print("remove tmp file with cmd : {}".format(cmd))
subprocess.call(cmd, shell=True)
with open(json_fn, 'rt') as json_in:
lines = []
for line in json_in:
lines.append(line.strip())
ss = ''.join(lines)
data_info = json.loads(ss)
#target_attr = data_info.get('default_target_attribute', None)
target_attr = None
if target_attr is None:
n_targets = 0
for feat_info in data_info['features']:
if int(feat_info.get('target', 0)) > 0:
target_attr = feat_info['name']
n_targets += 1
if n_targets != 1:
raise Exception("current logic only support 1d prediction at dataset_idx {}".format(dataset_idx))
if os.path.exists(data_fn) and not force_download:
print("data arff already exists")
else:
if disable_download:
raise ValueError("{} should exist but not".format(data_fn))
import wget
import glob
# dataset url
url = data_info['url']
print("Downloading dataset {} from url {}".format(dataset_idx, url))
data_fn = wget.download(url, out=data_fn)
fns = glob.glob('{}*tmp'.format(data_fn))
for fn in fns:
cmd = 'rm {}'.format(fn)
print("remove tmp file with cmd : {}".format(cmd))
subprocess.call(cmd, shell=True)
return data_fn, target_attr
def get_arff_data(fn, target_attr='class', check_size_only=False):
file_stat = os.stat(fn)
if check_size_only:
print("{} has size {}MB".format(fn, file_stat.st_size * 1e-6))
return None
data, meta = arff.loadarff(fn)
if not target_attr in meta.names():
raise Exception("Dataset {} is broken: target_attr {} not in meta".format(fn, target_attr))
# problem type regression/classification
if meta[target_attr][0] == 'numeric':
num_classes = 0
pred_type = tf.float32
else:
num_classes = len(meta[target_attr][1])
pred_type = tf.int32
pred_val2idx = dict()
for vi, val in enumerate(meta[target_attr][1]):
pred_val2idx[val] = vi
# feature names, types and ranges
feat_names = list(filter(lambda x : x != target_attr, meta.names()))
n_feats = len(feat_names)
feat_types = [tf.float32 for _ in range(n_feats)]
feat_dims = [None for _ in range(n_feats)]
feat_val2idx = [None for _ in range(n_feats)]
for i, name in enumerate(feat_names):
if meta[name][0] == 'numeric':
continue
feat_types[i] = tf.int32
feat_dims[i] = len(meta[name][1])
feat_val2idx[i] = dict()
for vi, val in enumerate(meta[name][1]):
feat_val2idx[i][val] = vi
n_data = len(data)
dps = [[None] * n_data for _ in range(n_feats + 1) ]
feat_means = [ 0. for _ in range(n_feats)]
feat_vars = [ 0. for _ in range(n_feats)]
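    # Single pass over the rows: accumulate per-feature sum and sum of squares so
    # the mean/std can be derived afterwards, and map nominal values to indices.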
for xi, x in enumerate(data):
for di, dname in enumerate(feat_names):
val = x[dname]
if feat_types[di] == tf.float32:
val = float(val)
dps[di][xi] = val
feat_means[di] += val
feat_vars[di] += val * val
else:
val = val.decode("utf-8")
dps[di][xi] = int(feat_val2idx[di][val])
if num_classes == 0:
dps[-1][xi] = float(x[target_attr])
else:
val = x[target_attr].decode("utf-8")
dps[-1][xi] = int(pred_val2idx[val])
feat_types.append(pred_type)
feat_dims.append(None)
feat_means = [ z / float(n_data) for z in feat_means ]
feat_stds = [ np.sqrt((sq / float(n_data) - m * m)) for sq, m in zip(feat_vars, feat_means)]
return dps, feat_types, feat_dims, n_data, num_classes, feat_means, feat_stds
class LoadedArffDataFlow(RNGDataFlow):
def __init__(self, dps_ys, split, shuffle=True, do_validation=False):
super(LoadedArffDataFlow, self).__init__()
self.shuffle = shuffle
self.dps = dps_ys # this should be a list of n x d_i mat, the last one is pred ys
n_samples = len(dps_ys[-1])
self.init_indices = list(range(n_samples))
np.random.seed(180451613)
np.random.shuffle(self.init_indices)
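        # A fixed seed plus one shuffle keeps the train/val/test partition
        # reproducible: 80/10/10 when do_validation is set, otherwise 90/10.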
if split == 'all':
self._offset = 0
self._size = n_samples
elif split == 'train':
self._offset = 0
if do_validation:
self._size = n_samples * 8 // 10
else:
self._size = n_samples * 9 // 10
elif split == 'val' or split == 'validation':
if do_validation:
self._offset = n_samples * 8 // 10
self._size = n_samples * 9 // 10 - self._offset
else:
self._offset = n_samples * 9 // 10
self._size = n_samples - self._offset
elif do_validation and split == 'test':
self._offset = n_samples * 9 // 10
self._size = n_samples - self._offset
def size(self):
return self._size
def get_data(self):
idxs = [ i for i in self.init_indices[self._offset:(self._offset + self._size)]]
if self.shuffle:
np.random.shuffle(idxs)
for k in idxs:
yield [dp[k] for dp in self.dps]
def get_dataset_by_id(idx, data_dir_root, check_size_only=False, disable_download=True):
data_dir = os.path.join(data_dir_root, 'openml')
json_dir = os.path.join(data_dir_root, 'openml', 'json_dir')
fn, target_attr = maybe_download_dataset(idx, json_dir=json_dir, data_dir=data_dir,
disable_download=disable_download)
return get_arff_data(fn, target_attr, check_size_only)
def get_openml_dataflow(idx, data_root, splits=[], do_validation=False):
(dps_ys, types, dims, n_data,
num_classes, feat_means, feat_stds) = get_dataset_by_id(idx, data_root)
l_ds = dict()
for split in splits:
l_ds[split] = LoadedArffDataFlow(
dps_ys, split, shuffle=True, do_validation=do_validation)
return l_ds, types, dims, n_data, num_classes, feat_means, feat_stds
# copy paste from the paper: https://arxiv.org/pdf/1802.04064.pdf
cbb_openml_indices = [
3, 6, 8, 10, 11, 12, 14, 16, 18, 20, 21, 22, 23, 26, 28, 30, 31, 32,
36, 37, 39, 40, 41, 43, 44, 46, 48, 50, 53, 54, 59, 60, 61, 62, 150,
151, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 180, 181, 182, 183,
184, 187, 189, 197, 209, 223, 227, 273, 275, 276, 277, 278, 279, 285, 287,
292, 293, 294, 298, 300, 307, 310, 312, 313, 329, 333, 334, 335, 336, 337,
338, 339, 343, 346, 351, 354, 357, 375, 377, 383, 384, 385, 386, 387, 388,
389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 444, 446,
448, 450, 457, 458, 459, 461, 462, 463, 464, 465, 467, 468, 469, 472, 475,
476, 477, 478, 479, 480, 554, 679, 682, 683, 685, 694, 713, 714, 715, 716,
717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, 731,
732, 733, 734, 735, 736, 737, 740, 741, 742, 743, 744, 745, 746, 747, 748,
749, 750, 751, 752, 753, 754, 755, 756, 758, 759, 761, 762, 763, 764, 765,
766, 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780,
782, 783, 784, 785, 787, 788, 789, 790, 791, 792, 793, 794, 795, 796, 797,
799, 800, 801, 803, 804, 805, 806, 807, 808, 811, 812, 813, 814, 815, 816,
817, 818, 819, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829, 830, 832,
833, 834, 835, 836, 837, 838, 841, 843, 845, 846, 847, 848, 849, 850, 851,
853, 855, 857, 859, 860, 862, 863, 864, 865, 866, 867, 868, 869, 870, 871,
872, 873, 874, 875, 876, 877, 878, 879, 880, 881, 882, 884, 885, 886, 888,
891, 892, 893, 894, 895, 896, 900, 901, 902, 903, 904, 905, 906, 907, 908,
909, 910, 911, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923,
924, 925, 926, 927, 928, 929, 931, 932, 933, 934, 935, 936, 937, 938, 941,
942, 943, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 958,
959, 962, 964, 965, 969, 970, 971, 973, 974, 976, 977, 978, 979, 980, 983,
987, 988, 991, 994, 995, 996, 997, 1004, 1005, 1006, 1009, 1011, 1012, 1013,
1014, 1015, 1016, 1019, 1020, 1021, 1022, 1025, 1026, 1036, 1038, 1040,
1041, 1043, 1044, 1045, 1046, 1048, 1049, 1050, 1054, 1055, 1056, 1059,
1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1071, 1073,
1075, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087,
1088, 1100, 1104, 1106, 1107, 1110, 1113, 1115, 1116, 1117, 1120, 1121,
1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1133,
1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1146,
1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158,
1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1169, 1216, 1217, 1218,
1233, 1235, 1236, 1237, 1238, 1241, 1242, 1412, 1413, 1441, 1442, 1443,
1444, 1449, 1451, 1453, 1454, 1455, 1457, 1459, 1460, 1464, 1467, 1470,
1471, 1472, 1473, 1475, 1481, 1482, 1483, 1486, 1487, 1488, 1489, 1496, 1498
]
# 21 could not convert sparse str to float; 6 cannot convert sparse nominal of 1/-1
# 2 could not find nominal field ; 1 exception due to target_attr not found
# 2 StopIteration
cbb_openml_indices_failed = [
189, 273, 292, 293, 310, 351, 354, 357, 383, 384, 385, 386, 387, 388, 389,
390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 1048, 1073,
1100, 1169, 1241, 1242
]
def len_cbb_indices():
return len(cbb_openml_indices)
if __name__ == '__main__':
from urllib.request import HTTPError, ContentTooShortError
import time
failed_indices = []
data_dir = '/data/data'
try:
os.makedirs(os.path.join(data_dir, 'openml', 'json_dir'))
except:
pass
for cnt, i in enumerate(cbb_openml_indices):
print("{}/{} : urlid={} ... ".format(cnt+1, len(cbb_openml_indices), i))
start = time.time()
try:
ret = get_dataset_by_id(i, data_dir, check_size_only=True, disable_download=False)
except (HTTPError, ContentTooShortError) as e:
print("\n wget failed on {} with error {}".format(i, e))
failed_indices.append(i)
print("...done {} sec".format(time.time() - start))
print("The indices that failed: {}".format(failed_indices))
#fn = maybe_download_dataset(31)
#ret = get_arff_data(fn)
|
gryphon/lib/exchange/vault_of_satoshi.py
|
qiquanzhijia/gryphon
| 1,109 |
124569
|
# -*- coding: utf-8 -*-
import time
import os
import datetime
import requests
import hmac
import urllib
import hashlib
import base64
import json
import math
from cdecimal import *
from delorean import Delorean
from collections import OrderedDict
from gryphon.lib.money import Money
from gryphon.lib.models.datum import DatumRecorder
from exchange_order import Order
from gryphon.lib.exchange.consts import Consts
from base import *
from exceptions import *
from gryphon.lib.models.exchange import Balance
from gryphon.lib.logger import get_logger
logger = get_logger(__name__)
class VaultOfSatoshiExchange(Exchange):
def __init__(self, session=None, currency=u"CAD", use_cached_orderbook=False):
super(VaultOfSatoshiExchange, self).__init__(session)
self.name = u'VAULTOFSATOSHI'
self.friendly_name = u'Vault of Satoshi'
self.base_url = 'https://api.vaultofsatoshi.com'
self.currency = currency
self.fee = Decimal("0") # assuming $99 unlimited account
self.withdrawal_fee = Money("0.0005", "BTC")
self.bid_string = "bid"
self.ask_string = "ask"
self.use_cached_orderbook = use_cached_orderbook
|
tests/conftest.py
|
Dimche-msk/panoramisk
| 123 |
124616
|
<filename>tests/conftest.py
import subprocess
import pytest
from panoramisk import utils
class Asterisk:
def __init__(self):
self.cwd = 'tests/docker'
self.proc = None
def start(self):
self.stop()
self.proc = subprocess.Popen(
['docker-compose', 'up'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.cwd
)
for line in iter(self.proc.stdout.readline, b''):
if b'Asterisk Ready.' in line:
break
def logs(self, tail=20):
proc = subprocess.Popen(
['docker-compose', 'logs', '--tail=%s' % tail],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.cwd,
encoding='utf8',
)
stdout, _ = proc.communicate()
print(stdout)
return stdout
def stop(self):
if self.proc is not None:
self.proc.kill()
subprocess.check_call(
['docker-compose', 'down', '-v'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.cwd,
)
@pytest.fixture
def asterisk(request):
utils.EOL = '\r\n'
server = Asterisk()
yield server
server.stop()
|
sourced/ml/tests/test_indexer.py
|
vmarkovtsev/ml
| 122 |
124633
|
import tempfile
import unittest
from pyspark import Row
from sourced.ml.models import DocumentFrequencies
from sourced.ml.tests import create_spark_for_test
from sourced.ml.transformers import Indexer
class IndexerTests(unittest.TestCase):
def setUp(self):
data = [Row(to_index="to_index%d" % i, value=i) for i in range(10)]
self.data = data
self.session = create_spark_for_test()
self.data_rdd = self.session.sparkContext \
.parallelize(range(len(data))) \
.map(lambda x: data[x])
def test_call(self):
indexer = Indexer("to_index")
res = indexer(self.data_rdd)
values = indexer.values()
data_reverse = res \
.map(lambda x: Row(to_index=values[x.to_index], value=x.value)) \
.collect()
self.assertEqual(self.data, data_reverse)
def test_save_load(self):
indexer = Indexer("to_index")
res = indexer(self.data_rdd)
with tempfile.NamedTemporaryFile(suffix="-index.asdf") as tmp:
cached_index_path = tmp.name
indexer.save_index(cached_index_path)
docfreq = DocumentFrequencies().load(source=cached_index_path)
document_index = {key: int(val) for (key, val) in docfreq}
indexer = Indexer("to_index", column2id=document_index)
self.assertEqual(res.collect(), indexer(self.data_rdd).collect())
if __name__ == "__main__":
unittest.main()
|
tests/test_doc.py
|
iver56/julius
| 256 |
124669
|
from doctest import testmod
import unittest
from julius import resample, fftconv, lowpass, bands, utils
class DocStringTest(unittest.TestCase):
def test_resample(self):
self.assertEqual(testmod(resample).failed, 0)
def test_fftconv(self):
self.assertEqual(testmod(fftconv).failed, 0)
def test_lowpass(self):
self.assertEqual(testmod(lowpass).failed, 0)
def test_bands(self):
self.assertEqual(testmod(bands).failed, 0)
def test_utils(self):
self.assertEqual(testmod(utils).failed, 0)
|
rastervision_aws_batch/rastervision/aws_batch/aws_batch_runner.py
|
theoway/raster-vision
| 1,577 |
124675
|
<filename>rastervision_aws_batch/rastervision/aws_batch/aws_batch_runner.py
import logging
import os
import uuid
from typing import List, Optional
from rastervision.pipeline import rv_config
from rastervision.pipeline.runner import Runner
log = logging.getLogger(__name__)
AWS_BATCH = 'batch'
def submit_job(cmd: str,
job_name: str,
debug: bool = False,
               profile: bool = False,
attempts: int = 5,
parent_job_ids: List[str] = None,
num_array_jobs: Optional[int] = None,
use_gpu: bool = False,
job_queue: Optional[str] = None,
job_def: Optional[str] = None) -> str:
"""Submit a job to run on AWS Batch.
Args:
cmd: a command to run in the Docker container for the remote job
debug: if True, run the command using a ptvsd wrapper which sets up a remote
VS Code Python debugger server
profile: if True, run the command using kernprof, a line profiler
attempts: the number of times to try running the command which is useful
in case of failure.
parent_job_ids: optional list of parent Batch job ids. The job created by this
will only run after the parent jobs complete successfully.
num_array_jobs: if set, make this a Batch array job with size equal to
num_array_jobs
use_gpu: if True, run the job in a GPU-enabled queue
job_queue: if set, use this job queue
job_def: if set, use this job definition
"""
batch_config = rv_config.get_namespace_config(AWS_BATCH)
if job_queue is None:
if use_gpu:
job_queue = batch_config('gpu_job_queue')
else:
job_queue = batch_config('cpu_job_queue')
if job_def is None:
if use_gpu:
job_def = batch_config('gpu_job_def')
else:
job_def = batch_config('cpu_job_def')
import boto3
client = boto3.client('batch')
cmd_list = cmd.split(' ')
if debug:
cmd_list = [
'python', '-m', 'ptvsd', '--host', '0.0.0.0', '--port', '6006',
'--wait', '-m'
] + cmd_list
if profile:
cmd_list = ['kernprof', '-v', '-l'] + cmd_list
kwargs = {
'jobName': job_name,
'jobQueue': job_queue,
'jobDefinition': job_def,
'containerOverrides': {
'command': cmd_list
},
'retryStrategy': {
'attempts': attempts
},
}
if parent_job_ids:
kwargs['dependsOn'] = [{'jobId': id} for id in parent_job_ids]
if num_array_jobs:
kwargs['arrayProperties'] = {'size': num_array_jobs}
job_id = client.submit_job(**kwargs)['jobId']
msg = 'submitted job with jobName={} and jobId={} w/ parent(s)={}'.format(
job_name, job_id, parent_job_ids)
log.info(msg)
log.info(cmd_list)
return job_id
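# Usage sketch (assumption: illustrative only; the command and job id below are placeholders).
# Submits a single CPU job that must wait for a previously submitted parent job:
#
#   job_id = submit_job(
#       cmd='python -m rastervision.pipeline.cli run_command cfg.json train',
#       job_name='raster-vision-train-example',
#       parent_job_ids=['11111111-2222-3333-4444-555555555555'],
#       use_gpu=False,
#   )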
class AWSBatchRunner(Runner):
"""Runs pipelines remotely using AWS Batch.
Requires Everett configuration of form:
```
[AWS_BATCH]
cpu_job_queue=
cpu_job_def=
gpu_job_queue=
gpu_job_def=
attempts=
```
"""
def run(self,
cfg_json_uri,
pipeline,
commands,
num_splits=1,
pipeline_run_name: str = 'raster-vision'):
parent_job_ids = []
# pipeline-specific job queue
if hasattr(pipeline, 'job_queue'):
pipeline_job_queue = pipeline.job_queue
else:
pipeline_job_queue = None
# pipeline-specific job definition
if hasattr(pipeline, 'job_def'):
pipeline_job_def = pipeline.job_def
else:
pipeline_job_def = None
for command in commands:
# command-specific job queue, job definition
job_def = pipeline_job_def
job_queue = pipeline_job_queue
if hasattr(pipeline, command):
fn = getattr(pipeline, command)
if hasattr(fn, 'job_def'):
job_def = fn.job_def
if hasattr(fn, 'job_queue'):
job_queue = fn.job_queue
num_array_jobs = None
use_gpu = command in pipeline.gpu_commands
job_name = f'{pipeline_run_name}-{command}-{uuid.uuid4()}'
cmd = ['python', '-m', 'rastervision.pipeline.cli']
if rv_config.get_verbosity() > 1:
cmd.append('-' + 'v' * (rv_config.get_verbosity() - 1))
cmd.extend(
['run_command', cfg_json_uri, command, '--runner', AWS_BATCH])
if command in pipeline.split_commands and num_splits > 1:
num_array_jobs = num_splits
cmd += ['--num-splits', str(num_splits)]
job_id = submit_job(
cmd=' '.join(cmd),
job_name=job_name,
parent_job_ids=parent_job_ids,
num_array_jobs=num_array_jobs,
use_gpu=use_gpu,
job_queue=job_queue,
job_def=job_def)
parent_job_ids = [job_id]
job_queue = None
job_def = None
def get_split_ind(self):
return int(os.environ.get('AWS_BATCH_JOB_ARRAY_INDEX', 0))
|
CPAC/unet/__init__.py
|
Lawreros/C-PAC
| 125 |
124695
|
<gh_stars>100-1000
from .function import write_nifti, estimate_dice, extract_large_comp, predict_volumes, MyParser
from .model import weigths_init, Conv3dBlock, UpConv3dBlock, Conv2dBlock, UpConv2dBlock, UNet3d, UNet2d, MultiSliceBcUNet, MultiSliceSsUNet, MultiSliceModel
from .dataset import VolumeDataset, BlockDataset
__all__ = [
'write_nifti',
'estimate_dice',
'extract_large_comp',
'predict_volumes',
'MyParser',
'weigths_init',
'Conv3dBlock',
'UpConv3dBlock',
'Conv2dBlock',
'UpConv2dBlock',
'UNet3d',
'UNet2d',
'MultiSliceBcUNet',
'MultiSliceSsUNet',
'MultiSliceModel',
'VolumeDataset',
'BlockDataset'
]
|
selenium/pyvirtualdisplay-download-without-headless/example-chrome-wiht-options.py
|
whitmans-max/python-examples
| 140 |
124701
|
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
import time
start_time = time.time()
options = Options()
options.add_argument("--headless")
options.add_argument("--disable-gpu")
options.add_argument("--disable-extensions")
#driver = webdriver.Chrome(executable_path=r'/home/chromedriver/chromedriver',options=options)
driver = webdriver.Chrome(options=options)
params = {'behavior': 'allow', 'downloadPath': '/home/furas/projekty'}
driver.execute_cdp_cmd('Page.setDownloadBehavior', params)
# downloads are now enabled for this driver instance
driver.get('https://www.macrotrends.net/1476/copper-prices-historical-chart-data')
print('[INFO] loaded', time.time() - start_time)
time.sleep(5)
iframe = driver.find_element_by_xpath("//iframe[@id='chart_iframe']")
driver.switch_to.frame(iframe)
print('[INFO] switched', time.time() - start_time)
xpath = "//a[text()='All Years']"
driver.find_element_by_xpath(xpath).click()
xpath = "//button[@id='dataDownload']"
driver.find_element_by_xpath(xpath).click()
print('[INFO] clicked', time.time() - start_time)
time.sleep(10)
print('[INFO] closing', time.time() - start_time)
driver.close()
|
tests/scipy/solve_triangular.py
|
RoboticExplorationLab/micropython-ulab
| 232 |
124719
|
import math
try:
from ulab import scipy, numpy as np
except ImportError:
import scipy
import numpy as np
A = np.array([[3, 0, 2, 6], [2, 1, 0, 1], [1, 0, 1, 4], [1, 2, 1, 8]])
b = np.array([4, 2, 4, 2])
# forward substitution
result = scipy.linalg.solve_triangular(A, b, lower=True)
ref_result = np.array([1.333333333, -0.666666666, 2.666666666, -0.083333333])
for i in range(4):
print(math.isclose(result[i], ref_result[i], rel_tol=1E-6, abs_tol=1E-6))
# backward substitution
result = scipy.linalg.solve_triangular(A, b, lower=False)
ref_result = np.array([-1.166666666, 1.75, 3.0, 0.25])
for i in range(4):
print(math.isclose(result[i], ref_result[i], rel_tol=1E-6, abs_tol=1E-6))
|
src/binding/pyct_icp/__init__.py
|
xiang-1208/ct_icp
| 123 |
124720
|
from .pyct_icp import *
|
marathon/__init__.py
|
missingcharacter/marathon-python
| 202 |
124721
|
from .client import MarathonClient
from .models import MarathonResource, MarathonApp, MarathonTask, MarathonConstraint
from .exceptions import MarathonError, MarathonHttpError, NotFoundError, InvalidChoiceError
from .util import get_log
log = get_log()
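# Usage sketch (assumption: illustration only; the server URL is a placeholder):
#
#   client = MarathonClient('http://localhost:8080')
#   for app in client.list_apps():
#       log.info(app.id)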
|
examples/supernova/in_code_demo.py
|
hase1128/dragonfly
| 675 |
124759
|
"""
In code demo for supernova experiment.
-- <EMAIL>
"""
from dragonfly import load_config, maximise_function, maximise_multifidelity_function
# From current directory
from snls import objective as snls_objective
from snls_mf import objective as snls_mf_objective
from snls_mf import cost as snls_mf_cost
def main():
""" Main function. """
domain_vars = [{'name': 'hubble_constant', 'type': 'float', 'min': 60, 'max': 80},
{'name': 'omega_m', 'type': 'float', 'min': 0, 'max': 1},
{'name': 'omega_l', 'type': 'float', 'min': 0, 'max': 1}]
fidel_vars = [{'name': 'log10_resolution', 'type': 'float', 'min': 2, 'max': 5},
{'name': 'num_obs_to_use', 'type': 'int', 'min': 50, 'max': 192}]
fidel_to_opt = [5, 192]
max_capital = 2 * 60 * 60 # Optimisation budget in seconds
# A parallel set up where we will evaluate the function in three different threads.
num_workers = 3
# Optimise without multi-fidelity
config_params = {'domain': domain_vars}
config = load_config(config_params)
opt_val, opt_pt, history = maximise_function(snls_objective, config.domain,
max_capital, num_workers=num_workers,
capital_type='realtime', config=config)
print(opt_pt, opt_val)
# Optimise with multi-fidelity
config_params = {'domain': domain_vars, 'fidel_space': fidel_vars,
'fidel_to_opt': fidel_to_opt}
config = load_config(config_params)
# Optimise
mf_opt_val, mf_opt_pt, history = maximise_multifidelity_function(snls_mf_objective,
config.fidel_space, config.domain,
config.fidel_to_opt, snls_mf_cost,
max_capital, config=config)
print(mf_opt_pt, mf_opt_val)
if __name__ == '__main__':
main()
|
resource/lib/python2.7/site-packages/pyasn1/type/char.py
|
claudiopastorini/geofire-python
| 1,346 |
124773
|
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, <NAME> <<EMAIL>>
# License: http://pyasn1.sf.net/license.html
#
from pyasn1.type import univ, tag
class NumericString(univ.OctetString):
tagSet = univ.OctetString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 18)
)
class PrintableString(univ.OctetString):
tagSet = univ.OctetString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 19)
)
class TeletexString(univ.OctetString):
tagSet = univ.OctetString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 20)
)
class T61String(TeletexString):
pass
class VideotexString(univ.OctetString):
tagSet = univ.OctetString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 21)
)
class IA5String(univ.OctetString):
tagSet = univ.OctetString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 22)
)
class GraphicString(univ.OctetString):
tagSet = univ.OctetString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 25)
)
class VisibleString(univ.OctetString):
tagSet = univ.OctetString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 26)
)
class ISO646String(VisibleString):
pass
class GeneralString(univ.OctetString):
tagSet = univ.OctetString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 27)
)
class UniversalString(univ.OctetString):
tagSet = univ.OctetString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 28)
)
encoding = "utf-32-be"
class BMPString(univ.OctetString):
tagSet = univ.OctetString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 30)
)
encoding = "utf-16-be"
class UTF8String(univ.OctetString):
tagSet = univ.OctetString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
)
encoding = "utf-8"
|
stevedore/tests/test_cache.py
|
jaraco/stevedore
| 133 |
124792
|
<gh_stars>100-1000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for stevedore._cache
"""
import sys
from unittest import mock
from stevedore import _cache
from stevedore.tests import utils
class TestCache(utils.TestCase):
def test_disable_caching_executable(self):
"""Test caching is disabled if python interpreter is located under /tmp
directory (Ansible)
"""
with mock.patch.object(sys, 'executable', '/tmp/fake'):
sot = _cache.Cache()
self.assertTrue(sot._disable_caching)
def test_disable_caching_file(self):
"""Test caching is disabled if .disable file is present in target
dir
"""
cache_dir = _cache._get_cache_dir()
with mock.patch('os.path.isfile') as mock_path:
mock_path.return_value = True
sot = _cache.Cache()
mock_path.assert_called_with('%s/.disable' % cache_dir)
self.assertTrue(sot._disable_caching)
mock_path.return_value = False
sot = _cache.Cache()
self.assertFalse(sot._disable_caching)
@mock.patch('os.makedirs')
@mock.patch('builtins.open')
def test__get_data_for_path_no_write(self, mock_open, mock_mkdir):
sot = _cache.Cache()
sot._disable_caching = True
mock_open.side_effect = IOError
sot._get_data_for_path('fake')
mock_mkdir.assert_not_called()
|
app/master/build_store.py
|
rsennewald/ClusterRunner
| 164 |
124800
|
from collections import OrderedDict
from itertools import islice
from typing import List
from app.master.build import Build
from app.util.exceptions import ItemNotFoundError
class BuildStore:
"""
Build storage service that stores and handles all builds.
"""
_all_builds_by_id = OrderedDict()
@classmethod
def get(cls, build_id: int) -> Build:
"""
Returns a build by id
:param build_id: The id for the build whose status we are getting
"""
build = cls._all_builds_by_id.get(build_id)
if build is None:
raise ItemNotFoundError('Invalid build id: {}.'.format(build_id))
return build
@classmethod
def get_range(cls, start: int, end: int) -> List[Build]:
"""
Returns a list of all builds.
:param start: The starting index of the requested build
:param end: 1 + the index of the last requested element, although if this is greater than the total number
of builds available the length of the returned list may be smaller than (end - start)
"""
requested_builds = islice(cls._all_builds_by_id, start, end)
return [cls._all_builds_by_id[key] for key in requested_builds]
@classmethod
def add(cls, build: Build):
"""
Add new build to collection
:param build: The build to add to the store
"""
cls._all_builds_by_id[build.build_id()] = build
@classmethod
def size(cls) -> int:
"""
Return the amount of builds within the store
"""
return len(cls._all_builds_by_id)
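# Usage sketch (assumption: illustration only; `some_build` stands in for a
# fully constructed app.master.build.Build instance):
#
#   BuildStore.add(some_build)
#   build = BuildStore.get(some_build.build_id())
#   first_page = BuildStore.get_range(0, 20)
#   total = BuildStore.size()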
|
shenfun/forms/__init__.py
|
spectralDNS/shenfun
| 138 |
124870
|
<reponame>spectralDNS/shenfun<gh_stars>100-1000
#pylint: disable=missing-docstring
from .project import *
from .inner import *
from .operators import *
from .arguments import *
|
src/ralph/lib/polymorphic/tests/migrations/0001_initial.py
|
DoNnMyTh/ralph
| 1,668 |
124905
|
<gh_stars>1000+
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='PolymorphicModelBaseTest',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('name', models.CharField(max_length=50, null=True, blank=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SomethingRelated',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('name', models.CharField(max_length=50, null=True, blank=True)),
],
),
migrations.CreateModel(
name='PolymorphicModelTest',
fields=[
('polymorphicmodelbasetest_ptr', models.OneToOneField(auto_created=True, to='polymorphic_tests.PolymorphicModelBaseTest', serialize=False, parent_link=True, primary_key=True)),
],
options={
'abstract': False,
},
bases=('polymorphic_tests.polymorphicmodelbasetest', models.Model),
),
migrations.CreateModel(
name='PolymorphicModelTest2',
fields=[
('polymorphicmodelbasetest_ptr', models.OneToOneField(auto_created=True, to='polymorphic_tests.PolymorphicModelBaseTest', serialize=False, parent_link=True, primary_key=True)),
('another_related', models.ForeignKey(to='polymorphic_tests.SomethingRelated', null=True, blank=True, related_name='+')),
],
options={
'abstract': False,
},
bases=('polymorphic_tests.polymorphicmodelbasetest', models.Model),
),
migrations.AddField(
model_name='polymorphicmodelbasetest',
name='content_type',
field=models.ForeignKey(to='contenttypes.ContentType', null=True, blank=True),
),
migrations.AddField(
model_name='polymorphicmodelbasetest',
name='sth_related',
field=models.ForeignKey(to='polymorphic_tests.SomethingRelated', null=True, blank=True),
),
]
|
MicroTokenizer/tokenizers/unicode_script/generate_script_data.py
|
howl-anderson/MicroTokenizer
| 136 |
124953
|
<filename>MicroTokenizer/tokenizers/unicode_script/generate_script_data.py
def main():
# build indexes from 'scripts.txt'
idx = []
names = []
cats = []
import urllib.request
import re
import textwrap
url = "http://www.unicode.org/Public/UNIDATA/Scripts.txt"
f = urllib.request.urlopen(url)
for ln in f:
ln = ln.decode()
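        # A typical Scripts.txt line looks like (assumption: example only):
        #   0061..007A    ; Latin # L& [26] LATIN SMALL LETTER A..LATIN SMALL LETTER Z
        # The regex below captures the code point range, the script name and the
        # general category abbreviation after the '#'.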
p = re.findall(r"([0-9A-F]+)(?:\.\.([0-9A-F]+))?\W+(\w+)\s*#\s*(\w+)", ln)
if p:
a, b, name, cat = p[0]
if name not in names:
names.append(name)
if cat not in cats:
cats.append(cat)
idx.append(
(int(a, 16), int(b or a, 16), names.index(name), cats.index(cat))
)
idx.sort()
print(
'script_data = {\n"names":%s,\n"cats":%s,\n"idx":[\n%s\n]}'
% (
"\n".join(textwrap.wrap(repr(names), 80)),
"\n".join(textwrap.wrap(repr(cats), 80)),
"\n".join(
textwrap.wrap(", ".join("(0x%x,0x%x,%d,%d)" % c for c in idx), 80)
),
)
)
if __name__ == "__main__":
main()
|
tests/dev/test_checksite.py
|
mskymoore/yoda
| 747 |
125030
|
# coding=utf-8
from unittest import TestCase
from click.testing import CliRunner
import mock
import yoda
from requests.models import Response
class TestChecksite(TestCase):
"""
Test for the following commands:
| Module: dev
| command: checksite
"""
def __init__(self, methodName="runTest"):
super(TestChecksite, self).__init__()
self.runner = CliRunner()
def runTest(self):
mocked_response = Response()
mocked_response.status_code = 400
def test_with_working_url():
result = self.runner.invoke(yoda.cli, ['dev', 'checksite', 'https://google.com'])
output_string = str(result.output.encode('ascii', 'ignore').decode('utf-8')).strip()
self.assertTrue("running" in output_string)
def test_with_invalid_url():
result = self.runner.invoke(yoda.cli, ['dev', 'checksite', 'https://google'])
self.assertEqual(result.exit_code, -1)
@mock.patch('requests.get', return_value=mocked_response)
def test_with_mocked_response_code(_self):
result = self.runner.invoke(yoda.cli, ['dev', 'checksite', 'https://google.com'])
self.assertEqual(result.exit_code, 1)
test_with_working_url()
test_with_invalid_url()
test_with_mocked_response_code()
|
gramex/handlers/functionhandler.py
|
NAnnamalai/gramex
| 130 |
125031
|
<reponame>NAnnamalai/gramex<filename>gramex/handlers/functionhandler.py<gh_stars>100-1000
import json
import tornado.web
import tornado.gen
from types import GeneratorType
from gramex.transforms import build_transform
from gramex.config import app_log, CustomJSONEncoder
from .basehandler import BaseHandler
class FunctionHandler(BaseHandler):
'''
Renders the output of a function when the URL is called via GET or POST. It
accepts these parameters when initialized:
:arg string function: a string that resolves into any Python function or
method (e.g. ``str.lower``). By default, it is called as
``function(handler)`` where handler is this RequestHandler, but you can
override ``args`` and ``kwargs`` below to replace it with other
parameters. The result is rendered as-is (and hence must be a string, or
a Future that resolves to a string.) You can also yield one or more
results. These are written immediately, in order.
:arg list args: positional arguments to be passed to the function.
:arg dict kwargs: keyword arguments to be passed to the function.
:arg dict headers: HTTP headers to set on the response.
:arg string redirect: URL to redirect to when the result is done. Used to
trigger calculations without displaying any output.
'''
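    # A hypothetical gramex.yaml entry wiring a function to a URL (assumption:
    # the names and paths below are placeholders, shown only to illustrate the
    # parameters documented above):
    #
    #   url:
    #     total-endpoint:
    #       pattern: /total
    #       handler: FunctionHandler
    #       kwargs:
    #         function: mymodule.total
    #         headers:
    #           Content-Type: application/json
    #         redirect: /done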
@classmethod
def setup(cls, headers={}, **kwargs):
super(FunctionHandler, cls).setup(**kwargs)
# Don't use cls.info.function = build_transform(...) -- Python treats it as a method
cls.info = {}
cls.info['function'] = build_transform(kwargs, vars={'handler': None},
filename='url: %s' % cls.name)
cls.headers = headers
cls.post = cls.put = cls.delete = cls.patch = cls.options = cls.get
@tornado.gen.coroutine
def get(self, *path_args):
if self.redirects:
self.save_redirect_page()
if 'function' not in self.info:
raise ValueError('Invalid function definition in url:%s' % self.name)
result = self.info['function'](handler=self)
for header_name, header_value in self.headers.items():
self.set_header(header_name, header_value)
        # Use multipart to check if the response has multiple parts. Don't
# flush unless it's multipart. Flushing disables Etag
multipart = isinstance(result, GeneratorType) or len(result) > 1
# build_transform results are iterable. Loop through each item
for item in result:
# Resolve futures and write the result immediately
if tornado.concurrent.is_future(item):
item = yield item
# To check if item is a numpy object, avoid isinstance(numpy.int8), etc.
# Importing numpy is slow. Instead, check the class name.
# Strip trailing numbers (e.g. int8, int16, int32)
# Strip trailing underscore (e.g. str_, bytes_)
# Strip leading 'u' (e.g. uint, ulong)
cls = type(item).__name__.rstrip('0123456789_').lstrip('u')
if isinstance(item, (bytes, str)):
self.write(item)
if multipart:
self.flush()
# Ignore None as a return type
elif item is None:
pass
# Allow ANY type that can be converted by CustomJSONEncoder.
# This includes JSON types, detected by isinstance(item, ...))
# and numpy types, detected by cls in (...)
# and anything with a to_dict, e.g. DataFrames
elif (isinstance(item, (int, float, bool, list, tuple, dict)) or
cls in ('datetime', 'int', 'intc', 'float', 'bool', 'ndarray', 'bytes', 'str') or
hasattr(item, 'to_dict')):
self.write(json.dumps(item, separators=(',', ':'), ensure_ascii=True,
cls=CustomJSONEncoder))
if multipart:
self.flush()
else:
app_log.warning('url:%s: FunctionHandler can write scalars/list/dict, not %s: %s',
self.name, type(item), repr(item))
if self.redirects:
self.redirect_next()
|
libsortvis/algos/cyclesort.py
|
tknuth/sortvis
| 117 |
125044
|
def cyclesort(lst):
    # Cycle sort specialised for the visualiser: lst is assumed to hold a
    # permutation of its own indices, and lst.log() records a snapshot after
    # every write so the animation can show each individual move.
    for i in range(len(lst)):
        if i != lst[i]:
            n = i
            while 1:
                tmp = lst[int(n)]
                if n != i:
                    # Drop the carried value into its home slot.
                    lst[int(n)] = last_value
                    lst.log()
                else:
                    # Opening the cycle: blank the starting slot until it closes.
                    lst[int(n)] = None
                    lst.log()
                last_value = tmp
                n = last_value
                if n == i:
                    # Back at the start: close the cycle with the last value.
                    lst[int(n)] = last_value
                    lst.log()
                    break
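# Reference version for comparison (assumption: not part of libsortvis). This is
# a textbook cycle sort over arbitrary comparable values, without the
# index-permutation assumption and visualisation logging used above.
def cyclesort_generic(a):
    for start in range(len(a) - 1):
        item = a[start]
        # Count how many later elements are smaller: that is item's final slot.
        pos = start + sum(1 for x in a[start + 1:] if x < item)
        if pos == start:
            continue  # already in place, nothing to write
        while item == a[pos]:
            pos += 1  # skip over duplicates of item
        a[pos], item = item, a[pos]
        # Keep rotating the cycle until we return to the starting slot.
        while pos != start:
            pos = start + sum(1 for x in a[start + 1:] if x < item)
            while item == a[pos]:
                pos += 1
            a[pos], item = item, a[pos]
    return a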
|
admin/test/test_merge_pr.py
|
stackriot/flocker
| 2,690 |
125055
|
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Tests for :module:`admin.merge_pr`.
"""
import os
import subprocess
from hypothesis import given
from hypothesis.strategies import (
booleans,
dictionaries,
fixed_dictionaries,
just,
lists,
one_of,
sampled_from,
text,
)
from hypothesis.extra.datetime import datetimes
from pyrsistent import pmap, plist
from flocker.testtools import TestCase
from admin import merge_pr
SCRIPT_FILENAME = 'merge-pr'
SCRIPT_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)),
SCRIPT_FILENAME)
def run_script(args):
return subprocess.check_output([SCRIPT_FILE] + args)
class SmokeTests(TestCase):
"""
Basic tests of running the script.
"""
def test_help(self):
output = run_script(['--help'])
self.assertIn("Merge a branch when all the tests pass.", output)
class URLTests(TestCase):
"""
Test for URL manipulation.
"""
def test_url_path(self):
"""
Correct path from a full URL.
"""
path = '/ClusterHQ/flocker/pull/1717'
self.assertEqual(path, merge_pr.url_path('https://github.com' + path))
def test_url_path_no_hostname(self):
"""
Correct path from a URL path.
"""
path = '/ClusterHQ/flocker/pull/1717'
self.assertEqual(path, merge_pr.url_path(path))
def test_url_path_parts(self):
"""
Correct segments from a full URL.
"""
path_parts = ['ClusterHQ', 'flocker', 'pull', '1717']
self.assertEqual(
[''] + path_parts,
merge_pr.url_path_parts(
'https://github.com/' + '/'.join(path_parts)))
def test_url_path_parts_no_hostname(self):
"""
Correct segments from a URL path.
"""
path_parts = ['ClusterHQ', 'flocker', 'pull', '1717']
self.assertEqual(
[''] + path_parts,
merge_pr.url_path_parts('/' + '/'.join(path_parts)))
def test_pr_api_url(self):
"""
Correct API URL for a full URL.
"""
self.assertEqual(
'https://api.github.com/repos/ClusterHQ/flocker/pulls/1717',
merge_pr.pr_api_url_from_web_url(
'https://github.com/ClusterHQ/flocker/pull/1717'))
def commit_statuses(**kwargs):
"""
Create a strategy for GitHub commit status dicts.
:param **kwargs: alter the strategy for a particular
key of the status dict, e.g. state=just(u'success')
will fix the state key of the dict to that string.
:return strategy: a strategy.
"""
base = {'updated_at': datetimes(timezones=['UTC']),
'state': text(),
'context': text(average_size=2),
'target_url': text(average_size=2),
}
base.update(**kwargs)
return fixed_dictionaries(base)
jenkins_results = sampled_from(merge_pr.JenkinsResults.iterconstants())
"""Strategy for generating JenkinsResults values."""
class StatusesTests(TestCase):
"""
Tests for interpretation of commit statuses.
https://developer.github.com/v3/repos/statuses/
"""
@given(commit_statuses())
def test_final_status_one(self, status):
"""
Final status of one status is itself.
"""
self.assertEqual(status, merge_pr.final_status([status]))
@given(commit_statuses(), commit_statuses())
def test_final_status_many(self, status1, status2):
"""
Final status of a list is the latest.
"""
target = status1
if status2['updated_at'] > status1['updated_at']:
target = status2
self.assertEqual(target, merge_pr.final_status([status2, status1]))
@given(commit_statuses(state=text().filter(lambda x: x != u'success')))
def test_not_success(self, status):
"""
Always `not_success` for anything except 'success'.
"""
self.assertEqual(True, merge_pr.not_success(status))
@given(commit_statuses(state=just(u'success')))
def test_not_success_success(self, status):
"""
`not_success` False for 'success'.
"""
self.assertEqual(False, merge_pr.not_success(status))
@given(commit_statuses(), jenkins_results)
def test_format_status(self, status, jenkins):
"""
`format_status` produces unicode that mentions the context and url.
"""
formatted = merge_pr.format_status((status, jenkins))
self.assertIsInstance(formatted, unicode)
self.assertIn(status['context'], formatted)
self.assertIn(status['target_url'], formatted)
# These strategies are far from complete coverage of the possible
# Jenkins API responses.
jenkins_builds = fixed_dictionaries(dict(
result=sampled_from([None, 'FAILURE', 'ABORTED', 'SUCCESS'])))
"""Strategy for generating records of individual builds of a Jenkins job."""
NO_BUILDS = object()
"""
Sentinel to say that `jenkins_build_results` should not include the builds key.
"""
def jenkins_build_results(inQueue=None, builds=None):
"""Create a strategy for generating Jenkins API information for a job.
:param strategy inQueue: strategy for the inQueue key, or None to use
the default.
:param strategy builds: strategy for populating the builds key, or None
for the default. The special value `NO_BUILDS` will mean that the
builds key is not in the resulting dict at all.
:return strategy: a strategy.
"""
strats = []
if inQueue is None:
inQueue = booleans()
strats.append(just(pmap()))
without_builds = fixed_dictionaries(dict(
inQueue=inQueue))
if builds is None or builds is NO_BUILDS:
strats.append(without_builds)
if builds is None:
builds = lists(jenkins_builds, average_size=1)
if builds is not NO_BUILDS:
with_builds = fixed_dictionaries(dict(
inQueue=inQueue,
builds=builds,
property=dictionaries(
text(max_size=2), text(max_size=2),
average_size=1, max_size=2)))
strats.append(with_builds)
return one_of(*strats)
class JenkinsResultsTests(TestCase):
"""
Tests for interpretation of build results from Jenkins.
"""
@given(jenkins_build_results())
def test_result_types(self, info):
"""
Result always a tuple (`JenkinsResults`, Maybe[dict])
"""
result, params = merge_pr.jenkins_info_from_response(info)
self.assertIn(result, list(merge_pr.JenkinsResults.iterconstants()))
if params is not None:
self.assertIsInstance(params, dict)
@given(jenkins_build_results(inQueue=just(True)))
def test_in_queue(self, info):
"""
Job with inQueue = True is `RUNNING`.
"""
result, params = merge_pr.jenkins_info_from_response(info)
self.assertEqual(merge_pr.JenkinsResults.RUNNING, result)
self.assertEqual({}, params)
@given(jenkins_build_results(inQueue=just(False), builds=NO_BUILDS))
def test_builds_not_present(self, info):
"""
Job without a builds list is `UNKNOWN`.
"""
result, params = merge_pr.jenkins_info_from_response(info)
self.assertEqual(merge_pr.JenkinsResults.UNKNOWN, result)
self.assertEqual({}, params)
@given(jenkins_build_results(inQueue=just(False), builds=just(plist())))
def test_no_builds(self, info):
"""
Job with empty builds list is `NOTRUN`.
"""
result, params = merge_pr.jenkins_info_from_response(info)
self.assertEqual(merge_pr.JenkinsResults.NOTRUN, result)
self.assertEqual({}, params)
|
raspberryturk/embedded/raspberryturkd.py
|
Dzhuks/raspberryturk
| 169 |
125114
|
<reponame>Dzhuks/raspberryturk
import logging
import signal
import time
from daemon import runner
import raspberryturk
from raspberryturk.embedded.agent import Agent
class RaspberryTurkDaemon(object):
def __init__(self):
self.stdin_path = '/dev/null'
self.stdout_path = raspberryturk.log_path('raspberryturk.out')
self.stderr_path = raspberryturk.log_path('raspberryturk.err')
self.pidfile_path = raspberryturk.run_path('raspberryturkd.pid')
self.pidfile_timeout = 5
self._interrupt_signum = None
def run(self):
raspberryturk.setup_file_logging()
self.logger = logging.getLogger(__name__)
self.logger.info("Starting RaspberryTurkDaemon.")
time.sleep(1)
with Agent() as a:
while self._interrupt_signum is None:
a.perception_action_sequence()
self.logger.warn("Received signal {}.".format(self._interrupt_signum))
self.logger.info("Stopping RaspberryTurkDaemon.")
def interrupt_handler(self, signum, frame):
self._interrupt_signum = signum
def main():
rtd = RaspberryTurkDaemon()
daemon_runner = runner.DaemonRunner(rtd)
daemon_runner.daemon_context.signal_map = {
signal.SIGINT: rtd.interrupt_handler,
signal.SIGTERM: rtd.interrupt_handler,
signal.SIGHUP: 'terminate',
}
daemon_runner.do_action()
if __name__ == '__main__':
main()
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/i/invalid/invalid_hash_returned.py
|
ciskoinch8/vimrc
| 463 |
125201
|
"""Check invalid value returned by __hash__ """
# pylint: disable=too-few-public-methods,missing-docstring,no-self-use,import-error, useless-object-inheritance
import six
from missing import Missing
class FirstGoodHash(object):
"""__hash__ returns <type 'int'>"""
def __hash__(self):
return 1
class SecondGoodHash(object):
"""__hash__ returns <type 'int'>"""
def __hash__(self):
return 0
class HashMetaclass(type):
def __hash__(cls):
return 1
@six.add_metaclass(HashMetaclass)
class ThirdGoodHash(object):
"""Hash through the metaclass."""
class FirstBadHash(object):
""" __hash__ returns a dict """
def __hash__(self): # [invalid-hash-returned]
return {}
class SecondBadHash(object):
""" __hash__ returns str """
def __hash__(self): # [invalid-hash-returned]
return "True"
class ThirdBadHash(object):
""" __hash__ returns a float"""
def __hash__(self): # [invalid-hash-returned]
return 1.11
class FourthBadHash(object):
""" __hash__ returns node which does not have 'value' in AST """
def __hash__(self): # [invalid-hash-returned]
return lambda: 3
class AmbigousHash(object):
""" Uninferable return value """
__hash__ = lambda self: Missing
class AnotherAmbiguousHash(object):
"""Potential uninferable return value"""
def __hash__(self):
return hash(Missing)
|
pubnub/endpoints/file_operations/download_file.py
|
natekspencer/pubnub-python
| 146 |
125215
|
from pubnub.endpoints.file_operations.file_based_endpoint import FileOperationEndpoint
from pubnub.enums import HttpMethod, PNOperationType
from pubnub.crypto import PubNubFileCrypto
from pubnub.models.consumer.file import PNDownloadFileResult
from pubnub.request_handlers.requests_handler import RequestsRequestHandler
from pubnub.endpoints.file_operations.get_file_url import GetFileDownloadUrl
class DownloadFileNative(FileOperationEndpoint):
def __init__(self, pubnub):
FileOperationEndpoint.__init__(self, pubnub)
self._file_id = None
self._file_name = None
self._pubnub = pubnub
self._download_data = None
self._cipher_key = None
def cipher_key(self, cipher_key):
self._cipher_key = cipher_key
return self
def build_path(self):
return self._download_data.result.file_url
def http_method(self):
return HttpMethod.GET
def is_auth_required(self):
return False
def custom_params(self):
return {}
def file_id(self, file_id):
self._file_id = file_id
return self
def file_name(self, file_name):
self._file_name = file_name
return self
def decrypt_payload(self, data):
return PubNubFileCrypto(self._pubnub.config).decrypt(
self._cipher_key or self._pubnub.config.cipher_key,
data
)
def validate_params(self):
self.validate_subscribe_key()
self.validate_channel()
self.validate_file_name()
self.validate_file_id()
def create_response(self, envelope):
if self._cipher_key or self._pubnub.config.cipher_key:
return PNDownloadFileResult(self.decrypt_payload(envelope.content))
else:
return PNDownloadFileResult(envelope.content)
def non_json_response(self):
return True
def operation_type(self):
return PNOperationType.PNDownloadFileAction
def use_base_path(self):
return False
def build_params_callback(self):
return lambda a: {}
def name(self):
return "Downloading file"
def sync(self):
self._download_data = GetFileDownloadUrl(self._pubnub)\
.channel(self._channel)\
.file_name(self._file_name)\
.file_id(self._file_id)\
.sync()
return super(DownloadFileNative, self).sync()
def pn_async(self, callback):
return RequestsRequestHandler(self._pubnub).async_file_based_operation(self.sync, callback, "File Download")
|
valentyusb/usbcore/rx/shifter.py
|
rjeschmi/valentyusb
| 105 |
125258
|
#!/usr/bin/env python3
from migen import *
from migen.fhdl.decorators import ResetInserter
from ..test.common import BaseUsbTestCase
import unittest
@ResetInserter()
class RxShifter(Module):
"""RX Shifter
A shifter is responsible for shifting in serial bits and presenting them
as parallel data. The shifter knows how many bits to shift and has
controls for resetting the shifter.
Clock Domain
------------
usb_12 : 12MHz
Parameters
----------
Parameters are passed in via the constructor.
width : int
Number of bits to shift in.
Input Ports
-----------
i_valid : Signal(1)
Qualifier for all of the input signals. Indicates one bit of valid
data is present on the inputs.
i_data : Signal(1)
Serial input data.
Qualified by valid.
Output Ports
------------
o_data : Signal(width)
Shifted in data.
o_put : Signal(1)
Asserted for one clock once the register is full.
"""
def __init__(self, width):
self.i_valid = Signal()
self.i_data = Signal()
self.o_data = Signal(width)
self.o_put = Signal()
# Instead of using a counter, we will use a sentinel bit in the shift
# register to indicate when it is full.
shift_reg = Signal(width+1, reset=0b1)
self.comb += self.o_data.eq(shift_reg[0:width])
self.sync += [
self.o_put.eq(shift_reg[width-1] & ~shift_reg[width] & self.i_valid),
If(self.i_valid,
If(shift_reg[width],
shift_reg.eq(Cat(self.i_data, shift_reg.reset[0:width])),
).Else(
shift_reg.eq(Cat(self.i_data, shift_reg[0:width])),
),
),
]
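# Behavioural sketch of the sentinel-bit trick (assumption: illustration only,
# not part of the gateware; cycle-accurate details such as the one-clock o_put
# delay are deliberately omitted).
def _sentinel_shifter_model(bits, width):
    """Shift serial bits in; the extra top bit acts as the 'register full' flag."""
    words = []
    shift_reg = 0b1  # sentinel starts at bit 0, mirroring reset=0b1
    for b in bits:
        if shift_reg >> width:  # register was full: restart with a fresh sentinel
            shift_reg = (0b1 << 1) | (b & 1)
        else:                   # shift the new bit into bit 0
            shift_reg = ((shift_reg << 1) | (b & 1)) & ((1 << (width + 1)) - 1)
        if shift_reg >> width:  # sentinel reached the top: one full word captured
            words.append(shift_reg & ((1 << width) - 1))
    return words
if __name__ == "__main__":
    # Eight serial bits with width=4 should yield two 4-bit words.
    print(_sentinel_shifter_model([1, 0, 1, 1, 0, 1, 0, 0], width=4))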
|
test/ext/mypy/plugin_files/mixin_not_mapped.py
|
petit87/sqlalchemy
| 5,383 |
125265
|
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import registry
reg: registry = registry()
Base = declarative_base()
class SomeAbstract(Base):
__abstract__ = True
class HasUpdatedAt:
updated_at = Column(Integer)
@reg.mapped
class Foo(SomeAbstract):
__tablename__ = "foo"
id: int = Column(Integer(), primary_key=True)
name: str = Column(String)
class Bar(HasUpdatedAt, Base):
__tablename__ = "bar"
id = Column(Integer(), primary_key=True)
num = Column(Integer)
Bar.__mapper__
# EXPECTED_MYPY: "Type[HasUpdatedAt]" has no attribute "__mapper__"
HasUpdatedAt.__mapper__
# EXPECTED_MYPY: "Type[SomeAbstract]" has no attribute "__mapper__"
SomeAbstract.__mapper__
|
vimfiles/bundle/vim-python/submodules/pylint/tests/regrtest_data/absimp/string.py
|
ciskoinch8/vimrc
| 463 |
125287
|
"""
https://www.logilab.org/ticket/70495
https://www.logilab.org/ticket/70565
"""
from __future__ import absolute_import, print_function
import string
print(string)
|
running_modes/configurations/transfer_learning/adaptive_learning_rate_configuration.py
|
lilleswing/Reinvent-1
| 183 |
125302
|
<reponame>lilleswing/Reinvent-1
from dataclasses import dataclass
@dataclass
class AdaptiveLearningRateConfiguration:
mode: str = "constant"
gamma: float = 0.8
step: int = 1
start: float = 5E-4
min: float = 1E-5
threshold: float = 1E-4
average_steps: int = 4
patience: int = 8
restart_value: float = 1E-5
sample_size: int = 100
restart_times: int = 0
|
main.py
|
matthew-z/pytorch_rnet
| 227 |
125313
|
<reponame>matthew-z/pytorch_rnet
import argparse
import datetime
from pathlib import Path
from allennlp.commands import main, Subcommand
from allennlp.commands.train import train_model
from allennlp.common import Params
from allennlp.common.util import import_submodules
from allennlp.models import Model
class MyTrain(Subcommand):
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
# pylint: disable=protected-access
description = '''Train the specified model on the specified dataset.'''
subparser = parser.add_parser(name, description=description, help='Train a model')
subparser.add_argument('param_path',
type=str,
help='path to parameter file describing the model to be trained')
subparser.add_argument('-s', '--serialization-dir',
required=False,
default="",
type=str,
help='directory in which to save the model and its logs')
subparser.add_argument('-r', '--recover',
action='store_true',
default=False,
help='recover training from the state in serialization_dir')
subparser.add_argument('-f', '--force',
action='store_true',
required=False,
help='overwrite the output directory if it exists')
subparser.add_argument('-o', '--overrides',
type=str,
default="",
help='a JSON structure used to override the experiment configuration')
subparser.add_argument('-e', '--ext-vars',
type=str,
default=None,
help='Used to provide ext variable to jsonnet')
subparser.add_argument('--fp16',
action='store_true',
required=False,
help='use fp 16 training')
subparser.add_argument('--file-friendly-logging',
action='store_true',
default=False,
help='outputs tqdm status on separate lines and slows tqdm refresh rate')
subparser.set_defaults(func=train_model_from_args)
return subparser
def train_model_from_args(args: argparse.Namespace):
"""
Just converts from an ``argparse.Namespace`` object to string paths.
"""
start_time = datetime.datetime.now().strftime('%b-%d_%H-%M')
if args.serialization_dir:
serialization_dir = args.serialization_dir
else:
path = Path(args.param_path.replace("configs/", "results/")).resolve()
serialization_dir = path.with_name(path.stem) / start_time
train_model_from_file(args.param_path,
serialization_dir,
args.overrides,
args.file_friendly_logging,
args.recover,
args.force,
args.ext_vars)
def train_model_from_file(parameter_filename: str,
serialization_dir: str,
overrides: str = "",
file_friendly_logging: bool = False,
recover: bool = False,
force: bool = False,
ext_vars=None) -> Model:
"""
A wrapper around :func:`train_model` which loads the params from a file.
Parameters
----------
    parameter_filename : ``str``
A json parameter file specifying an AllenNLP experiment.
serialization_dir : ``str``
The directory in which to save results and logs. We just pass this along to
:func:`train_model`.
overrides : ``str``
A JSON string that we will use to override values in the input parameter file.
file_friendly_logging : ``bool``, optional (default=False)
If ``True``, we make our output more friendly to saved model files. We just pass this
along to :func:`train_model`.
    recover : ``bool``, optional (default=False)
If ``True``, we will try to recover a training run from an existing serialization
directory. This is only intended for use when something actually crashed during the middle
of a run. For continuing training a model on new data, see the ``fine-tune`` command.
"""
# Load the experiment config from a file and pass it to ``train_model``.
params = Params.from_file(parameter_filename, overrides, ext_vars=ext_vars)
return train_model(params, serialization_dir, file_friendly_logging, recover, force)
if __name__ == "__main__":
import_submodules("qa")
import_submodules("modules")
main(prog="ReadingZoo",subcommand_overrides={"train": MyTrain()})
|
research/long_block_sequencer/long_block_encoder.py
|
legacyai/tf-transformers
| 116 |
125356
|
<reponame>legacyai/tf-transformers<gh_stars>100-1000
# coding=utf-8
# Copyright 2021 TF-Transformers Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Long Block Encoder in Tensorflow 2.0"""
import tensorflow as tf
from tf_transformers.activations import get_activation
from tf_transformers.core import LegacyLayer, LegacyModel
class Long_Block_Encoder(LegacyLayer):
def __init__(
self,
model_layer,
num_splits,
dense_dimension=None,
gru_units=None,
activation='gelu',
is_training=False,
use_dropout=False,
use_gru_layer=True,
**kwargs,
):
super(Long_Block_Encoder, self).__init__(
is_training=is_training, use_dropout=use_dropout, name=model_layer.name, **kwargs
)
self.model_layer = model_layer
self.num_splits = num_splits
self.use_gru_layer = use_gru_layer
if self.use_gru_layer:
if gru_units is None:
raise ValueError("When using GRU layer, set `gru_units`")
self.projection_layer = tf.keras.layers.Bidirectional(
tf.keras.layers.GRU(gru_units, return_sequences=True, name='gru_for_long_block')
)
else:
if dense_dimension is None:
raise ValueError("When using dense projection, set `dense_dimension`")
            activation = get_activation(activation)
self.projection_layer = tf.keras.layers.Dense(
dense_dimension, activation=activation, kernel_initializer='glorot_uniform', name="gelu_for_long_block"
)
self._config_dict = model_layer._config_dict
self._mask_mode = model_layer._mask_mode
self._sequence_length = model_layer._sequence_length
self.model_inputs, self.model_outputs = self.get_model(initialize_only=True)
def call(self, inputs):
all_outputs_token_embeddings = []
inputs_splitted = {}
input_names = []
for k, v in inputs.items():
inputs_splitted[k] = tf.split(v, self.num_splits, axis=1)
input_names.append(k)
for i in range(self.num_splits):
inputs_main = {}
for name in input_names:
inputs_main[name] = inputs_splitted[name][i]
model_outputs = self.model_layer(inputs_main)
all_outputs_token_embeddings.append(model_outputs['token_embeddings'])
        token_embeddings_concatenated = tf.concat(all_outputs_token_embeddings, axis=1)  # concatenate over sequence length
        token_embeddings_concatenated = self.projection_layer(token_embeddings_concatenated)
        return {'token_embeddings': token_embeddings_concatenated}
def get_model(self, initialize_only=False):
inputs = {}
for k, v in self.model_layer.model_inputs.items():
shape = v.shape
inputs[k] = tf.keras.layers.Input(shape[1:], batch_size=shape[0], name=k, dtype=v.dtype)
layer_output = self(inputs)
if initialize_only:
return inputs, layer_output
model = LegacyModel(inputs=inputs, outputs=layer_output, name="long_span_selection")
model.model_config = self.model_layer._config_dict
return model
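# Usage sketch: wrapping an existing tf-transformers encoder layer. The names
# below (AlbertModel, "albert-base-v2") are assumptions for illustration; any
# LegacyLayer exposing `model_inputs` and a 'token_embeddings' output should
# work, and the input sequence length must be divisible by `num_splits`.
#
#     from tf_transformers.models import AlbertModel
#     model_layer = AlbertModel.from_pretrained("albert-base-v2", return_layer=True)
#     long_encoder = Long_Block_Encoder(model_layer, num_splits=4, gru_units=128)
#     long_model = long_encoder.get_model()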
|
scripts/external_libs/scapy-2.4.5/scapy/contrib/automotive/gm/gmlanutils.py
|
dariusgrassi/trex-core
| 250 |
125386
|
<reponame>dariusgrassi/trex-core<filename>scripts/external_libs/scapy-2.4.5/scapy/contrib/automotive/gm/gmlanutils.py<gh_stars>100-1000
#! /usr/bin/env python
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
# scapy.contrib.description = GMLAN Utilities
# scapy.contrib.status = loads
import time
from scapy.compat import Optional, cast, Callable
from scapy.contrib.automotive.gm.gmlan import GMLAN, GMLAN_SA, GMLAN_RD, \
GMLAN_TD, GMLAN_PM, GMLAN_RMBA
from scapy.config import conf
from scapy.packet import Packet
from scapy.contrib.isotp import ISOTPSocket
from scapy.error import warning, log_loading
from scapy.utils import PeriodicSenderThread
__all__ = ["GMLAN_TesterPresentSender", "GMLAN_InitDiagnostics",
"GMLAN_GetSecurityAccess", "GMLAN_RequestDownload",
"GMLAN_TransferData", "GMLAN_TransferPayload",
"GMLAN_ReadMemoryByAddress", "GMLAN_BroadcastSocket"]
log_loading.info("\"conf.contribs['GMLAN'] "
                 "['treat-response-pending-as-answer']\" set to True. This "
                 "is required by the GMLAN-Utils module to operate "
                 "correctly.")
try:
    conf.contribs['GMLAN']['treat-response-pending-as-answer'] = True
except KeyError:
    conf.contribs['GMLAN'] = {'treat-response-pending-as-answer': True}
# Helper function
def _check_response(resp, verbose):
# type: (Packet, Optional[bool]) -> bool
if resp is None:
if verbose:
print("Timeout.")
return False
if verbose:
resp.show()
return resp.service != 0x7f # NegativeResponse
class GMLAN_TesterPresentSender(PeriodicSenderThread):
def __init__(self, sock, pkt=GMLAN(service="TesterPresent"), interval=2):
# type: (ISOTPSocket, Packet, int) -> None
""" Thread to send GMLAN TesterPresent packets periodically
:param sock: socket where packet is sent periodically
:param pkt: packet to send
:param interval: interval between two packets
"""
PeriodicSenderThread.__init__(self, sock, pkt, interval)
def run(self):
# type: () -> None
while not self._stopped.is_set():
for p in self._pkts:
self._socket.sr1(p, verbose=False, timeout=0.1)
time.sleep(self._interval)
def GMLAN_InitDiagnostics(sock, broadcast_socket=None, timeout=None, verbose=None, retry=0): # noqa: E501
# type: (ISOTPSocket, Optional[ISOTPSocket], Optional[int], Optional[bool], int) -> bool # noqa: E501
""" Send messages to put an ECU into diagnostic/programming state.
:param sock: socket for communication.
:param broadcast_socket: socket for broadcasting. If provided some message
will be sent as broadcast. Recommended when used
on a network with several ECUs.
:param timeout: timeout for sending, receiving or sniffing packages.
:param verbose: set verbosity level
:param retry: number of retries in case of failure.
:return: True on success else False
"""
# Helper function
def _send_and_check_response(sock, req, timeout, verbose):
# type: (ISOTPSocket, Packet, Optional[int], Optional[bool]) -> bool
if verbose:
print("Sending %s" % repr(req))
resp = sock.sr1(req, timeout=timeout, verbose=False)
return _check_response(resp, verbose)
if verbose is None:
verbose = conf.verb > 0
retry = abs(retry)
while retry >= 0:
retry -= 1
# DisableNormalCommunication
p = GMLAN(service="DisableNormalCommunication")
if broadcast_socket is None:
if not _send_and_check_response(sock, p, timeout, verbose):
continue
else:
if verbose:
print("Sending %s as broadcast" % repr(p))
broadcast_socket.send(p)
time.sleep(0.05)
# ReportProgrammedState
p = GMLAN(service="ReportProgrammingState")
if not _send_and_check_response(sock, p, timeout, verbose):
continue
# ProgrammingMode requestProgramming
p = GMLAN() / GMLAN_PM(subfunction="requestProgrammingMode")
if not _send_and_check_response(sock, p, timeout, verbose):
continue
time.sleep(0.05)
# InitiateProgramming enableProgramming
# No response expected
p = GMLAN() / GMLAN_PM(subfunction="enableProgrammingMode")
if verbose:
print("Sending %s" % repr(p))
sock.send(p)
time.sleep(0.05)
return True
return False
def GMLAN_GetSecurityAccess(sock, key_function, level=1, timeout=None, verbose=None, retry=0): # noqa: E501
    # type: (ISOTPSocket, Callable[[int], int], int, Optional[int], Optional[bool], int) -> bool  # noqa: E501
    """ Authenticate on ECU. Implements the Seed-Key procedure.
:param sock: socket to send the message on.
:param key_function: function implementing the key algorithm.
:param level: level of access
:param timeout: timeout for sending, receiving or sniffing packages.
:param verbose: set verbosity level
:param retry: number of retries in case of failure.
:return: True on success.
"""
if verbose is None:
verbose = conf.verb > 0
retry = abs(retry)
if key_function is None:
return False
if level % 2 == 0:
warning("Parameter Error: Level must be an odd number.")
return False
while retry >= 0:
retry -= 1
request = GMLAN() / GMLAN_SA(subfunction=level)
if verbose:
print("Requesting seed..")
resp = sock.sr1(request, timeout=timeout, verbose=0)
if not _check_response(resp, verbose):
if resp is not None and resp.returnCode == 0x37 and retry:
if verbose:
print("RequiredTimeDelayNotExpired. Wait 10s.")
time.sleep(10)
if verbose:
print("Negative Response.")
continue
seed = resp.securitySeed
if seed == 0:
if verbose:
print("ECU security already unlocked. (seed is 0x0000)")
return True
keypkt = GMLAN() / GMLAN_SA(subfunction=level + 1,
securityKey=key_function(seed))
if verbose:
print("Responding with key..")
resp = sock.sr1(keypkt, timeout=timeout, verbose=0)
if resp is None:
if verbose:
print("Timeout.")
continue
if verbose:
resp.show()
if resp.sprintf("%GMLAN.service%") == "SecurityAccessPositiveResponse": # noqa: E501
if verbose:
print("SecurityAccess granted.")
return True
# Invalid Key
elif resp.sprintf("%GMLAN.service%") == "NegativeResponse" and \
resp.sprintf("%GMLAN.returnCode%") == "InvalidKey":
if verbose:
print("Key invalid")
continue
return False
def GMLAN_RequestDownload(sock, length, timeout=None, verbose=None, retry=0):
# type: (ISOTPSocket, int, Optional[int], Optional[bool], int) -> bool
""" Send RequestDownload message.
Usually used before calling TransferData.
:param sock: socket to send the message on.
:param length: value for the message's parameter 'unCompressedMemorySize'.
:param timeout: timeout for sending, receiving or sniffing packages.
:param verbose: set verbosity level.
:param retry: number of retries in case of failure.
:return: True on success
"""
if verbose is None:
verbose = conf.verb > 0
retry = abs(retry)
while retry >= 0:
# RequestDownload
pkt = GMLAN() / GMLAN_RD(memorySize=length)
resp = sock.sr1(pkt, timeout=timeout, verbose=0)
if _check_response(resp, verbose):
return True
retry -= 1
if retry >= 0 and verbose:
print("Retrying..")
return False
def GMLAN_TransferData(sock, addr, payload, maxmsglen=None, timeout=None, verbose=None, retry=0): # noqa: E501
# type: (ISOTPSocket, int, bytes, Optional[int], Optional[int], Optional[bool], int) -> bool # noqa: E501
""" Send TransferData message.
Usually used after calling RequestDownload.
:param sock: socket to send the message on.
:param addr: destination memory address on the ECU.
:param payload: data to be sent.
:param maxmsglen: maximum length of a single iso-tp message.
default: maximum length
:param timeout: timeout for sending, receiving or sniffing packages.
:param verbose: set verbosity level.
:param retry: number of retries in case of failure.
:return: True on success.
"""
if verbose is None:
verbose = conf.verb > 0
retry = abs(retry)
startretry = retry
scheme = conf.contribs['GMLAN']['GMLAN_ECU_AddressingScheme']
if addr < 0 or addr >= 2**(8 * scheme):
warning("Error: Invalid address %s for scheme %s",
hex(addr), str(scheme))
return False
# max size of dataRecord according to gmlan protocol
if maxmsglen is None or maxmsglen <= 0 or maxmsglen > (4093 - scheme):
maxmsglen = (4093 - scheme)
maxmsglen = cast(int, maxmsglen)
for i in range(0, len(payload), maxmsglen):
retry = startretry
while True:
if len(payload[i:]) > maxmsglen:
transdata = payload[i:i + maxmsglen]
else:
transdata = payload[i:]
pkt = GMLAN() / GMLAN_TD(startingAddress=addr + i,
dataRecord=transdata)
resp = sock.sr1(pkt, timeout=timeout, verbose=0)
if _check_response(resp, verbose):
break
retry -= 1
if retry >= 0:
if verbose:
print("Retrying..")
else:
return False
return True
def GMLAN_TransferPayload(sock, addr, payload, maxmsglen=None, timeout=None,
verbose=None, retry=0):
# type: (ISOTPSocket, int, bytes, Optional[int], Optional[int], Optional[bool], int) -> bool # noqa: E501
""" Send data by using GMLAN services.
:param sock: socket to send the data on.
:param addr: destination memory address on the ECU.
:param payload: data to be sent.
:param maxmsglen: maximum length of a single iso-tp message.
default: maximum length
:param timeout: timeout for sending, receiving or sniffing packages.
:param verbose: set verbosity level.
:param retry: number of retries in case of failure.
:return: True on success.
"""
if not GMLAN_RequestDownload(sock, len(payload), timeout=timeout,
verbose=verbose, retry=retry):
return False
if not GMLAN_TransferData(sock, addr, payload, maxmsglen=maxmsglen,
timeout=timeout, verbose=verbose, retry=retry):
return False
return True
def GMLAN_ReadMemoryByAddress(sock, addr, length, timeout=None,
verbose=None, retry=0):
# type: (ISOTPSocket, int, int, Optional[int], Optional[bool], int) -> Optional[bytes] # noqa: E501
""" Read data from ECU memory.
:param sock: socket to send the data on.
:param addr: source memory address on the ECU.
:param length: bytes to read.
:param timeout: timeout for sending, receiving or sniffing packages.
:param verbose: set verbosity level.
:param retry: number of retries in case of failure.
    :return: bytes read or None
"""
if verbose is None:
verbose = conf.verb > 0
retry = abs(retry)
scheme = conf.contribs['GMLAN']['GMLAN_ECU_AddressingScheme']
if addr < 0 or addr >= 2**(8 * scheme):
warning("Error: Invalid address %s for scheme %s",
hex(addr), str(scheme))
return None
# max size of dataRecord according to gmlan protocol
if length <= 0 or length > (4094 - scheme):
warning("Error: Invalid length %s for scheme %s. "
"Choose between 0x1 and %s",
hex(length), str(scheme), hex(4094 - scheme))
return None
while retry >= 0:
# RequestDownload
pkt = GMLAN() / GMLAN_RMBA(memoryAddress=addr, memorySize=length)
resp = sock.sr1(pkt, timeout=timeout, verbose=0)
if _check_response(resp, verbose):
return resp.dataRecord
retry -= 1
if retry >= 0 and verbose:
print("Retrying..")
return None
def GMLAN_BroadcastSocket(interface):
# type: (str) -> ISOTPSocket
""" Returns a GMLAN broadcast socket using interface.
:param interface: interface name
:return: ISOTPSocket configured as GMLAN Broadcast Socket
"""
return ISOTPSocket(interface, sid=0x101, did=0x0, basecls=GMLAN,
extended_addr=0xfe, padding=True)
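# Usage sketch (illustrative only): a typical programming-session flow with the
# helpers above. The interface name "can0", identifiers, key function, target
# address and payload are placeholders, and it assumes
# conf.contribs['GMLAN']['GMLAN_ECU_AddressingScheme'] has been configured.
#
#     sock = ISOTPSocket("can0", sid=0x241, did=0x641, basecls=GMLAN)
#     tp = GMLAN_TesterPresentSender(sock)
#     tp.start()
#     if GMLAN_InitDiagnostics(sock, timeout=1, verbose=True) and \
#             GMLAN_GetSecurityAccess(sock, lambda seed: seed ^ 0xFFFF, timeout=1):
#         GMLAN_TransferPayload(sock, 0x2000, b"\x00" * 16, timeout=1)
#     tp.stop()
#     sock.close()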
|
examples/hacker_news_assets/hacker_news_assets/pipelines/story_recommender.py
|
rpatil524/dagster
| 4,606 |
125409
|
<gh_stars>1000+
from dagster import fs_io_manager
from dagster.core.asset_defs import build_assets_job
from hacker_news_assets.assets.comment_stories import comment_stories
from hacker_news_assets.assets.download_items import comments, stories
from hacker_news_assets.assets.recommender_model import component_top_stories, recommender_model
from hacker_news_assets.assets.user_story_matrix import user_story_matrix
from hacker_news_assets.assets.user_top_recommended_stories import user_top_recommended_stories
from hacker_news_assets.resources.fixed_s3_pickle_io_manager import fixed_s3_pickle_io_manager
from hacker_news_assets.resources.snowflake_io_manager import (
snowflake_io_manager_dev,
snowflake_io_manager_prod,
)
DEV_RESOURCES = {
"io_manager": fs_io_manager,
"warehouse_io_manager": fs_io_manager,
"source_warehouse_io_manager": snowflake_io_manager_dev,
}
PROD_RESOURCES = {
"io_manager": fixed_s3_pickle_io_manager.configured({"bucket": "hackernews-elementl-prod"}),
"warehouse_io_manager": snowflake_io_manager_prod,
"source_warehouse_io_manager": snowflake_io_manager_prod,
}
assets = [
comment_stories,
user_story_matrix,
recommender_model,
component_top_stories,
user_top_recommended_stories,
]
source_assets = [comments, stories]
story_recommender_dev = build_assets_job(
"story_recommender_dev", assets=assets, source_assets=source_assets, resource_defs=DEV_RESOURCES
)
story_recommender_prod = build_assets_job(
"story_recommender_prod",
assets=assets,
source_assets=source_assets,
resource_defs=PROD_RESOURCES,
)
|
ci/drone/supplier.py
|
parauliya/osie
| 108 |
125427
|
<gh_stars>100-1000
#!/usr/bin/env python
from __future__ import print_function
import json
import os
import packet
m = packet.Manager(os.getenv("PACKET_API_TOKEN"))
c = m.get_capacity()
cap = {}
for fac in c:
for plan, v in c[fac].items():
p = cap.get(plan, {})
level = v["level"]
lvl = p.get(level, [])
lvl.append(fac)
p[level] = lvl
cap[plan] = p
print(json.dumps(cap))
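# Shape of the emitted JSON (illustrative values, not real API output): for each
# plan, facilities are grouped by their capacity level, e.g.
#
#     {"c1.small.x86": {"normal": ["ewr1", "sjc1"], "limited": ["ams1"]}}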
|
vissl/models/__init__.py
|
blazejdolicki/vissl
| 2,512 |
125440
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Here we create all the models required for SSL. The default model is
BaseSSLMultiInputOutputModel; however, users can create their own model.
See #register_model below.
"""
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
from vissl.models.model_helpers import ( # noqa
convert_sync_bn,
is_feature_extractor_model,
)
MODEL_REGISTRY = {}
MODEL_NAMES = set()
def register_model(name):
"""
Registers Self-Supervision Model.
This decorator allows VISSL to add custom models, even if the
model itself is not part of VISSL. To use it, apply this decorator
to a model class, like this:
.. code-block:: python
@register_model('my_model_name')
class MyModelName():
...
To get a model from a configuration file, see :func:`get_model`. The default
model is BaseSSLMultiInputOutputModel.
"""
def register_model_class(func):
if name in MODEL_REGISTRY:
raise ValueError("Cannot register duplicate model ({})".format(name))
MODEL_REGISTRY[name] = func
MODEL_NAMES.add(func.__name__)
return func
return register_model_class
def get_model(model_name: str):
"""
Lookup the model_name in the model registry and return.
    If the model is not registered, an assertion error is raised and the workflow exits.
"""
assert model_name in MODEL_REGISTRY, "Unknown model"
return MODEL_REGISTRY[model_name]
def build_model(model_config, optimizer_config):
"""
Given the model config and the optimizer config, construct the model.
The returned model is not copied to gpu yet (if using gpu) and neither
    wrapped with DDP yet. This is done later in train_task.py's prepare().
"""
model_name = model_config.BASE_MODEL_NAME
model_cls = get_model(model_name)
return model_cls(model_config, optimizer_config)
# automatically import any Python files in the models/ directory
FILE_ROOT = Path(__file__).parent
import_all_modules(FILE_ROOT, "vissl.models")
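# Usage sketch (hypothetical config object `cfg`): once a model class is
# registered via @register_model, build_model selects it through
# cfg.MODEL.BASE_MODEL_NAME and instantiates it with the model and optimizer
# configs.
#
#     model = build_model(cfg.MODEL, cfg.OPTIMIZER)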
|
examples/utils/app.py
|
pichatelli/simple-settings
| 213 |
125490
|
from simple_settings import settings
from simple_settings.utils import settings_stub
# Stub examples
with settings_stub(SOME_SETTING='foo'):
assert settings.SOME_SETTING == 'foo'
assert settings.SOME_SETTING == 'bar'
@settings_stub(SOME_SETTING='foo')
def get_some_setting():
return settings.SOME_SETTING
assert get_some_setting() == 'foo'
assert settings.SOME_SETTING == 'bar'
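# Note: the un-stubbed asserts above assume the active settings module (selected
# via the SIMPLE_SETTINGS environment variable) defines SOME_SETTING = 'bar',
# e.g. a hypothetical settings.py containing just:
#
#     SOME_SETTING = 'bar'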
|
Chapter03/3_9a_unix_domain_socket_server.py
|
shamir456/Python-Network-Programming-Cookbook-Second-Edition
| 125 |
125559
|
#!/usr/bin/env python
# Python Network Programming Cookbook, Second Edition -- Chapter - 3
# This program is optimized for Python 2.7.12 and Python 3.5.2.
# It may run on any other version with/without modifications.
import socket
import os
import time
SERVER_PATH = "/tmp/python_unix_socket_server"
def run_unix_domain_socket_server():
if os.path.exists(SERVER_PATH):
os.remove( SERVER_PATH )
print ("starting unix domain socket server.")
server = socket.socket( socket.AF_UNIX, socket.SOCK_DGRAM )
server.bind(SERVER_PATH)
print ("Listening on path: %s" %SERVER_PATH)
while True:
datagram = server.recv( 1024 )
if not datagram:
break
        else:
            print ("-" * 20)
            print (datagram)
            if datagram == b"DONE":
break
print ("-" * 20)
print ("Server is shutting down now...")
server.close()
os.remove(SERVER_PATH)
print ("Server shutdown and path removed.")
if __name__ == '__main__':
run_unix_domain_socket_server()
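# Companion client sketch (run from a separate process while the server above
# is listening; kept as a comment so this module still only starts the server):
#
#     import socket
#     client = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
#     client.sendto(b"hello", "/tmp/python_unix_socket_server")
#     client.sendto(b"DONE", "/tmp/python_unix_socket_server")
#     client.close()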
|
LeetCode/python3/704.py
|
ZintrulCre/LeetCode_Archiver
| 279 |
125570
|
class Solution:
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
start, end = 0, len(nums)- 1
while start <= end:
mid = start + (end - start) // 2
if nums[mid] < target:
start = mid + 1
elif nums[mid] > target:
end = mid - 1
else:
return mid
return -1
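# Quick self-check (not part of the original submission):
if __name__ == "__main__":
    s = Solution()
    assert s.search([-1, 0, 3, 5, 9, 12], 9) == 4   # target present
    assert s.search([-1, 0, 3, 5, 9, 12], 2) == -1  # target absent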
|
tools/npy_2_aud.py
|
KeigoTakamura/Waveglow_Inference_in_CUDA
| 135 |
125578
|
<gh_stars>100-1000
import os, sys
import numpy as np
from scipy.io.wavfile import write
folder = sys.argv[1]
for file in os.listdir(folder):
if file.endswith(".npy"):
print(file, file.split(".")[0])
a = np.load(folder+file)
write(folder+file.split(".")[0]+".wav", 22050, a)
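# Note: scipy.io.wavfile.write stores float arrays as float WAVs and expects
# samples roughly in [-1.0, 1.0]. A hedged variant for 16-bit PCM output:
#
#     pcm = (a / max(1e-8, float(np.abs(a).max())) * 32767).astype(np.int16)
#     write(folder + file.split(".")[0] + ".wav", 22050, pcm)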
|
oss_src/unity/python/sframe/__init__.py
|
venkattgg/venkey
| 493 |
125616
|
"""
@package graphlab
...
GraphLab Create is a machine learning platform that enables data scientists and
app developers to easily create intelligent applications at scale. Building an
intelligent, predictive application involves iterating over multiple steps:
cleaning the data, developing features, training a model, and creating and
maintaining a predictive service. GraphLab Create does all of this in one
platform. It is easy to use, fast, and powerful.
For more details on the GraphLab Create see http://turi.com, including
documentation, tutorial, etc.
"""
'''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
# Important to call this before everything else
from .sys_util import setup_environment_from_config_file \
as _setup_environment_from_config_file
_setup_environment_from_config_file()
from . import util
from .util import set_runtime_config
from .util import get_runtime_config
from .version_info import __VERSION__, version, build_number
from .connect import _get_metric_tracker
from . import visualization
import os as _os
import sys as _sys
if _sys.platform != 'win32' or \
(_os.path.exists(_os.path.join(_os.path.dirname(__file__), 'cython', 'libstdc++-6.dll')) and \
_os.path.exists(_os.path.join(_os.path.dirname(__file__), 'cython', 'libgcc_s_seh-1.dll'))):
from .data_structures.sgraph import Vertex, Edge
from .data_structures.sgraph import SGraph
from .data_structures.sframe import SFrame
from .data_structures.sarray import SArray
from .data_structures.sketch import Sketch
from .data_structures.image import Image
from .data_structures.sarray_builder import SArrayBuilder
from .data_structures.sframe_builder import SFrameBuilder
from .data_structures.sgraph import load_sgraph, load_graph
from .toolkits._model import Model, CustomModel, load_model
from . import aggregate
from . import toolkits
from .toolkits.image_analysis import image_analysis
from .data_structures.sframe import load_sframe, get_spark_integration_jar_path
from .data_structures.DBConnection import connect_odbc, get_libodbc_path, set_libodbc_path
# internal util
from .connect.main import launch as _launch
from .connect.main import stop as _stop
from .connect import main as glconnect
from .util import get_environment_config
from .util import get_graphlab_object_type
from .util import get_log_location, get_client_log_location, get_server_log_location
from .version_info import version
from .version_info import __VERSION__
class DeprecationHelper(object):
def __init__(self, new_target):
self.new_target = new_target
def _warn(self):
import warnings
import logging
warnings.warn("Graph has been renamed to SGraph. The Graph class will be removed in the next release.", PendingDeprecationWarning)
logging.warning("Graph has been renamed to SGraph. The Graph class will be removed in the next release.")
def __call__(self, *args, **kwargs):
self._warn()
return self.new_target(*args, **kwargs)
def __getattr__(self, attr):
self._warn()
return getattr(self.new_target, attr)
Graph = DeprecationHelper(SGraph)
from .cython import cy_pylambda_workers
################### Extension Importing ########################
from . import extensions
from .extensions import ext_import
extensions._add_meta_path()
# rewrite the extensions module
class _extensions_wrapper(object):
def __init__(self, wrapped):
self._wrapped = wrapped
self.__doc__ = wrapped.__doc__
def __getattr__(self, name):
try:
return getattr(self._wrapped, name)
except:
pass
from .connect.main import get_unity
get_unity()
return getattr(self._wrapped, name)
from . import _json as json # imports from _json.py in this directory
_sys.modules[__name__ + ".extensions"] = _extensions_wrapper(_sys.modules[__name__ + ".extensions"])
# rewrite the import
extensions = _sys.modules[__name__ + ".extensions"]
else:
from dependencies import get_dependencies
package_dir = _os.path.dirname(__file__)
print("""
ACTION REQUIRED: Dependencies libstdc++-6.dll and libgcc_s_seh-1.dll not found.
1. Ensure user account has write permission to %s
2. Run sframe.get_dependencies() to download and install them.
3. Restart Python and import sframe again.
By running the above function, you agree to the following licenses.
* libstdc++: https://gcc.gnu.org/onlinedocs/libstdc++/manual/license.html
* xz: http://git.tukaani.org/?p=xz.git;a=blob;f=COPYING
""" % package_dir)
|
examples/parallel/run_parallel_sw_merimbula_test.py
|
GeoscienceAustralia/anuga_core
| 136 |
125623
|
#!/usr/bin/env python
#########################################################
#
# Main file for parallel mesh testing.
#
# This is a modification of the run_parallel_advection.py
# file.
#
#
# *) The (new) files that have been added to manage the
# grid partitioning are
# +) pmesh_divide_metis.py: subdivide a pmesh
# +) build_submesh.py: build the submeshes on the host
# processor.
# +) build_local.py: build the GA mesh datastructure
# on each processor.
# +) build_commun.py: handle the communication between
# the host and processors
#
# *) Things still to do:
# +) Overlap the communication and computation: The
# communication routines in build_commun.py should be
# interspersed in the build_submesh.py and build_local.py
# files. This will overlap the communication and
# computation and will be far more efficient. This should
# be done after more testing and there is more confidence in
# the subpartitioning.
# +) Much more testing especially with large numbers of
# processors.
# Authors: <NAME>, <NAME> and <NAME>,
# June 2005
#
#
#
#########################################################
import sys
import pypar # The Python-MPI interface
import time
# Numeric arrays
import numpy as num
#from numpy import array, zeros, float
# Print debugging information
from print_stats import print_test_stats, build_full_flag
# pmesh
from anuga.shallow_water import Domain
from parallel_shallow_water import Parallel_domain
from anuga.abstract_2d_finite_volumes.pmesh2domain\
import pmesh_to_domain_instance
# Reuse previous mesh import
from anuga.caching import cache
# Mesh partition routines
from distribute_mesh import pmesh_divide_metis
from distribute_mesh import build_submesh
from distribute_mesh import build_local_mesh
from distribute_mesh import send_submesh, rec_submesh, extract_submesh
###############################
# Read in processor information
###############################
numprocs = pypar.size()
myid = pypar.rank()
processor_name = pypar.get_processor_name()
############################
# Set the initial conditions
############################
rect = num.zeros( 4, num.float) # Buffer for results
class Set_Stage:
"""Set an initial condition with constant water height, for x<x0
"""
def __init__(self, x0=0.25, x1=0.5, h=1.0):
self.x0 = x0
self.x1 = x1
self.h = h
def __call__(self, x, y):
return self.h*((x>self.x0)&(x<self.x1))
#######################
# Partition the domain
#######################
if myid == 0:
# Read in the test files
# filename = 'test-100.tsh'
# filename = 'merimbula_10785_1.tsh'
filename = 'merimbula_43200.tsh'
# Build the whole domain
domain_full = pmesh_to_domain_instance(filename, Domain)
# domain_full = cache(pmesh_to_domain_instance,
# (filename, Domain),
# dependencies = [filename])
rect = num.array(domain_full.get_extent(), num.float)
print (rect)
# Initialise the wave
#domain_full.set_quantity('stage', Set_Stage(200.0,300.0,1.0))
domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,2.0))
# domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,0.0))
# Subdivide the domain
# Note the different arguments compared with pmesh_divide,
# pmesh_divide_steve etc.
nodes, triangles, boundary, triangles_per_proc, quantities = \
pmesh_divide_metis(domain_full, numprocs)
print (triangles_per_proc)
rect = num.array(domain_full.get_extent(), num.float)
submesh = build_submesh(nodes, triangles, boundary,\
quantities, triangles_per_proc)
# Send the mesh partition to the appropriate processor
for p in range(1, numprocs):
send_submesh(submesh, triangles_per_proc, p)
# Build the local mesh for processor 0
points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
extract_submesh(submesh, triangles_per_proc)
# Read in the mesh partition that belongs to this
# processor (note that the information is in the
# correct form for the GA data structure
else:
points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict , \
no_full_nodes, no_full_trigs = rec_submesh(0)
###########################################
# Start the computations on each subpartion
###########################################
#if myid == 0:
# print 'ghost'
# print ghost_recv_dict
#processor_name
#if myid == 0:
# print 'full'
# print full_send_dict
# The visualiser needs to know the size of the whole domain
pypar.broadcast(rect,0)
domain = Parallel_domain(points, vertices, boundary,
full_send_dict = full_send_dict,
ghost_recv_dict = ghost_recv_dict)
# Make a note of which triangles are full and which are ghost
tri_full_flag = build_full_flag(domain, ghost_recv_dict)
try:
#domain.initialise_visualiser(rect=rect)
#domain.visualiser.coloring['stage'] = True
#domain.visualiser.scale_z['stage'] = 0.2
#domain.visualiser.scale_z['elevation'] = 0.05
pass
except:
print ('No visualiser')
domain.default_order = 1
#Boundaries
from anuga.interface import Transmissive_boundary, Reflective_boundary
T = Transmissive_boundary(domain)
R = Reflective_boundary(domain)
domain.set_boundary( {'outflow': R, 'inflow': R, 'inner':R, 'exterior': R, 'open':R, 'ghost':None} )
domain.set_quantity('stage', quantities['stage'])
domain.set_quantity('elevation', quantities['elevation'])
domain.store = False
#---------
# Evolution
t0 = time.time()
print ('Processor %d on %s: No of elements %d'%(domain.processor,processor_name,domain.number_of_elements))
yieldstep = 50.0
finaltime = 500.0
#yieldstep = 1000
#finaltime = 40000
#yieldstep = 1
#finaltime = 1
#processor_name
#for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
# if myid == 0:
# domain.write_time()
#print 'Processor %d, Integral of stage %d'%\
# (domain.processor,domain.quantities['stage'].get_integral())
# print_test_stats(domain, tri_full_flag)
# Profiling
#import profile
#profiler = profile.Profile()
#result.dump_stats("profile." + str(numprocs) + "." + str(myid) + ".dat")
## #New hotshot profiling
## import hotshot
## profiler = hotshot.Profile("hotshot." + str(numprocs) + "." + str(myid) + ".prof")
## s = '''for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
## if myid == 0:
## domain.write_time()
## print_test_stats(domain, tri_full_flag)
## '''
## result = profiler.runctx(s, globals(), locals())
## profiler.close()
#from vtk_realtime_visualiser import Visualiser
#V = Visualiser(domain,default_scale_z=100.0)
#V.coloring['stage'] = True
#V.coloring['elevation'] = False
#V.setup['elevation']=True
#V.updating['stage']=True
#V.qcolor['stage'] = (0.1,0.4,0.99)
#V.start()
#V.idle.wait()
#V.idle.clear()
for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
if myid == 0:
domain.write_time()
#print_test_stats(domain, tri_full_flag)
# V.redraw_ready.set()
# V.idle.wait()
# V.idle.clear()
# V.unpaused.wait()
#print 'P%d: That took %.2f seconds' %(myid, time.time()-t0)
#print 'P%d: Communication time %.2f seconds' %(myid, domain.communication_time)
#print 'P%d: Reduction Communication time %.2f seconds' %(myid, domain.communication_reduce_time)
#print 'P%d: Broadcast time %.2f seconds' %(myid, domain.communication_broadcast_time)
if myid == 0:
print ('That took %.2f seconds' %(time.time()-t0))
print ('Communication time %.2f seconds'%domain.communication_time)
print ('Reduction Communication time %.2f seconds'%domain.communication_reduce_time)
print ('Broadcast time %.2f seconds'%domain.communication_broadcast_time)
pypar.finalize()
|