max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
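Each row below pairs repository metadata with the full text of one source file. As a minimal illustration (a sketch only; the field names come from the header above and the values from the first row), a single record can be viewed as a Python dict:

```python
# One record from this dump, following the schema above (illustrative sketch only).
record = {
    "max_stars_repo_path": "pylayers/antprop/examples/ex_antenna4.py",
    "max_stars_repo_name": "usmanwardag/pylayers",
    "max_stars_count": 143,
    "id": "125640",
    "content": "...",  # the full file text, reproduced in the row below
}
```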
pylayers/antprop/examples/ex_antenna4.py | usmanwardag/pylayers | 143 | 125640 |
from pylayers.antprop.antenna import *
from pylayers.antprop.antvsh import *
import matplotlib.pylab as plt
from numpy import *
import pdb
"""
This test :
1 : loads a measured antenna
2 : applies an electrical delay obtained from data with getdelay method
3 : evaluate the antenna vsh coefficient with a downsampling factor of 2
4 : display the 16 first
"""
filename = 'S1R1.mat'
A = Antenna(filename,directory='ant/UWBAN/Matfile')
#plot(freq,angle(A.Ftheta[:,maxPowerInd[1],maxPowerInd[2]]*exp(2j*pi*freq.reshape(len(freq))*electricalDelay)))
freq = A.fa.reshape(104,1,1)
delayCandidates = arange(-10,10,0.001)
electricalDelay = A.getdelay(freq,delayCandidates)
print('Electrical Delay = ' + str(electricalDelay) + ' ns')
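# Compensate the measured radiation patterns for the electrical delay (linear phase term across frequency).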
A.Ftheta = A.Ftheta*exp(2*1j*pi*freq*electricalDelay)
A.Fphi = A.Fphi*exp(2*1j*pi*freq*electricalDelay)
dsf = 2
#
# Calculate Vector Spherical Harmonics
#
A = vsh(A,dsf)
A.C.s1tos2(15)
EBr,EBi,ECr,ECi= A.Fsynth2s()
plt.figure()
plt.subplot(221)
plt.plot(EBr)
plt.subplot(222)
plt.plot(EBi)
plt.subplot(223)
plt.plot(ECr)
plt.subplot(224)
plt.plot(ECi)
plt.show()
|
netbuilder/lego/data.py | nrupatunga/pynetbuilder | 381 | 125702 |
"""
Copyright 2016 Yahoo Inc.
Licensed under the terms of the 2 clause BSD license.
Please see LICENSE file in the project root for terms.
"""
from base import BaseLego
from caffe.proto import caffe_pb2
import google.protobuf as pb
from caffe import layers as L
from caffe import params as P
import caffe
'''
Generic class to read data layer
info from config files.
'''
class ConfigDataLego(BaseLego):
def __init__(self, data_file):
self.data_file = data_file
def attach(self, netspec):
return
class ImageDataLego(BaseLego):
def __init__(self, params):
if params['include'] == 'test':
params['include'] = dict(phase=caffe.TEST)
elif params['include'] == 'train':
params['include'] = dict(phase=caffe.TRAIN)
params['image_data_param'] = dict(source=params['source'] ,
batch_size=params['batch_size'])
if 'mean_file' in params:
params['transform_param'] = dict(mean_file=params['mean_file'])
self._required = ['name', 'source', 'batch_size', 'include']
super(ImageDataLego, self).__init__(params)
def _init_default_params(self):
self._default['ntop'] = 2
def attach(self, netspec):
param_packet = self._construct_param_packet()
data_lego, label_lego = L.ImageData(**param_packet)
netspec['data'] = data_lego
netspec['label'] = label_lego
return data_lego, label_lego
|
tests/core/test_urn.py | joshuataylor/great_expectations | 6,451 | 125719 |
from urllib.parse import parse_qs
import pytest
from pyparsing import ParseException
from great_expectations.core.urn import ge_urn
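# ge_urn parses URNs of the form urn:great_expectations:<urn_type>:...
# Optional metric kwargs are appended as a URL-style query string and decoded below with parse_qs.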
def test_ge_validations_urn():
# We should be able to parse validations urns
urn = (
"urn:great_expectations:validations:my_suite:expect_something.observed_value:query=s%20tring&query="
"string3&query2=string2"
)
res = ge_urn.parseString(urn)
assert res["urn_type"] == "validations"
assert res["expectation_suite_name"] == "my_suite"
assert res["metric_name"] == "expect_something.observed_value"
kwargs_dict = parse_qs(res["metric_kwargs"])
assert kwargs_dict == {"query": ["s tring", "string3"], "query2": ["string2"]}
# no kwargs is ok
urn = "urn:great_expectations:validations:my_suite:expect_something.observed_value"
res = ge_urn.parseString(urn)
assert res["urn_type"] == "validations"
assert res["expectation_suite_name"] == "my_suite"
assert res["metric_name"] == "expect_something.observed_value"
assert "metric_kwargs" not in res
def test_ge_metrics_urn():
urn = "urn:great_expectations:metrics:20200403T1234.324Z:my_suite:expect_something.observed_value:column=mycol"
res = ge_urn.parseString(urn)
assert res["urn_type"] == "metrics"
assert res["run_id"] == "20200403T1234.324Z"
assert res["expectation_suite_name"] == "my_suite"
assert res["metric_name"] == "expect_something.observed_value"
kwargs_dict = parse_qs(res["metric_kwargs"])
assert kwargs_dict == {"column": ["mycol"]}
# No kwargs is ok
urn = "urn:great_expectations:metrics:20200403T1234.324Z:my_suite:expect_something.observed_value"
res = ge_urn.parseString(urn)
assert res["urn_type"] == "metrics"
assert res["run_id"] == "20200403T1234.324Z"
assert res["expectation_suite_name"] == "my_suite"
assert res["metric_name"] == "expect_something.observed_value"
assert "kwargs_dict" not in res
def test_ge_stores_urn():
urn = "urn:great_expectations:stores:my_store:mymetric:kw=param"
res = ge_urn.parseString(urn)
assert res["urn_type"] == "stores"
assert res["store_name"] == "my_store"
assert res["metric_name"] == "mymetric"
kwargs_dict = parse_qs(res["metric_kwargs"])
assert kwargs_dict == {
"kw": ["param"],
}
# No kwargs is ok
urn = "urn:great_expectations:stores:my_store:mymetric"
res = ge_urn.parseString(urn)
assert res["urn_type"] == "stores"
assert res["store_name"] == "my_store"
assert res["metric_name"] == "mymetric"
assert "metric_kwargs" not in res
def test_invalid_urn():
# Must start with "urn:great_expectations"
with pytest.raises(ParseException) as e:
ge_urn.parseString("not_a_ge_urn")
assert "not_a_ge_urn" in e.value.line
# Must have one of the recognized types
with pytest.raises(ParseException) as e:
ge_urn.parseString("urn:great_expectations:foo:bar:baz:bin:barg")
assert "urn:great_expectations:foo:bar:baz:bin:barg" in e.value.line
# Cannot have too many parts
with pytest.raises(ParseException) as e:
ge_urn.parseString(
"urn:great_expectations:validations:foo:bar:baz:bin:barg:boo"
)
assert "urn:great_expectations:validations:foo:bar:baz:bin:barg:boo" in e.value.line
|
Burp/lib/issue_listener.py | wisdark/HUNT | 1,628 | 125724 |
from javax.swing.event import ListSelectionListener
class IssueListener(ListSelectionListener):
def __init__(self, view, table, scanner_pane, issue_name, issue_param):
self.view = view
self.table = table
self.scanner_pane = scanner_pane
self.issue_name = issue_name
self.issue_param = issue_param
def valueChanged(self, e):
row = self.table.getSelectedRow()
issue_param = self.table.getModel().getValueAt(row, 1)
hostname = self.table.getModel().getValueAt(row, 2)
path = self.table.getModel().getValueAt(row, 3)
scanner_issue_id = self.table.getModel().getValueAt(row, 4)
self.view.set_tabbed_pane(self.scanner_pane, self.table, hostname, path, self.issue_name, issue_param, scanner_issue_id)
|
recipes/Python/580736_xtopdf_Publish_DelimiterSeparated_Values_DSV/recipe-580736.py | tdiprima/code | 2,023 | 125738 |
from __future__ import print_function
"""
DSVToPDF.py
Author: <NAME>
Web site: https://vasudevram.github.io
Blog: https://jugad2.blogspot.com
Product store: https://gumroad.com/vasudevram
Twitter: https://mobile.twitter.com/vasudevram
Purpose: Show how to publish DSV data (Delimiter-Separated Values)
to PDF, using the xtopdf toolkit.
Requires:
- ReportLab: https://www.reportlab.com/ftp/reportlab-1.21.1.tar.gz
- xtopdf: https://bitbucket.org/vasudevram/xtopdf
First install ReportLab, then install xtopdf, using instructions here:
http://jugad2.blogspot.in/2012/07/guide-to-installing-and-using-xtopdf.html
The DSV data can be read from either files or standard input.
The delimiter character is configurable by the user and can
be specified as either a character or its ASCII code.
References:
DSV format: https://en.wikipedia.org/wiki/Delimiter-separated_values
TAOUP (The Art Of Unix Programming): Data File Metaformats:
http://www.catb.org/esr/writings/taoup/html/ch05s02.html
ASCII table: http://www.asciitable.com/
"""
import sys
import string
from PDFWriter import PDFWriter
def err_write(message):
sys.stderr.write(message)
def error_exit(message):
err_write(message)
sys.exit(1)
def usage(argv, verbose=False):
usage1 = \
"{}: publish DSV (Delimiter-Separated-Values) data to PDF.\n".format(argv[0])
usage2 = "Usage: python" + \
" {} [ -c delim_char | -n delim_code ] [ dsv_file ] ...\n".format(argv[0])
usage3 = [
"where one of either the -c or -n option must be given,\n",
"delim_char is a single ASCII delimiter character, and\n",
"delim_code is a delimiter character's ASCII code.\n",
"Text lines will be read from specified DSV file(s) or\n",
"from standard input, split on the specified delimiter\n",
"specified by either the -c or -n option, processed, and\n",
"written, formatted, to PDF files with the name dsv_file.pdf.\n",
]
usage4 = "Use the -h or --help option for a more detailed help message.\n"
err_write(usage1)
err_write(usage2)
if verbose:
'''
for line in usage3:
err_write(line)
'''
err_write(''.join(usage3))
if not verbose:
err_write(usage4)
def str_to_int(s):
try:
return int(s)
except ValueError as ve:
error_exit(repr(ve))
def valid_delimiter(delim_code):
return not invalid_delimiter(delim_code)
def invalid_delimiter(delim_code):
# Non-ASCII codes not allowed, i.e. codes outside
# the range 0 to 255.
if delim_code < 0 or delim_code > 255:
return True
# Also, don't allow some specific ASCII codes;
# add more, if it turns out they are needed.
if delim_code in (10, 13):
return True
return False
def dsv_to_pdf(dsv_fil, delim_char, pdf_filename):
with PDFWriter(pdf_filename) as pw:
pw.setFont("Courier", 12)
pw.setHeader(pdf_filename[:-4] + " => " + pdf_filename)
pw.setFooter("Generated by xtopdf: https://google.com/search?q=xtopdf")
for idx, lin in enumerate(dsv_fil):
fields = lin.split(delim_char)
assert len(fields) > 0
# Knock off the newline at the end of the last field,
# since it is the line terminator, not part of the field.
if fields[-1].endswith('\n'):
fields[-1] = fields[-1][:-1]
# Treat a blank line as a line with one field,
# an empty string (that is what split returns).
pw.writeLine(' - '.join(fields))
def main():
# Get and check validity of arguments.
sa = sys.argv
lsa = len(sa)
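# Expected argument layout: prog ( -c delim_char | -n delim_code ) [ dsv_file ... ]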
if lsa == 1:
usage(sa)
sys.exit(0)
elif lsa == 2:
# Allow the help option with any letter case.
if sa[1].lower() in ("-h", "--help"):
usage(sa, verbose=True)
sys.exit(0)
else:
usage(sa)
sys.exit(0)
# If we reach here, lsa is >= 3.
# Check for valid mandatory options (sic).
if not sa[1] in ("-c", "-n"):
usage(sa, verbose=True)
sys.exit(0)
# If -c option given ...
if sa[1] == "-c":
# If next token is not a single character ...
if len(sa[2]) != 1:
error_exit(
"{}: Error: -c option needs a single character after it.".format(sa[0]))
if not sa[2] in string.printable:
error_exit(
"{}: Error: -c option needs a printable ASCII character after it.".format(\
sa[0]))
delim_char = sa[2]
# else if -n option given ...
elif sa[1] == "-n":
delim_code = str_to_int(sa[2])
if invalid_delimiter(delim_code):
error_exit(
"{}: Error: invalid delimiter code {} given for -n option.".format(\
sa[0], delim_code))
delim_char = chr(delim_code)
else:
# Checking for what should not happen ... a bit of defensive programming here.
error_exit("{}: Program error: neither -c nor -n option given.".format(sa[0]))
try:
# If no filenames given, do sys.stdin to PDF ...
if lsa == 3:
print("Converting content of standard input to PDF.")
dsv_fil = sys.stdin
dsv_to_pdf(dsv_fil, delim_char, "dsv_output.pdf")
dsv_fil.close()
print("Output is in dsv_output.pdf")
# else (filenames given), convert them to PDFs ...
else:
for dsv_filename in sa[3:]:
pdf_filename = dsv_filename + ".pdf"
print("Converting file {} to PDF.", dsv_filename)
dsv_fil = open(dsv_filename, 'r')
dsv_to_pdf(dsv_fil, delim_char, pdf_filename)
dsv_fil.close()
print("Output is in {}".format(pdf_filename))
except IOError as ioe:
error_exit("{}: Error: {}".format(sa[0], repr(ioe)))
if __name__ == '__main__':
main()
|
nbtutor/ipython/factories/kvp_heap_object_factory.py | vincentxavier/nbtutor | 423 | 125788 |
from typing import Any, Collection, Dict, Union, Optional
from ..models.heap_object import HeapObject, RenderOptions
from ..models.options import Options
from ..models.unique_identifier import UniqueIdentifier
from .base_heap_object_factory import HeapObjectFactory
class KvpHeapObjectFactory(HeapObjectFactory):
def __init__(self, obj: Dict, options: Options = None) -> None:
super().__init__(obj, options)
self._items = obj.items()
self._object_id = self.get_object_id(obj)
self._max_len = (self.options.max_size or len(self._items)) if self.options is not None else len(obj)
self._render_options: Optional[RenderOptions] = None
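# If the mapping holds more items than the configured max size, keep only the first max_len items and flag the object as truncated via RenderOptions.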
if (len(self._items) > self._max_len):
self._render_options = RenderOptions(True)
self._object = {k: v for (k, v) in list(self._items)[:self._max_len]}
def get_id(self) -> str:
return self._object_id
def get_value(self) -> str:
return self.get_type()
def get_objects_to_reduce(self) -> Union[None, Collection[Any]]:
return self._object.values()
def create(self) -> HeapObject:
heap_obj = HeapObject(self.get_id(), self.get_type(), self.get_value(), 'kvp', self._render_options)
heap_obj.immutable = False
heap_obj.references = {k: UniqueIdentifier(HeapObjectFactory.get_object_id(v)) for k, v in self._object.items()}
return heap_obj
|
core/REST_permissions.py | gcnoopy/YaraGuardian | 178 | 125821 |
from rest_framework import permissions
from rest_framework.permissions import BasePermission
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import Group
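# Helper predicates: resolve the group from the 'group_name' URL kwarg and check
# whether the requesting user is the group's owner, an admin, or a plain member.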
def group_owner(request):
group_name = request.resolver_match.kwargs.get('group_name')
try:
group = Group.objects.get(name=group_name)
except ObjectDoesNotExist:
pass
else:
if request.user == group.groupmeta.owner:
return True
return False
def group_admin(request):
group_name = request.resolver_match.kwargs.get('group_name')
try:
group = Group.objects.get(name=group_name)
except ObjectDoesNotExist:
pass
else:
if request.user == group.groupmeta.owner:
return True
elif request.user in group.groupmeta.admins.all():
return True
return False
def group_member(request):
group_name = request.resolver_match.kwargs.get('group_name')
if request.user.groups.filter(name=group_name).exists():
return True
return False
class IsGroupMember(BasePermission):
def has_permission(self, request, view):
return group_member(request)
class IsGroupMemberOrPublicReadOnly(BasePermission):
def has_permission(self, request, view):
if group_member(request):
return True
elif request.method in permissions.SAFE_METHODS:
return True
return False
class IsGroupAdmin(BasePermission):
def has_permission(self, request, view):
if group_member(request) and group_admin(request):
return True
return False
class IsGroupAdminOrMemberReadOnly(BasePermission):
def has_permission(self, request, view):
if group_member(request):
if group_admin(request):
return True
elif request.method in permissions.SAFE_METHODS:
return True
return False
class IsGroupAdminOrMemberAddMethod(BasePermission):
def has_permission(self, request, view):
if group_member(request):
if group_admin(request):
return True
elif request.method == 'POST':
return True
return False
class IsGroupOwner(BasePermission):
def has_permission(self, request, view):
if group_member(request) and group_owner(request):
return True
return False
class IsGroupOwnerOrMemberReadOnly(BasePermission):
def has_permission(self, request, view):
if group_member(request):
if group_owner(request):
return True
elif request.method in permissions.SAFE_METHODS:
return True
return False
class IsGroupOwnerOrPublicReadOnly(BasePermission):
def has_permission(self, request, view):
if group_member(request) and group_owner(request):
return True
elif request.method in permissions.SAFE_METHODS:
return True
return False
|
imapfw/drivers/imap.py | paralax/imapfw | 492 | 125835 |
# The MIT License (MIT).
# Copyright (c) 2015, <NAME> & contributors.
from imapfw.imap import Imap as ImapBackend
from imapfw.interface import adapts, checkInterfaces
from .driver import Driver, DriverInterface
# Annotations.
from imapfw.imap import SearchConditions, FetchAttributes
from imapfw.types.folder import Folders, Folder
from imapfw.types.message import Messages
#TODO: remove "reverse" later: the DriverInterface must define all the
# interfaces of this object.
@checkInterfaces(reverse=False)
@adapts(DriverInterface)
class Imap(Driver):
"""The Imap driver, possibly redefined by the rascal."""
local = False
def __init__(self, *args):
super(Imap, self).__init__(*args)
self.imap = ImapBackend(self.conf.get('backend'))
def connect(self):
host = self.conf.get('host')
port = int(self.conf.get('port'))
return self.imap.connect(host, port)
def getCapability(self):
return self.imap.getCapability()
def getFolders(self) -> Folders:
return self.imap.getFolders()
def getMessages(self, messages: Messages,
attributes: FetchAttributes) -> Messages:
return self.imap.getMessages(messages, attributes)
def getNamespace(self):
return self.imap.getNamespace()
def login(self) -> None:
user = self.conf.get('username')
password = self.conf.get('password')
return self.imap.login(user, password)
def logout(self) -> None:
self.imap.logout()
def searchUID(self, conditions: SearchConditions=SearchConditions()):
return self.imap.searchUID(conditions)
def select(self, folder: Folder) -> None:
return self.imap.select(folder)
#def append(self, server, mail):
#response = server.append(mail)
#return response
#def update(self, server, mail):
#response = server.update(mail)
#return response
#def fetch(self, server, uids):
#response = server.fetch(uids)
#return response
|
mxnet/tests/unittests/test_rl_scheduler.py | zhiqiangdon/autogluon | 4,462 | 125836 |
import numpy as np
import pickle
import autogluon.core as ag
def test_rl_scheduler():
@ag.args(
lr=ag.space.Categorical(1e-3, 1e-2),
wd=ag.space.Categorical(1e-3, 1e-2),
epochs=10)
def rl_train_fn(args, reporter):
for e in range(args.epochs):
dummy_accuracy = 1 - np.power(1.8, -np.random.uniform(e, 2 * e))
reporter(epoch=e + 1, accuracy=dummy_accuracy, lr=args.lr, wd=args.wd)
scheduler = ag.scheduler.RLScheduler(rl_train_fn,
resource={'num_cpus': 4, 'num_gpus': 0},
num_trials=10,
reward_attr='accuracy',
time_attr='epoch',
checkpoint=None)
scheduler.run()
scheduler.join_jobs()
best_config = scheduler.get_best_config()
best_task_id = scheduler.get_best_task_id()
assert pickle.dumps(scheduler.config_history[best_task_id]) == pickle.dumps(best_config)
|
services/core/VolttronCentral/tests/test_platforms.py | cloudcomputingabc/volttron | 406 | 125869 |
import pytest
import base64
from mock import MagicMock
from volttrontesting.utils.utils import AgentMock
from volttron.platform.vip.agent import Agent
from volttroncentral.platforms import PlatformHandler, Platforms
from volttroncentral.agent import VolttronCentralAgent
@pytest.fixture
def mock_vc():
VolttronCentralAgent.__bases__ = (AgentMock.imitate(Agent, VolttronCentralAgent()),)
vc = VolttronCentralAgent()
vc._configure("test_config", "NEW", {})
yield vc
def test_when_platform_added_disconnected(mock_vc):
platforms = Platforms(vc=mock_vc)
assert platforms
assert len(platforms.get_platform_vip_identities()) == 0
assert len(platforms.get_platform_list(None, None)) == 0
new_platform_vip = "vcp-test_platform"
platforms.add_platform(new_platform_vip)
assert len(platforms.get_platform_vip_identities()) == 1
assert len(platforms.get_platform_list(None, None)) == 1
encoded_vip = base64.b64encode(new_platform_vip.encode('utf-8')).decode('utf-8')
platform = platforms.get_platform(encoded_vip)
assert isinstance(platform, PlatformHandler)
assert platform.vip_identity == new_platform_vip
platforms.disconnect_platform(new_platform_vip)
assert len(platforms.get_platform_list(None, None)) == 0
assert len(platforms.get_platform_vip_identities()) == 0
def test_platform_added_during_handle_platform_connection():
scaneventmock = MagicMock()
platformsmock = MagicMock()
vc = VolttronCentralAgent()
vc._platform_scan_event = scaneventmock
vc._platforms = platformsmock
vip_id = "vcp-platform1"
vc._handle_platform_connection(vip_id)
assert platformsmock.add_platform.called
def test_platform_scan():
vipmock = MagicMock()
peerlistmock = MagicMock()
peerlistmock.return_value.get.return_value = ["vcp-1", "vcp-2"]
vipmock.peerlist = peerlistmock
coremock = MagicMock()
vc = VolttronCentralAgent()
vc.vip = vipmock
vc.core = coremock
# scanning of platform test starts here.
vc._scan_platform_connect_disconnect()
assert len(vc._platforms.get_platform_vip_identities()) == 2
assert "vcp-1" in vc._platforms.get_platform_vip_identities()
assert "vcp-2" in vc._platforms.get_platform_vip_identities()
assert len(vc._platforms.get_platform_list(None, None)) == 2
|
example_app/main.py | iwpnd/fastapi-aws-lambda-example | 117 | 125917 |
from fastapi import FastAPI
from mangum import Mangum
from example_app.api.api_v1.api import router as api_router
from example_app.core.config import API_V1_STR, PROJECT_NAME
app = FastAPI(
title=PROJECT_NAME,
# if not custom domain
# openapi_prefix="/prod"
)
app.include_router(api_router, prefix=API_V1_STR)
@app.get("/ping")
def pong():
"""
Sanity check.
This will let the user know that the service is operational.
And this path operation will:
* show a lifesign
"""
return {"ping": "pong!"}
handler = Mangum(app, enable_lifespan=False)
|
example.py | abersheeran/index.py | 242 | 125948 |
import asyncio
from pathlib import Path as FilePath
from typing_extensions import Annotated
from indexpy import (
HTTPException,
HttpRoute,
Index,
SocketRoute,
required_method,
websocket,
)
from indexpy.parameters.field_functions import Path
from indexpy.openapi import OpenAPI
async def homepage():
"""
Homepage
"""
return "hello, index.py"
async def exc():
raise Exception("For get debug page.")
async def message():
"""
Message
For testing server send event response
"""
async def message_gen():
for i in range(5):
await asyncio.sleep(app.state.wait_time)
yield {"id": i, "data": "hello"}
return message_gen()
async def sources(filepath: Annotated[str, Path()]):
"""
Return source files
"""
realpath = FilePath(".") / filepath.lstrip("./")
if realpath.exists() and realpath.is_file():
return realpath
else:
raise HTTPException(404)
async def ws():
await websocket.accept()
while not await websocket.is_disconnected():
await websocket.send_json({"data": "(^_^)"})
await asyncio.sleep(app.state.wait_time)
await websocket.close()
app = Index(
debug=True,
routes=[
HttpRoute("/", homepage),
HttpRoute("/exc", exc),
HttpRoute("/message", message),
HttpRoute("/sources/{filepath:any}", sources) @ required_method("GET"),
SocketRoute("/", ws),
],
)
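# Mount the generated OpenAPI documentation routes under the /docs prefix.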
app.router << "/docs" // OpenAPI().routes
app.state.wait_time = 1
|
spikeextractors/extractors/nixioextractors/__init__.py | zekearneodo/spikeextractors | 145 | 125952 |
from .nixioextractors import NIXIORecordingExtractor, NIXIOSortingExtractor
|
zhaquirks/aurora/__init__.py | WolfRevo/zha-device-handlers | 213 | 125971 |
"""Module for Aurora devices."""
|
cli/tests/pcluster/models/test_imagebuilder_resources.py | enrico-usai/cfncluster | 415 | 125984 |
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from assertpy import assert_that
from pcluster.aws.common import AWSClientError
from pcluster.models.imagebuilder import ImageBuilderStack
from tests.pcluster.aws.dummy_aws_api import mock_aws_api
class TestImageBuilderStack:
@pytest.mark.parametrize(
"describe_stack_resouces_result, expected_error, expected_imagebuilder_image_is_building",
[
(
{
"StackResourceDetail": {
"ResourceStatus": "BUILD_COMPLETE",
}
},
False,
False,
),
(
{
"StackResourceDetail": {
"ResourceStatus": "CREATE_IN_PROGRESS",
}
},
False,
True,
),
(AWSClientError(function_name="describe_stack_resource", message="test error"), True, False),
],
)
def test_initialization(
self, mocker, describe_stack_resouces_result, expected_error, expected_imagebuilder_image_is_building
):
mock_aws_api(mocker)
if expected_error:
mocker.patch(
"pcluster.aws.cfn.CfnClient.describe_stack_resource", side_effect=describe_stack_resouces_result
)
stack = ImageBuilderStack({})
assert_that(stack._imagebuilder_image_resource).is_none()
else:
mocker.patch(
"pcluster.aws.cfn.CfnClient.describe_stack_resource", return_value=describe_stack_resouces_result
)
stack = ImageBuilderStack({})
assert_that(stack._imagebuilder_image_resource).is_equal_to(describe_stack_resouces_result)
assert_that(stack.imagebuilder_image_is_building).is_equal_to(expected_imagebuilder_image_is_building)
|
demo/demo_sem.py | Alias-Alan/pypsy | 169 | 125999 |
# coding=utf-8
# Parameter estimation for a structural equation model (SEM)
from __future__ import division, print_function, unicode_literals
from psy import sem, data
import numpy as np
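# Model specification: six observed indicators load on two latent factors on each
# side (lam_x, lam_y); beta and gamma hold the structural path coefficients.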
data_ = data['ex5.11.dat']
beta = np.array([
[0, 0],
[1, 0]
])
gamma = np.array([
[1, 1],
[0, 0]
])
x = [0, 1, 2, 3, 4, 5]
lam_x = np.array([
[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[0, 1],
])
y = [6, 7, 8, 9, 10, 11]
lam_y = np.array([
[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[0, 1],
])
lam_x, lam_y, phi_x, beta, gamma, var_e, var_e_x, var_e_y = sem(data_, y, x, lam_x, lam_y, beta, gamma)
print('========== Endogenous variable factor loadings =========')
print(lam_x)
print('========= Exogenous variable factor loadings ==========')
print(lam_y)
print('=========== Covariance matrix of endogenous latent variables =========')
print(phi_x)
print('============ Path equation coefficients of exogenous variables =========')
print(beta)
print('============ Path equation coefficients of endogenous variables =======')
print(gamma)
print('============= Path equation error variances ========')
print(np.diag(var_e))
print('============ Endogenous variable error variances ======')
print(np.diag(var_e_x))
print('============= Exogenous variable error variances =========')
print(np.diag(var_e_y))
|
open_seq2seq/parts/centaur/conv_block.py | gioannides/OpenSeq2Seq | 1,459 | 126018 |
# Copyright (c) 2019 NVIDIA Corporation
import tensorflow as tf
from .batch_norm import BatchNorm1D
class ConvBlock:
"""
Convolutional block for Centaur model.
"""
def __init__(self,
name,
conv,
norm,
activation_fn,
dropout,
training,
is_residual,
is_causal):
"""
Convolutional block constructor.
Args:
name: name of the block.
conv: convolutional layer.
norm: normalization layer to use after the convolutional layer.
activation_fn: activation function to use after the normalization.
dropout: dropout rate.
training: whether it is training mode.
is_residual: whether the block should contain a residual connection.
is_causal: whether the convolutional layer should be causal.
"""
self.name = name
self.conv = conv
self.norm = norm
self.activation_fn = activation_fn
self.dropout = dropout
self.training = training
self.is_residual = is_residual
self.is_causal = is_causal
def __call__(self, x):
with tf.variable_scope(self.name):
if self.is_causal:
# Add padding from the left side to avoid looking to the future
pad_size = self.conv.kernel_size[0] - 1
y = tf.pad(x, [[0, 0], [pad_size, 0], [0, 0]])
else:
y = x
y = self.conv(y)
if self.norm is not None:
y = self.norm(y, training=self.training)
if self.activation_fn is not None:
y = self.activation_fn(y)
if self.dropout is not None:
y = self.dropout(y, training=self.training)
return x + y if self.is_residual else y
@staticmethod
def create(index,
conv_params,
regularizer,
bn_momentum,
bn_epsilon,
cnn_dropout_prob,
training,
is_residual=True,
is_causal=False):
activation_fn = conv_params.get("activation_fn", tf.nn.relu)
conv = tf.layers.Conv1D(
name="conv_%d" % index,
filters=conv_params["num_channels"],
kernel_size=conv_params["kernel_size"],
strides=conv_params["stride"],
padding=conv_params["padding"],
kernel_regularizer=regularizer
)
norm = BatchNorm1D(
name="bn_%d" % index,
gamma_regularizer=regularizer,
momentum=bn_momentum,
epsilon=bn_epsilon
)
dropout = tf.layers.Dropout(
name="dropout_%d" % index,
rate=cnn_dropout_prob
)
if "is_causal" in conv_params:
is_causal = conv_params["is_causal"]
if "is_residual" in conv_params:
is_residual = conv_params["is_residual"]
return ConvBlock(
name="layer_%d" % index,
conv=conv,
norm=norm,
activation_fn=activation_fn,
dropout=dropout,
training=training,
is_residual=is_residual,
is_causal=is_causal
)
|
pyclustering/nnet/examples/pcnn_segmentation.py | JosephChataignon/pyclustering | 1,013 | 126033 |
"""!
@brief Examples of usage and demonstration of abilities of Pulse Coupled Neural Network in image segmentation.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
from PIL import Image
from pyclustering.utils import read_image, rgb2gray, draw_image_mask_segments
from pyclustering.nnet.pcnn import pcnn_network, pcnn_parameters, pcnn_visualizer
from pyclustering.nnet import *
from pyclustering.samples.definitions import IMAGE_SIMPLE_SAMPLES, IMAGE_MAP_SAMPLES, IMAGE_REAL_SAMPLES
def template_segmentation_image(image, parameters, simulation_time, brightness, scale_color=True, fastlinking=False,
show_spikes=False, ccore_flag=True):
image_source = Image.open(image)
image_size = image_source.size
width = image_size[0]
height = image_size[1]
stimulus = read_image(image)
stimulus = rgb2gray(stimulus)
if brightness is not None:
for pixel_index in range(len(stimulus)):
if stimulus[pixel_index] < brightness:
stimulus[pixel_index] = 1
else:
stimulus[pixel_index] = 0
else:
maximum_stimulus = float(max(stimulus))
minimum_stimulus = float(min(stimulus))
delta = maximum_stimulus - minimum_stimulus
for pixel_index in range(len(stimulus)):
if scale_color is True:
stimulus[pixel_index] = 1.0 - ((float(stimulus[pixel_index]) - minimum_stimulus) / delta)
else:
stimulus[pixel_index] = float(stimulus[pixel_index]) / 255
if parameters is None:
parameters = pcnn_parameters()
parameters.AF = 0.1
parameters.AL = 0.1
parameters.AT = 0.8
parameters.VF = 1.0
parameters.VL = 1.0
parameters.VT = 30.0
parameters.W = 1.0
parameters.M = 1.0
parameters.FAST_LINKING = fastlinking
net = pcnn_network(len(stimulus), parameters, conn_type.GRID_EIGHT, height=height, width=width, ccore=ccore_flag)
output_dynamic = net.simulate(simulation_time, stimulus)
pcnn_visualizer.show_output_dynamic(output_dynamic)
ensembles = output_dynamic.allocate_sync_ensembles()
draw_image_mask_segments(image, ensembles)
pcnn_visualizer.show_time_signal(output_dynamic)
if show_spikes is True:
spikes = output_dynamic.allocate_spike_ensembles()
draw_image_mask_segments(image, spikes)
pcnn_visualizer.animate_spike_ensembles(output_dynamic, image_size)
def segmentation_image_simple1():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE01, None, 47, 235)
def segmentation_image_simple2():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE02, None, 47, 235)
def segmentation_image_simple6():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE06, None, 47, 128)
def segmentation_image_black_thin_lines1():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_THIN_BLACK_LINES01, None, 47, 128)
def segmentation_image_black_thin_lines2():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_THIN_BLACK_LINES02, None, 47, 128)
def segmentation_image_black_thin_lines3():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_THIN_BLACK_LINES03, None, 47, 128)
def segmentation_gray_image_simple1():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE01, None, 47, None, True, False, True)
def segmentation_gray_image_simple5():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE05, None, 47, None, True, False, True)
def segmentation_gray_image_beach():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE_BEACH, None, 94, None, True, False, True)
def segmentation_gray_image_building():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE_BUILDING, None, 47, None, True, False, True)
def segmentation_fast_linking_image_beach():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE_BEACH, None, 47, None, False, True, True)
def segmentation_fast_linking_image_building():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE_BUILDING, None, 47, None, False, True, True)
def segmentation_fast_linking_image_fruits():
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE_FRUITS_SMALL, None, 47, None, False, True, True)
def segmentation_fast_linking_white_sea():
template_segmentation_image(IMAGE_MAP_SAMPLES.IMAGE_WHITE_SEA_SMALL, None, 47, None, False, True, True)
def segmentation_fast_linking_nil():
template_segmentation_image(IMAGE_MAP_SAMPLES.IMAGE_NILE_SMALL, None, 47, None, False, True, True)
def segmentation_fast_linking_field_flowers():
parameters = pcnn_parameters()
parameters.AF = 0.1
parameters.AL = 0.1
parameters.AT = 0.8
parameters.VF = 1.0
parameters.VL = 1.0
parameters.VT = 80.0
parameters.W = 1.0
parameters.M = 1.0
parameters.FAST_LINKING = True
template_segmentation_image(IMAGE_REAL_SAMPLES.IMAGE_FIELD_FLOWER, parameters, 80, None, False, True, True)
# Examples of simple image segmentation
segmentation_image_simple1()
segmentation_image_simple2()
segmentation_image_simple6()
# Line allocation
segmentation_image_black_thin_lines1()
segmentation_image_black_thin_lines2()
segmentation_image_black_thin_lines3()
# More complex image segmentation examples
segmentation_gray_image_simple1()
segmentation_gray_image_simple5()
segmentation_gray_image_beach()
segmentation_gray_image_building()
# Fast linking usage examples
segmentation_fast_linking_image_beach()
segmentation_fast_linking_image_building()
segmentation_fast_linking_image_fruits()
segmentation_fast_linking_white_sea()
segmentation_fast_linking_nil()
segmentation_fast_linking_field_flowers()
|
pytorch_tools/losses/base.py | bonlime/pytorch-tools | 155 | 126065 |
from torch.nn.modules.loss import _Loss
import torch
from enum import Enum
from typing import Union
class Mode(Enum):
BINARY = "binary"
MULTICLASS = "multiclass"
MULTILABEL = "multilabel"
class Reduction(Enum):
SUM = "sum"
MEAN = "mean"
NONE = "none"
SAMPLE_SUM = "sample_sum" # mean by sample dim + sum by batch dim
def _reduce(x: torch.Tensor, reduction: Union[str, Reduction] = "mean") -> torch.Tensor:
r"""Reduce input in batch dimension if needed.
Args:
x: Tensor with shape (N, *).
reduction: Specifies the reduction type:
``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``
"""
reduction = Reduction(reduction)
if reduction == Reduction.NONE:
return x
elif reduction == Reduction.MEAN:
return x.mean()
elif reduction == Reduction.SUM:
return x.sum()
else:
raise ValueError("Uknown reduction. Expected one of {'none', 'mean', 'sum'}")
class Loss(_Loss):
"""Loss which supports addition and multiplication"""
def __add__(self, other):
if isinstance(other, Loss):
return SumOfLosses(self, other)
else:
raise ValueError("Loss should be inherited from `Loss` class")
def __radd__(self, other):
return self.__add__(other)
def __mul__(self, value):
if isinstance(value, (int, float)):
return WeightedLoss(self, value)
else:
raise ValueError("Loss should be multiplied by int or float")
def __rmul__(self, other):
return self.__mul__(other)
class WeightedLoss(Loss):
"""
Wrapper class around loss function that applies weighted with fixed factor.
This class helps to balance multiple losses if they have different scales
"""
def __init__(self, loss, weight=1.0):
super().__init__()
self.loss = loss
self.register_buffer("weight", torch.tensor([weight]))
def forward(self, *inputs):
return self.loss(*inputs) * self.weight[0]
class SumOfLosses(Loss):
def __init__(self, l1, l2):
super().__init__()
self.l1 = l1
self.l2 = l2
def __call__(self, *inputs):
return self.l1(*inputs) + self.l2(*inputs)
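# Usage sketch (hypothetical loss instances): losses can be combined arithmetically,
# e.g. total_loss = 0.5 * loss_a + loss_b, which builds WeightedLoss / SumOfLosses wrappers.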
|
setup.py | peteroconnor-bc/artemis | 235 | 126095 |
from setuptools import setup, find_packages
import re
# Get the version, following advice from https://stackoverflow.com/a/7071358/851699
VERSIONFILE="artemis/_version.py"
verstrline = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
verstr = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
setup(
name='artemis-ml',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/quva-lab/artemis',
long_description='Artemis aims to get rid of all the boring, bureaucratic coding (plotting, file management, etc) involved in machine learning projects, so you can get to the good stuff quickly.',
install_requires=['numpy', 'scipy', 'matplotlib', 'pytest', 'pillow', 'tabulate', 'si-prefix', 'enum34'],
extras_require = {
'remote_plotting': ["paramiko", "netifaces"]
},
version=verstr,
packages=find_packages(),
scripts=[])
|
sdk/python/pulumi_azure/servicebus/get_topic.py | henriktao/pulumi-azure | 109 | 126130 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetTopicResult',
'AwaitableGetTopicResult',
'get_topic',
'get_topic_output',
]
@pulumi.output_type
class GetTopicResult:
"""
A collection of values returned by getTopic.
"""
def __init__(__self__, auto_delete_on_idle=None, default_message_ttl=None, duplicate_detection_history_time_window=None, enable_batched_operations=None, enable_express=None, enable_partitioning=None, id=None, max_size_in_megabytes=None, name=None, namespace_name=None, requires_duplicate_detection=None, resource_group_name=None, status=None, support_ordering=None):
if auto_delete_on_idle and not isinstance(auto_delete_on_idle, str):
raise TypeError("Expected argument 'auto_delete_on_idle' to be a str")
pulumi.set(__self__, "auto_delete_on_idle", auto_delete_on_idle)
if default_message_ttl and not isinstance(default_message_ttl, str):
raise TypeError("Expected argument 'default_message_ttl' to be a str")
pulumi.set(__self__, "default_message_ttl", default_message_ttl)
if duplicate_detection_history_time_window and not isinstance(duplicate_detection_history_time_window, str):
raise TypeError("Expected argument 'duplicate_detection_history_time_window' to be a str")
pulumi.set(__self__, "duplicate_detection_history_time_window", duplicate_detection_history_time_window)
if enable_batched_operations and not isinstance(enable_batched_operations, bool):
raise TypeError("Expected argument 'enable_batched_operations' to be a bool")
pulumi.set(__self__, "enable_batched_operations", enable_batched_operations)
if enable_express and not isinstance(enable_express, bool):
raise TypeError("Expected argument 'enable_express' to be a bool")
pulumi.set(__self__, "enable_express", enable_express)
if enable_partitioning and not isinstance(enable_partitioning, bool):
raise TypeError("Expected argument 'enable_partitioning' to be a bool")
pulumi.set(__self__, "enable_partitioning", enable_partitioning)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if max_size_in_megabytes and not isinstance(max_size_in_megabytes, int):
raise TypeError("Expected argument 'max_size_in_megabytes' to be a int")
pulumi.set(__self__, "max_size_in_megabytes", max_size_in_megabytes)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if namespace_name and not isinstance(namespace_name, str):
raise TypeError("Expected argument 'namespace_name' to be a str")
pulumi.set(__self__, "namespace_name", namespace_name)
if requires_duplicate_detection and not isinstance(requires_duplicate_detection, bool):
raise TypeError("Expected argument 'requires_duplicate_detection' to be a bool")
pulumi.set(__self__, "requires_duplicate_detection", requires_duplicate_detection)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if support_ordering and not isinstance(support_ordering, bool):
raise TypeError("Expected argument 'support_ordering' to be a bool")
pulumi.set(__self__, "support_ordering", support_ordering)
@property
@pulumi.getter(name="autoDeleteOnIdle")
def auto_delete_on_idle(self) -> str:
"""
The ISO 8601 timespan duration of the idle interval after which the Topic is automatically deleted, minimum of 5 minutes.
"""
return pulumi.get(self, "auto_delete_on_idle")
@property
@pulumi.getter(name="defaultMessageTtl")
def default_message_ttl(self) -> str:
"""
The ISO 8601 timespan duration of TTL of messages sent to this topic if no TTL value is set on the message itself.
"""
return pulumi.get(self, "default_message_ttl")
@property
@pulumi.getter(name="duplicateDetectionHistoryTimeWindow")
def duplicate_detection_history_time_window(self) -> str:
"""
The ISO 8601 timespan duration during which duplicates can be detected.
"""
return pulumi.get(self, "duplicate_detection_history_time_window")
@property
@pulumi.getter(name="enableBatchedOperations")
def enable_batched_operations(self) -> bool:
"""
Boolean flag which controls if server-side batched operations are enabled.
"""
return pulumi.get(self, "enable_batched_operations")
@property
@pulumi.getter(name="enableExpress")
def enable_express(self) -> bool:
"""
Boolean flag which controls whether Express Entities are enabled. An express topic holds a message in memory temporarily before writing it to persistent storage.
"""
return pulumi.get(self, "enable_express")
@property
@pulumi.getter(name="enablePartitioning")
def enable_partitioning(self) -> bool:
"""
Boolean flag which controls whether to enable the topic to be partitioned across multiple message brokers.
"""
return pulumi.get(self, "enable_partitioning")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="maxSizeInMegabytes")
def max_size_in_megabytes(self) -> int:
"""
Integer value which controls the size of memory allocated for the topic. For supported values see the "Queue/topic size" section of [this document](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-quotas).
"""
return pulumi.get(self, "max_size_in_megabytes")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="namespaceName")
def namespace_name(self) -> str:
return pulumi.get(self, "namespace_name")
@property
@pulumi.getter(name="requiresDuplicateDetection")
def requires_duplicate_detection(self) -> bool:
"""
Boolean flag which controls whether the Topic requires duplicate detection.
"""
return pulumi.get(self, "requires_duplicate_detection")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def status(self) -> str:
"""
The Status of the Service Bus Topic. Acceptable values are Active or Disabled.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="supportOrdering")
def support_ordering(self) -> bool:
"""
Boolean flag which controls whether the Topic supports ordering.
"""
return pulumi.get(self, "support_ordering")
class AwaitableGetTopicResult(GetTopicResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetTopicResult(
auto_delete_on_idle=self.auto_delete_on_idle,
default_message_ttl=self.default_message_ttl,
duplicate_detection_history_time_window=self.duplicate_detection_history_time_window,
enable_batched_operations=self.enable_batched_operations,
enable_express=self.enable_express,
enable_partitioning=self.enable_partitioning,
id=self.id,
max_size_in_megabytes=self.max_size_in_megabytes,
name=self.name,
namespace_name=self.namespace_name,
requires_duplicate_detection=self.requires_duplicate_detection,
resource_group_name=self.resource_group_name,
status=self.status,
support_ordering=self.support_ordering)
def get_topic(name: Optional[str] = None,
namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTopicResult:
"""
Use this data source to access information about an existing Service Bus Topic.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.servicebus.get_topic(name="existing",
resource_group_name="existing",
namespace_name="existing")
pulumi.export("id", example.id)
```
:param str name: The name of this Service Bus Topic.
:param str namespace_name: The name of the Service Bus Namespace.
:param str resource_group_name: The name of the Resource Group where the Service Bus Topic exists.
"""
__args__ = dict()
__args__['name'] = name
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:servicebus/getTopic:getTopic', __args__, opts=opts, typ=GetTopicResult).value
return AwaitableGetTopicResult(
auto_delete_on_idle=__ret__.auto_delete_on_idle,
default_message_ttl=__ret__.default_message_ttl,
duplicate_detection_history_time_window=__ret__.duplicate_detection_history_time_window,
enable_batched_operations=__ret__.enable_batched_operations,
enable_express=__ret__.enable_express,
enable_partitioning=__ret__.enable_partitioning,
id=__ret__.id,
max_size_in_megabytes=__ret__.max_size_in_megabytes,
name=__ret__.name,
namespace_name=__ret__.namespace_name,
requires_duplicate_detection=__ret__.requires_duplicate_detection,
resource_group_name=__ret__.resource_group_name,
status=__ret__.status,
support_ordering=__ret__.support_ordering)
@_utilities.lift_output_func(get_topic)
def get_topic_output(name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTopicResult]:
"""
Use this data source to access information about an existing Service Bus Topic.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.servicebus.get_topic(name="existing",
resource_group_name="existing",
namespace_name="existing")
pulumi.export("id", example.id)
```
:param str name: The name of this Service Bus Topic.
:param str namespace_name: The name of the Service Bus Namespace.
:param str resource_group_name: The name of the Resource Group where the Service Bus Topic exists.
"""
...
|
mayan/apps/authentication/utils.py | bonitobonita24/Mayan-EDMS | 343 | 126152 |
def get_context_user(context):
if 'user' in context:
return context['user']
elif 'request' in context:
return getattr(context['request'], 'user', None)
|
tables/req_versions.py | xmatthias/PyTables | 869 | 126249 |
"""Required versions for PyTables dependencies."""
from packaging.version import Version
# **********************************************************************
# Keep these in sync with setup.cfg and user's guide
# **********************************************************************
# Minimum recommended versions for mandatory packages
min_numpy_version = Version('1.9.3')
min_numexpr_version = Version('2.6.2')
min_hdf5_version = Version('1.8.4')
min_blosc_version = Version("1.4.1")
min_blosc_bitshuffle_version = Version("1.8.0")
"""The minumum Blosc version where BitShuffle can be used safely."""
|
tests/test_port/test_misc.py | tadeu/markdown-it-py | 285 | 126252 |
from markdown_it import MarkdownIt
from markdown_it import presets
def test_highlight_arguments():
def highlight_func(str_, lang, attrs):
assert lang == "a"
assert attrs == "b c d"
return "<pre><code>==" + str_ + "==</code></pre>"
conf = presets.commonmark.make()
conf["options"]["highlight"] = highlight_func
md = MarkdownIt(config=conf)
assert md.render("``` a b c d \nhl\n```") == "<pre><code>==hl\n==</code></pre>\n"
|
align/schema/pdk.py | mabrains/ALIGN-public | 119 | 126254 |
from pydantic import validator, ValidationError, Field
from .types import BaseModel, Union, Optional, Literal, List
from typing import Dict
import pathlib
class ParasiticValues(BaseModel):
mean: int = 0
min: int = 0
max: int = 0
class Layer(BaseModel):
name: str
gds_layer_number: int
gds_data_type: Optional[Dict[str, int]] = Field(default_factory=lambda: {"draw": 0})
class LayerMetal(Layer):
direction: Literal['h', 'v']
min_length: int
max_length: Optional[int]
min_end_to_end: int
offset: int
width: Union[int, List[int]]
space: Union[int, List[int]]
color: Optional[List[str]]
stop_pitch: int
stop_point: int
stop_offset: int
unit_c: Optional[Dict[int, ParasiticValues]]
unit_r: Optional[Dict[int, ParasiticValues]]
unit_cc: Optional[Dict[int, ParasiticValues]]
@validator('name')
def _validate_name(cls, v):
assert v.startswith('M'), f'Metal layer name {v} should start with M'
return v
@validator('min_length', 'min_end_to_end', 'width', 'space', 'stop_pitch', 'stop_point')
def _validate_positive(cls, v):
if isinstance(v, List):
assert min(v) > 0, f'Values {v} should be positive'
else:
assert v > 0, f'Value {v} should be positive'
return v
@validator('stop_offset')
def _validate_non_negative(cls, v):
if isinstance(v, List):
assert min(v) >= 0, f'Values {v} should be non-negative'
else:
assert v >= 0, f'Value {v} should be non-negative'
return v
@validator('space')
def _validate_width(cls, v, values):
if isinstance(v, List):
assert len(v) == len(values['width']), f'width and space length should match'
return v
class LayerVia(Layer):
class Config:
allow_mutation = True
stack: List[str]
width_x: int
width_y: int
space_x: int
space_y: int
layer_l_width: Optional[List[int]] = None
layer_l_enc_x: Optional[int] = 0
layer_l_enc_y: Optional[int] = 0
layer_h_width: Optional[List[int]] = None
layer_h_enc_x: Optional[int] = 0
layer_h_enc_y: Optional[int] = 0
unit_r: Optional[Dict[int, ParasiticValues]]
@validator('stack')
def _validate_stack(cls, v):
assert len(v) == 2
return v
class PDK(BaseModel):
class Config:
allow_mutation = True
name: str
layers: Dict[str, Union[LayerMetal, LayerVia]] = Field(default_factory=lambda: {})
scale_factor: int = 1
@validator('layers')
def _validate_via(cls, layers):
for key, via in layers.items():
if isinstance(via, LayerVia):
ml, mh = via.stack
assert ml in layers, f'Lower layer {ml} not found for {key} {layers.keys()}'
assert mh in layers, f'Higher layer {mh} not found for {key}'
assert layers[ml].direction != layers[mh].direction, f'Lower and higher layer directions are not orthogonal'
if via.layer_l_width is None:
via.layer_l_width = layers[ml].width.copy()
if via.layer_h_width is None:
via.layer_h_width = layers[mh].width.copy()
return layers
def add_layer(self, layer):
assert layer.name not in self.layers
self.layers[layer.name] = layer
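# Write the router collateral files (options, design rules, metal templates and
# template instances, via generators, layer stack) derived from this PDK description.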
def generate_adr_collaterals(self, write_path: pathlib.Path, x_pitch: int, x_grid: int, y_pitch: int, y_grid: int, region: List[int]):
with open(write_path/"adr_forbidden_patterns.txt", "wt") as fp:
# TODO: Write rules for horizontal and vertical via spacing
fp.write(f'\n')
with open(write_path/"adr_options.txt", "wt") as fp:
fp.write(f'Option name=gr_region_width_in_poly_pitches value={x_grid}\n')
fp.write(f'Option name=gr_region_height_in_diff_pitches value={y_grid}\n')
with open(write_path/"adr_design_rules.txt", "wt") as fp:
for name, layer in self.layers.items():
if isinstance(layer, LayerMetal):
fp.write(f'Rule name={name}_minete type=minete value={layer.min_end_to_end} layer={name}\n')
fp.write(f'Rule name={name}_minlength type=minlength value={layer.min_length} layer={name}\n')
with open(write_path/"adr_metal_templates.txt", "wt") as fp:
for name, layer in self.layers.items():
if isinstance(layer, LayerMetal):
line = f'MetalTemplate layer={name} name={name}_template_0'
line += f' widths={",".join(str(i) for i in layer.width)}'
line += f' spaces={",".join(str(i) for i in layer.space)}'
if layer.color is not None and len(layer.color) > 0:
line += f' colors={",".join(str(i) for i in layer.color)}'
line += " stops=%s" % (",".join( str(i) for i in [layer.stop_pitch - 2*layer.stop_point, 2*layer.stop_point]))
line += '\n'
fp.write(line)
# Single metal template instance. Generalize to multiple as needed in the future.
with open(write_path/"adr_metal_templates_instances.txt", "wt") as fp:
for name, layer in self.layers.items():
if isinstance(layer, LayerMetal):
line = f'MetalTemplateInstance template={name}_template_0'
line += f' pgdoffset_abs={layer.offset}'
line += f' ogdoffset_abs={layer.stop_point}'
line += f' region={":".join(str(i) for i in region)}'
line += '\n'
fp.write(line)
def _via_string(via: LayerVia):
via_str = f'Generator name={via.name}_{via.width_x}_{via.width_y} {{ \n'
via_str += f' Layer1 value={via.stack[0]} {{\n'
via_str += f' x_coverage value={via.layer_l_enc_x}\n'
via_str += f' y_coverage value={via.layer_l_enc_y}\n'
via_str += f' widths value={",".join(str(i) for i in via.layer_l_width)}\n'
via_str += f' }}\n'
via_str += f' Layer2 value={via.stack[1]} {{\n'
via_str += f' x_coverage value={via.layer_h_enc_x}\n'
via_str += f' y_coverage value={via.layer_h_enc_y}\n'
via_str += f' widths value={",".join(str(i) for i in via.layer_h_width)}\n'
via_str += f' }}\n'
via_str += f' CutWidth value={via.width_x}\n'
via_str += f' CutHeight value={via.width_y}\n'
via_str += f' cutlayer value={via.name}\n'
via_str += f'}}\n'
return via_str
with open(write_path/"adr_via_generators.txt", "wt") as fp:
for name, layer in self.layers.items():
if isinstance(layer, LayerVia):
via_str = _via_string(layer)
fp.write(via_str)
fp.write(f'\n')
with open(write_path/"adr_layers.txt", "wt") as fp:
# Dummy layer required for global grid
line = f'Layer name=diffusion pgd=hor level=0 {{\n'
line += f' Type value=diffusion\n'
line += f' Technology pitch={y_pitch}\n'
line += f'}}\n'
fp.write(line)
# Dummy layer required for global grid
line = f'Layer name=wirepoly pgd=ver level=1 {{\n'
line += f' Type value=wire\n'
line += f' Type value=poly\n'
line += f' Technology pitch={x_pitch}\n'
line += f'}}\n'
fp.write(line)
# identify electrical connectivity
connected_layers = dict()
for name, layer in self.layers.items():
if isinstance(layer, LayerVia):
ml = layer.stack[0]
mh = layer.stack[1]
connected_layers[name] = [ml, mh]
if ml not in connected_layers:
connected_layers[ml] = []
connected_layers[ml].append(name)
if mh not in connected_layers:
connected_layers[mh] = []
connected_layers[mh].append(name)
level = 2
for i in range(0, 99):
name = f'M{i}'
if name in self.layers:
layer = self.layers[name]
pgd = 'ver' if layer.direction == 'v' else 'hor'
line = f'Layer name={name} pgd={pgd} level={level} {{\n'
line += f' Type value=wire\n'
line += f' Type value=metal\n'
for l in connected_layers[name]:
line += f' ElectricallyConnected layer={l}\n'
line += f'}}\n'
fp.write(line)
level +=1
name = f'V{i}'
if name in self.layers:
line = f'Layer name={name} level={level} {{\n'
line += f' Type value=via\n'
for l in connected_layers[name]:
line += f' ElectricallyConnected layer={l}\n'
line += f'}}\n'
fp.write(line)
level +=1
fp.write(f'\n')
|
app/lib/daemon/manager.py | grepleria/SnitchDNS | 152 | 126256 |
import ipaddress
import time
class DaemonManager:
@property
def ip(self):
return self.__bind_ip
@property
def port(self):
return self.__bind_port
def __init__(self, bind_ip, bind_port, system, shell):
self.__bind_ip = bind_ip
self.__bind_port = int(bind_port)
self.__system = system
self.__shell = shell
def start(self):
command = [
'flask',
'snitch_daemon',
'--bind-ip',
self.ip,
'--bind-port',
str(self.port)
]
self.__shell.execute(command, wait=False, venv=True)
# Wait a little while - I'm not using a loop intentionally.
time.sleep(5)
return self.is_running()
def stop(self):
pids = self.is_running()
if pids:
for pid in pids:
self.__system.process_kill(pid)
# Wait a bit.
time.sleep(5)
return not self.is_running()
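# A running daemon is identified by scanning the process list for a command line
# containing 'snitch_daemon' together with this instance's bind IP and port.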
def is_running(self):
ids = []
processes = self.__system.process_list()
for process in processes:
cmdline = process['cmdline']
if ('snitch_daemon' in cmdline) and (self.ip in cmdline) and (str(self.port) in cmdline):
ids.append(process['id'])
return ids if len(ids) > 0 else False
def is_configured(self):
if self.port < 1024 or self.port > 65535:
return False
elif not self.__is_valid_ip_address(self.ip):
return False
return True
def __is_valid_ip_address(self, ip):
try:
ipaddress.ip_address(ip)
except ValueError:
return False
return True
|
ch06-Drawing function/Drawing_UTF-8_strings.py
|
Anancha/OpenCV-Python-Tutorial
| 2,875 |
126307
|
<filename>ch06-Drawing function/Drawing_UTF-8_strings.py<gh_stars>1000+
# -*- coding: utf-8 -*-
# @Time : 2017/7/23 9:11 PM
# @Author : play4fun
# @File : Drawing_UTF-8_strings.py
# @Software: PyCharm
"""
Drawing_UTF-8_strings.py:
https://fireant.github.io/misc/2017/01/28/ttf-opencv.html
"""
import cv2
import numpy as np
img = np.zeros((100, 300, 3), dtype=np.uint8)
ft = cv2.freetype.createFreeType2()  # requires the freetype module, otherwise: module 'cv2' has no attribute 'freetype'
# ft.loadFontData(fontFileName='Ubuntu-R.ttf',id=0)
# ft.loadFontData(fontFileName='/usr/share/fonts/truetype/freefont/FreeSans.ttf',id=0)  # does not support Chinese
# ft.loadFontData(fontFileName='/usr/share/fonts-droid/truetype/DroidSansFallback.ttf',id=0)  # works on the Raspberry Pi
# sudo apt-get install ttf-wqy-zenhei  # install the font
# ft.loadFontData(fontFileName='/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', id=0)  # WenQuanYi open-source Chinese font
# on macOS, load the .ttc font file from the current folder directly
ft.loadFontData(fontFileName='wqy-zenhei.ttc', id=0)
ft.putText(img=img,
# text='Quick Fox',
text='你好中文',
org=(15, 70),  # coordinates of the text's top-left corner
fontHeight=60,  # font height
color=(255, 255, 255),  # white text color
thickness=-1,  # thickness (-1 = filled)
line_type=cv2.LINE_AA,
bottomLeftOrigin=True)
# cv2.imwrite('freetype.png', img)
cv2.imshow('freetype', img)
cv2.waitKey(0)
|
extras/forms.py
|
maznu/peering-manager
| 127 |
126326
|
<filename>extras/forms.py
from django import forms
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from requests.exceptions import HTTPError
from utils.forms import (
APISelectMultiple,
BootstrapMixin,
DynamicModelMultipleChoiceField,
StaticSelect,
add_blank_choice,
)
from .enums import JobResultStatus
from .models import IXAPI, JobResult
class IXAPIForm(BootstrapMixin, forms.ModelForm):
identity = forms.CharField(widget=StaticSelect)
class Meta:
model = IXAPI
fields = ("name", "url", "api_key", "api_secret", "identity")
def clean(self):
cleaned_data = super().clean()
ixapi = IXAPI(
url=cleaned_data["url"],
api_key=cleaned_data["api_key"],
api_secret=cleaned_data["api_secret"],
)
try:
# Try to query API and see if it raises an error
ixapi.get_accounts()
except HTTPError as e:
# Fail form validation on HTTP error to provide a feedback to the user
if e.response.status_code >= 400 and e.response.status_code < 500:
possible_issue = "make sure the URL, key and secret are correct"
else:
possible_issue = "the server is malfunctioning or unavailable"
raise ValidationError(
f"Unable to connect to IX-API ({e.response.status_code} {e.response.reason}), {possible_issue}."
)
class IXAPIFilterForm(BootstrapMixin, forms.Form):
model = IXAPI
q = forms.CharField(required=False, label="Search")
class JobResultFilterForm(BootstrapMixin, forms.Form):
model = JobResult
q = forms.CharField(required=False, label="Search")
name = forms.CharField(required=False)
user_id = DynamicModelMultipleChoiceField(
queryset=User.objects.all(),
required=False,
display_field="username",
label="User",
widget=APISelectMultiple(api_url="/api/users/users/"),
)
status = forms.ChoiceField(
required=False,
choices=add_blank_choice(JobResultStatus.choices),
widget=StaticSelect(),
)
|
crf/alphabet.py
|
Pandinosaurus/crf
| 348 |
126348
|
import os
class Alphabet:
"""
Bijective mapping from strings to integers.
>>> a = Alphabet()
>>> [a[x] for x in 'abcd']
[0, 1, 2, 3]
>>> list(map(a.lookup, range(4)))
['a', 'b', 'c', 'd']
>>> a.stop_growth()
>>> a['e']
>>> a.freeze()
>>> a.add('z')
Traceback (most recent call last):
...
ValueError: Alphabet is frozen. Key "z" not found.
>>> print(a.plaintext())
a
b
c
d
"""
def __init__(self):
self._mapping = {} # str -> int
self._flip = {} # int -> str; timv: consider using array or list
self._i = 0
self._frozen = False
self._growing = True
def __repr__(self):
return 'Alphabet(size=%s,frozen=%s)' % (len(self), self._frozen)
def freeze(self):
self._frozen = True
def stop_growth(self):
self._growing = False
@classmethod
def from_iterable(cls, s):
"Assumes keys are strings."
inst = cls()
for x in s:
inst.add(x)
# inst.freeze()
return inst
def keys(self):
return self._mapping.keys()
def items(self):
return self._mapping.items()
def imap(self, seq, emit_none=False):
"""
Apply alphabet to sequence while filtering. By default, `None` is not
emitted, so the Note that the output sequence may have fewer items.
"""
if emit_none:
for s in seq:
yield self[s]
else:
for s in seq:
x = self[s]
if x is not None:
yield x
def map(self, seq, *args, **kwargs):
return list(self.imap(seq, *args, **kwargs))
def add_many(self, x):
for k in x:
self.add(k)
def lookup(self, i):
if i is None:
return None
#assert isinstance(i, int)
return self._flip[i]
def lookup_many(self, x):
return map(self.lookup, x)
def __contains__(self, k):
#assert isinstance(k, basestring)
return k in self._mapping
def __getitem__(self, k):
try:
return self._mapping[k]
except KeyError:
#if not isinstance(k, basestring):
# raise ValueError("Invalid key (%s): only strings allowed." % (k,))
if self._frozen:
raise ValueError('Alphabet is frozen. Key "%s" not found.' % (k,))
if not self._growing:
return None
x = self._mapping[k] = self._i
self._i += 1
self._flip[x] = k
return x
add = __getitem__
def __setitem__(self, k, v):
assert k not in self._mapping
assert isinstance(v, int)
self._mapping[k] = v
self._flip[v] = k
def __iter__(self):
for i in range(len(self)):
yield self._flip[i]
def enum(self):
for i in range(len(self)):
yield (i, self._flip[i])
def tolist(self):
"Ordered list of the alphabet's keys."
return [self._flip[i] for i in range(len(self))]
def __len__(self):
return len(self._mapping)
def plaintext(self):
"assumes keys are strings"
return '\n'.join(self)
@classmethod
def load(cls, filename):
if not os.path.exists(filename):
return cls()
with open(filename) as f:
return cls.from_iterable(l.strip() for l in f)
def save(self, filename):
with open(filename, 'w') as f:
f.write(self.plaintext())
def __eq__(self, other):
return self._mapping == other._mapping
|
stonesoup/dataassociator/tests/conftest.py
|
Red-Portal/Stone-Soup-1
| 157 |
126357
|
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from ...hypothesiser.probability import PDAHypothesiser
from ...hypothesiser.distance import DistanceHypothesiser
from ...measures import Mahalanobis
from ...models.measurement.linear import LinearGaussian
from ...types.array import CovarianceMatrix
from ...models.transition.linear import ConstantVelocity, CombinedLinearGaussianTransitionModel
from ...predictor.kalman import KalmanPredictor
from ...updater.kalman import ExtendedKalmanUpdater
@pytest.fixture()
def measurement_model():
return LinearGaussian(ndim_state=4, mapping=[0, 2],
noise_covar=CovarianceMatrix(np.diag([1, 1])))
@pytest.fixture()
def predictor():
transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(1),
ConstantVelocity(1)])
return KalmanPredictor(transition_model)
@pytest.fixture()
def updater(measurement_model):
return ExtendedKalmanUpdater(measurement_model)
@pytest.fixture()
def probability_hypothesiser(predictor, updater):
return PDAHypothesiser(predictor, updater,
clutter_spatial_density=1.2e-2,
prob_detect=0.9, prob_gate=0.99)
@pytest.fixture()
def distance_hypothesiser(predictor, updater):
return DistanceHypothesiser(predictor, updater, Mahalanobis(), 10)
|
passwords/type7.py
|
m00tiny/scripts
| 877 |
126410
|
<filename>passwords/type7.py
import sys
V = [0x64, 0x73, 0x66, 0x64, 0x3b, 0x6b, 0x66, 0x6f, 0x41, 0x2c, 0x2e,
0x69, 0x79, 0x65, 0x77, 0x72, 0x6b, 0x6c, 0x64, 0x4a, 0x4b, 0x44,
0x48, 0x53, 0x55, 0x42, 0x73, 0x67, 0x76, 0x63, 0x61, 0x36, 0x39,
0x38, 0x33, 0x34, 0x6e, 0x63, 0x78, 0x76, 0x39, 0x38, 0x37, 0x33,
0x32, 0x35, 0x34, 0x6b, 0x3b, 0x66, 0x67, 0x38, 0x37]
if len(sys.argv) != 2:
print("Usage: type7.py hash")
sys.exit(0)
hash = sys.argv[1]
i = int(hash[:2], 16)
r = ""
for j in range(2, len(hash) - 2, 2):
h = int(hash[j:j+2], 16)
r = r + chr(h ^ V[i])
i = (i + 1) % 53
print(r)
|
batchflow/models/torch/blocks/__init__.py
|
analysiscenter/dataset
| 101 |
126427
|
""" Blocks: large parts that implement idea/named entity from popular articles. """
from .core import Block, DefaultBlock, Downsample, Upsample
from .named_blocks import VGGBlock, ResBlock, ResNeStBlock, DenseBlock, MBConvBlock, InvResBlock, ConvNeXtBlock
from .attention import SEBlock, SCSEBlock, SimpleSelfAttention, BAM, CBAM, FPA, SelectiveKernelConv, SplitAttentionConv
from .pyramid import PyramidPooling, ASPP, KSAC
from ..layers import Combine, Branch # convenience
|
venv/Lib/site-packages/pydeck/types/image.py
|
ajayiagbebaku/NFL-Model
| 7,702 |
126457
|
<reponame>ajayiagbebaku/NFL-Model
import base64
import os
import pathlib
import re
from pydeck.types import String
from pydeck.types.base import PydeckType
# See https://github.com/django/django/blob/stable/1.3.x/django/core/validators.py#L45
valid_url_regex = re.compile(
r"^(?:http|ftp)s?://"
r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|"
r"localhost|"
r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"
r"(?::\d+)?"
r"(?:/?|[/?]\S+)$",
re.IGNORECASE,
)
valid_image_regex = re.compile(
r".(gif|jpe?g|tiff?|png|webp|bmp)$",
re.IGNORECASE,
)
def get_encoding(path: str) -> str:
extension = pathlib.Path(path).suffix.replace(".", "")
return f"data:image/{extension};base64,"
class Image(PydeckType):
"""Indicate an image for pydeck
Parameters
----------
path : str
Path to image (either remote or local)
"""
def __init__(self, path: str):
if not self.validate(path):
raise ValueError(f"{path} is not contain a valid image path")
self.path = path
self.is_local = not valid_url_regex.search(self.path)
def __repr__(self):
if self.is_local:
with open(os.path.expanduser(self.path), "rb") as img_file:
encoded_string = get_encoding(self.path) + base64.b64encode(img_file.read()).decode("utf-8")
return repr(String(encoded_string, quote_type=""))
else:
return self.path
def __eq__(self, other):
return str(self) == str(other)
@staticmethod
def validate(path):
# Necessary-but-not-sufficient checks for being a valid image for @deck.gl/json
return any((valid_image_regex.search(path), valid_url_regex.search(path), path.startswith("data/image")))
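# Hedged usage sketch (hypothetical paths, not part of the original module):
#   Image("https://example.com/icon.png")  # remote URL: repr() returns it unchanged
#   Image("./icon.png")                    # local file: repr() returns a base64 data URI
# A value matching neither the URL pattern, the image-extension pattern, nor the
# "data/image" prefix fails validate() and raises ValueError.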
|
dialogue-engine/test/programytest/parser/template/graph_tests/test_resetlearn.py
|
cotobadesign/cotoba-agent-oss
| 104 |
126462
|
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import xml.etree.ElementTree as ET
from programy.parser.template.nodes.resetlearn import TemplateResetLearnNode
from programy.parser.exceptions import ParserException
from programy.parser.template.nodes.base import TemplateNode
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphResetLearnTests(TemplateGraphTestClient):
def test_learnf_type1(self):
template = ET.fromstring("""
<template>
<resetlearn />
</template>
""")
ast = self._graph.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertIsNotNone(ast.children[0])
self.assertIsInstance(ast.children[0], TemplateResetLearnNode)
self.assertEqual(0, len(ast.children[0].children))
def test_learnf_type2(self):
template = ET.fromstring("""
<template>
<resetlearn></resetlearn>
</template>
""")
ast = self._graph.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertIsNotNone(ast.children[0])
self.assertIsInstance(ast.children[0], TemplateResetLearnNode)
self.assertEqual(0, len(ast.children[0].children))
def test_request_with_children(self):
template = ET.fromstring("""
<template>
<resetlearn>Error</resetlearn>
</template>
""")
with self.assertRaises(ParserException):
self._graph.parse_template_expression(template)
def test_removal(self):
client_context1 = self.create_client_context("testid")
template = ET.fromstring("""
<template>
<learn>
<category>
<pattern>HELLO THERE</pattern>
<template>HIYA ONE</template>
</category>
</learn>
</template>
""")
ast = self._graph.parse_template_expression(template)
learn_node = ast.children[0]
learn_node.resolve(client_context1)
response = client_context1.bot.ask_question(client_context1, "HELLO THERE")
self.assertEqual("HIYA ONE.", response)
client_context2 = self.create_client_context("testid")
template = ET.fromstring("""
<template>
<learn>
<category>
<pattern>HELLO THERE</pattern>
<template>HIYA TWO</template>
</category>
</learn>
</template>
""")
ast = self._graph.parse_template_expression(template)
learn_node = ast.children[0]
learn_node.resolve(client_context2)
response = client_context2.bot.ask_question(client_context2, "HELLO THERE")
self.assertEqual("HIYA TWO.", response)
template = ET.fromstring("""
<template>
<resetlearn />
</template>
""")
ast = self._graph.parse_template_expression(template)
learn_node = ast.children[0]
learn_node.resolve(client_context2)
|
tests/test_density_GaussianDensity.py
|
SyedZiaul/freud
| 172 |
126502
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as npt
import pytest
import freud
matplotlib.use("agg")
class TestGaussianDensity:
def test_random_point_with_cell_list(self):
fftpack = pytest.importorskip("scipy.fftpack")
fft = fftpack.fft
fftshift = fftpack.fftshift
width = 20
r_max = 10.0
sigma = 0.1
num_points = 10000
box_size = r_max * 3.1
box, points = freud.data.make_random_system(box_size, num_points, is2D=True)
for w in (width, (width, width), [width, width]):
gd = freud.density.GaussianDensity(w, r_max, sigma)
# Test access
with pytest.raises(AttributeError):
gd.box
with pytest.raises(AttributeError):
gd.density
gd.compute((box, points))
# Test access
gd.box
gd.density
# Verify the output dimensions are correct
assert gd.density.shape == (width, width)
assert np.prod(gd.density.shape) == np.prod(gd.width)
myDiff = gd.density
myFFT = fft(fft(myDiff[:, :], axis=1), axis=0)
myDiff = (myFFT * np.conj(myFFT)).real
myDiff = fftshift(myDiff)[:, :]
npt.assert_equal(
np.where(myDiff == np.max(myDiff)),
(np.array([width // 2]), np.array([width // 2])),
)
def test_change_box_dimension(self):
width = 20
r_max = 9.9
sigma = 0.01
num_points = 100
box_size = r_max * 3.1
# test that a 3D system computed after computing a 2D system will fail
box, points = freud.data.make_random_system(box_size, num_points, is2D=True)
gd = freud.density.GaussianDensity(width, r_max, sigma)
gd.compute((box, points))
test_box, test_points = freud.data.make_random_system(
box_size, num_points, is2D=False
)
with pytest.raises(ValueError):
gd.compute((test_box, test_points))
# test that a 2D system computed after computing a 3D system will fail
box, points = freud.data.make_random_system(box_size, num_points, is2D=False)
gd = freud.density.GaussianDensity(width, r_max, sigma)
gd.compute((box, points))
test_box, test_points = freud.data.make_random_system(
box_size, num_points, is2D=True
)
with pytest.raises(ValueError):
gd.compute((test_box, test_points))
def test_sum_2d(self):
# Ensure that each point's Gaussian sums to 1
width = 20
r_max = 9.9
sigma = 2
box_size = width
gd = freud.density.GaussianDensity(width, r_max, sigma)
for num_points in [1, 10, 100]:
box, points = freud.data.make_random_system(box_size, num_points, is2D=True)
gd.compute(system=(box, points))
# This has discretization error as well as single-precision error
assert np.isclose(np.sum(gd.density), num_points, rtol=1e-4)
def test_sum_3d(self):
# Ensure that each point's Gaussian sums to 1
width = 20
r_max = 9.9
sigma = 2
box_size = width
gd = freud.density.GaussianDensity(width, r_max, sigma)
for num_points in [1, 10, 100]:
box, points = freud.data.make_random_system(
box_size, num_points, is2D=False
)
gd.compute(system=(box, points))
# This has discretization error as well as single-precision error
assert np.isclose(np.sum(gd.density), num_points, rtol=1e-4)
def test_sum_values_2d(self):
# Ensure that the Gaussian convolution sums to the sum of the values
width = 20
r_max = 9.9
sigma = 2
box_size = width
gd = freud.density.GaussianDensity(width, r_max, sigma)
for num_points in [1, 10, 100]:
system = freud.data.make_random_system(box_size, num_points, is2D=True)
values = np.random.rand(num_points)
gd.compute(system, values)
# This has discretization error as well as single-precision error
assert np.isclose(np.sum(gd.density), np.sum(values), rtol=1e-4)
def test_sum_values_3d(self):
# Ensure that the Gaussian convolution sums to the sum of the values
width = 20
r_max = 9.9
sigma = 2
box_size = width
gd = freud.density.GaussianDensity(width, r_max, sigma)
for num_points in [1, 10, 100]:
system = freud.data.make_random_system(box_size, num_points, is2D=False)
values = np.random.rand(num_points)
gd.compute(system, values)
# This has discretization error as well as single-precision error
assert np.isclose(np.sum(gd.density), np.sum(values), rtol=1e-4)
def test_repr(self):
gd = freud.density.GaussianDensity(100, 10.0, 0.1)
assert str(gd) == str(eval(repr(gd)))
# Use both signatures
gd3 = freud.density.GaussianDensity((98, 99, 100), 10.0, 0.1)
assert str(gd3) == str(eval(repr(gd3)))
def test_repr_png(self):
width = 20
r_max = 2.0
sigma = 0.01
num_points = 100
box_size = r_max * 3.1
box, points = freud.data.make_random_system(box_size, num_points, is2D=True)
gd = freud.density.GaussianDensity(width, r_max, sigma)
with pytest.raises(AttributeError):
gd.plot()
assert gd._repr_png_() is None
gd.compute((box, points))
gd.plot()
gd = freud.density.GaussianDensity(width, r_max, sigma)
test_box = freud.box.Box.cube(box_size)
gd.compute((test_box, points))
gd.plot()
assert gd._repr_png_() is None
plt.close("all")
|
library/oci_identity_tag_default.py
|
slmjy/oci-ansible-modules
| 106 |
126515
|
<filename>library/oci_identity_tag_default.py
#!/usr/bin/python
# Copyright (c) 2017, 2019 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_identity_tag_default
short_description: Manage a TagDefault resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a TagDefault resource in Oracle Cloud Infrastructure
- For I(state=present), creates a new Tag Default in the specified Compartment for the specified Tag Definition.
version_added: "2.5"
options:
compartment_id:
description:
- The OCID of the Compartment. The Tag Default will apply to any resource contained in this Compartment.
- Required for create using I(state=present).
tag_definition_id:
description:
- The OCID of the Tag Definition. The Tag Default will always assign a default value for this Tag Definition.
- Required for create using I(state=present).
value:
description:
- The default value for the Tag Definition. This will be applied to all resources created in the Compartment.
- Required for create using I(state=present), update using I(state=present) with tag_default_id present.
tag_default_id:
description:
- The OCID of the Tag Default.
- Required for update using I(state=present), I(state=absent).
aliases: ["id"]
state:
description:
- The state of the TagDefault.
- Use I(state=present) to create or update a TagDefault.
- Use I(state=absent) to delete a TagDefault.
required: false
default: 'present'
choices: ["present", "absent"]
author:
- <NAME> (@manojmeda)
- <NAME> (@mross22)
- <NAME> (@nalsaber)
extends_documentation_fragment: [ oracle, oracle_creatable_resource, oracle_wait_options ]
"""
EXAMPLES = """
- name: Create tag_default
oci_identity_tag_default:
compartment_id: ocid1.compartment.oc1..aaaaaaaamnuh3osn3n77vx2ofkx5zwpaqae5rox2zfoscd7z3uvnhpqf5f7q
tag_definition_id: ocid1.tagdefinition.oc1..aaaaaaaash5swxlw2ppo2rjqy5cwknrggb3ogpdzmsk6f4kdjfcwwkys3zga
value: such-default-wow
- name: Update tag_default
oci_identity_tag_default:
value: so-tagging
tag_default_id: ocid1.tagdefault.oc1..xxxxxxEXAMPLExxxxxx
- name: Delete tag_default
oci_identity_tag_default:
tag_default_id: ocid1.tagdefault.oc1..xxxxxxEXAMPLExxxxxx
state: absent
"""
RETURN = """
tag_default:
description:
- Details of the TagDefault resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- The OCID of the Tag Default.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
compartment_id:
description:
- The OCID of the Compartment. The Tag Default will apply to any resource contained in this Compartment.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
tag_namespace_id:
description:
- The OCID of the Tag Namespace that contains the Tag Definition.
returned: on success
type: string
sample: ocid1.tagnamespace.oc1..xxxxxxEXAMPLExxxxxx
tag_definition_id:
description:
- The OCID of the Tag Definition. The Tag Default will always assign a default value for this Tag Definition.
returned: on success
type: string
sample: ocid1.tagdefinition.oc1..xxxxxxEXAMPLExxxxxx
tag_definition_name:
description:
- The name used in the Tag Definition. This field is informational in the context of the Tag Default.
returned: on success
type: string
sample: tag_definition_name_example
value:
description:
- The default value for the Tag Definition. This will be applied to all resources created in the Compartment.
returned: on success
type: string
sample: value_example
time_created:
description:
- Date and time the `TagDefault` object was created, in the format defined by RFC3339.
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
lifecycle_state:
description:
- The tag default's current state. After creating a tagdefault, make sure its `lifecycleState` is ACTIVE before using it.
returned: on success
type: string
sample: ACTIVE
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"tag_namespace_id": "ocid1.tagnamespace.oc1..xxxxxxEXAMPLExxxxxx",
"tag_definition_id": "ocid1.tagdefinition.oc1..xxxxxxEXAMPLExxxxxx",
"tag_definition_name": "tag_definition_name_example",
"value": "value_example",
"time_created": "2016-08-25T21:10:29.600Z",
"lifecycle_state": "ACTIVE"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_common_utils, oci_wait_utils
from ansible.module_utils.oracle.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.identity import IdentityClient
from oci.identity.models import CreateTagDefaultDetails
from oci.identity.models import UpdateTagDefaultDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class TagDefaultHelperGen(OCIResourceHelperBase):
"""Supported operations: create, update, get, list and delete"""
@staticmethod
def get_module_resource_id_param():
return "tag_default_id"
def get_module_resource_id(self):
return self.module.params.get("tag_default_id")
def get_get_fn(self):
return self.client.get_tag_default
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_tag_default,
tag_default_id=self.module.params.get("tag_default_id"),
)
def list_resources(self):
required_list_method_params = []
optional_list_method_params = ["compartment_id", "tag_definition_id"]
required_kwargs = dict(
(param, self.module.params[param]) for param in required_list_method_params
)
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
and (
not self.module.params.get("key_by")
or param in self.module.params.get("key_by")
)
)
kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)
return oci_common_utils.list_all_resources(
self.client.list_tag_defaults, **kwargs
)
def get_create_model_class(self):
return CreateTagDefaultDetails
def create_resource(self):
create_details = self.get_create_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.create_tag_default,
call_fn_args=(),
call_fn_kwargs=dict(create_tag_default_details=create_details),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.CREATE_OPERATION_KEY,
waiter_client=self.client,
resource_helper=self,
wait_for_states=self.module.params.get("wait_until")
or oci_common_utils.get_resource_active_states(),
)
def get_update_model_class(self):
return UpdateTagDefaultDetails
def update_resource(self):
update_details = self.get_update_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.update_tag_default,
call_fn_args=(),
call_fn_kwargs=dict(
tag_default_id=self.module.params.get("tag_default_id"),
update_tag_default_details=update_details,
),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.UPDATE_OPERATION_KEY,
waiter_client=self.client,
resource_helper=self,
wait_for_states=self.module.params.get("wait_until")
or oci_common_utils.get_resource_active_states(),
)
def delete_resource(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.delete_tag_default,
call_fn_args=(),
call_fn_kwargs=dict(
tag_default_id=self.module.params.get("tag_default_id")
),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.DELETE_OPERATION_KEY,
waiter_client=self.client,
resource_helper=self,
wait_for_states=self.module.params.get("wait_until")
or oci_common_utils.get_resource_terminated_states(),
)
TagDefaultHelperCustom = get_custom_class("TagDefaultHelperCustom")
class ResourceHelper(TagDefaultHelperCustom, TagDefaultHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=True, supports_wait=True
)
module_args.update(
dict(
compartment_id=dict(type="str"),
tag_definition_id=dict(type="str"),
value=dict(type="str"),
tag_default_id=dict(aliases=["id"], type="str"),
state=dict(type="str", default="present", choices=["present", "absent"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="tag_default",
service_client_class=IdentityClient,
namespace="identity",
)
result = dict(changed=False)
if resource_helper.is_delete():
result = resource_helper.delete()
elif resource_helper.is_update():
result = resource_helper.update()
elif resource_helper.is_create():
result = resource_helper.create()
module.exit_json(**result)
if __name__ == "__main__":
main()
|
examples/question_answering_confidence.py
|
skirdey/FARM
| 1,551 |
126531
|
<reponame>skirdey/FARM<gh_stars>1000+
import logging
import torch
from pathlib import Path
from farm.utils import set_all_seeds, MLFlowLogger, initialize_device_settings
from farm.modeling.tokenization import Tokenizer
from farm.data_handler.processor import SquadProcessor
from farm.data_handler.data_silo import DataSilo
from farm.modeling.adaptive_model import AdaptiveModel
from farm.infer import QAInferencer
from farm.eval import Evaluator
from farm.evaluation.metrics import metrics_per_bin
def question_answering_confidence():
##########################
########## Logging
##########################
logger = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO)
# reduce verbosity from transformers library
logging.getLogger('transformers').setLevel(logging.WARNING)
##########################
########## Settings
##########################
set_all_seeds(seed=42)
device, n_gpu = initialize_device_settings(use_cuda=True)
lang_model = "deepset/roberta-base-squad2"
do_lower_case = False
batch_size = 80
data_dir = Path("../data/squad20")
# We use the same file for dev and test set only for demo purposes
dev_filename = "dev-v2.0.json"
test_filename = "dev-v2.0.json"
accuracy_at = 3 # accuracy at n is useful for answers inside long documents
# 1.Create a tokenizer
tokenizer = Tokenizer.load(
pretrained_model_name_or_path=lang_model,
do_lower_case=do_lower_case)
# 2. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset
processor = SquadProcessor(
tokenizer=tokenizer,
max_seq_len=384,
label_list=["start_token", "end_token"],
metric="squad",
train_filename=None,
dev_filename=dev_filename,
test_filename=test_filename,
data_dir=data_dir,
doc_stride=192,
)
# 3. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them and calculates a few descriptive statistics of our datasets
data_silo = DataSilo(
processor=processor,
batch_size=batch_size)
# 4. Load pre-trained question-answering model
model = AdaptiveModel.convert_from_transformers(lang_model, device=device, task_type="question_answering")
model.connect_heads_with_processor(data_silo.processor.tasks, require_labels=True)
# Number of predictions the model will make per Question.
# The multiple predictions are used for evaluating top n recall.
model.prediction_heads[0].n_best = accuracy_at
# 5. The calibration of model confidence scores sets one parameter, which is called temperature and can be accessed through the prediction_head.
# This temperature is applied to each logit in the forward pass, where each logit is divided by the temperature.
# A softmax function is applied to the logits afterward to get confidence scores in the range [0,1].
# A temperature larger than 1 decreases the model’s confidence scores.
logger.info(f"Parameter used for temperature scaling of model confidence scores: {model.prediction_heads[0].temperature_for_confidence}")
# 6a. We can either manually set the temperature (default value is 1.0)...
model.prediction_heads[0].temperature_for_confidence = torch.nn.Parameter((torch.ones(1) * 1.0).to(device=device))
# 6b. ...or we can run the evaluator on the dev set and use it to calibrate confidence scores with a technique called temperature scaling.
# It will align the confidence scores with the model's accuracy based on the dev set data by tuning the temperature parameter.
# During the calibration, this parameter is automatically set internally as an attribute of the prediction head.
evaluator_dev = Evaluator(
data_loader=data_silo.get_data_loader("dev"),
tasks=data_silo.processor.tasks,
device=device
)
result_dev = evaluator_dev.eval(model, return_preds_and_labels=True, calibrate_conf_scores=True)
# evaluator_dev.log_results(result_dev, "Dev", logging=False, steps=len(data_silo.get_data_loader("dev")))
# 7. Optionally, run the evaluator on the test set to see how well the confidence scores are aligned with the model's accuracy
evaluator_test = Evaluator(
data_loader=data_silo.get_data_loader("test"),
tasks=data_silo.processor.tasks,
device=device
)
result_test = evaluator_test.eval(model, return_preds_and_labels=True)[0]
logger.info("Grouping predictions by confidence score and calculating metrics for each bin.")
em_per_bin, confidence_per_bin, count_per_bin = metrics_per_bin(result_test["preds"], result_test["labels"], num_bins=10)
for bin_number in range(10):
logger.info(f"Bin {bin_number} - exact match: {em_per_bin[bin_number]}, average confidence score: {confidence_per_bin[bin_number]}")
# 8. Hooray! You have a model with calibrated confidence scores.
# Store the model and the temperature parameter will be stored automatically as an attribute of the prediction head.
save_dir = Path("../saved_models/qa-confidence-tutorial")
model.save(save_dir)
processor.save(save_dir)
# 9. When making a prediction with the calibrated model, we could filter out predictions where the model is not confident enough
# To this end, load the stored model, which will automatically load the stored temperature parameter.
# The confidence scores are automatically adjusted based on this temperature parameter.
# For each prediction, we can check the model's confidence and decide whether to output the prediction or not.
inferencer = QAInferencer.load(save_dir, batch_size=40, gpu=True)
logger.info(f"Loaded model with stored temperature: {inferencer.model.prediction_heads[0].temperature_for_confidence}")
QA_input = [
{
"questions": ["Who counted the game among the best ever made?"],
"text": "Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created."
}]
result = inferencer.inference_from_dicts(dicts=QA_input, return_json=False)[0]
if result.prediction[0].confidence > 0.9:
print(result.prediction[0].answer)
else:
print("The confidence is not high enough to give an answer.")
if __name__ == "__main__":
question_answering_confidence()
|
pytorch-commen-code/code2.py
|
riciche/SimpleCVReproduction
| 923 |
126551
|
<reponame>riciche/SimpleCVReproduction<gh_stars>100-1000
import torch
tensor = torch.randn(3,4,5)
print(tensor.type())
print(tensor.size())
# size() is not the same as shape
print(tensor.dim())
print(tensor.shape)
# Neat trick: named tensors
images = torch.randn(3,3,4,4)
print(images.sum(dim=1).shape)
print(images.select(dim=1, index=0).shape)
# available since PyTorch 1.3
NCHW = ["N", "C", "H", "W"]
images = torch.randn(3,3,4,4,names=NCHW)
images.sum("C")
images.select('C', index=0)
# Align the tensor dimensions to the specified order.
tensor = tensor.align_to('N', 'C', 'H', 'W')
# won't use it in the future.
# data type transfer
torch.set_default_tensor_type(torch.FloatTensor)
tensor = tensor.cuda()
tensor = tensor.cpu()
tensor = tensor.float()
tensor = tensor.long()
|
tests/test_numpy.py
|
lishoujun/pysimdjson
| 531 |
126564
|
import json
import pytest
import simdjson
def with_buffer(content):
import numpy
parser = simdjson.Parser()
doc = parser.parse(content)
assert len(numpy.frombuffer(doc.as_buffer(of_type='d'))) == 10001
def without_buffer(content):
import numpy
parser = simdjson.Parser()
doc = parser.parse(content)
assert len(numpy.array(doc.as_list())) == 10001
def with_builtin(content):
import numpy
assert len(numpy.array(json.loads(content))) == 10001
def with_orjson(content):
import numpy
import orjson
assert len(numpy.array(orjson.loads(content))) == 10001
@pytest.mark.slow
@pytest.mark.parametrize('loader', [
with_buffer, without_buffer, with_builtin, with_orjson])
def test_array_to_numpy(benchmark, loader):
"""Test how quickly we can load a homogeneous array of floats into a
numpy array."""
with open('jsonexamples/numbers.json', 'rb') as src:
content = src.read()
benchmark.group = 'numpy array (deserialize)'
benchmark.extra_info['group'] = 'numpy'
benchmark(loader, content)
|
test/test_rnadiff.py
|
khourhin/sequana
| 138 |
126565
|
from sequana.rnadiff import RNADiffResults, RNADiffAnalysis, RNADesign
from . import test_dir
import pytest
def test_design():
d = RNADesign(f"{test_dir}/data/rnadiff/design.csv")
assert d.comparisons == [('Complemented_csrA', 'Mut_csrA'), ('Complemented_csrA', 'WT'), ('Mut_csrA', 'WT')]
assert d.conditions == ['Complemented_csrA', 'Mut_csrA', 'WT']
d = RNADesign(f"{test_dir}/data/rnadiff/design.csv", reference="WT")
assert d.comparisons == [('Complemented_csrA', 'WT'), ('Mut_csrA', 'WT')]
assert d.conditions == ['Complemented_csrA', 'Mut_csrA', 'WT']
@pytest.mark.xfail(reason="too slow or service may be down")
def test_rnadiff_onefolder():
# Featurecounts are saved in sequana/resources/testing/rnadiff/rnadiff_onecond_ex1
# generated from Featurecount of the file to be found in
# sequana/resources/testing/featurecounts/featurecounts_ex1
counts = f"{test_dir}/data/rnadiff/rnadiff_onecond_ex1/counts.csv"
design = f"{test_dir}/data/rnadiff/rnadiff_onecond_ex1/design.csv"
gff = f"{test_dir}/data/rnadiff/rnadiff_onecond_ex1/Lepto.gff"
an = RNADiffAnalysis(counts, design,
condition="condition", comparisons=[("Complemented_csrA", "WT")],
fc_feature="gene", fc_attribute="ID", gff=gff)
an
r = an.run()
r.plot_count_per_sample()
r.plot_percentage_null_read_counts()
#r.plot_volcano()
r.plot_pca()
r.plot_mds()
r.plot_isomap()
r.plot_density()
r.plot_boxplot_normeddata()
r.plot_boxplot_rawdata()
r.plot_dendogram()
r.plot_dispersion()
r.plot_feature_most_present()
r.comparisons['Complemented_csrA_vs_WT'].plot_volcano()
r.comparisons['Complemented_csrA_vs_WT'].plot_padj_hist()
r.comparisons['Complemented_csrA_vs_WT'].plot_pvalue_hist()
r.summary()
r.alpha = 1
r.log2_fc = 1
|
tests/test_socfaker_pcap.py
|
priamai/soc-faker
| 122 |
126606
|
def test_socfaker_pcap(socfaker_fixture):
assert socfaker_fixture.pcap()
|
nitorch/metrics.py
|
ArneBinder/Pytorch-LRP
| 117 |
126659
|
from sklearn.metrics import recall_score, roc_curve, auc
def specificity(y_true, y_pred):
return recall_score(y_true, y_pred, pos_label=0)
def sensitivity(y_true, y_pred):
return recall_score(y_true, y_pred, pos_label=1)
def balanced_accuracy(y_true, y_pred):
spec = specificity(y_true, y_pred)
sens = sensitivity(y_true, y_pred)
return (spec + sens) / 2
def auc_score(y_true, y_pred):
fpr, tpr, thresholds = roc_curve(y_true, y_pred)
return auc(fpr, tpr)
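# Hedged usage sketch with toy labels (not part of the original module):
#   y_true = [0, 0, 1, 1]
#   y_pred = [0, 1, 1, 1]
#   specificity(y_true, y_pred)        # 0.5, recall on the negative class
#   sensitivity(y_true, y_pred)        # 1.0, recall on the positive class
#   balanced_accuracy(y_true, y_pred)  # 0.75, mean of the two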
|
tests/notebooks/percent/hydrogen.py
|
st--/jupytext
| 5,378 |
126661
|
# %%
import pandas as pd
# %% Display a data frame
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]},
index=pd.Index(['x0', 'x1'], name='x'))
df
# %% Pandas plot {"tags": ["parameters"]}
df.plot(kind='bar')
|
bookstore/tests/test_archive.py
|
anderser/bookstore
| 192 |
126685
|
<reponame>anderser/bookstore
"""Tests for archive"""
import asyncio
import pytest
import json
import logging
from bookstore.archive import ArchiveRecord, BookstoreContentsArchiver
from nbformat.v4 import new_notebook
def test_create_contentsarchiver():
assert BookstoreContentsArchiver()
def test_create_contentsarchiver_invalid_args_count():
with pytest.raises(TypeError):
BookstoreContentsArchiver(42, True, 'hello')
@pytest.mark.asyncio
async def test_archive_failure_on_no_lock():
archiver = BookstoreContentsArchiver()
assert archiver
record = ArchiveRecord('my_notebook_path.ipynb', json.dumps(new_notebook()), 100.2)
assert record
await archiver.archive(record)
@pytest.mark.asyncio
async def test_archive_abort_with_lock(caplog):
"""Acquire a lock in advance so that when the archiver attempts to archive, it will abort."""
archiver = BookstoreContentsArchiver()
record = ArchiveRecord('my_notebook_path.ipynb', json.dumps(new_notebook()), 100.2)
lock = asyncio.Lock()
archiver.path_locks['my_notebook_path.ipynb'] = lock
async with lock:
with caplog.at_level(logging.INFO):
await archiver.archive(record)
assert 'Skipping archive of my_notebook_path.ipynb' in caplog.text
def test_pre_save_hook():
archiver = BookstoreContentsArchiver()
model = {"type": "notebook", "content": new_notebook()}
target_path = "my_notebook_path.ipynb"
archiver.run_pre_save_hook(model, target_path)
def test_pre_save_hook_bad_model():
archiver = BookstoreContentsArchiver()
model = {"type": "file", "content": new_notebook()}
target_path = "my_notebook_path.ipynb"
archiver.run_pre_save_hook(model, target_path)
|
Image_Synthesis_From_Text/char-CNN-RNN/pascal1k/create_word_rep.py
|
alexkrojas13/Deep-Learning-Challenges
| 298 |
126715
|
<gh_stars>100-1000
import numpy as np
import lutorpy as lua
require("torch")
file_list = open('filelist.txt', 'r')
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{} "
key_idx = 1
dictionary = {}
for alph_idx in range(len(alphabet)):
dictionary[alphabet[alph_idx]] = key_idx
key_idx += 1
file_list = open('filelist.txt', 'r')
for file in file_list.readlines():
file = file.split(' ')[0]
save_file = file + '.npy'
th_save_file = file + 'text.t7'
file = file + '.txt'
captions = open(file, 'r')
# doclength is 31
cur_cap = np.zeros([1,201,5], dtype=int)
caption_idx = 0
for caption in captions.readlines():
caption = caption[:-2]
cnt = 0
for char_idx in range(len(caption)):
cur_cap[0,cnt,caption_idx] = dictionary[caption[char_idx].lower()]
cnt += 1
caption_idx += 1
if caption_idx > 4:
break
tensor_t = torch.fromNumpyArray(cur_cap)
torch.save(th_save_file, tensor_t)
np.save(save_file, cur_cap)
|
prepro/prepro_atis.py
|
SeonjeongHwang/xlnet_extension_cuda11
| 137 |
126724
|
<gh_stars>100-1000
import argparse
import json
import os.path
import uuid
def add_arguments(parser):
parser.add_argument("--input_dir", help="input data directory", required=True)
parser.add_argument("--output_dir", help="output data directory", required=True)
def preprocess(input_dir,
output_dir):
if not os.path.exists(input_dir):
raise FileNotFoundError("directory not found")
word_vocab_file = os.path.join(input_dir, "atis.dict.vocab.csv")
word_vocab_list = read_text(word_vocab_file)
intent_label_file = os.path.join(input_dir, "atis.dict.intent.csv")
intent_label_list = read_text(intent_label_file)
slot_label_file = os.path.join(input_dir, "atis.dict.slots.csv")
slot_label_list = read_text(slot_label_file)
train_query_file = os.path.join(input_dir, "atis.train.query.csv")
train_query_list = read_text(train_query_file)
train_intent_file = os.path.join(input_dir, "atis.train.intent.csv")
train_intent_list = read_text(train_intent_file)
train_slot_file = os.path.join(input_dir, "atis.train.slots.csv")
train_slot_list = read_text(train_slot_file)
train_raw_list = zip(train_query_list, train_intent_list, train_slot_list)
train_processed_list = []
for query_id, intent_id, slot_id in train_raw_list:
train_data = {
"id": str(uuid.uuid4()),
"text": " ".join([word_vocab_list[int(token_vocab_id)] for token_vocab_id in query_id.split(' ')[1:-1]]),
"token_label": " ".join([slot_label_list[int(token_slot_id)] for token_slot_id in slot_id.split(' ')[1:-1]]),
"sent_label": intent_label_list[int(intent_id)],
}
train_processed_list.append(train_data)
train_file = os.path.join(output_dir, "train-atis.json")
save_json(train_processed_list, train_file)
test_query_file = os.path.join(input_dir, "atis.test.query.csv")
test_query_list = read_text(test_query_file)
test_intent_file = os.path.join(input_dir, "atis.test.intent.csv")
test_intent_list = read_text(test_intent_file)
test_slot_file = os.path.join(input_dir, "atis.test.slots.csv")
test_slot_list = read_text(test_slot_file)
test_raw_list = zip(test_query_list, test_intent_list, test_slot_list)
test_processed_list = []
for query_id, intent_id, slot_id in test_raw_list:
test_data = {
"id": str(uuid.uuid4()),
"text": " ".join([word_vocab_list[int(token_vocab_id)] for token_vocab_id in query_id.split(' ')[1:-1]]),
"token_label": " ".join([slot_label_list[int(token_slot_id)] for token_slot_id in slot_id.split(' ')[1:-1]]),
"sent_label": intent_label_list[int(intent_id)]
}
test_processed_list.append(test_data)
test_file = os.path.join(output_dir, "test-atis.json")
save_json(test_processed_list, test_file)
def read_text(data_path):
if os.path.exists(data_path):
with open(data_path, "r") as file:
return [line.rstrip('\n') for line in file]
else:
raise FileNotFoundError("input file not found")
def read_json(data_path):
if os.path.exists(data_path):
with open(data_path, "r") as file:
return json.load(file)
else:
raise FileNotFoundError("input file not found")
def save_text(data_list,
data_path):
data_folder = os.path.dirname(data_path)
if not os.path.exists(data_folder):
os.mkdir(data_folder)
with open(data_path, "w") as file:
for data in data_list:
file.write("{0}\n".format(data))
def save_json(data_list,
data_path):
data_folder = os.path.dirname(data_path)
if not os.path.exists(data_folder):
os.mkdir(data_folder)
with open(data_path, "w") as file:
json.dump(data_list, file, indent=4)
def main(args):
preprocess(args.input_dir, args.output_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
main(args)
|
scripts/cmt/test/__init__.py
|
kamilisa/cmt
| 199 |
126749
|
<gh_stars>100-1000
"""
CMT Unit Test framework.
"""
from cmt.test.mayaunittest import TestCase, run_tests
__all__ = ["TestCase", "run_tests"]
|
samples/copy_feature_service_deforgorg.py
|
kevinsigwart/ArcREST
| 208 |
126765
|
<filename>samples/copy_feature_service_deforgorg.py
"""
This sample shows how to copy a feature service
"""
import arcrest
import tempfile
import os
import uuid
import json
from arcresthelper import securityhandlerhelper
def trace():
"""
trace finds the line, the filename
and error message and returns it
to the user
"""
import traceback, inspect, sys
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
filename = inspect.getfile(inspect.currentframe())
# script name + line number
line = tbinfo.split(", ")[1]
# Get Python syntax error
#
synerror = traceback.format_exc().splitlines()[-1]
return line, filename, synerror
def main():
proxy_port = None
proxy_url = None
# Info for the organization that currently houses the item.
securityinfoSource = {}
securityinfoSource['security_type'] = 'Portal'#LDAP, NTLM, OAuth, Portal, PKI
securityinfoSource['username'] = ""
securityinfoSource['password'] = ""
securityinfoSource['org_url'] = "http://www.arcgis.com"
securityinfoSource['proxy_url'] = proxy_url
securityinfoSource['proxy_port'] = proxy_port
securityinfoSource['referer_url'] = None
securityinfoSource['token_url'] = None
securityinfoSource['certificatefile'] = None
securityinfoSource['keyfile'] = None
securityinfoSource['client_id'] = None
securityinfoSource['secret_id'] = None
# Info for the organization that the item will be copied to.
securityinfoTarget = {}
securityinfoTarget['security_type'] = 'Portal'#LDAP, NTLM, OAuth, Portal, PKI
securityinfoTarget['username'] = ""
securityinfoTarget['password'] = ""
securityinfoTarget['org_url'] = "http://www.arcgis.com"
securityinfoTarget['proxy_url'] = proxy_url
securityinfoTarget['proxy_port'] = proxy_port
securityinfoTarget['referer_url'] = None
securityinfoTarget['token_url'] = None
securityinfoTarget['certificatefile'] = None
securityinfoTarget['keyfile'] = None
securityinfoTarget['client_id'] = None
securityinfoTarget['secret_id'] = None
itemId = ""#Item ID of item to copy
new_service_name = "" #name of new item
try:
shhSource = securityhandlerhelper.securityhandlerhelper(securityinfoSource)
shhTarget = securityhandlerhelper.securityhandlerhelper(securityinfoTarget)
if shhSource.valid == False or shhTarget.valid == False:
print(shhSource.message + " " + shhTarget.message)
else:
adminSource = arcrest.manageorg.Administration(securityHandler=shhSource.securityhandler)
adminTarget = arcrest.manageorg.Administration(securityHandler=shhTarget.securityhandler)
portalSource = adminSource.portals.portalSelf
portalTarget = adminTarget.portals.portalSelf
res = portalTarget.isServiceNameAvailable(name=new_service_name,
serviceType='Feature Service')
if 'available' in res:
if res['available'] == False:
print "Pick a new name"
return
else:
print "Pick a new name"
return
itemSource = adminSource.content.getItem(itemId)
fs = arcrest.agol.FeatureService(url=itemSource.url, securityHandler=shhSource.securityhandler,
initialize=True,
proxy_url=None,
proxy_port=None)
wkid = None
wkt = None
if 'wkid' in fs.initialExtent['spatialReference']:
wkid = fs.initialExtent['spatialReference']['wkid']
else:
wkt = fs.initialExtent['spatialReference']['wkt']
if fs.xssPreventionInfo is not None:
xssPreventionEnabled = fs.xssPreventionInfo['xssPreventionEnabled']
xssPreventionRule = fs.xssPreventionInfo['xssPreventionRule']
xssInputRule = fs.xssPreventionInfo['xssInputRule']
else:
xssPreventionEnabled = None
xssPreventionRule = None
xssInputRule = None
#Edit tracking needs to be turned off when item is created
enableEditorTracking = False
enableOwnershipAccessControl = False
allowOthersToUpdate = False
allowOthersToDelete = False
if fs.syncCapabilities is not None:
supportsAsync = fs.syncCapabilities['supportsAsync']
supportsRegisteringExistingData = fs.syncCapabilities['supportsRegisteringExistingData']
supportsSyncDirectionControl = fs.syncCapabilities['supportsSyncDirectionControl']
supportsPerLayerSync = fs.syncCapabilities['supportsPerLayerSync']
supportsPerReplicaSync = fs.syncCapabilities['supportsPerReplicaSync']
supportsRollbackOnFailure = fs.syncCapabilities['supportsRollbackOnFailure']
else:
supportsAsync = None
supportsRegisteringExistingData = None
supportsSyncDirectionControl = None
supportsPerLayerSync = None
supportsPerReplicaSync = None
supportsRollbackOnFailure = None
createSerParams = arcrest.manageorg.CreateServiceParameters(
name=new_service_name,
spatialReference=arcrest.geometry.SpatialReference(wkid=wkid, wkt=wkt),
serviceDescription=fs.serviceDescription,
hasStaticData=fs.hasStaticData,
maxRecordCount=fs.maxRecordCount,
supportedQueryFormats=fs.supportedQueryFormats,
capabilities=fs.capabilities,
description=fs.description,
copyrightText=fs.copyrightText,
initialExtent=arcrest.geometry.Envelope(
xmin=fs.initialExtent['xmin'],
ymin=fs.initialExtent['ymin'],
xmax=fs.initialExtent['xmax'],
ymax=fs.initialExtent['ymax'],
wkid=wkid,
wkt=wkt),
allowGeometryUpdates=fs.allowGeometryUpdates,
units=fs.units,
xssPreventionEnabled=xssPreventionEnabled,
xssPreventionRule=xssPreventionRule,
xssInputRule=xssInputRule,
currentVersion=fs.currentVersion,
enableEditorTracking = enableEditorTracking,
enableOwnershipAccessControl = enableOwnershipAccessControl,
allowOthersToUpdate = allowOthersToUpdate,
allowOthersToDelete = allowOthersToDelete,
supportsAsync = supportsAsync,
supportsRegisteringExistingData = supportsRegisteringExistingData,
supportsSyncDirectionControl = supportsSyncDirectionControl,
supportsPerLayerSync = supportsPerLayerSync,
supportsPerReplicaSync = supportsPerReplicaSync,
supportsRollbackOnFailure = supportsRollbackOnFailure,
hasVersionedData = fs.hasVersionedData,
supportsDisconnectedEditing = fs.supportsDisconnectedEditing,
size =fs.size,
syncEnabled =fs.syncEnabled
)
userTarget = adminTarget.content.users.user()
newServiceResult = userTarget.createService(createServiceParameter=createSerParams)
print(newServiceResult)
item = adminTarget.content.getItem(itemId=newServiceResult.id).userItem
params = arcrest.manageorg.ItemParameter()
params.title = new_service_name
params.licenseInfo = itemSource.licenseInfo
params.description = itemSource.description
params.snippet = itemSource.snippet
params.tags = itemSource.tags
params.accessInformation = itemSource.accessInformation
params.extent = itemSource.extent
params.spatialReference = itemSource.spatialReference
tempDir = tempfile.gettempdir()
filename = new_service_name #"%s" % uuid.uuid4().get_hex()
tempFile = itemSource.saveThumbnail(fileName = filename, filePath= tempDir)
params.thumbnail = tempFile
updateItemResults = item.updateItem(itemParameters=params,
clearEmptyFields=True,
data=None,
metadata=None,
text=None)
print(updateItemResults)
if itemSource.protected:
print(item.protect())
adminNewFS = arcrest.hostedservice.AdminFeatureService(url=newServiceResult.url, securityHandler=shhTarget.securityhandler)
adminExistFS = fs.administration
jsdic = {}
exJson = adminExistFS.json
jsdic['layers'] = exJson['layers']
if 'tables' in exJson:
jsdic['tables'] = exJson['tables']
else:
jsdic['tables'] = []
for k in jsdic['layers']:
k['spatialReference'] = {}
if wkt is not None:
k['spatialReference']['wkt'] = wkt
if wkid is not None:
k['spatialReference']['wkid'] = wkid
if 'adminLayerInfo' in k:
if 'tableName' in k['adminLayerInfo']:
k['adminLayerInfo'].pop('tableName',None)
for k in jsdic['tables']:
if 'adminLayerInfo' in k:
if 'tableName' in k['adminLayerInfo']:
k['adminLayerInfo'].pop('tableName',None)
res=adminNewFS.addToDefinition(json_dict=jsdic)
print(res)
if fs.editorTrackingInfo is not None:
json_dict = {'editorTrackingInfo':{}}
json_dict['editorTrackingInfo']['enableEditorTracking'] = fs.editorTrackingInfo['enableEditorTracking']
json_dict['editorTrackingInfo']['allowOthersToDelete'] = fs.editorTrackingInfo['allowOthersToDelete']
json_dict['editorTrackingInfo']['allowOthersToUpdate'] = fs.editorTrackingInfo['allowOthersToUpdate']
json_dict['editorTrackingInfo']['enableOwnershipAccessControl'] = fs.editorTrackingInfo['enableOwnershipAccessControl']
res = adminNewFS.updateDefinition(json_dict=json_dict)
print(res)
except:
line, filename, synerror = trace()
print("error on line: %s" % line)
print("error in file name: %s" % filename)
print("with error message: %s" % synerror)
if __name__ == "__main__":
main()
|
mmrotate/core/visualization/__init__.py
|
liuyanyi/mmrotate
| 449 |
126777
|
<gh_stars>100-1000
# Copyright (c) OpenMMLab. All rights reserved.
from .image import imshow_det_rbboxes
from .palette import get_palette
__all__ = ['imshow_det_rbboxes', 'get_palette']
|
examples/synthetic/borehole_constrained/in_code_demo.py
|
hase1128/dragonfly
| 675 |
126793
|
<reponame>hase1128/dragonfly
"""
In code demo for borehole_constrained example.
-- <EMAIL>
"""
from __future__ import print_function
from dragonfly import load_config, maximise_function, maximise_multifidelity_function
# From current directory
from borehole_constrained import objective
from borehole_constrained_mf import objective as mf_objective
from borehole_constrained_mf import cost as mf_cost
def main():
""" Main function. """
# First Specify all parameters
domain_vars = [{'name': 'rw', 'type': 'float', 'min': 0.05, 'max': 0.15, 'dim': 1},
{'name': 'L_Kw', 'type': 'float', 'min': 0, 'max': 1, 'dim': 2},
{'name': 'Tu', 'type': 'int', 'min': 63070, 'max': 115600, 'dim': ''},
{'name': 'Tl', 'type': 'float', 'min': 63.1, 'max': 116},
{'name': 'Hu_Hl', 'type': 'int', 'min': 0, 'max': 240, 'dim': 2},
{'name': 'r', 'type': 'float', 'min': 100, 'max': 50000},
]
domain_constraints = [{'constraint': 'np.sqrt(rw[0]) + L_Kw[1] <= 0.9'},
{'constraint': 'r/100.0 + Hu_Hl[1] < 200'}
]
fidel_vars = [{'name': 'fidel_0', 'type': 'float', 'min': 0.05, 'max': 0.25},
{'name': 'fidel_1', 'type': 'discrete_numeric', 'items': "0.1:0.05:1.01"},
]
fidel_space_constraints = [
{'name': 'fsc1', 'constraint': 'fidel_0 + fidel_1 <= 0.9'}
]
fidel_to_opt = [0.1, 0.75]
# Budget of evaluations
max_num_evals = 100 # Optimisation budget (max number of evaluations)
max_mf_capital = max_num_evals * mf_cost(fidel_to_opt) # Multi-fideltiy capital
# First do the MF version
config_params = {'domain': domain_vars, 'fidel_space': fidel_vars,
'domain_constraints': domain_constraints,
'fidel_space_constraints': fidel_space_constraints,
'fidel_to_opt': fidel_to_opt}
config = load_config(config_params)
# Optimise
mf_opt_pt, mf_opt_val, history = maximise_multifidelity_function(mf_objective,
config.fidel_space, config.domain,
config.fidel_to_opt, mf_cost,
max_mf_capital, config=config)
print(mf_opt_pt, mf_opt_val)
# Non-MF version
config_params = {'domain': domain_vars, 'domain_constraints': domain_constraints}
config = load_config(config_params)
max_capital = 100 # Optimisation budget (max number of evaluations)
# Optimise
opt_pt, opt_val, history = maximise_function(objective, config.domain,
max_capital, config=config)
print(opt_pt, opt_val)
if __name__ == '__main__':
main()
|
src/states/menu.py
|
AnonVodka/VALORANT-rank-yoinker
| 107 |
126814
|
<reponame>AnonVodka/VALORANT-rank-yoinker
class Menu:
def __init__(self, Requests, log, presences):
self.Requests = Requests
self.log = log
self.presences = presences
def get_party_json(self, GamePlayersPuuid, presencesDICT):
party_json = {}
for presence in presencesDICT:
if presence["puuid"] in GamePlayersPuuid:
decodedPresence = self.presences.decode_presence(presence["private"])
if decodedPresence["isValid"]:
if decodedPresence["partySize"] > 1:
try:
party_json[decodedPresence["partyId"]].append(presence["puuid"])
except KeyError:
party_json.update({decodedPresence["partyId"]: [presence["puuid"]]})
self.log(f"retrieved party json: {party_json}")
return party_json
def get_party_members(self, self_puuid, presencesDICT):
res = []
for presence in presencesDICT:
if presence["puuid"] == self_puuid:
decodedPresence = self.presences.decode_presence(presence["private"])
if decodedPresence["isValid"]:
party_id = decodedPresence["partyId"]
res.append({"Subject": presence["puuid"], "PlayerIdentity": {"AccountLevel":
decodedPresence["accountLevel"]}})
for presence in presencesDICT:
decodedPresence = self.presences.decode_presence(presence["private"])
if decodedPresence["isValid"]:
if decodedPresence["partyId"] == party_id and presence["puuid"] != self_puuid:
res.append({"Subject": presence["puuid"], "PlayerIdentity": {"AccountLevel":
decodedPresence["accountLevel"]}})
self.log(f"retrieved party members: {res}")
return res
|
loudml/loudml/schemas.py
|
jkbrandt/loudml
| 245 |
126823
|
<gh_stars>100-1000
"""
Common schemas for user input validation
"""
import loudml.errors
from voluptuous import (
All,
Any,
Boolean,
Invalid,
Length,
Match,
message,
Required,
Optional,
Range,
Schema,
)
import voluptuous as vol
from urllib.parse import urlparse
from .misc import (
make_ts,
parse_timedelta,
)
key = All(
str,
Length(min=1),
Match("^[a-zA-Z0-9-_@]+$"),
)
time_str_key = All(
str,
Length(min=1),
Match("^[:0-9]+$"),
)
dotted_key = All(
str,
Length(min=1),
Match("^[a-zA-Z0-9-_@.]+$"),
)
bracket_key = All(
str,
Length(min=1),
Match("^{{[a-zA-Z0-9-_@.]+}}$"),
)
seasonality = Schema({
Optional('daytime', default=False): Boolean(),
Optional('weekday', default=False): Boolean(),
})
score = Any(All(Any(int, float), Range(min=0, max=100)), None)
class Url:
"""Validate an URL."""
def __init__(self, **kwargs):
self._kwargs = kwargs
def __call__(self, v):
url_in = str(v)
res = urlparse(url_in)
if len(res.fragment) or len(res.query) or len(res.scheme):
raise vol.Invalid(
'You have attempted to access a restricted URL, the URL contains invalid data.') # noqa
if not len(res.path) or res.path[0] != '/':
raise vol.Invalid(
'You have attempted to access a restricted URL, the URL contains invalid path.') # noqa
return res.path
ScheduledJob = Schema({
Required('name'): All(str, Length(max=256)),
Required('method'): Any('head', 'get', 'post', 'patch', 'delete'),
Required('relative_url'): All(str, Url()),
Optional('params'): Schema({str: Any(int, float, str, bool)}),
Optional('json'): Schema({str: Any(int, float, str, bool)}),
Required('every'): Schema({
Required('count'): Any(int, float),
Required('unit'): Any(
'second',
'seconds',
'minute',
'minutes',
'hour',
'hours',
'day',
'days',
'week',
'weeks',
'monday',
'tuesday',
'wednesday',
'thursday',
'friday',
'saturday',
'sunday',
),
Optional('at'): All(time_str_key, Length(max=256)),
}),
})
class TimeDelta:
"""
Schema for time-delta
"""
def __init__(self, **kwargs):
self._kwargs = kwargs
def __call__(self, v):
parse_timedelta(v, **self._kwargs)
return v
@message('expected absolute or relative date', cls=Invalid)
def Timestamp(v):
"""
Schema for timestamps
"""
try:
make_ts(v)
except TypeError:
raise ValueError("value expected")
return v
def validate(schema, data, name=None):
"""
Validate data against a schema
"""
try:
return schema(data)
except Invalid as exn:
raise loudml.errors.Invalid(
exn.error_message,
name=name,
path=exn.path,
)
|
networks/emotions_recognition_retail_0003/emotions_recognition_retail_0003.py
|
neuralbotnetworks/ncappzoo
| 968 |
126832
|
#! /usr/bin/env python3
# Copyright(c) 2019 Intel Corporation.
# License: MIT See LICENSE file in root directory.
from argparse import ArgumentParser, SUPPRESS
from openvino.inference_engine import IENetwork, IEPlugin, IECore
import cv2
import logging as log
import numpy as np
import os
import sys
import time
# Specify target device
FRAME_WIDTH = 640
FRAME_HEIGHT = 480
RED_COLOR = (255, 0, 0)
GREEN_COLOR = (50, 255, 50)
DARK_GREEN_COLOR = (10, 150, 50)
YELLOW_COLOR = (50, 255, 255)
def build_argparser():
parser = ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument("-m", "--mirror", action="store_true", help="Flip camera")
args.add_argument("-fps", "--show_fps", action="store_true", help="Show fps information on top of camera view")
args.add_argument("--face_ir", metavar="FACE_DETECTION_IR_File", type=str,
default="/face-detection-retail-0004.xml",
help="Absolute path to the face detection neural network IR file.")
args.add_argument("-emotion_ir", metavar="EMOTION_RECOGNITION_IR_File", type=str,
default="/emotions-recognition-retail-0003.xml",
help="Absolute path to the emotion detection neural network IR file.")
return parser
def main():
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
face_model_xml = os.getcwd() + args.face_ir
face_model_bin = os.path.splitext(face_model_xml)[0] + ".bin"
emotions_model_xml = os.getcwd() + args.emotion_ir
emotions_model_bin = os.path.splitext(emotions_model_xml)[0] + ".bin"
device = "MYRIAD"
fps = ""
camera_id = 0
emotionLabel = ['Neutral', 'Happy', 'Sad', 'Surprise', 'Anger']
cap = cv2.VideoCapture(camera_id)
log.info("Loading Camera id {}".format(camera_id))
# Read IR - face detection
face_net = IENetwork(model=face_model_xml, weights=face_model_bin)
log.info("Face-Detection network has been loaded:\n\t{}\n\t{}".format(face_model_xml, face_model_bin))
# Read IR - emotions recognition
emotion_net = IENetwork(model=emotions_model_xml, weights=emotions_model_bin)
log.info("Emotions-Recognition network has been loaded:\n\t{}\n\t{}".format(emotions_model_xml, emotions_model_bin))
log.info("Setting device: {}".format(device))
plugin = IEPlugin(device=device)
log.info("Loading Face-Detection model to the plugin")
face_exec_net = plugin.load(network=face_net)
# Set configurations for face detection
face_input_blob = next(iter(face_net.inputs))
face_out_blob = next(iter(face_net.outputs))
log.info("Loading Emotions-Recognition model to the plugin")
emotion_exec_net = plugin.load(network=emotion_net)
# Set configurations for emotion detection
emotion_input_blob = next(iter(emotion_net.inputs))
emotion_out_blob = next(iter(emotion_net.outputs))
if args.mirror:
log.info("Using camera mirror")
log.info("emotions-recognition-retail sample is starting...")
while cap.isOpened():
t1 = time.time()
ret_val, img = cap.read()
if not ret_val:
break
if args.mirror:
img = cv2.flip(img, 1)
prepimg = cv2.resize(img, (300, 300))
prepimg = prepimg[np.newaxis, :, :, :]
prepimg = prepimg.transpose((0, 3, 1, 2))
face_outputs = face_exec_net.infer(inputs={face_input_blob: prepimg})
res = face_exec_net.requests[0].outputs[face_out_blob]
for detection in res[0][0]:
confidence = float(detection[2])
xmin = int(detection[3] * img.shape[1])
ymin = int(detection[4] * img.shape[0])
xmax = int(detection[5] * img.shape[1])
ymax = int(detection[6] * img.shape[0])
if confidence > 0.7:
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color=GREEN_COLOR)
if ymin >= 64 and ymax >= 64:
emoimg = img[ymin:ymax, xmin:xmax]
emoimg = cv2.resize(emoimg, (64, 64))
emoimg = emoimg.transpose((2, 0, 1))
emoimg = emoimg.reshape(1, 3, 64, 64)
emotion_outputs = emotion_exec_net.infer(inputs={emotion_input_blob: emoimg})
res = emotion_exec_net.requests[0].outputs[emotion_out_blob]
out_emotion_reshape = res.reshape(-1, 5)
emotion_text = emotionLabel[np.argmax(out_emotion_reshape)]
cv2.putText(img, emotion_text, (abs(xmin), abs(ymin - 10)), cv2.FONT_HERSHEY_DUPLEX, 0.7, (50, 255, 255), 1, 1)
if args.show_fps:
elapsed_time = time.time() - t1
fps = "(Playback) {:.1f} FPS".format(1 / elapsed_time)
cv2.putText(img, fps, (15, FRAME_HEIGHT - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.4, YELLOW_COLOR, 1, cv2.LINE_AA)
cv2.putText(img, "Hit 'ESC' or 'q' to Exit", (FRAME_WIDTH - 150, FRAME_HEIGHT - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.4, YELLOW_COLOR, 1, cv2.LINE_AA)
cv2.imshow('emotions-recognition-retail sample', img)
waitkey = cv2.waitKey(1)
if waitkey & 0xFF == ord('q') or waitkey == 27:
break # esc or 'q' to quit
cv2.destroyAllWindows()
if __name__ == '__main__':
sys.exit(main() or 0)
|
examples/pyramid_backbone_redis_chat_persistence/chatter4/__init__.py
|
benthomasson/gevent-socketio
| 625 |
126866
|
#!/usr/bin/env python
from sqlalchemy import engine_from_config
from pyramid.config import Configurator
from chatter4.models import DBSession
from chatter4.views import socketio_service
from chatter4.views import index
from chatter4.views import get_log
def simple_route(config, name, url, fn, renderer=None):
if not renderer:
renderer = "chatter4:templates/%s.mako" % name
config.add_route(name, url)
config.add_view(fn, route_name=name, renderer=renderer)
def main(global_config, **settings):
config = Configurator()
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
simple_route(config, 'index', '/', index)
simple_route(config, 'get_log', '/get_log', get_log, renderer='json')
simple_route(config, 'socket_io', 'socket.io/*remaining', socketio_service)
config.add_static_view('static', 'static', cache_max_age=3600)
app = config.make_wsgi_app()
return app
|
docs_src/path_params/tutorial003.py
|
Aryabhata-Rootspring/fastapi
| 53,007 |
126913
|
<reponame>Aryabhata-Rootspring/fastapi
from fastapi import FastAPI
app = FastAPI()
@app.get("/users/me")
async def read_user_me():
return {"user_id": "the current user"}
@app.get("/users/{user_id}")
async def read_user(user_id: str):
return {"user_id": user_id}
|
setup.py
|
adamchainz/ec2_metadata
| 284 |
126971
|
<gh_stars>100-1000
from __future__ import annotations
from setuptools import setup
setup()
|
TopQuarkAnalysis/TopEventProducers/python/producers/TtFullLepEvtFilter_cfi.py
|
ckamtsikis/cmssw
| 852 |
126976
|
<reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
#
# module to filter events based on member functions of the TtFullLeptonicEvent
#
ttFullLepEventFilter = cms.EDFilter("TtFullLepEvtFilter",
src = cms.InputTag("ttFullLepEvent"),
cut = cms.string("isHypoValid('kGenMatch') & genMatchSumDR < 999.")
)
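# Illustrative usage sketch (assumed example, not part of this fragment): the filter
# is typically cloned with a custom cut and scheduled in a path, e.g.
#
#   process.ttFullLepEventFilter = ttFullLepEventFilter.clone(
#       cut = "isHypoValid('kGenMatch') & genMatchSumDR < 1."
#   )
#   process.p = cms.Path(process.ttFullLepEventFilter)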
|
tutorials/W0D5_Statistics/solutions/W0D5_Tutorial2_Solution_aba8ef46.py
|
eduardojdiniz/CompNeuro
| 2,294 |
126978
|
<filename>tutorials/W0D5_Statistics/solutions/W0D5_Tutorial2_Solution_aba8ef46.py
"""
Remember that joint probability can generally be expressed as 𝑃(𝑎,𝑏)=𝑃(𝑎|𝑏)𝑃(𝑏)
𝑃(ℎ+,𝑣+)=𝑃(ℎ+|𝑣+)𝑃(𝑣+)=0.1∗0.3=0.03
"""
|
tests/export/html/test_drawing_graphic_blip.py
|
botzill/pydocx
| 127 |
126988
|
# coding: utf-8
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
import base64
from pydocx.constants import EMUS_PER_PIXEL
from pydocx.openxml.packaging import ImagePart, MainDocumentPart
from pydocx.test import DocumentGeneratorTestCase
from pydocx.test.utils import WordprocessingDocumentFactory
class DrawingGraphicBlipTestCase(DocumentGeneratorTestCase):
def test_inline_image_with_multiple_ext_definitions(self):
# Ensure that the image size can be calculated correctly even if the
# image size ext isn't the first ext in the drawing node
width_px = 5
height_px = 10
document_xml = '''
<p>
<r>
<t>Foo</t>
<drawing>
<inline>
<graphic>
<graphicData>
<pic>
<blipFill>
<blip embed="foobar">
<extLst>
<ext/>
</extLst>
</blip>
</blipFill>
<spPr>
<xfrm>
<ext cx="{cx}" cy="{cy}"/>
</xfrm>
</spPr>
</pic>
</graphicData>
</graphic>
</inline>
</drawing>
<t>Bar</t>
</r>
</p>
'''.format(
cx=width_px * EMUS_PER_PIXEL,
cy=height_px * EMUS_PER_PIXEL,
)
document = WordprocessingDocumentFactory()
image_url = 'http://google.com/image1.gif'
document_rels = document.relationship_format.format(
id='foobar',
type=ImagePart.relationship_type,
target=image_url,
target_mode='External',
)
document.add(MainDocumentPart, document_xml, document_rels)
expected_html = '''
<p>
Foo
<img
height="{height}px"
src="http://google.com/image1.gif"
width="{width}px"
/>
Bar
</p>
'''.format(width=width_px, height=height_px)
self.assert_document_generates_html(document, expected_html)
def test_anchor_with_multiple_ext_definitions(self):
width_px = 5
height_px = 10
# Ensure that the image size can be calculated correctly even if the
# image size ext isn't the first ext in the drawing node
document_xml = '''
<p>
<r>
<t>Foo</t>
<drawing>
<anchor>
<graphic>
<graphicData>
<pic>
<blipFill>
<blip embed="foobar">
<extLst>
<ext/>
</extLst>
</blip>
</blipFill>
<spPr>
<xfrm>
<ext cx="{cx}" cy="{cy}"/>
</xfrm>
</spPr>
</pic>
</graphicData>
</graphic>
</anchor>
</drawing>
<t>Bar</t>
</r>
</p>
'''.format(
cx=width_px * EMUS_PER_PIXEL,
cy=height_px * EMUS_PER_PIXEL,
)
document = WordprocessingDocumentFactory()
image_url = 'http://google.com/image1.gif'
document_rels = document.relationship_format.format(
id='foobar',
type=ImagePart.relationship_type,
target=image_url,
target_mode='External',
)
document.add(MainDocumentPart, document_xml, document_rels)
expected_html = '''
<p>
Foo
<img
height="{height}px"
src="http://google.com/image1.gif"
width="{width}px"
/>
Bar
</p>
'''.format(width=width_px, height=height_px)
self.assert_document_generates_html(document, expected_html)
def test_anchor_with_no_size_ext(self):
# Ensure the image html is still rendered even if the size cannot be
# calculated
document_xml = '''
<p>
<r>
<t>Foo</t>
<drawing>
<anchor>
<graphic>
<graphicData>
<pic>
<blipFill>
<blip embed="foobar"/>
</blipFill>
<spPr>
<xfrm/>
</spPr>
</pic>
</graphicData>
</graphic>
</anchor>
</drawing>
<t>Bar</t>
</r>
</p>
'''
document = WordprocessingDocumentFactory()
image_url = 'http://google.com/image1.gif'
document_rels = document.relationship_format.format(
id='foobar',
type=ImagePart.relationship_type,
target=image_url,
target_mode='External',
)
document.add(MainDocumentPart, document_xml, document_rels)
expected_html = '''
<p>
Foo
<img src="http://google.com/image1.gif" />
Bar
</p>
'''
self.assert_document_generates_html(document, expected_html)
def test_blip_embed_refers_to_undefined_image_relationship(self):
# Ensure that if a blip embed refers to an undefined image
        # relationship, the image rendering is skipped
document_xml = '''
<p>
<r>
<t>Foo</t>
<drawing>
<anchor>
<graphic>
<graphicData>
<pic>
<blipFill>
<blip embed="foobar" />
</blipFill>
</pic>
</graphicData>
</graphic>
</anchor>
</drawing>
<t>Bar</t>
</r>
</p>
'''
document = WordprocessingDocumentFactory()
document.add(MainDocumentPart, document_xml)
expected_html = '<p>FooBar</p>'
self.assert_document_generates_html(document, expected_html)
def test_internal_image_is_included_with_base64_content(self):
width_px = 5
height_px = 10
document_xml = '''
<p>
<r>
<t>Foo</t>
<drawing>
<anchor>
<graphic>
<graphicData>
<pic>
<blipFill>
<blip embed="foobar" />
</blipFill>
<spPr>
<xfrm>
<ext cx="{cx}" cy="{cy}"/>
</xfrm>
</spPr>
</pic>
</graphicData>
</graphic>
</anchor>
</drawing>
<t>Bar</t>
</r>
</p>
'''.format(
cx=width_px * EMUS_PER_PIXEL,
cy=height_px * EMUS_PER_PIXEL,
)
document = WordprocessingDocumentFactory()
document_rels = document.relationship_format.format(
id='foobar',
type=ImagePart.relationship_type,
target='media/image1.jpeg',
target_mode='Internal',
)
document.add(MainDocumentPart, document_xml, document_rels)
image_data = 'fake data'
expected_html = '''
<p>
Foo
<img
height="{height}px"
src="data:image/jpeg;base64,{data}"
width="{width}px"
/>
Bar
</p>
'''.format(
width=width_px,
height=height_px,
# This is kind of weird, needed otherwise python 3.3 breaks
data=base64.b64encode(image_data.encode('utf-8')).decode('utf-8'),
)
self.assert_document_generates_html(
document,
expected_html,
additional_parts={
'word/media/image1.jpeg': image_data,
},
)
def test_internal_image_is_not_included_if_part_is_missing(self):
width_px = 5
height_px = 10
document_xml = '''
<p>
<r>
<t>Foo</t>
<drawing>
<anchor>
<graphic>
<graphicData>
<pic>
<blipFill>
<blip embed="foobar" />
</blipFill>
<spPr>
<xfrm>
<ext cx="{cx}" cy="{cy}"/>
</xfrm>
</spPr>
</pic>
</graphicData>
</graphic>
</anchor>
</drawing>
<t>Bar</t>
</r>
</p>
'''.format(
cx=width_px * EMUS_PER_PIXEL,
cy=height_px * EMUS_PER_PIXEL,
)
document = WordprocessingDocumentFactory()
document_rels = document.relationship_format.format(
id='foobar',
type=ImagePart.relationship_type,
target='media/image1.jpeg',
target_mode='Internal',
)
document.add(MainDocumentPart, document_xml, document_rels)
expected_html = '<p>FooBar</p>'
self.assert_document_generates_html(
document,
expected_html,
additional_parts={
# Purposefully commented out
# 'word/media/image1.jpeg': '',
},
)
|
mode/examples/Topics/Vectors/AccelerationWithVectors/AccelerationWithVectors.pyde
|
timgates42/processing.py
| 1,224 |
126994
|
"""
Acceleration with Vectors
by <NAME>.
Demonstration of the basics of motion with vector.
A "Mover" object stores location, velocity, and acceleration as vectors The
motion is controlled by affecting the acceleration (in this case towards the
mouse)
For more examples of simulating motion and physics with vectors, see
Simulate/ForcesWithVectors, Simulate/GravitationalAttraction3D
"""
from mover import Mover
def setup():
global mover
size(640, 360)
mover = Mover()
def draw():
background(0)
# Update the location
mover.update()
# Display the Mover
mover.display()
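# Minimal sketch of the update() logic assumed by this example; the actual
# implementation lives in mover.py (not shown here) and the names below are
# illustrative only:
#
#   acceleration = PVector(mouseX, mouseY) - self.location  # point at the mouse
#   acceleration.setMag(0.2)                                 # fixed magnitude
#   self.velocity += acceleration
#   self.velocity.limit(self.top_speed)                      # cap the speed
#   self.location += self.velocity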
|
deep-rl/lib/python2.7/site-packages/OpenGL/raw/GL/SUN/triangle_list.py
|
ShujaKhalid/deep-rl
| 210 |
127021
|
<reponame>ShujaKhalid/deep-rl
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_SUN_triangle_list'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_SUN_triangle_list',error_checker=_errors._error_checker)
GL_R1UI_C3F_V3F_SUN=_C('GL_R1UI_C3F_V3F_SUN',0x85C6)
GL_R1UI_C4F_N3F_V3F_SUN=_C('GL_R1UI_C4F_N3F_V3F_SUN',0x85C8)
GL_R1UI_C4UB_V3F_SUN=_C('GL_R1UI_C4UB_V3F_SUN',0x85C5)
GL_R1UI_N3F_V3F_SUN=_C('GL_R1UI_N3F_V3F_SUN',0x85C7)
GL_R1UI_T2F_C4F_N3F_V3F_SUN=_C('GL_R1UI_T2F_C4F_N3F_V3F_SUN',0x85CB)
GL_R1UI_T2F_N3F_V3F_SUN=_C('GL_R1UI_T2F_N3F_V3F_SUN',0x85CA)
GL_R1UI_T2F_V3F_SUN=_C('GL_R1UI_T2F_V3F_SUN',0x85C9)
GL_R1UI_V3F_SUN=_C('GL_R1UI_V3F_SUN',0x85C4)
GL_REPLACEMENT_CODE_ARRAY_POINTER_SUN=_C('GL_REPLACEMENT_CODE_ARRAY_POINTER_SUN',0x85C3)
GL_REPLACEMENT_CODE_ARRAY_STRIDE_SUN=_C('GL_REPLACEMENT_CODE_ARRAY_STRIDE_SUN',0x85C2)
GL_REPLACEMENT_CODE_ARRAY_SUN=_C('GL_REPLACEMENT_CODE_ARRAY_SUN',0x85C0)
GL_REPLACEMENT_CODE_ARRAY_TYPE_SUN=_C('GL_REPLACEMENT_CODE_ARRAY_TYPE_SUN',0x85C1)
GL_REPLACEMENT_CODE_SUN=_C('GL_REPLACEMENT_CODE_SUN',0x81D8)
GL_REPLACE_MIDDLE_SUN=_C('GL_REPLACE_MIDDLE_SUN',0x0002)
GL_REPLACE_OLDEST_SUN=_C('GL_REPLACE_OLDEST_SUN',0x0003)
GL_RESTART_SUN=_C('GL_RESTART_SUN',0x0001)
GL_TRIANGLE_LIST_SUN=_C('GL_TRIANGLE_LIST_SUN',0x81D7)
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,arrays.GLvoidpArray)
def glReplacementCodePointerSUN(type,stride,pointer):pass
@_f
@_p.types(None,_cs.GLubyte)
def glReplacementCodeubSUN(code):pass
@_f
@_p.types(None,arrays.GLubyteArray)
def glReplacementCodeubvSUN(code):pass
@_f
@_p.types(None,_cs.GLuint)
def glReplacementCodeuiSUN(code):pass
@_f
@_p.types(None,arrays.GLuintArray)
def glReplacementCodeuivSUN(code):pass
@_f
@_p.types(None,_cs.GLushort)
def glReplacementCodeusSUN(code):pass
@_f
@_p.types(None,arrays.GLushortArray)
def glReplacementCodeusvSUN(code):pass
|
lang-python/src/test/resources/tiny_py_interpreter/tinypy/AST/stmt.py
|
nrs011/steady
| 357 |
127053
|
<filename>lang-python/src/test/resources/tiny_py_interpreter/tinypy/AST/stmt.py
import copy
from enum import Enum
from tinypy.AST.ast import Statement, Expression, MemoryContext
from tinypy.AST.expr import AddOp, SubOp, MultOp, DivOp, ModOp, LshiftOp, RshiftOp, BinOp, UnaryOp, Compare
from tinypy.AST.expr import BitAndOp, BitOrOp, BitXorOp, Name, CallExpr
from tinypy import AST
from tinypy import runtime
"""
# Function definition.
# @name - text name of the function
# @args - list of arguments (just names)
# @body - list of statements, which form functions body
#
# Every function has a name which is written to the outer namespace.
# For top-level function definitions, the outer namespace is the global namespace.
# For nested functions it is the namespace of the enclosing function.
#
# Name scoping is implemented by setting the current namespace during evaluation.
# In practice the new namespace only needs to be set (and the old one restored)
# when evaluating functions, as no other statements introduce scopes; thus a @Name
# expression only has to check a single global variable - the current namespace -
# and function calls switch scopes.
#
# This solution is far from perfect, but it works because there is no need for modules.
# Implementing modules would require giving each @Name node a way to resolve the proper namespace.
"""
class FunctionDef(Statement):
def __init__(self, name:str, args:list, body:list):
super().__init__()
self.name = name
self.args = args
self.body = body
def getNamespace(self) -> runtime.Memory.Namespace:
return runtime.Memory.CurrentNamespace
def eval(self) -> None:
declarationNamespace = self.getNamespace()
def container(*args):
namespace = runtime.Memory.Namespace(outerScope=declarationNamespace)
previousNamespace = runtime.Memory.CurrentNamespace
runtime.Memory.CurrentNamespace = namespace
if len(args) != len(self.args):
message = "%s() takes %d positional arguments but %d were given" % \
(self.name, len(self.args), len(args))
raise runtime.Errors.TypeError(message)
for pair in zip (self.args, args):
namespace.set(name=pair[0], value=pair[1])
returnValue = None
for stmt in self.body:
res = stmt.eval()
if isinstance(res, ControlFlowMark):
if res.type == ControlFlowMark.Type.Return:
if res.toEval != None:
returnValue = res.toEval.eval()
break
runtime.Memory.CurrentNamespace = previousNamespace
return returnValue
# Finally, write the function container to the memory.
# Call to the container will trigger eval of function body
declarationNamespace.set(self.name, container)
return None
"""
# An if statement.
# @test holds a single node, such as a Compare node.
# @body and orelse each hold a list of nodes.
#
# @elif clauses don’t have a special representation in the AST, but rather
# appear as extra If nodes within the orelse section of the previous one.
#
# Optional clauses such as @else are stored as an empty list if they’re not present.
"""
class IfStmt(Statement):
def __init__(self, test, body:[], orelse:[]):
super().__init__()
self.test = test
self.body = body
self.orelse = orelse
def eval(self):
test = self.test.eval()
result = []
for stmt in self.body if (test) else self.orelse:
evalResult = stmt.eval()
if isinstance(evalResult, ControlFlowMark):
if evalResult.type != ControlFlowMark.Type.Pass:
return evalResult
if type(evalResult) is list:
result += evalResult
else:
result.append(evalResult)
return result
"""
# A while statement.
# @test holds a single node, such as a @Compare node.
# @body and @orelse each hold a list of nodes.
#
# @orelse is not used as it is not present in the grammar.
"""
class WhileStmt(Statement):
def __init__(self, test, body:[], orelse:[]):
super().__init__()
self.test = test
self.body = body
def eval(self):
result = []
while self.test.eval():
shouldBreak = False
for stmt in self.body:
evalResult = stmt.eval()
if isinstance(evalResult, ControlFlowMark):
if evalResult.type == ControlFlowMark.Type.Break:
shouldBreak = True
break
elif evalResult.type == ControlFlowMark.Type.Continue:
break
elif evalResult.type == ControlFlowMark.Type.Pass:
pass
elif evalResult.type == ControlFlowMark.Type.Return:
return evalResult
if type(evalResult) is list:
result += evalResult
else:
result.append(evalResult)
if shouldBreak:
break
return result
"""
# A for loop.
# @target holds the variable(s) the loop assigns to, as a single Name, Tuple or List node.
# @iter holds the item to be looped over, again as a single node.
# @body and orelse contain lists of nodes to execute.
#
# @orelse is not used as it is not present in the grammar.
"""
class ForStmt(Statement):
def __init__(self, target, iter, body, orelse=None):
super().__init__()
self.target = target
self.iter = iter
self.body = body
if not isinstance(target, Name):
raise runtime.Errors.SyntaxError("can't assign to literal")
if orelse is not None:
raise NotImplementedError("You should implement orelse in grammar first!")
def eval(self):
result = []
# Check if target name exists. If no - create it.
#runtime.Memory.CurrentNamespace.get(self)
for x in self.iter.eval():
# Set target to the current value
runtime.Memory.CurrentNamespace.set(self.target.id, x)
shouldBreak = False
for stmt in self.body:
evalResult = stmt.eval()
if isinstance(evalResult, ControlFlowMark):
if evalResult.type == ControlFlowMark.Type.Break:
shouldBreak = True
break
elif evalResult.type == ControlFlowMark.Type.Continue:
break
elif evalResult.type == ControlFlowMark.Type.Pass:
pass
elif evalResult.type == ControlFlowMark.Type.Return:
return evalResult
if type(evalResult) is list:
result += evalResult
else:
result.append(evalResult)
if shouldBreak:
break
return result
"""
# An assignment.
# @targets is a list of nodes,
# @value is a single node.
#
# Multiple nodes in targets represents assigning the same value to each.
# Unpacking is represented by putting a Tuple or List within targets.
#
# Notice, that grammar I've implemented doesn't allow to assign to operators/keywords/literals;
# Because of this we don't perform check for the type of a target value here.
"""
class AssignStmt(Statement):
def __init__(self, target, value:Expression):
super().__init__()
self.target = target
self.value = value
def eval(self) -> None:
if isinstance(self.target, AST.expr.CallExpr):
raise runtime.Errors.SyntaxError("can't assign to function call")
lValue = self.target.eval()
rValue = self.value.eval()
if isinstance(lValue, Subscript.AssignWrapper):
lValue.collection[lValue.index] = rValue
return
runtime.Memory.CurrentNamespace.set(name=lValue, value=rValue)
class AugAssignStmt(AssignStmt):
opTable = {
'+=' : AddOp,
'-=' : SubOp,
'*=' : MultOp,
'/=' : DivOp,
'%=' : ModOp,
'&=' : BitAndOp,
'|=' : BitOrOp,
'^=' : BitXorOp,
'<<=' : LshiftOp,
'>>=' : RshiftOp,
}
def __init__(self, name, value, op):
nameNodeLoad = copy.copy(name)
nameNodeStore = copy.copy(name)
nameNodeLoad.ctx = MemoryContext.Load
nameNodeStore.ctx = MemoryContext.Store
binOp = AugAssignStmt.opTable[op](left=nameNodeLoad, right=value)
super().__init__(target=nameNodeStore, value=binOp)
"""
# Attribute access (e.g., name.attribute)
# @value is a node, typically a Name.
# @attr is a bare string giving the name of the attribute
# @ctx is Load, Store or Del according to how the attribute is acted on.
"""
class Attribute(Statement):
class Wrapper():
def __init__(self, name, attr):
self.name = name
self.attr = attr
def __init__(self, value, attr, ctx):
super().__init__()
self.value = value
self.attr = attr
self.ctx = ctx
def eval(self):
value = self.value.eval()
if self.ctx == MemoryContext.Load:
if hasattr(value, self.attr):
return getattr(value, self.attr)
else:
msg = "object has no attribute %s" % self.attr
raise runtime.Errors.AttributeError(msg)
elif self.ctx == MemoryContext.Store:
raise NotImplementedError("Assigning to attributes is not supported!")
#
# if isinstance(value, object):
# if value.__class__.__module__ == 'builtins':
# raise runtime.Errors.ArithmeticError("writing to attributes of built-in objects is not supported")
# elif callable(value):
# return Attribute.Wrapper(self.value, self.attr)
"""
A subscript, such as l[1].
@value is the object, often a Name.
@slice is one of @Index or @Slice.
@ctx is Load, Store or Del according to what it does with the subscript.
"""
class Subscript(Statement):
class AssignWrapper:
def __init__(self, collection, index):
self.collection = collection
self.index = index
def __init__(self, value, slice, ctx):
super().__init__()
self.value = value
self.slice = slice
self.ctx = ctx
def eval(self):
lValue = self.value.eval()
try:
if isinstance(self.slice, Index):
index = self.slice.eval()
if self.ctx == MemoryContext.Load:
return lValue[index]
elif self.ctx == MemoryContext.Store:
return Subscript.AssignWrapper(lValue, index)
else:
raise NotImplementedError
elif isinstance(self.slice, Slice):
lower, upper = self.slice.eval()
if self.ctx == MemoryContext.Load:
return lValue[lower:upper]
else:
raise NotImplementedError("Writing to slices & deleting elements is not supported")
else:
raise ValueError("Unexpected slice type")
except IndexError as e:
raise runtime.Errors.IndexError(e)
except KeyError as e:
raise runtime.Errors.KeyError(e)
except TypeError as e:
raise runtime.Errors.TypeError(e)
"""
Simple subscripting with a single value: l[1]
"""
class Index(Statement):
def __init__(self, value):
super().__init__()
self.value = value
def eval(self):
return self.value.eval()
"""
Regular slicing: l[1:2]
"""
class Slice(Statement):
def __init__(self, lower, upper, step):
super().__init__()
self.lower = lower
self.upper = upper
self.step = step
if self.step != None:
raise NotImplementedError()
def eval(self):
lower = upper = None
if self.lower != None:
lower = self.lower.eval()
if self.upper != None:
upper = self.upper.eval()
return lower, upper
"""
# Control flow statements.
# Each statement returns corresponding @ControlFlowMark as a result of evaluation.
# Compound statements are checking whether evaluation result is a such mark, and react accordingly.
"""
class ControlFlowStmt(Statement):
pass
class ReturnStmt(ControlFlowStmt):
def __init__(self, expr):
super().__init__()
self.expr = expr
def eval(self):
return ControlFlowMark(ControlFlowMark.Type.Return, self.expr)
class PassStmt(ControlFlowStmt):
def eval(self):
return ControlFlowMark(ControlFlowMark.Type.Pass)
class ContinueStmt(ControlFlowStmt):
def eval(self):
return ControlFlowMark(ControlFlowMark.Type.Continue)
class BreakStmt(ControlFlowStmt):
def eval(self):
return ControlFlowMark(ControlFlowMark.Type.Break)
class ControlFlowMark:
class Type(Enum):
Return = 1
Break = 2
Continue = 3
Pass = 4
def __init__(self, type, toEval=None):
self.type = type
self.toEval = toEval
|
test/tst_compression.py
|
timgates42/netcdf4-python
| 574 |
127057
|
<filename>test/tst_compression.py
from numpy.random.mtrand import uniform
from netCDF4 import Dataset
from netCDF4.utils import _quantize
from numpy.testing import assert_almost_equal
import os, tempfile, unittest
ndim = 100000
ndim2 = 100
chunk1 = 10; chunk2 = ndim2
nfiles = 7
files = [tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name for nfile in range(nfiles)]
array = uniform(size=(ndim,))
array2 = uniform(size=(ndim,ndim2))
lsd = 3
def write_netcdf(filename,zlib,least_significant_digit,data,dtype='f8',shuffle=False,contiguous=False,\
chunksizes=None,complevel=6,fletcher32=False):
file = Dataset(filename,'w')
file.createDimension('n', ndim)
foo = file.createVariable('data',\
dtype,('n'),zlib=zlib,least_significant_digit=least_significant_digit,\
shuffle=shuffle,contiguous=contiguous,complevel=complevel,fletcher32=fletcher32,chunksizes=chunksizes)
foo[:] = data
file.close()
file = Dataset(filename)
data = file.variables['data'][:]
file.close()
def write_netcdf2(filename,zlib,least_significant_digit,data,dtype='f8',shuffle=False,contiguous=False,\
chunksizes=None,complevel=6,fletcher32=False):
file = Dataset(filename,'w')
file.createDimension('n', ndim)
file.createDimension('n2', ndim2)
foo = file.createVariable('data2',\
dtype,('n','n2'),zlib=zlib,least_significant_digit=least_significant_digit,\
shuffle=shuffle,contiguous=contiguous,complevel=complevel,fletcher32=fletcher32,chunksizes=chunksizes)
foo[:] = data
file.close()
file = Dataset(filename)
data = file.variables['data2'][:]
file.close()
class CompressionTestCase(unittest.TestCase):
def setUp(self):
self.files = files
# no compression
write_netcdf(self.files[0],False,None,array)
# compressed, lossless, no shuffle.
write_netcdf(self.files[1],True,None,array)
# compressed, lossless, with shuffle.
write_netcdf(self.files[2],True,None,array,shuffle=True)
# compressed, lossy, no shuffle.
write_netcdf(self.files[3],True,lsd,array)
# compressed, lossy, with shuffle.
write_netcdf(self.files[4],True,lsd,array,shuffle=True)
# compressed, lossy, with shuffle and fletcher32 checksum.
write_netcdf(self.files[5],True,lsd,array,shuffle=True,fletcher32=True)
# 2-d compressed, lossy, with shuffle and fletcher32 checksum and
# chunksizes.
write_netcdf2(self.files[6],True,lsd,array2,shuffle=True,fletcher32=True,chunksizes=(chunk1,chunk2))
def tearDown(self):
# Remove the temporary files
for file in self.files:
os.remove(file)
def runTest(self):
"""testing zlib and shuffle compression filters"""
uncompressed_size = os.stat(self.files[0]).st_size
# check compressed data.
f = Dataset(self.files[1])
size = os.stat(self.files[1]).st_size
assert_almost_equal(array,f.variables['data'][:])
assert f.variables['data'].filters() == {'zlib':True,'shuffle':False,'complevel':6,'fletcher32':False}
assert(size < 0.95*uncompressed_size)
f.close()
# check compression with shuffle
f = Dataset(self.files[2])
size = os.stat(self.files[2]).st_size
assert_almost_equal(array,f.variables['data'][:])
assert f.variables['data'].filters() == {'zlib':True,'shuffle':True,'complevel':6,'fletcher32':False}
assert(size < 0.85*uncompressed_size)
f.close()
# check lossy compression without shuffle
f = Dataset(self.files[3])
size = os.stat(self.files[3]).st_size
checkarray = _quantize(array,lsd)
assert_almost_equal(checkarray,f.variables['data'][:])
assert(size < 0.27*uncompressed_size)
f.close()
# check lossy compression with shuffle
f = Dataset(self.files[4])
size = os.stat(self.files[4]).st_size
assert_almost_equal(checkarray,f.variables['data'][:])
assert(size < 0.20*uncompressed_size)
size_save = size
f.close()
# check lossy compression with shuffle and fletcher32 checksum.
f = Dataset(self.files[5])
size = os.stat(self.files[5]).st_size
assert_almost_equal(checkarray,f.variables['data'][:])
assert f.variables['data'].filters() == {'zlib':True,'shuffle':True,'complevel':6,'fletcher32':True}
assert(size < 0.20*uncompressed_size)
# should be slightly larger than without fletcher32
assert(size > size_save)
# check chunksizes
f.close()
f = Dataset(self.files[6])
checkarray2 = _quantize(array2,lsd)
assert_almost_equal(checkarray2,f.variables['data2'][:])
assert f.variables['data2'].filters() == {'zlib':True,'shuffle':True,'complevel':6,'fletcher32':True}
assert f.variables['data2'].chunking() == [chunk1,chunk2]
f.close()
if __name__ == '__main__':
unittest.main()
|
pytorch/examples/SRNN/helpermethods.py
|
krantikiran/EdgeML
| 719 |
127062
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import argparse
def getSRNN2Args():
def checkIntPos(value):
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError(
"%s is an invalid positive int value" % value)
return ivalue
def checkIntNneg(value):
ivalue = int(value)
if ivalue < 0:
raise argparse.ArgumentTypeError(
"%s is an invalid non-neg int value" % value)
return ivalue
def checkFloatNneg(value):
fvalue = float(value)
if fvalue < 0:
raise argparse.ArgumentTypeError(
"%s is an invalid non-neg float value" % value)
return fvalue
def checkFloatPos(value):
fvalue = float(value)
if fvalue <= 0:
raise argparse.ArgumentTypeError(
"%s is an invalid positive float value" % value)
return fvalue
parser = argparse.ArgumentParser(
description='Hyperparameters for 2 layer SRNN Algorithm')
parser.add_argument('-d', '--data-dir', required=True,
help='Directory containing processed data.')
parser.add_argument('-h0', '--hidden-dim0', type=checkIntPos, default=64,
help='Hidden dimension of lower layer RNN cell.')
parser.add_argument('-h1', '--hidden-dim1', type=checkIntPos, default=32,
help='Hidden dimension of upper layer RNN cell.')
parser.add_argument('-bz', '--brick-size', type=checkIntPos, required=True,
help='Brick size to be used at the lower layer.')
parser.add_argument('-c', '--cell-type', default='LSTM',
help='Type of RNN cell to use among [LSTM, FastRNN, ' +
'FastGRNN')
parser.add_argument('-p', '--num-prototypes', type=checkIntPos, default=20,
help='Number of prototypes.')
parser.add_argument('-g', '--gamma', type=checkFloatPos, default=None,
help='Gamma for Gaussian kernel. If not provided, ' +
'median heuristic will be used to estimate gamma.')
parser.add_argument('-e', '--epochs', type=checkIntPos, default=10,
help='Total training epochs.')
parser.add_argument('-b', '--batch-size', type=checkIntPos, default=128,
help='Batch size for each pass.')
parser.add_argument('-r', '--learning-rate', type=checkFloatPos,
default=0.01,
help='Learning rate for ADAM Optimizer.')
parser.add_argument('-pS', '--print-step', type=int, default=200,
help='The number of update steps between print ' +
'calls to console.')
parser.add_argument('-vS', '--val-step', type=int, default=5,
help='The number of epochs between validation' +
'performance evaluation')
return parser.parse_args()
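# Illustrative invocation (assumed example; the driver script name and paths are
# hypothetical):
#
#   python SRNN_example.py -d ./processed_data -bz 16 -h0 64 -h1 32 -c LSTM -e 50
#
# after which getSRNN2Args() exposes the values as args.data_dir, args.brick_size,
# args.hidden_dim0, args.hidden_dim1, args.cell_type, args.epochs, and so on.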
|
L1Trigger/L1TMuonBarrel/test/kalmanTools/makePTPhiLUTs.py
|
ckamtsikis/cmssw
| 852 |
127095
|
from __future__ import print_function
from math import pi,floor
print(int(((-330+1024)*pi/(6.0*2048.0))/(0.625*pi/180.0)))
#phi=[]
#for i in range(0,2048):
# p = int((i*pi/(6.0*2048.0)+15.0*pi/180.0)/(0.625*pi/180.0))
# p = int((i*2*pi/(6.0*2048.0))/(0.625*pi/180.0))
# phi.append(str(p))
#print('const ap_int<8> phiLUT[2047] = {'+','.join(phi)+'};')
#import pdb;pdb.set_trace()
def bits(number, size_in_bits):
"""
The bin() function is *REALLY* unhelpful when working with negative numbers.
It outputs the binary representation of the positive version of that number
with a '-' at the beginning. Woop-di-do. Here's how to derive the two's-
complement binary of a negative number:
complement(bin(+n - 1))
`complement` is a function that flips each bit. `+n` is the negative number
made positive.
"""
if number < 0:
return compliment(bin(abs(number) - 1)[2:]).rjust(size_in_bits, '1')
else:
return bin(number)[2:].rjust(size_in_bits, '0')
def compliment(value):
return ''.join(COMPLEMENT[x] for x in value)
COMPLEMENT = {'1': '0', '0': '1'}
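# Illustrative examples for the helpers above (not part of the original script):
#   bits(5, 8)  -> '00000101'  (non-negative values are plain binary, zero-padded)
#   bits(-3, 4) -> '1101'      (two's complement: complement the bits of 3 - 1, pad with '1')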
phiLUT=[]
kPHI = 57.2958/0.625/1024.;
for i in range(0,1024):
phiLUT.append(0)
for phi in range(-512,512):
address = int(bits(phi,10),2)
phiF=float(phi)
phiNew = 24+int(floor(kPHI*phiF));
if phiNew > 69:
phiNew = 69;
if phiNew < -8:
phiNew = -8;
phiLUT[address]=(str(phiNew))
print('const ap_int<8> phiLUT[1024] = {'+','.join(phiLUT)+'};')
ptLUT=[]
lsb = 1.25/(1<<13)
for i in range(0,4096):
ptLUT.append(6)
for K in range(-2048,2048):
address = int(bits(K,12),2)
if K>=0:
charge=1
else:
charge=-1
FK=lsb*abs(K)
if abs(K)>2047:
FK=lsb*2047
if abs(K)<26:
FK=lsb*26
FK = 0.898*FK/(1.0-0.6*FK);
FK=FK-26.382*FK*FK*FK*FK*FK;
FK=FK-charge*1.408e-3;
FK=FK/1.17;
if (FK!=0.0):
pt=int(2.0/FK)
else:
pt=511
if pt>511:
pt=511
if pt<6:
pt=6;
ptLUT[address]=str(pt)
print('const ap_uint<9> ptLUT[4096] = {'+','.join(ptLUT)+'};')
|
src/foreground_scaling.py
|
ML-Toolkit/trimap_generator
| 168 |
127096
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2, os, sys
import numpy as np
from abc import ABC, abstractmethod
def extractImage(path):
    # error handler if the intended path is not found
image = cv2.imread(path, cv2.IMREAD_GRAYSCALE);
return image
def checkImage(image):
"""
Args:
image: input image to be checked
Returns:
binary image
    Raises:
        SystemExit for RGB image, grayscale image, all-black, and all-white image
"""
if len(image.shape) > 2:
print("ERROR: non-binary image (RGB)");
sys.exit();
smallest = image.min(axis=0).min(axis=0); # lowest pixel value; should be 0 (black)
largest = image.max(axis=0).max(axis=0); # highest pixel value; should be 1 (white)
if (smallest == 0 and largest == 0):
print("ERROR: non-binary image (all black)");
sys.exit();
elif (smallest == 255 and largest == 255):
print("ERROR: non-binary image (all white)");
sys.exit();
elif (smallest > 0 or largest < 255 ):
print("ERROR: non-binary image (grayscale)");
sys.exit();
else:
return True
class FGScale(ABC):
"""
An abstract base class that enables image erosion or dilation PRE trimap
Attribute: binary image
Method: scaling with two inputs: image and iterations
"""
def __init__(self, image):
self.image = image;
@abstractmethod
def scaling(self, image, iteration):
pass
class Erosion(FGScale):
def __init__(self, image):
self.image = image
def scaling(self, image, erosion):
erosion = int(erosion)
kernel = np.ones((3,3), np.uint8) ## Design an odd-sized erosion kernel
image = cv2.erode(image, kernel, iterations=erosion) ## The number of erosions
        image = np.where(image > 0, 255, image)               ## Any gray-colored pixel becomes white (smoothing)
# Error-handler to prevent entire foreground annihilation
if cv2.countNonZero(image) == 0:
print("ERROR: foreground has been entirely eroded");
sys.exit();
return image;
class Dilation(FGScale):
def __init__(self, image):
self.image = image
def scaling(self, image, dilation):
dilation = int(dilation)
        kernel = np.ones((3,3), np.uint8)                     ## Design an odd-sized dilation kernel
image = cv2.dilate(image, kernel, iterations=dilation) ## The number of dilations
        image = np.where(image > 0, 255, image)               ## Any gray-colored pixel becomes white (smoothing)
# Error-handler to prevent entire foreground domination
height = image.shape[0];
width = image.shape[1];
totalpixels = height*width;
n_white_pix = np.sum(image == 255)
if n_white_pix == totalpixels:
print("ERROR: foreground has been entirely expanded");
sys.exit();
return image;
#############################################
### TESTING SECTION ###
#############################################
if __name__ == '__main__':
path = "./images/test_images/test_image_12.png"
image = extractImage(path)
unit01 = Erosion(image)
new_image = unit01.scaling(image, 2)
cv2.imshow('Displayed Image', new_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
src/nvim/testdir/pyxfile/pyx.py
|
uga-rosa/neovim
| 48,021 |
127109
|
import sys
print(sys.version)
|
Chapter15/python_object_to_json.py
|
add54/ADMIN_SYS_PYTHON
| 116 |
127114
|
<filename>Chapter15/python_object_to_json.py<gh_stars>100-1000
import json
python_dict = {"Name": "Harry", "Age": 26}
python_list = ["Mumbai", "Pune"]
python_tuple = ("Basketball", "Cricket")
python_str = ("hello_world")
python_int = (150)
python_float = (59.66)
python_T = (True)
python_F = (False)
python_N = (None)
json_obj = json.dumps(python_dict)
json_arr1 = json.dumps(python_list)
json_arr2 = json.dumps(python_tuple)
json_str = json.dumps(python_str)
json_num1 = json.dumps(python_int)
json_num2 = json.dumps(python_float)
json_t = json.dumps(python_T)
json_f = json.dumps(python_F)
json_n = json.dumps(python_N)
print("json object : ", json_obj)
print("jason array1 : ", json_arr1)
print("json array2 : ", json_arr2)
print("json string : ", json_str)
print("json number1 : ", json_num1)
print("json number2 : ", json_num2)
print("json true", json_t)
print("json false", json_f)
print("json null", json_n)
|
examples/ecommerce-app/checkout-api/src/remove_sensitive_data.py
|
carvantes/aws-serverless-event-fork-pipelines
| 126 |
127150
|
<reponame>carvantes/aws-serverless-event-fork-pipelines
"""Lambda function handler."""
# must be the first import in files with lambda function handlers
import lambdainit # noqa: F401
import base64
import json
import lambdalogging
LOG = lambdalogging.getLogger(__name__)
def handler(event, context):
"""Remove sensitive data from checkout events."""
LOG.debug('Received event: %s', event)
if 'records' not in event:
LOG.warning('No records found in event.')
return {}
transformed_records = [_transform(r) for r in event['records']]
response = {'records': transformed_records}
LOG.debug("Returning response: %s", response)
return response
def _transform(record):
transformed_record = {
'recordId': record['recordId'],
'result': 'Ok'
}
checkout_event = json.loads(base64.b64decode(record['data']))
payment_info = checkout_event.get('payment', {})
sensitive_payment_keys = [key for key in payment_info.keys() if key.startswith('card-')]
LOG.debug("Removing sensitive payment information from checkout event: %s", sensitive_payment_keys)
for payment_key in sensitive_payment_keys:
payment_info.pop(payment_key)
transformed_record['data'] = base64.b64encode(bytes(json.dumps(checkout_event), 'UTF-8')).decode('UTF-8')
return transformed_record
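# Illustrative record shape (assumed example, not part of the original handler). The
# handler expects Firehose-style records whose base64-encoded 'data' decodes to a
# checkout event, e.g.
#
#   {"recordId": "1",
#    "data": b64('{"payment": {"card-number": "4111...", "card-cvv": "123", "amount": 42}}')}
#
# _transform() drops every payment key starting with 'card-' and re-encodes the event,
# so only {"payment": {"amount": 42}} survives in the returned record's 'data'.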
|
rman_operators/rman_operators_utils.py
|
N500/RenderManForBlender
| 432 |
127164
|
from .. import rman_bl_nodes
from ..rfb_icons import get_bxdf_icon, get_light_icon, get_lightfilter_icon, get_projection_icon
from ..rman_constants import RMAN_BL_NODE_DESCRIPTIONS
def get_description(category, node_name):
description = None
for n in rman_bl_nodes.__RMAN_NODES__.get(category, list()):
if n.name == node_name:
if n.help:
description = n.help
break
if not description:
description = RMAN_BL_NODE_DESCRIPTIONS.get(node_name, node_name)
return description
def get_bxdf_items():
items = []
i = 1
for bxdf_cat, bxdfs in rman_bl_nodes.__RMAN_NODE_CATEGORIES__['bxdf'].items():
if not bxdfs[1]:
continue
tokens = bxdf_cat.split('_')
bxdf_category = ' '.join(tokens[1:])
items.append(('', bxdf_category.capitalize(), '', 0, 0))
for n in bxdfs[1]:
rman_bxdf_icon = get_bxdf_icon(n.name)
items.append( (n.name, n.name, '', rman_bxdf_icon.icon_id, i))
i += 1
return items
def get_light_items():
rman_light_icon = get_light_icon("PxrRectLight")
items = []
i = 0
dflt = 'PxrRectLight'
items.append((dflt, dflt, '', rman_light_icon.icon_id, i))
for n in rman_bl_nodes.__RMAN_LIGHT_NODES__:
if n.name == 'PxrMeshLight':
continue
if n.name != dflt:
i += 1
light_icon = get_light_icon(n.name)
        description = get_description('light', n.name)
items.append( (n.name, n.name, description, light_icon.icon_id, i))
return items
def get_lightfilter_items():
items = []
i = 0
rman_light_icon = get_lightfilter_icon("PxrBlockerLightFilter")
dflt = 'PxrBlockerLightFilter'
items.append((dflt, dflt, '', rman_light_icon.icon_id, i))
for n in rman_bl_nodes.__RMAN_LIGHTFILTER_NODES__:
if n.name != dflt:
i += 1
light_icon = get_lightfilter_icon(n.name)
description = RMAN_BL_NODE_DESCRIPTIONS.get(n.name, n.name)
if n.help:
description = n.help
items.append( (n.name, n.name, description, light_icon.icon_id, i))
return items
def get_projection_items():
items = []
i = 0
proj_icon = get_projection_icon("PxrCamera")
dflt = 'PxrCamera'
items.append((dflt, dflt, '', proj_icon.icon_id, i))
for n in rman_bl_nodes.__RMAN_PROJECTION_NODES__:
if n.name != dflt:
i += 1
proj_icon = get_projection_icon(n.name)
description = RMAN_BL_NODE_DESCRIPTIONS.get(n.name, n.name)
if n.help:
description = n.help
items.append( (n.name, n.name, description, proj_icon.icon_id, i))
return items
|
pycantonese/tests/test_docs.py
|
jacksonllee/pycantonese
| 124 |
127193
|
<reponame>jacksonllee/pycantonese<filename>pycantonese/tests/test_docs.py
"""Tests related to the documentation."""
import pytest
import requests
@pytest.mark.parametrize(
"url",
[
"https://talkbank.org/manuals/CHAT.pdf",
"https://pylangacq.org/",
"http://compling.hss.ntu.edu.sg/hkcancor/",
"https://github.com/jacksonllee/pycantonese/blob/main/pycantonese/data/hkcancor/README.md", # noqa: E501
"https://childes.talkbank.org/data/Biling/YipMatthews.zip",
"https://pylangacq.org/read.html",
"https://pylangacq.org/headers.html",
"https://docs.python.org/3/library/re.html",
"https://www.lshk.org/jyutping",
"https://www.tug.org/TUGboat/tb17-2/tb51rei.pdf",
"https://universaldependencies.org/u/pos/index.html",
"https://pycantonese.org/index.html#links",
# Archives
"https://pycantonese.org/papers/lee-chen-tsui-wicl3-slides-2016-03-12.pdf",
"https://pycantonese.org/papers/lee-chen-tsui-wicl3-handout-2016-03-12.pdf",
"https://pycantonese.org/papers/Lee-pycantonese-2015.html",
"https://github.com/jacksonllee/pycantonese/blob/main/docs/tutorials/lee-pycantonese-2021-05-16.ipynb", # noqa: E501
"https://github.com/jacksonllee/pycantonese/blob/main/docs/tutorials/lee-python-2021-april.ipynb", # noqa: E501
"https://github.com/chaaklau/school-of-cantonese-2021-materials/blob/main/chaak_sfp_2021_05_16.ipynb", # noqa: E501
"https://github.com/charlestklam/school-of-cantonese-studies-2021/blob/main/Multiword_Expressions_Discontinuous_Constructions.ipynb", # noqa: E501
],
)
def test_urls_work(url):
"""URLs used in the documentation shouldn't be dead."""
with requests.get(url) as r:
assert r.status_code == 200
|
terrascript/provider/camptocamp/jwt.py
|
mjuenema/python-terrascript
| 507 |
127197
|
<reponame>mjuenema/python-terrascript
# terrascript/provider/camptocamp/jwt.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:19:50 UTC)
import terrascript
class jwt(terrascript.Provider):
"""Terraform Provider for generating JSON Web Tokens"""
__description__ = "Terraform Provider for generating JSON Web Tokens"
__namespace__ = "camptocamp"
__name__ = "jwt"
__source__ = "https://github.com/camptocamp/terraform-provider-jwt"
__version__ = "0.0.3"
__published__ = "2020-11-21T14:17:38Z"
__tier__ = "community"
__all__ = ["jwt"]
|
cloudia/word_data.py
|
kakadaXY/cloudia
| 128 |
127216
|
<reponame>kakadaXY/cloudia
from typing import Any, List, Tuple, Dict, Callable, Union
from itertools import repeat, chain, zip_longest
from collections import Counter
from joblib import Parallel, delayed
import pandas as pd
from cloudia.utils import function_wrapper, make_nagisa_tagger
class WordData:
def __init__(self, data: Any, parse_func: Callable[..., List[str]], multiprocess: bool, individual: bool, **args):
words, self.names = self._init_data(data)
self.counter_list = self.parse(words, parse_func, multiprocess, individual, **args)
self.words = [self.convert_weight(x) for x in self.counter_list]
def parse(self, words, parse_func: Callable[..., List[str]], multiprocess: bool, individual: bool, **args) -> List[Counter]:
if isinstance(words[0], list):
word_list_length = len(words[0])
if individual:
words = list(chain.from_iterable(words))
words = self._parse(words, parse_func, multiprocess, **args)
words = list(zip_longest(*[iter(words)] * word_list_length))
words = [sum(w, Counter()) for w in words]
else:
words = [' '.join(x) for x in words]
words = self._parse(words, parse_func, multiprocess, **args)
else:
words = self._parse(words, parse_func, multiprocess, **args)
return words
def convert_weight(self, c: Counter) -> Dict[str, float]:
most_common = c.most_common()
_max_count = most_common[0][1]
weight = {k: v / _max_count for k, v in most_common}
weight = {k: weight[k] for k in list(weight.keys())}
return weight
def _parse(self, words: List[str], parse_func: Callable[..., List[str]], multiprocess: bool, **args) -> Union[List[Counter], List[List[Counter]]]:
if multiprocess:
return self._parallel_parse(words, function_wrapper(parse_func), **args)
return self._single_thread_parse(words, parse_func, **args)
def _single_thread_parse(self, words: List[str], parse_func: Callable[..., List[str]], **args) -> List[Counter]:
if args['parser'] == 'default':
args.update({'parser': make_nagisa_tagger(args['single_words'])})
return [Counter(parse_func(x, **args)) for x in words]
def _parallel_parse(self, words: List[str], parse_func: Callable, **args) -> List[List[Counter]]:
parsed_words = Parallel(n_jobs=-1)([delayed(parse_func)(w, **dict(**a, **{'_index': i})) for i, (w, a) in enumerate(zip(words, repeat(args)))])
parsed_words.sort(key=lambda x: x[1])
parsed_words = [t[0] for t in parsed_words]
return parsed_words
def _init_data(self, data: Any) -> Tuple[List[str], List[str]]:
# TODO: set assert
words, names = [], []
if isinstance(data, list):
if isinstance(data[0], tuple):
if isinstance(data[0][1], pd.Series):
words = [d.values.tolist() for n, d in data]
names = [n for n, d in data]
else:
words = [w for n, w in data]
names = [n for n, w in data]
elif isinstance(data[0], str):
words = data
names = [f'word cloud {i+1}' for i in range(len(data))]
elif isinstance(data[0], pd.Series):
words = [d.values.tolist() for d in data]
names = [d.name for d in data]
elif isinstance(data, str):
words = [data]
names = ['word cloud']
elif isinstance(data, tuple):
words = [data[1]]
names = [data[0]]
elif isinstance(data, pd.DataFrame):
names = data.columns.tolist()
words = [data[x].values.tolist() for x in names]
elif isinstance(data, pd.Series):
words = [data.values.tolist()]
names = [data.name]
return words, names
def __iter__(self):
for n, w in zip(self.names, self.words):
yield n, w
|
src/api/extension.py
|
piwaniuk/critic
| 216 |
127227
|
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2015 the Critic contributors, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import api
class ExtensionError(api.APIError):
"""Base exception for all errors related to the Extension class"""
pass
class InvalidExtensionId(ExtensionError):
"""Raised when a invalid extension id is used"""
def __init__(self, value):
"""Constructor"""
super(InvalidExtensionId, self).__init__(
"Invalid extension id: %r" % value)
self.value = value
class InvalidExtensionKey(ExtensionError):
"""Raised when an invalid extension key is used"""
def __init__(self, value):
"""Constructor"""
super(InvalidExtensionKey, self).__init__(
"Invalid extension key: %r" % value)
self.value = value
class Extension(api.APIObject):
"""Representation of a Critic extension"""
@property
def id(self):
"""The extension's unique id"""
return self._impl.id
@property
def name(self):
"""The extension's name"""
return self._impl.name
@property
def key(self):
"""The extension's unique key
For a system extension, the key is the extension's name. For other
extensions, the key is the publisher's username followed by a slash
followed by the extension's name."""
return self._impl.getKey(self.critic)
@property
def publisher(self):
"""The extension's publisher
The user that published the extension. This may not be the author
(who may not be a user of this Critic system.)
None if this is a system extension."""
return self._impl.getPublisher(self.critic)
@property
def default_version(self):
"""The default extension version
This is typically the version whose extension description and other
metadata should be presented as the extension's true metadata."""
return self._impl.getDefaultVersion()
def fetch(critic, extension_id=None, key=None):
"""Fetch an Extension object with the given extension id or key
Exactly one of the 'extension_id' and 'key' arguments can be used.
Exceptions:
InvalidExtensionId: if 'extension_id' is used and is not a valid
extension id.
      InvalidExtensionKey: if 'key' is used and is not a valid extension
key."""
import api.impl
assert isinstance(critic, api.critic.Critic)
assert (extension_id is None) != (key is None)
return api.impl.extension.fetch(critic, extension_id, key)
def fetchAll(critic, publisher=None, installed_by=None):
"""Fetch Extension objects for all extensions in the system
If 'publisher' is not None, it must be an api.user.User object, and only
extensions published by this user are returned.
If 'installed_by' is not None, it must be an api.user.User object, and
only extensions that this user has installed are returned. This may
include extensions that are universally installed (i.e. installed for all
users, and not by this user directly.)"""
import api.impl
assert isinstance(critic, api.critic.Critic)
assert publisher is None or isinstance(publisher, api.user.User)
assert installed_by is None or isinstance(installed_by, api.user.User)
return api.impl.extension.fetchAll(critic, publisher, installed_by)
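# Illustrative usage (assumed example; 'critic' is an api.critic.Critic session and
# the extension key below is hypothetical):
#
#   extension = fetch(critic, key="alice/reviewtools")
#   print(extension.name, extension.key, extension.default_version)
#
#   for extension in fetchAll(critic):
#       print(extension.id, extension.key)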
|
listings/chap6/listing_6_4_find_metric_groups.py
|
genekuo/fight-churn
| 151 |
127278
|
import pandas as pd
import numpy as np
import os
from collections import Counter
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import squareform
def find_correlation_clusters(corr,corr_thresh):
dissimilarity = 1.0 - corr
hierarchy = linkage(squareform(dissimilarity), method='single')
diss_thresh = 1.0 - corr_thresh
labels = fcluster(hierarchy, diss_thresh, criterion='distance')
return labels
def relabel_clusters(labels,metric_columns):
cluster_count = Counter(labels)
cluster_order = {cluster[0]: idx for idx, cluster in enumerate(cluster_count.most_common())}
relabeled_clusters = [cluster_order[l] for l in labels]
relabled_count = Counter(relabeled_clusters)
labeled_column_df = pd.DataFrame({'group': relabeled_clusters, 'column': metric_columns}).sort_values(
['group', 'column'], ascending=[True, True])
return labeled_column_df, relabled_count
def make_load_matrix(labeled_column_df,metric_columns,relabled_count,corr):
load_mat = np.zeros((len(metric_columns), len(relabled_count)))
for row in labeled_column_df.iterrows():
orig_col = metric_columns.index(row[1][1])
if relabled_count[row[1][0]]>1:
load_mat[orig_col, row[1][0]] = 1.0/ (np.sqrt(corr) * float(relabled_count[row[1][0]]) )
else:
load_mat[orig_col, row[1][0]] = 1.0
is_group = load_mat.astype(bool).sum(axis=0) > 1
column_names=['metric_group_{}'.format(d + 1) if is_group[d]
else labeled_column_df.loc[labeled_column_df['group']==d,'column'].iloc[0]
for d in range(0, load_mat.shape[1])]
loadmat_df = pd.DataFrame(load_mat, index=metric_columns, columns=column_names)
loadmat_df['name'] = loadmat_df.index
sort_cols = list(loadmat_df.columns.values)
sort_order = [False] * loadmat_df.shape[1]
sort_order[-1] = True
loadmat_df = loadmat_df.sort_values(sort_cols, ascending=sort_order)
loadmat_df = loadmat_df.drop('name', axis=1)
return loadmat_df
def save_load_matrix(data_set_path,loadmat_df, labeled_column_df):
save_path = data_set_path.replace('.csv', '_load_mat.csv')
print('saving loadings to ' + save_path)
loadmat_df.to_csv(save_path)
save_path = data_set_path.replace('.csv', '_groupmets.csv')
print('saving metric groups to ' + save_path)
group_lists=['|'.join(labeled_column_df[labeled_column_df['group']==g]['column'])
for g in set(labeled_column_df['group'])]
pd.DataFrame(group_lists,index=loadmat_df.columns.values,columns=['metrics']).to_csv(save_path)
def find_metric_groups(data_set_path,group_corr_thresh=0.5):
score_save_path=data_set_path.replace('.csv','_scores.csv')
assert os.path.isfile(score_save_path),'You must run listing 5.3 or 7.5 to save metric scores first'
score_data = pd.read_csv(score_save_path,index_col=[0,1])
score_data.drop('is_churn',axis=1,inplace=True)
metric_columns = list(score_data.columns.values)
labels = find_correlation_clusters(score_data.corr(),group_corr_thresh)
labeled_column_df, relabled_count = relabel_clusters(labels,metric_columns)
loadmat_df = make_load_matrix(labeled_column_df, metric_columns, relabled_count,group_corr_thresh)
save_load_matrix(data_set_path,loadmat_df,labeled_column_df)
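# Editor's note: hedged usage sketch, not part of the original listing. The
# data set path is hypothetical; find_metric_groups() requires the matching
# *_scores.csv file (saved by listing 5.3 or 7.5) to exist already.
if __name__ == '__main__':
    find_metric_groups('../../../fight-churn-output/socialnet7/socialnet7_dataset.csv',
                       group_corr_thresh=0.5)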
|
tools/eval.py
|
briana-jin-zhang/spatial-segmentation
| 733 |
127299
|
import argparse
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import sys
sys.path.append('.')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('res', type=str)
parser.add_argument('gt', type=str)
args = parser.parse_args()
return args
def main(args):
evaluate(args.res, args.gt)
def evaluate(res_file, gt_file):
annType = 'segm'
cocoGt = COCO(gt_file)
cocoDt = cocoGt.loadRes(res_file)
cocoEval = COCOeval(cocoGt, cocoDt, annType)
cocoEval.params.imgIds = cocoGt.getImgIds()
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if __name__ == "__main__":
args = parse_args()
main(args)
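# Editor's note: hedged usage sketch; both paths are hypothetical. `res` must be
# a COCO-format segmentation result JSON and `gt` the matching annotation JSON:
#
#   python tools/eval.py work_dirs/results.segm.json data/coco/annotations/instances_val2017.json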
|
devtools/print_requirements.py
|
Wheelspawn/pudl
| 285 |
127337
|
#!/usr/bin/python3
"""Print out install requirements from setup.py for use with pip install."""
import distutils.core
setup = distutils.core.run_setup("setup.py")
for dep in setup.install_requires:
print(dep)
|
libp2p/security/exceptions.py
|
swedneck/py-libp2p
| 315 |
127357
|
from libp2p.exceptions import BaseLibp2pError
class HandshakeFailure(BaseLibp2pError):
pass
|
tests/functional/simple_snapshot/test_changing_check_cols_snapshot.py
|
tomasfarias/dbt-core
| 799 |
127386
|
import pytest
from dbt.tests.util import run_dbt, check_relations_equal
snapshot_sql = """
{% snapshot snapshot_check_cols_new_column %}
{{
config(
target_database=database,
target_schema=schema,
strategy='check',
unique_key='id',
check_cols=var("check_cols", ['name']),
updated_at="'" ~ var("updated_at") ~ "'::timestamp",
)
}}
{% if var('version') == 1 %}
select 1 as id, 'foo' as name
{% else %}
select 1 as id, 'foo' as name, 'bar' as other
{% endif %}
{% endsnapshot %}
"""
expected_csv = """
id,name,other,dbt_scd_id,dbt_updated_at,dbt_valid_from,dbt_valid_to
1,foo,NULL,0d73ad1b216ad884c9f7395d799c912c,2016-07-01 00:00:00.000,2016-07-01 00:00:00.000,2016-07-02 00:00:00.000
1,foo,bar,7df3783934a6a707d51254859260b9ff,2016-07-02 00:00:00.000,2016-07-02 00:00:00.000,
""".lstrip()
@pytest.fixture(scope="class")
def snapshots():
return {"snapshot_check_cols_new_column.sql": snapshot_sql}
@pytest.fixture(scope="class")
def seeds():
return {"snapshot_check_cols_new_column_expected.csv": expected_csv}
@pytest.fixture(scope="class")
def project_config_update():
return {
"seeds": {
"quote_columns": False,
"test": {
"snapshot_check_cols_new_column_expected": {
"+column_types": {
"dbt_updated_at": "timestamp without time zone",
"dbt_valid_from": "timestamp without time zone",
"dbt_valid_to": "timestamp without time zone",
},
},
},
},
}
def test_simple_snapshot(project):
"""
Test that snapshots using the "check" strategy and explicit check_cols support adding columns.
Approach:
1. Take a snapshot that checks a single non-id column
2. Add a new column to the data
3. Take a snapshot that checks the new non-id column too
As long as no error is thrown, then the snapshot was successful
"""
# 1. Create a table that represents the expected data after a series of snapshots
results = run_dbt(["seed", "--show", "--vars", "{version: 1, updated_at: 2016-07-01}"])
assert len(results) == 1
# Snapshot 1
results = run_dbt(
["snapshot", "--vars", "{version: 1, check_cols: ['name'], updated_at: 2016-07-01}"]
)
assert len(results) == 1
# Snapshot 2
results = run_dbt(
[
"snapshot",
"--vars",
"{version: 2, check_cols: ['name', 'other'], updated_at: 2016-07-02}",
]
)
assert len(results) == 1
check_relations_equal(
project.adapter,
["snapshot_check_cols_new_column", "snapshot_check_cols_new_column_expected"],
compare_snapshot_cols=True,
)
|
panel/api/views.py
|
freejooo/vigilio
| 137 |
127440
|
import logging
from dataclasses import asdict
from typing import List, Dict, Any
import dotenv
from celery.app.control import Inspect
from django.conf import settings
from django.contrib.auth.models import User
from qbittorrent import Client
from rest_framework import status, generics
from rest_framework.exceptions import APIException, NotFound
from rest_framework.generics import GenericAPIView
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.status import HTTP_204_NO_CONTENT
from rest_framework.views import APIView
from panel.api.handlers import (
TorrentProcessHandler,
FilesHandler,
MovieManagementHandler,
FilesResult,
)
from panel.api.serializers import (
TorrentSerializer,
CelerySerializer,
FilesSerializer,
MovieManagementSerializer,
MovieAddSerializer,
GlobalSettingsSerializer,
MudSourceSerializer,
RedownloadSubtitlesSerializer,
)
from panel.api.utils import (
get_celery_nodes,
is_redis_online,
AddMovieHandler,
is_qbittorrent_running,
get_dotenv_location,
get_dotenv_values,
DotenvFilter,
)
from panel.api.validators import MovieManagementValidation, FilesValidation
from panel.decorators import check_demo, DemoOrIsAuthenticated
from panel.management.commands import superuser
from panel.models import MudSource
from panel.tasks import redownload_subtitles
from panel.tasks.inmemory import set_redis
from panel.tasks.torrent import get_qbittorrent_client
from watch.celery import app
logger = logging.getLogger(__name__)
class TorrentEndpoint(GenericAPIView):
serializer_class = TorrentSerializer
handler_class = TorrentProcessHandler
permission_classes = [DemoOrIsAuthenticated]
verbose_request_logging = True
def get(self, request: Request) -> Response:
try:
client: Client = get_qbittorrent_client()
except Exception:
raise APIException("Qbittorrent connection failed.")
return Response(
{"torrents": client.torrents()},
status=status.HTTP_200_OK,
)
@check_demo
def post(self, request: Request) -> Response:
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
if not is_qbittorrent_running():
raise APIException(
"Qbittorrent is not running. Cannot process the request."
)
handler = self.handler_class().handle(torrent_process=serializer.object)
return Response(handler)
class CeleryEndpoint(GenericAPIView):
serializer_class = CelerySerializer
permission_classes = [DemoOrIsAuthenticated]
verbose_request_logging = True
def get(self, request: Request) -> Response:
if not is_redis_online():
raise APIException("Redis is offline.")
nodes: List[str] = get_celery_nodes()
logger.info(f"Celery nodes: {nodes}")
inspect: Inspect = app.control.inspect(nodes)
return Response(
{
"active": self._concatenate(inspect.active()),
"reserved": self._concatenate(inspect.reserved()),
"scheduled": self._concatenate(inspect.scheduled()),
},
status=status.HTTP_200_OK,
)
@check_demo
def post(self, request: Request) -> Response:
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
if not is_redis_online():
raise APIException("RabbitMQ is not running. Cannot process the request.")
app.control.revoke(serializer.object.process_id)
return Response({"status": "success"})
@staticmethod
def _concatenate(
node_result: Dict[str, List[Dict[str, Any]]]
) -> List[Dict[str, Any]]:
if not node_result:
return []
_result: List[Dict[str, Any]] = []
for value in node_result.values():
_result += value
return _result
class FilesEndpoint(GenericAPIView):
serializer_class = FilesSerializer
validator_class = FilesValidation
handler_class = FilesHandler
permission_classes = [DemoOrIsAuthenticated]
verbose_request_logging = True
def get(self, request: Request) -> Response:
serializer = self.get_serializer(data=request.query_params)
serializer.is_valid(raise_exception=True)
validation = self.validator_class(files_data=serializer.object)
validation.is_valid()
result: FilesResult = self.handler_class().handle(files_data=serializer.object)
return Response(asdict(result))
@check_demo
def post(self, request: Request) -> Response:
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
validation = self.validator_class(files_data=serializer.object)
validation.is_valid()
result = self.handler_class().handle(files_data=serializer.object, is_get=False)
return Response({"operation": result})
class MovieManagementEndpoint(GenericAPIView):
serializer_class = MovieManagementSerializer
validator_class = MovieManagementValidation
handler_class = MovieManagementHandler
permission_classes = [DemoOrIsAuthenticated]
verbose_request_logging = True
@check_demo
def post(self, request: Request) -> Response:
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
validation = self.validator_class(
management=serializer.object, user=request.user
)
validation.is_valid()
result = self.handler_class().handle(
management=serializer.object, user=request.user
)
return Response({"operation": result})
class MovieAddEndpoint(GenericAPIView):
serializer_class = MovieAddSerializer
permission_classes = [DemoOrIsAuthenticated]
verbose_request_logging = True
@check_demo
def post(self, request: Request) -> Response:
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
result: str = AddMovieHandler(
source=serializer.object.source, imdb_id=serializer.object.imdb_id
).handle()
return Response({"operation": result})
class GlobalSettingsEndpoint(GenericAPIView):
serializer_class = GlobalSettingsSerializer
permission_classes = [DemoOrIsAuthenticated]
verbose_request_logging = True
def get(self, request: Request) -> Response:
result: Dict[str, Any] = {}
_dotenv: Dict[str, str] = get_dotenv_values(
filters=DotenvFilter.get_filter(user=request.user)
)
result["dotenv"] = _dotenv
result["forcePasswordChange"] = False
admin: User = User.objects.first()
if admin.check_password(superuser.PASSWORD):
result["forcePasswordChange"] = True
return Response(result)
@check_demo
def post(self, request: Request) -> Response:
serializer = self.get_serializer(data=request.data, user=request.user)
serializer.is_valid(raise_exception=True)
for key, value in serializer.object.dotenv.items():
dotenv.set_key(get_dotenv_location(), key, value)
set_redis(key, str(value))
dotenv.load_dotenv(settings.DOTENV)
return Response({"dotenv": "success"})
class MudSourcesList(generics.ListCreateAPIView):
queryset = MudSource.objects.all()
serializer_class = MudSourceSerializer
permission_classes = [DemoOrIsAuthenticated]
verbose_request_logging = True
@check_demo
def perform_create(self, serializer: MudSourceSerializer) -> None:
serializer.save()
class MudSourceEndpoint(APIView):
permission_classes = [DemoOrIsAuthenticated]
verbose_request_logging = True
@check_demo
def delete(self, request: Request, mud_id: int) -> Response:
try:
mud_source: MudSource = MudSource.objects.get(pk=mud_id)
except MudSource.DoesNotExist:
raise NotFound(f"Mud Source ID: {mud_id} not found.")
else:
mud_source.delete()
return Response(None, status=HTTP_204_NO_CONTENT)
class RedownloadSubtitlesEndpoint(GenericAPIView):
serializer_class = RedownloadSubtitlesSerializer
permission_classes = [DemoOrIsAuthenticated]
verbose_request_logging = True
@check_demo
def post(self, request: Request) -> Response:
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
for _id in serializer.object:
redownload_subtitles.delay(movie_id=_id)
return Response({"operation": "started"})
|
task/game_signin.py
|
dhbowen1/unicom-task
| 178 |
127450
|
# -*- coding: utf-8 -*-
# @Time : 2021/08/14 16:30
# @Author : srcrs
# @Email : <EMAIL>
import requests,json,time,re,login,logging,traceback,os,random,notify,datetime
from lxml.html import fromstring
# Daily check-in at the game task center earns points; the game-task reward increases day by day up to 7, and the game channel gives 1 point per day
# Location: Home --> Games --> Daily check-in
class game_signin:
def run(self, client, user):
data1 = {
'methodType': 'iOSIntegralGet',
'gameLevel': '1',
'deviceType': 'iOS'
}
try:
client.get('https://img.client.10010.com/gametask/index.html?yw_code=&desmobile=' + user['username'] + '&[email protected]')
time.sleep(2)
headers = {
'origin': 'https://img.client.10010.com',
'referer': 'https://img.client.10010.com/gametask/index.html?yw_code=&desmobile=' + user['username'] + '&[email protected]'
}
client.headers.update(headers)
# game channel points
gameCenter_exp = client.post('https://m.client.10010.com/producGameApp',data=data1)
gameCenter_exp.encoding='utf-8'
res1 = gameCenter_exp.json()
if res1['code'] == '0000':
logging.info('【游戏频道打卡】: 获得' + str(res1['integralNum']) + '积分')
else:
logging.info('【游戏频道打卡】: ' + res1['msg'])
client.headers.pop('referer')
client.headers.pop('origin')
time.sleep(1)
except Exception as e:
print(traceback.format_exc())
logging.error('【游戏频道打卡】: 错误,原因为: ' + str(e))
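# Editor's note: hedged usage sketch, not part of the original task module.
# `client` is assumed to be a logged-in requests.Session produced by this
# project's login helper, and `user` a dict holding at least a 'username' key:
#
#   game_signin().run(client, {'username': '186xxxxxxxx'})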
|
lib/company_jira.py
|
Rehzende/project-dev-kpis
| 113 |
127473
|
import os
from jira import JIRA
JIRA_API_SERVER = os.getenv('JIRA_API_SERVER')
JIRA_API_USER = os.getenv('JIRA_API_USER')
JIRA_API_PASSWORD = os.getenv('JIRA_API_PASSWORD')
jira = JIRA(
server=JIRA_API_SERVER,
basic_auth=(JIRA_API_USER, JIRA_API_PASSWORD)
) if JIRA_API_SERVER != 'https://localhost/' else None
|
typed_python/compiler/global_variable_definition.py
|
APrioriInvestments/typed_python
| 105 |
127504
|
# Copyright 2020 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typed_python import Alternative
GlobalVariableMetadata = Alternative(
"GlobalVariableMetadata",
StringConstant=dict(value=str),
BytesConstant=dict(value=bytes),
IntegerConstant=dict(value=int),
# a pointer to a PythonObjectOfType (e.g. our version, with
# refcounts that are not in the GIL)
PointerToPyObject=dict(value=object),
# the raw ID of a global python object, like a builtin or
# an exception
IdOfPyObject=dict(value=object),
# a pointer to the Type* in the underlying
RawTypePointer=dict(value=type),
PointerToTypedPythonObject=dict(value=object, type=type),
# a typed python object at module scope (and therefore truly global)
PointerToTypedPythonObjectAsMemberOfDict=dict(
sourceDict=object, name=str, type=type
),
ClassVtable=dict(value=type),
ClassMethodDispatchSlot=dict(
clsType=object,
methodName=str,
retType=object,
argTupleType=object,
kwargTupleType=object
)
)
class GlobalVariableDefinition:
"""Representation for a single globally defined value.
Each such value has a formal name (which should be unique across
all possible compiled value sets, so usually it's a hash), a type,
and some metadata indicating to the calling context what it's for.
"""
def __init__(self, name, typ, metadata):
"""Initialize a GlobalVariableDefinition.
Args:
name - a string uniquely identifying the global variable
typ - a native_ast type
metadata - any 'value-like' python object we can use
to identify the variable.
"""
self.name = name
self.type = typ
self.metadata = metadata
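# Editor's note: hedged usage sketch, not part of the original module. The name
# is a placeholder (real names are hash-derived), and `native_ast_type` stands
# in for the native_ast type object the compiler would normally supply.
def _example_definition(native_ast_type=None):
    metadata = GlobalVariableMetadata.StringConstant(value="hello")
    return GlobalVariableDefinition("global_string_0123abcd", native_ast_type, metadata)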
|
caffe-grid/src/main/python/com/yahoo/__init__.py
|
jenniew/IntelCaffeOnSpark_mirror
| 1,436 |
127623
|
from ml import *
__all__=["ml"]
|
Models/License-Plate-Recognition-Nigerian-vehicles-master/License-Plate-Recognition-Nigerian-vehicles-master/plotting.py
|
nipunjain099/AutoGuard
| 147 |
127627
|
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
def plot_cca(image, objects_cordinates):
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(12, 12))
ax.imshow(image, cmap=plt.cm.gray)
for each_cordinate in objects_cordinates:
min_row, min_col, max_row, max_col = each_cordinate
bound_box = mpatches.Rectangle((min_col, min_row), max_col - min_col,
max_row - min_row, fill=False, edgecolor='red', linewidth=2)
ax.add_patch(bound_box)
plt.show()
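# Editor's note: hedged usage sketch, not part of the original module. A tiny
# synthetic grayscale image and a single bounding box illustrate the expected
# (min_row, min_col, max_row, max_col) ordering of each coordinate tuple.
if __name__ == '__main__':
    import numpy as np
    demo_image = np.zeros((100, 200))
    demo_image[20:60, 50:150] = 1.0
    plot_cca(demo_image, [(20, 50, 60, 150)])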
|
tools/_init_paths.py
|
sibeiyang/sgmn
| 130 |
127661
|
import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
# path
this_dir = osp.dirname(__file__)
# refer path
refer_dir = osp.join(this_dir, '..', 'data', 'ref')
sys.path.insert(0, refer_dir)
# lib path
sys.path.insert(0, osp.join(this_dir, '..'))
sys.path.insert(0, osp.join(this_dir, '..', 'lib'))
sys.path.insert(0, osp.join(this_dir, '..', 'tools'))
sys.path.insert(0, osp.join(this_dir, '..', 'utils'))
|
src/network/socket_depth.py
|
nitsanw/grav
| 283 |
127686
|
#!/usr/bin/python
## Heavily inspired by /usr/share/bcc/tools/tcptop
import sys
import time
import datetime
from bcc import BPF
from socket import inet_ntop, AF_INET, AF_INET6
from struct import pack
prog="""
#include <linux/types.h>
#include <uapi/linux/ptrace.h>
#include <uapi/linux/bpf_perf_event.h>
#include <linux/sched.h>
#include <linux/socket.h>
#include <net/sock.h>
#include <asm/atomic.h>
struct ipv4_key_t {
u32 saddr;
u32 daddr;
u16 lport;
u16 dport;
};
BPF_HASH(total_rcv_mem, struct ipv4_key_t);
BPF_HASH(peak_rcv_mem, struct ipv4_key_t);
int trace_socket_rcv(struct pt_regs *ctx, struct sock *sk, struct sk_buff *skb) {
u16 dport = 0, family = sk->__sk_common.skc_family;
if (family == AF_INET) {
struct ipv4_key_t ipv4_key = {};
ipv4_key.saddr = sk->__sk_common.skc_rcv_saddr;
ipv4_key.daddr = sk->__sk_common.skc_daddr;
ipv4_key.lport = sk->__sk_common.skc_num;
dport = sk->__sk_common.skc_dport;
ipv4_key.dport = ntohs(dport);
u64 zero = 0, *total, *max;
int rmem = sk->sk_rmem_alloc.counter;
total = total_rcv_mem.lookup_or_init(&ipv4_key, &zero);
(*total) += rmem + skb->data_len;
max = peak_rcv_mem.lookup_or_init(&ipv4_key, &zero);
if (rmem > (*max)) {
(*max) = rmem + skb->data_len;
}
}
return 0;
};
"""
bpf = BPF(text=prog)
bpf.attach_kprobe(event="tcp_v4_do_rcv", fn_name="trace_socket_rcv")
def to_socket_key(k):
return inet_ntop(AF_INET, pack("I", k.saddr)) + ":" + str(k.lport) + "," + inet_ntop(AF_INET, pack("I", k.daddr)) + ":" + str(k.dport)
with open("/tmp/tcpv4-peak.csv", "a+", 0) as p:
with open("/tmp/tcpv4-total.csv", "a+", 0) as t:
while True:
time.sleep(1)
current_time = datetime.datetime.now()
total_depth = bpf["total_rcv_mem"]
max_depth = bpf["peak_rcv_mem"]
if len(total_depth) == 0 and len(max_depth) == 0:
print("No data captured")
else:
for socket, total in total_depth.iteritems():
t.write("{0},{1},{2},{3}\n".format(current_time.strftime("%H:%M:%S"), current_time.strftime("%s"), to_socket_key(socket), total.value))
for socket, peak in max_depth.iteritems():
p.write("{0},{1},{2},{3}\n".format(current_time.strftime("%H:%M:%S"), current_time.strftime("%s"), to_socket_key(socket), peak.value))
total_depth.clear()
max_depth.clear()
|
docs/assets/update_formats.py
|
Kronuz/Xapiand
| 370 |
127720
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import json
import msgpack
from collections import OrderedDict
def main():
if len(sys.argv) != 2:
print("usage: {} <file.json>".format(sys.argv[0]))
sys.exit(64)
name, ext = os.path.splitext(sys.argv[1])
json_content = open(name + '.json')
msgpack_content = open(name + '.msgpack', 'wb')
ndjson_content = open(name + '.ndjson', 'w')
http_content = open(name + '.http', 'w')
content = json.load(json_content, object_pairs_hook=OrderedDict)
for i, a in enumerate(content, 1):
msgpack_content.write(msgpack.dumps(a))
aj = json.dumps(a)
print(aj, file=ndjson_content)
print("""PUT /bank/{} HTTP/1.1
Host: localhost:8880
Content-Type: application/json
Content-Length: {}
{}""".format(i, len(aj), aj), file=http_content)
if __name__ == '__main__':
main()
|
DiffAugment-biggan-imagenet/compare_gan/architectures/infogan.py
|
Rian-T/data-efficient-gans
| 1,902 |
127727
|
# coding=utf-8
# Copyright 2018 Google LLC & <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of InfoGAN generator and discriminator architectures.
Details are available in https://arxiv.org/pdf/1606.03657.pdf.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compare_gan.architectures import abstract_arch
from compare_gan.architectures.arch_ops import batch_norm
from compare_gan.architectures.arch_ops import conv2d
from compare_gan.architectures.arch_ops import deconv2d
from compare_gan.architectures.arch_ops import linear
from compare_gan.architectures.arch_ops import lrelu
import tensorflow as tf
class Generator(abstract_arch.AbstractGenerator):
"""Generator architecture based on InfoGAN."""
def apply(self, z, y, is_training):
"""Build the generator network for the given inputs.
Args:
z: `Tensor` of shape [batch_size, z_dim] with latent code.
y: `Tensor` of shape [batch_size, num_classes] with one hot encoded
labels.
is_training: boolean, are we in train or eval mode.
Returns:
A tensor of size [batch_size] + self._image_shape with values in [0, 1].
"""
del y
h, w, c = self._image_shape
bs = z.shape.as_list()[0]
net = linear(z, 1024, scope="g_fc1")
net = lrelu(batch_norm(net, is_training=is_training, name="g_bn1"))
net = linear(net, 128 * (h // 4) * (w // 4), scope="g_fc2")
net = lrelu(batch_norm(net, is_training=is_training, name="g_bn2"))
net = tf.reshape(net, [bs, h // 4, w // 4, 128])
net = deconv2d(net, [bs, h // 2, w // 2, 64], 4, 4, 2, 2, name="g_dc3")
net = lrelu(batch_norm(net, is_training=is_training, name="g_bn3"))
net = deconv2d(net, [bs, h, w, c], 4, 4, 2, 2, name="g_dc4")
out = tf.nn.sigmoid(net)
return out
class Discriminator(abstract_arch.AbstractDiscriminator):
"""Discriminator architecture based on InfoGAN."""
def apply(self, x, y, is_training):
"""Apply the discriminator on a input.
Args:
x: `Tensor` of shape [batch_size, ?, ?, ?] with real or fake images.
y: `Tensor` of shape [batch_size, num_classes] with one hot encoded
labels.
is_training: Boolean, whether the architecture should be constructed for
training or inference.
Returns:
Tuple of 3 Tensors, the final prediction of the discriminator, the logits
before the final output activation function and logits from the second
last layer.
"""
use_sn = self._spectral_norm
batch_size = x.shape.as_list()[0]
# Resulting shape: [bs, h/2, w/2, 64].
net = lrelu(conv2d(x, 64, 4, 4, 2, 2, name="d_conv1", use_sn=use_sn))
# Resulting shape: [bs, h/4, w/4, 128].
net = conv2d(net, 128, 4, 4, 2, 2, name="d_conv2", use_sn=use_sn)
net = self.batch_norm(net, y=y, is_training=is_training, name="d_bn2")
net = lrelu(net)
# Resulting shape: [bs, h * w * 8].
net = tf.reshape(net, [batch_size, -1])
# Resulting shape: [bs, 1024].
net = linear(net, 1024, scope="d_fc3", use_sn=use_sn)
net = self.batch_norm(net, y=y, is_training=is_training, name="d_bn3")
net = lrelu(net)
# Resulting shape: [bs, 1].
out_logit = linear(net, 1, scope="d_fc4", use_sn=use_sn)
out = tf.nn.sigmoid(out_logit)
return out, out_logit, net
|
vh/gate_proxy.py
|
rEinve/ajenti-v
| 150 |
127742
|
from ajenti.api import plugin
from ajenti.plugins.vh.api import ApplicationGatewayComponent
@plugin
class ProxyPass (ApplicationGatewayComponent):
id = 'proxy'
title = _('Reverse proxy')
|
tests/Python/Delay/PyATKDelay_stereouniversaldelay_test.py
|
D-J-Roberts/AudioTK
| 249 |
127745
|
#!/usr/bin/env python
from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter, PipelineGlobalSinkFilter
from ATK.Delay import DoubleDualMultipleUniversalFixedDelayLineFilter
sample_rate = 96000
def filter(inputl, inputr, blend_ch1=0, blend_ch2=0,
feedback_ch1_ch1=0, feedback_ch1_ch2=0, feedback_ch2_ch1=0, feedback_ch2_ch2=0,
feedforward_ch1_ch1=1, feedforward_ch1_ch2=0, feedforward_ch2_ch1=0, feedforward_ch2_ch2=1):
import numpy as np
outputl = np.zeros(inputl.shape, dtype=np.float64)
outputr = np.zeros(inputl.shape, dtype=np.float64)
infilterL = DoubleInPointerFilter(inputl, False)
infilterL.input_sampling_rate = sample_rate
infilterR = DoubleInPointerFilter(inputr, False)
infilterR.input_sampling_rate = sample_rate
delayfilter = DoubleDualMultipleUniversalFixedDelayLineFilter(5000)
delayfilter.input_sampling_rate = sample_rate
delayfilter.set_input_port(0, infilterL, 0)
delayfilter.set_input_port(1, infilterR, 0)
delayfilter.set_delay(0,4800) #50ms
delayfilter.set_delay(1,3600) #37.5ms
delayfilter.set_blend(0,blend_ch1)
delayfilter.set_blend(1,blend_ch2)
delayfilter.set_feedback(0,0,feedback_ch1_ch1)
delayfilter.set_feedback(0,1,feedback_ch1_ch2)
delayfilter.set_feedback(1,0,feedback_ch2_ch1)
delayfilter.set_feedback(1,1,feedback_ch2_ch2)
delayfilter.set_feedforward(0,0,feedforward_ch1_ch1)
delayfilter.set_feedforward(0,1,feedforward_ch1_ch2)
delayfilter.set_feedforward(1,0,feedforward_ch2_ch1)
delayfilter.set_feedforward(1,1,feedforward_ch2_ch2)
outfilterl = DoubleOutPointerFilter(outputl, False)
outfilterl.input_sampling_rate = sample_rate
outfilterl.set_input_port(0, delayfilter, 0)
outfilterr = DoubleOutPointerFilter(outputr, False)
outfilterr.input_sampling_rate = sample_rate
outfilterr.set_input_port(0, delayfilter, 1)
pipelineend = PipelineGlobalSinkFilter()
pipelineend.input_sampling_rate = sample_rate
pipelineend.add_filter(outfilterl)
pipelineend.add_filter(outfilterr)
pipelineend.process(inputl.shape[1])
return outputl, outputr
def dualmultipleuniversaldelay_test():
import numpy as np
from numpy.testing import assert_almost_equal
import os
dirname = os.path.dirname(__file__)
d = np.fromfile(dirname + os.sep + "input_SUDl.dat", dtype=np.float64).reshape(1, -1)
dr = np.fromfile(dirname + os.sep + "input_SUDr.dat", dtype=np.float64).reshape(1, -1)
refl = np.fromfile(dirname + os.sep + "output_SUDl.dat", dtype=np.float64).reshape(1, -1)
refr = np.fromfile(dirname + os.sep + "output_SUDr.dat", dtype=np.float64).reshape(1, -1)
outl, outr = filter(d, dr, blend_ch1=1, blend_ch2=1,
feedforward_ch1_ch1=-1, feedforward_ch2_ch2=-1, feedforward_ch1_ch2=.1, feedforward_ch2_ch1=.7,
feedback_ch1_ch1=-.5, feedback_ch1_ch2=.1, feedback_ch2_ch1=.3, feedback_ch2_ch2=-.1, )
assert_almost_equal(outl, refl)
assert_almost_equal(outr, refr)
if __name__ == "__main__":
import numpy as np
samples = 2000000
freq_max = 20000
import matplotlib.pyplot as plt
import sys, os
sys.path.append(os.path.dirname(os.path.realpath(__file__))+"/..")
from display.compare_spec import plot_me
t = np.arange(samples, dtype=np.float64).reshape(1, -1) / sample_rate
d = np.sin(np.pi * (sample_rate * freq_max / samples * (t + .1)) * t)
dr = d[:,::-1].copy()
d[:,:1000].tofile("input_SUDl.dat")
dr[:,:1000].tofile("input_SUDr.dat")
outl, outr = filter(d, dr, blend_ch1=1, blend_ch2=1,
feedforward_ch1_ch1=-1, feedforward_ch2_ch2=-1, feedforward_ch1_ch2=.1, feedforward_ch2_ch1=.7,
feedback_ch1_ch1=-.5, feedback_ch1_ch2=.1, feedback_ch2_ch1=.3, feedback_ch2_ch2=-.1, )
outl[:,:1000].tofile("output_SUDl.dat")
outr[:,:1000].tofile("output_SUDr.dat")
plt.figure()
plot_me((d[0], outl[0]), sample_rate)
plt.gcf().suptitle("Delay Channel R")
plt.figure()
plot_me((dr[0], outr[0]), sample_rate)
plt.gcf().suptitle("Delay Channel L")
plt.show()
|
academicstoday_project/teacher/views/overview.py
|
LeeDoona/EasyGrading
| 146 |
127748
|
from django.shortcuts import render
from django.core import serializers
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.conf import settings
import json
import datetime
from registrar.models import Course
from registrar.models import Announcement
from registrar.models import Syllabus
from registrar.models import Policy
from registrar.models import Lecture
from registrar.models import Assignment
from registrar.models import Quiz
from registrar.models import Exam
from registrar.models import CourseSubmission
# Public Functions
#--------------------
@login_required(login_url='/landpage')
def overview_page(request, course_id):
course = Course.objects.get(id=course_id)
try:
review = CourseSubmission.objects.get(course=course)
except CourseSubmission.DoesNotExist:
review = None
try:
announcements = Announcement.objects.filter(course=course).order_by('-post_date')
except Announcement.DoesNotExist:
announcements = None
try:
syllabus = Syllabus.objects.get(course=course)
except Syllabus.DoesNotExist:
syllabus = None
try:
policy = Policy.objects.get(course=course)
except Policy.DoesNotExist:
policy = None
try:
lectures = Lecture.objects.filter(course=course).order_by('-lecture_num')
except Lecture.DoesNotExist:
lectures = None
try:
assignments = Assignment.objects.filter(course=course).order_by('-assignment_num')
except Assignment.DoesNotExist:
assignments = None
try:
quizzes = Quiz.objects.filter(course=course).order_by('-quiz_num')
except Quiz.DoesNotExist:
quizzes = None
try:
exams = Exam.objects.filter(course=course).order_by('-exam_num')
except Exam.DoesNotExist:
exams = None
return render(request, 'teacher/overview/view.html',{
'course': course,
'total_final_mark_worth': total_final_mark_worth(course),
'has_final_exam': has_final_exam(exams),
'review': review,
'announcements' : announcements,
'syllabus': syllabus,
'lectures': lectures,
'assignments': assignments,
'quizzes': quizzes,
'exams': exams,
'policy': policy,
'COURSE_SUBMITTED_FOR_REVIEW_STATUS': settings.COURSE_SUBMITTED_FOR_REVIEW_STATUS,
'COURSE_IN_REVIEW_STATUS': settings.COURSE_IN_REVIEW_STATUS,
'COURSE_UNAVAILABLE_STATUS': settings.COURSE_UNAVAILABLE_STATUS,
'COURSE_AVAILABLE_STATUS': settings.COURSE_AVAILABLE_STATUS,
'COURSE_REJECTED_STATUS': settings.COURSE_REJECTED_STATUS,
'user': request.user,
'tab': 'overview',
'HAS_ADVERTISMENT': settings.APPLICATION_HAS_ADVERTISMENT,
'local_css_urls': settings.SB_ADMIN_2_CSS_LIBRARY_URLS,
'local_js_urls': settings.SB_ADMIN_2_JS_LIBRARY_URLS,
})
@login_required(login_url='/landpage')
def submit_course_for_review(request, course_id):
course = Course.objects.get(id=course_id)
response_data = {'status' : 'failed', 'message' : ''}
# Validate announcements
try:
announcements = Announcement.objects.filter(course=course).order_by('-post_date')
if announcements.count() < 1:
response_data['message'] = 'zero announcements'
return HttpResponse(json.dumps(response_data), content_type="application/json")
except Announcement.DoesNotExist:
response_data['message'] = 'no announcements detected'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate syllabus
try:
Syllabus.objects.get(course=course)
except Syllabus.DoesNotExist:
response_data['message'] = 'no syllabus set'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate policy
try:
Policy.objects.get(course=course)
except Policy.DoesNotExist:
response_data['message'] = 'no policy set'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate lectures
try:
lectures = Lecture.objects.filter(course=course).order_by('-lecture_num')
if lectures.count() < 2:
response_data['message'] = 'minimum 2 lectures required'
return HttpResponse(json.dumps(response_data), content_type="application/json")
except Lecture.DoesNotExist:
response_data['message'] = 'no lecture(s) found'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate assignments
try:
assignments = Assignment.objects.filter(course=course).order_by('-assignment_num')
if assignments.count() < 1:
response_data['message'] = 'minimum 1 assignment required'
return HttpResponse(json.dumps(response_data), content_type="application/json")
except Assignment.DoesNotExist:
response_data['message'] = 'no assignment(s)'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate quizzes
try:
quizzes = Quiz.objects.filter(course=course).order_by('-quiz_num')
if quizzes.count() < 1:
response_data['message'] = 'minimum 1 quiz required'
return HttpResponse(json.dumps(response_data), content_type="application/json")
except Quiz.DoesNotExist:
response_data['message'] = 'no quiz(zes) found'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate exams
try:
exams = Exam.objects.filter(course=course).order_by('-exam_num')
if exams.count() < 1:
response_data['message'] = 'minimum 1 exam required'
return HttpResponse(json.dumps(response_data), content_type="application/json")
except Exam.DoesNotExist:
response_data['message'] = 'no exam(s) found'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate final mark calculator
total_worth = total_final_mark_worth(course)
if total_worth != 100:
response_data['message'] = 'total final mark must add up to 100%'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Make sure we have a final exam
is_final = has_final_exam(exams)
if is_final == False:
response_data['message'] = 'course requires only 1 final exam'
return HttpResponse(json.dumps(response_data), content_type="application/json")
review = CourseSubmission.objects.create(
course=course,
)
review.save()
# Make course available.
course.status = settings.COURSE_AVAILABLE_STATUS
course.save()
response_data = {'status' : 'success', 'message' : 'submitted course review'}
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Private Functions
#--------------------
# Function looks through the course assignments/exams/quizzes and returns
# the accumulated worth total.
def total_final_mark_worth(course):
total_worth = 0 # Variable used to track total worth of the coursework.
# Fetch from database
try:
assignments = Assignment.objects.filter(course=course).order_by('-assignment_num')
except Assignment.DoesNotExist:
assignments = None
try:
quizzes = Quiz.objects.filter(course=course).order_by('-quiz_num')
except Quiz.DoesNotExist:
quizzes = None
try:
exams = Exam.objects.filter(course=course).order_by('-exam_num')
except Exam.DoesNotExist:
exams = None
# Iterate through all coursework and calculate the total.
for assignment in assignments:
total_worth += assignment.worth
for quiz in quizzes:
total_worth += quiz.worth
for exam in exams:
total_worth += exam.worth
return total_worth
# Function will iterate through all the exams and return either True or False
# depending if a 'final exam' was found in the list.
def has_final_exam(exams):
count = 0
for exam in exams:
if exam.is_final == True:
count += 1
return count == 1
|
tests/test_api.py
|
proteanblank/poseidon
| 251 |
127766
|
import falcon
import pytest
from falcon import testing
from poseidon_api.api import api
@pytest.fixture
def client():
return testing.TestClient(api)
def test_v1(client):
response = client.simulate_get('/v1')
assert response.status == falcon.HTTP_OK
def test_network(client):
response = client.simulate_get('/v1/network')
assert len(response.json) == 2
assert response.status == falcon.HTTP_OK
def test_network_by_ip(client):
response = client.simulate_get('/v1/network/10.0.0.1')
assert len(response.json['dataset']) == 0
assert response.status == falcon.HTTP_OK
def test_network_full(client):
response = client.simulate_get('/v1/network_full')
assert len(response.json) == 1
assert response.status == falcon.HTTP_OK
def test_info(client):
response = client.simulate_get('/v1/info')
assert response.status == falcon.HTTP_OK
|
tests/test_camera_distortion.py
|
aferrall/redner
| 1,146 |
127793
|
import pyredner
import torch
pyredner.set_use_gpu(torch.cuda.is_available())
position = torch.tensor([1.0, 0.0, -3.0])
look_at = torch.tensor([1.0, 0.0, 0.0])
up = torch.tensor([0.0, 1.0, 0.0])
fov = torch.tensor([45.0])
clip_near = 1e-2
# randomly generate distortion parameters
torch.manual_seed(1234)
target_distort_params = (torch.rand(8) - 0.5) * 0.05
resolution = (256, 256)
cam = pyredner.Camera(position = position,
look_at = look_at,
up = up,
fov = fov,
clip_near = clip_near,
resolution = resolution,
distortion_params = target_distort_params)
checkerboard_texture = pyredner.imread('scenes/teapot.png')
if pyredner.get_use_gpu():
checkerboard_texture = checkerboard_texture.cuda(device = pyredner.get_device())
mat_checkerboard = pyredner.Material(\
diffuse_reflectance = checkerboard_texture)
mat_black = pyredner.Material(\
diffuse_reflectance = torch.tensor([0.0, 0.0, 0.0], device = pyredner.get_device()))
plane = pyredner.Object(vertices = torch.tensor([[-1.0,-1.0, 0.0],
[-1.0, 1.0, 0.0],
[ 1.0,-1.0, 0.0],
[ 1.0, 1.0, 0.0]],
device = pyredner.get_device()),
indices = torch.tensor([[0, 1, 2],
[1, 3, 2]],
dtype = torch.int32,
device = pyredner.get_device()),
uvs = torch.tensor([[0.05, 0.05],
[0.05, 0.95],
[0.95, 0.05],
[0.95, 0.95]], device = pyredner.get_device()),
material = mat_checkerboard)
scene = pyredner.Scene(camera=cam, objects=[plane])
img = pyredner.render_albedo(scene=scene)
pyredner.imwrite(img.cpu(), 'results/test_camera_distortion/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_camera_distortion/target.png')
# Read the target image we just saved.
target = pyredner.imread('results/test_camera_distortion/target.exr')
if pyredner.get_use_gpu():
target = target.cuda(device = pyredner.get_device())
cam.distortion_params = torch.zeros(8, requires_grad = True)
scene = pyredner.Scene(camera=cam, objects=[plane])
img = pyredner.render_albedo(scene=scene)
pyredner.imwrite(img.cpu(), 'results/test_camera_distortion/init.exr')
pyredner.imwrite(img.cpu(), 'results/test_camera_distortion/init.png')
# Optimize for the camera distortion parameters.
optimizer = torch.optim.Adam([cam.distortion_params], lr=1e-3)
for t in range(200):
print('iteration:', t)
optimizer.zero_grad()
scene = pyredner.Scene(camera=cam, objects=[plane])
img = pyredner.render_albedo(scene=scene)
pyredner.imwrite(img.cpu(), 'results/test_camera_distortion/iter_{}.png'.format(t))
loss = (img - target).pow(2).sum()
print('loss:', loss.item())
loss.backward()
print('grad:', cam.distortion_params.grad)
optimizer.step()
print('distortion_params:', cam.distortion_params)
img = pyredner.render_albedo(scene=scene)
pyredner.imwrite(img.cpu(), 'results/test_camera_distortion/final.exr')
pyredner.imwrite(img.cpu(), 'results/test_camera_distortion/final.png')
# Convert the intermediate renderings to a video.
from subprocess import call
call(["ffmpeg", "-framerate", "24", "-i",
"results/test_camera_distortion/iter_%d.png", "-vb", "20M",
"results/test_camera_distortion/out.mp4"])
|
user_agents/__init__.py
|
nagesh4193/python-user-agents
| 1,166 |
127803
|
VERSION = (2, 2, 0)
from .parsers import parse
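# Editor's note: hedged usage sketch, not part of the original __init__. The UA
# string is an arbitrary example; parse() returns a UserAgent object exposing
# browser/os/device families and boolean flags such as is_mobile.
if __name__ == "__main__":
    ua = parse("Mozilla/5.0 (iPhone; CPU iPhone OS 14_0 like Mac OS X) "
               "AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Mobile/15E148 Safari/604.1")
    print(ua.browser.family, ua.os.family, ua.is_mobile)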
|
tutorials/stock-wallet/microservices/crypto/tests/test_commands/test_services.py
|
bhardwajRahul/minos-python
| 247 |
127810
|
import sys
import unittest
import pendulum
from src import (
Crypto,
CryptoCommandService,
)
from minos.networks import (
InMemoryRequest,
Response,
)
from tests.utils import (
build_dependency_injector,
)
class TestCryptoCommandService(unittest.IsolatedAsyncioTestCase):
def setUp(self) -> None:
self.injector = build_dependency_injector()
async def asyncSetUp(self) -> None:
await self.injector.wire(modules=[sys.modules[__name__]])
async def asyncTearDown(self) -> None:
await self.injector.unwire()
def test_constructor(self):
service = CryptoCommandService()
self.assertIsInstance(service, CryptoCommandService)
async def test_remote_crypto(self):
now = pendulum.now()
now_minus_one_month = now.subtract(months=1)
service = CryptoCommandService()
response = service.call_remote("BTC/USD", now_minus_one_month.to_datetime_string())
self.assertIsInstance(service, CryptoCommandService)
if __name__ == "__main__":
unittest.main()
|
src/add_lat_lon_2d.py
|
deephyper/WeatherBench
| 343 |
127833
|
import argparse
import xarray as xr
import numpy as np
import xesmf as xe
from glob import glob
import os
import shutil
def add_2d(
ds,
):
"""
Add 2D latitude/longitude fields (lat2d, lon2d) to the dataset.
:param ds: Input xarray dataset
"""
ds['lat2d'] = ds.lat.expand_dims({'lon': ds.lon}).transpose()
ds['lon2d'] = ds.lon.expand_dims({'lat': ds.lat})
return ds
def convert_z_to_orography(ds):
"""
Convert geopotential of surface to height in meters
Args:
ds: Input dataset
Returns:
ds: Same dataset with orography instead of z
"""
ds['z'] = ds.z / 9.80665
ds = ds.rename({'z': 'orography'})
ds.orography.attrs['units'] = 'm'
return ds
def main(
input_fns,
custom_fn=None,
):
"""
:param input_fns: Input files. Can use *. If more than one, loop over them
:param custom_fn: If not None, use custom file name. Otherwise infer from parameters.
"""
# Get files for starred expressions
if '*' in input_fns[0]:
input_fns = sorted(glob(input_fns[0]))
# Loop over input files
for fn in input_fns:
print(f'Extracting from file: {fn}')
ds = xr.open_dataset(fn).isel(time=0).drop('time')
ds = convert_z_to_orography(add_2d(ds))
fn_out = (
custom_fn or fn
)
print(f"Saving file: {fn_out}")
ds.to_netcdf(fn_out+'.tmp')
ds.close()
shutil.move(fn_out+'.tmp', fn_out)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_fns',
type=str,
nargs='+',
help="Input files (full path). Can use *. If more than one, loop over them",
required=True
)
parser.add_argument(
'--custom_fn',
type=str,
help="If not None, use custom file name. Otherwise infer from parameters.",
default=None
)
args = parser.parse_args()
main(
input_fns=args.input_fns,
custom_fn=args.custom_fn,
)
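# Editor's note: hedged usage sketch; the input path is hypothetical. The script
# expects NetCDF files containing surface geopotential `z` plus 1-D lat/lon
# coordinates, and rewrites each file in place unless --custom_fn is given:
#
#   python src/add_lat_lon_2d.py --input_fns /data/weatherbench/constants_5.625deg.nc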
|
pykka/_envelope.py
|
fjarri/pykka
| 796 |
127847
|
class Envelope:
"""
Envelope to add metadata to a message.
This is an internal type and is not part of the public API.
:param message: the message to send
:type message: any
:param reply_to: the future to reply to if there is a response
:type reply_to: :class:`pykka.Future`
"""
# Using slots speeds up envelope creation by ~20%
__slots__ = ["message", "reply_to"]
def __init__(self, message, reply_to=None):
self.message = message
self.reply_to = reply_to
def __repr__(self):
return f"Envelope(message={self.message!r}, reply_to={self.reply_to!r})"
|
minlp-tokenizer/minlptokenizer/tokenizer.py
|
ishine/MiNLP
| 749 |
127857
|
# Copyright 2020 The MiNLP Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import regex
import os
import tensorflow as tf
import math
from minlptokenizer.lexicon import Lexicon
from minlptokenizer.vocab import Vocab
from minlptokenizer.tag import Tag
from minlptokenizer.exception import *
from multiprocessing import Pool
import itertools
from minlptokenizer.config import configs  # assumed source of `configs`, which is used below but never imported in the original
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
pwd = os.path.dirname(__file__)
def batch_generator(list_texts, size=configs['tokenizer_limit']['max_batch_size']):
"""
Generator over a list of texts, used to yield batches iteratively
:param list_texts: list of texts waiting to be tokenized
:param size: size of each batch
:return: generator
"""
if isinstance(list_texts, list) or isinstance(list_texts, tuple):
batch_num = math.ceil(len(list_texts) / size)
for i in range(batch_num):
yield list_texts[i * size:(i + 1) * size]
def format_string(ustring):
"""
Convert full-width characters to half-width and collapse consecutive control characters and spaces into a single space
"""
if not ustring.strip():
raise ZeroLengthException()
if len(ustring) > configs['tokenizer_limit']['max_string_length']:
raise MaxLengthException(len(ustring))
half_wide_string = ""
for uchar in ustring:
inside_code = ord(uchar)
if inside_code == 12288:  # full-width space converts directly
inside_code = 32
elif 65296 <= inside_code <= 65305 or 65313 <= inside_code <= 65339:  # convert full-width characters (except space and English punctuation)
inside_code -= 65248
half_wide_string += chr(inside_code)
return regex.sub(r'[\p{Z}\s]+', ' ', half_wide_string.strip())
def tag2words(text, predict_results):
words = []
word = ''
for idx, ch in enumerate(text):
word += ch
tag = predict_results[idx]
if tag == Tag.S.value or tag == Tag.E.value or tag == Tag.X.value:
words.append(word)
word = ''
if word:
words.append(word)
return regex.split(r'\s+', ' '.join(words))
class MiNLPTokenizer:
sess_dict = {'fine': None, 'coarse': None}
def __init__(self, file_or_list=None, granularity='fine'):
"""
Initialize the tokenizer
:param file_or_list: user-defined lexicon file or list of words
:param granularity: tokenization granularity; 'fine' for fine-grained, 'coarse' for coarse-grained segmentation
"""
self.__vocab_path = os.path.join(pwd, configs['vocab_path'])
self.__pb_model_path = os.path.join(pwd, configs['tokenizer_granularity'][granularity]['model'])
self.__vocab = Vocab(self.__vocab_path)
self.__lexicon = Lexicon(file_or_list)
self.__granularity = granularity
for lexicon_file in configs['lexicon_files']:
self.__lexicon.add_words(os.path.join(pwd, lexicon_file))
def _cut(self, text_batch):
"""
Tokenization function
:param text_batch: list of strings to tokenize
:return: tokenization results
"""
# load the frozen (pb) model
if not MiNLPTokenizer.sess_dict[self.__granularity]:
with tf.io.gfile.GFile(self.__pb_model_path, 'rb') as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
g = tf.Graph()
with g.as_default():
tf.import_graph_def(graph_def, name='')
tf_config = tf.compat.v1.ConfigProto()
tf_config.gpu_options.allow_growth = True  # request GPU memory dynamically, allocating on demand
MiNLPTokenizer.sess_dict[self.__granularity] = tf.compat.v1.Session(graph=g, config=tf_config)
sess = MiNLPTokenizer.sess_dict[self.__granularity]
char_ids_input = sess.graph.get_tensor_by_name('char_ids_batch:0')
factor_input = sess.graph.get_tensor_by_name('factor_batch:0')
tag_ids = sess.graph.get_tensor_by_name('tag_ids:0')
# run model prediction
texts = list(map(format_string, text_batch))
factor = self.__lexicon.get_factor(texts)
input_char_id = self.__vocab.get_char_ids(texts)
feed_dict = {
char_ids_input: input_char_id,
factor_input: factor
}
predict_results = sess.run(tag_ids, feed_dict=feed_dict)
return list(map(lambda x, y: tag2words(x, y), texts, predict_results))
def cut(self, text_or_list, n_jobs=1):
"""
Tokenization function; accepts a single string or a list of strings
:param text_or_list: string or list of strings to tokenize
:param n_jobs: number of processes; defaults to 1 (multiprocessing disabled)
:return: tokenization results
"""
if n_jobs <= 0:
raise ThreadNumberException()
if isinstance(text_or_list, str):
return self._cut([text_or_list])[0]
elif isinstance(text_or_list, list):
generator = batch_generator(text_or_list, size=configs['tokenizer_limit']['max_batch_size'])
if n_jobs == 1:
return list(itertools.chain.from_iterable([self._cut(batch) for batch in generator]))
else:
process_pool = Pool(n_jobs)
res = process_pool.map(self._cut, generator)
process_pool.close()
return list(itertools.chain.from_iterable(res))
else:
raise UnSupportedException()
def set_interfere_factor(self, interfere_factor):
"""
Set the interference strength of the user lexicon; the larger the value, the more closely results follow the lexicon
:param interfere_factor: interference strength, default: 2
"""
self.__lexicon.set_interfere_factor(interfere_factor)
def reset_interfere_factor(self):
"""
Reset the user lexicon interference strength to the default value: 2
"""
self.__lexicon.reset_interfere_factor()
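# Editor's note: hedged usage sketch, not part of the original module. The
# sample sentences are arbitrary; cut() accepts a single string or a list of
# strings, as documented above.
if __name__ == '__main__':
    tokenizer = MiNLPTokenizer(granularity='fine')
    print(tokenizer.cut('今天天气怎么样'))  # single string -> list of tokens
    print(tokenizer.cut(['小米的使命是什么', '今天天气怎么样'], n_jobs=1))  # list -> list of token lists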
|
examples/example_project/context.py
|
szabolcsdombi/zengl
| 116 |
127886
|
import zengl
class Context:
context = None
main_uniform_buffer = None
main_uniform_buffer_data = bytearray(b'\x00' * 64)
@classmethod
def initialize(cls):
ctx = zengl.context()
cls.context = ctx
cls.main_uniform_buffer = ctx.buffer(size=64)
ctx.includes['main_uniform_buffer'] = '''
layout (std140) uniform MainUniformBuffer {
mat4 mvp;
};
'''
@classmethod
def update_camera(cls, eye, target, aspect, fov):
cls.main_uniform_buffer_data[0:64] = zengl.camera(eye, target, aspect=aspect, fov=fov)
@classmethod
def flush_uniform_buffer(cls):
cls.main_uniform_buffer.write(cls.main_uniform_buffer_data)
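# Editor's note: hedged usage sketch, not part of the original module. It shows
# the assumed call order: a window with a current OpenGL context must already
# exist before Context.initialize() is called.
def _example_frame():
    Context.initialize()
    Context.update_camera(eye=(4.0, 3.0, 2.0), target=(0.0, 0.0, 0.0), aspect=16 / 9, fov=45.0)
    Context.flush_uniform_buffer()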
|