# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-10-28 13:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ems', '0023_auto_20171028_0756'),
('registrations', '0022_auto_20171024_2329'),
('messportal', '0004_bitsprofshowbill'),
]
operations = [
migrations.AddField(
model_name='bitsprofshowbill',
name='bitsian',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='ems.Bitsian'),
),
migrations.AddField(
model_name='profshowbill',
name='participant',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='registrations.Participant'),
),
]
|
python
|
VO = "https://www.vote.org"
VA = "https://www.voteamerica.com"
CODE = 'code'
REG = 'reg'
POLLS = 'polls'
CITIES = 'cities' # XXX This should move to campaign.Campaign but it's disruptive
ABS = 'abs'
REGDL = 'regdl'
ABROAD = 'abroad'
class States:
ALABAMA = 'Alabama'
ALASKA = 'Alaska'
ARIZONA = 'Arizona'
ARKANSAS = 'Arkansas'
CALIFORNIA = 'California'
COLORADO = 'Colorado'
CONNECTICUT = 'Connecticut'
DELAWARE = 'Delaware'
DISTRICT_OF_COLUMBIA = 'District of Columbia'
FLORIDA = 'Florida'
GEORGIA = 'Georgia'
GUAM = 'Guam'
HAWAII = 'Hawaii'
IDAHO = 'Idaho'
ILLINOIS = 'Illinois'
INDIANA = 'Indiana'
IOWA = 'Iowa'
KANSAS = 'Kansas'
KENTUCKY = 'Kentucky'
LOUISIANA = 'Louisiana'
MAINE = 'Maine'
MARYLAND = 'Maryland'
MASSACHUSETTS = 'Massachusetts'
MICHIGAN = 'Michigan'
MINNESOTA = 'Minnesota'
MISSISSIPPI = 'Mississippi'
MISSOURI = 'Missouri'
MONTANA = 'Montana'
NEBRASKA = 'Nebraska'
NEVADA = 'Nevada'
NEW_HAMPSHIRE = 'New Hampshire'
NEW_JERSEY = 'New Jersey'
NEW_MEXICO = 'New Mexico'
NEW_YORK = 'New York'
NORTH_CAROLINA = 'North Carolina'
NORTH_DAKOTA = 'North Dakota'
OHIO = 'Ohio'
OKLAHOMA = 'Oklahoma'
OREGON = 'Oregon'
PENNSYLVANIA = 'Pennsylvania'
PUERTO_RICO = 'Puerto Rico'
RHODE_ISLAND = 'Rhode Island'
SOUTH_CAROLINA = 'South Carolina'
SOUTH_DAKOTA = 'South Dakota'
TENNESSEE = 'Tennessee'
TEXAS = 'Texas'
UTAH = 'Utah'
VERMONT = 'Vermont'
VIRGINIA = 'Virginia'
WASHINGTON = 'Washington'
WEST_VIRGINIA = 'West Virginia'
WISCONSIN = 'Wisconsin'
WYOMING = 'Wyoming'
def url_encode_state(state):
return '-'.join(state.split(' ')).lower()
def absentee_ballot(state):
return VA + f"/absentee-mail-ballot/{url_encode_state(state)}/#guide"
def reg_deadline(state):
return VA + f"/voter-registration/{url_encode_state(state)}/#guide"
def abroad(state):
return f"https://www.votefromabroad.org/states/{URLS_BY_STATE[state][CODE]}"
# Static info, extended with initializers below.
# Static schema:
# code: Postal state/territory code
# reg: URL for state voter registration
# polls: URL for state polling places
# cities: Places people and things in state
# vote: Optional call to action for hashtag
# Dynamic schema:
# abs: Absentee ballot info (3rd party)
# regdl: Registration deadlines (3rd party)
# abroad: Voters abroad info
# XXX More of this should go in configuration.
URLS_BY_STATE = {
"Alabama": {CODE: "AL",
REG: "https://myinfo.alabamavotes.gov/VoterView/RegistrantSearch.do",
POLLS: "https://myinfo.alabamavotes.gov/VoterView/PollingPlaceSearch.do"},
"Alaska": {CODE: "AK",
REG: "https://myvoterinformation.alaska.gov/",
POLLS: "https://myvoterinformation.alaska.gov/"},
"Arizona": {CODE: "AZ",
REG: "https://voter.azsos.gov/VoterView/RegistrantSearch.do",
POLLS: "https://my.arizona.vote/PortalList.aspx"},
"Arkansas": {CODE: "AR",
REG: "https://www.voterview.ar-nova.org/voterview",
POLLS: "https://www.voterview.ar-nova.org/voterview"},
"California": {CODE: "CA",
REG: "https://www.sos.ca.gov/elections/registration-status/",
POLLS: "https://www.sos.ca.gov/elections/polling-place/"},
"Colorado": {CODE: "CO",
REG: "https://www.sos.state.co.us/voter/pages/pub/olvr/findVoterReg.xhtml",
POLLS: "https://www.sos.state.co.us/pubs/elections/Resources/CountyElectionOffices.html"},
"Connecticut": {CODE: "CT",
REG: "https://www.dir.ct.gov/sots/LookUp.aspx",
POLLS: "https://portaldir.ct.gov/sots/LookUp.aspx"},
"Delaware": {CODE: "DE",
REG: "https://ivote.de.gov/voterview",
POLLS: "https://ivote.de.gov/VoterView"},
"District of Columbia": {CODE: "DC",
REG: "https://www.dcboe.org/Voters/Register-To-Vote/Check-Voter-Registration-Status",
POLLS: "https://www.dcboe.org/Voters/Where-to-Vote/Find-Out-Where-to-Vote"},
"Florida": {CODE: "FL",
REG: "http://registration.elections.myflorida.com/CheckVoterStatus",
POLLS: "https://registration.elections.myflorida.com/CheckVoterStatus"},
"Georgia": {CODE: "GA",
# A bit better than voteamerica.org's for the Jan. 2021 runoff,
# but does not display in a Twitter card
# ABS: "https://www.vote411.org/georgia#absentee-ballot-process",
# REGDL: "https://sos.ga.gov/admin/files/2020%20Revised%20Short%20Calendar.pdf",
REGDL: "December 7, 2020",
REG: "https://www.mvp.sos.ga.gov/",
POLLS: "https://www.mvp.sos.ga.gov/MVP/mvp.do"},
"Guam": {CODE: "GU",
ABS: "https://gec.guam.gov/index.php/in-office/in-office-absentee-voting",
REG: "https://gec.guam.gov/validate",
REGDL: "https://gec.guam.gov/index.php/gec-2018-election-important-dates",
POLLS: "https://drive.google.com/file/d/1w6pdGRrjwqVMa8cRbx_-9zObMCVQQ3aR/view"},
"Hawaii": {CODE: "HI",
REG: "https://olvr.hawaii.gov/register.aspx",
POLLS: "https://olvr.hawaii.gov/altpollingplacesearch.aspx"},
"Idaho": {CODE: "ID",
REG: "https://elections.sos.idaho.gov/ElectionLink/ElectionLink/VoterSearch.aspx",
POLLS: "https://elections.sos.idaho.gov/ElectionLink/ElectionLink/ViewPollingLocation.aspx"},
"Illinois": {CODE: "IL",
REG: "https://ova.elections.il.gov/RegistrationLookup.aspx",
POLLS: "https://ova.elections.il.gov/PollingPlaceLookup.aspx"},
"Indiana": {CODE: "IN",
REG: "https://indianavoters.in.gov/",
POLLS: "https://indianavoters.in.gov/"},
"Iowa": {CODE: "IA",
REG: "https://sos.iowa.gov/elections/VoterReg/RegToVote/search.aspx",
POLLS: "https://sos.iowa.gov/elections/voterreg/pollingplace/search.aspx"},
"Kansas": {CODE: "KS",
REG: "https://myvoteinfo.voteks.org/VoterView/RegistrantSearch.do",
POLLS: "https://myvoteinfo.voteks.org/VoterView/PollingPlaceSearch.do"},
"Kentucky": {CODE: "KY",
REG: "https://vrsws.sos.ky.gov/VIC/",
POLLS: "https://www.sos.ky.gov/elections/Pages/Polling-Locations.aspx"},
"Louisiana": {CODE: "LA",
REG: "https://voterportal.sos.la.gov/",
POLLS: "https://voterportal.sos.la.gov/"},
"Maine": {CODE: "ME",
REG: "http://www.maine.gov/portal/government/edemocracy/voter_lookup.php",
POLLS: "https://www1.maine.gov/portal/government/edemocracy/voter_lookup.php"},
"Maryland": {CODE: "MD",
REG: "https://voterservices.elections.maryland.gov/votersearch",
POLLS: "https://elections.maryland.gov/voting/where.html"},
"Massachusetts": {CODE: "MA",
REG: "https://www.sec.state.ma.us/VoterRegistrationSearch/MyVoterRegStatus.aspx",
POLLS: "https://www.sec.state.ma.us/wheredoivotema/bal/MyElectionInfo.aspx"},
"Michigan": {CODE: "MI",
REG: "https://mvic.sos.state.mi.us/",
POLLS: "https://mvic.sos.state.mi.us/"},
"Minnesota": {CODE: "MN",
REG: "https://mnvotes.sos.state.mn.us/VoterStatus.aspx",
POLLS: "https://pollfinder.sos.state.mn.us/"},
"Mississippi": {CODE: "MS",
REG: "https://www.msegov.com/sos/voter_registration/AmIRegistered",
POLLS: "https://www.sos.ms.gov/PollingPlace/Pages/default.aspx"},
"Missouri": {CODE: "MO",
REG: "https://s1.sos.mo.gov/elections/voterlookup/",
POLLS: "https://voteroutreach.sos.mo.gov/PRD/VoterOutreach/VOSearch.aspx"},
"Montana": {CODE: "MT",
REG: "https://app.mt.gov/voterinfo/",
POLLS: "https://app.mt.gov/voterinfo/"},
"Nebraska": {CODE: "NE",
REG: "https://www.votercheck.necvr.ne.gov/VoterView/RegistrantSearch.do",
POLLS: "https://www.votercheck.necvr.ne.gov/VoterView/PollingPlaceSearch.do"},
"Nevada": {CODE: "NV",
REG: "https://nvsos.gov/votersearch/",
POLLS: "https://www.nvsos.gov/votersearch/"},
"New Hampshire": {CODE: "NH",
REG: "https://app.sos.nh.gov/Public/PartyInfo.aspx",
POLLS: "https://app.sos.nh.gov/Public/PollingPlaceSearch.aspx"},
"New Jersey": {CODE: "NJ",
REG: "https://voter.njsvrs.com/PublicAccess/jsp/UserLogin/Login.jsp",
POLLS: "https://voter.svrs.nj.gov/polling-place-search"},
"New Mexico": {CODE: "NM",
REG: "https://voterportal.servis.sos.state.nm.us/WhereToVote.aspx",
POLLS: "https://voterportal.servis.sos.state.nm.us/WhereToVoteAddress.aspx"},
"New York": {CODE: "NY",
REG: "https://voterlookup.elections.ny.gov/",
POLLS: "https://voterlookup.elections.ny.gov/"},
"North Carolina": {CODE: "NC",
REG: "https://vt.ncsbe.gov/RegLkup/",
POLLS: "https://vt.ncsbe.gov/PPLkup/"},
"North Dakota": {CODE: "ND",
REG: "https://vip.sos.nd.gov/PortalListDetails.aspx?ptlhPKID=79&ptlPKID=7",
POLLS: "https://vip.sos.nd.gov/wheretovote.aspx"},
"Ohio": {CODE: "OH",
REG: "https://voterlookup.ohiosos.gov/voterlookup.aspx",
POLLS: "https://voterlookup.ohiosos.gov/VoterLookup.aspx"},
"Oklahoma": {CODE: "OK",
REG: "https://services.okelections.us/voterSearch.aspx",
POLLS: "https://okvoterportal.okelections.us/"},
"Oregon": {CODE: "OR",
REG: "https://secure.sos.state.or.us/orestar/vr/showVoterSearch.do?source=SOS",
POLLS: "https://sos.oregon.gov/voting/Pages/drop-box-locator.aspx"},
"Pennsylvania": {CODE: "PA",
REG: "https://www.pavoterservices.state.pa.us/Pages/VoterRegistrationStatus.aspx",
POLLS: "https://www.pavoterservices.pa.gov/Pages/PollingPlaceInfo.aspx"},
"Puerto Rico": {CODE: "PR",
REG: "http://consulta.ceepur.org/",
REGDL: "http://ww2.ceepur.org/Home/EducacionElectoral",
ABS: "http://ww2.ceepur.org/Home/SolicituddeVoto#VotoAusente",
POLLS: "http://www.ceepur.org/directorio.htm"},
"Rhode Island": {CODE: "RI",
REG: "https://vote.sos.ri.gov/Home/UpdateVoterRecord?ActiveFlag=0",
POLLS: "https://vote.sos.ri.gov/Home/PollingPlaces?ActiveFlag=2"},
"South Carolina": {CODE: "SC",
REG: "https://info.scvotes.sc.gov/eng/voterinquiry/VoterInformationRequest.aspx"
"?PagMode=VoterInfo",
POLLS: "https://info.scvotes.sc.gov/eng/voterinquiry/VoterInformationRequest.aspx"
"?PageMode=VoterInfo"},
"South Dakota": {CODE: "SD",
REG: "https://vip.sdsos.gov/viplogin.aspx",
POLLS: "https://vip.sdsos.gov/viplogin.aspx"},
"Tennessee": {CODE: "TN",
REG: "https://tnmap.tn.gov/voterlookup/",
POLLS: "https://web.go-vote-tn.elections.tn.gov/"},
"Texas": {CODE: "TX",
REG: "https://teamrv-mvp.sos.texas.gov/MVP/mvp.do",
POLLS: "https://teamrv-mvp.sos.texas.gov/MVP/mvp.do"},
"Utah": {CODE: "UT",
REG: "https://votesearch.utah.gov/voter-search/search/search-by-voter/voter-info",
POLLS: "https://votesearch.utah.gov/voter-search/search/search-by-address/how-and-where-can-i-vote"},
"Vermont": {CODE: "VT",
REG: "https://mvp.sec.state.vt.us/",
POLLS: "https://mvp.sec.state.vt.us/"},
"Virginia": {CODE: "VA",
REG: "https://vote.elections.virginia.gov/VoterInformation",
POLLS: "https://www.elections.virginia.gov/casting-a-ballot/"},
"Washington": {CODE: "WA",
REG: "https://www.sos.wa.gov/elections/myvote/",
POLLS: "https://www.sos.wa.gov/elections/auditors/"},
"West Virginia": {CODE: "WV",
REG: "https://apps.sos.wv.gov/elections/voter/",
POLLS: "https://services.sos.wv.gov/Elections/Voter/FindMyPollingPlace"},
"Wisconsin": {CODE: "WI",
REG: "https://myvote.wi.gov/en-US/RegisterToVote",
POLLS: "https://myvote.wi.gov/en-US/FindMyPollingPlace"},
"Wyoming": {CODE: "WY",
REG: "https://sos.wyo.gov/Elections/Docs/WYCountyClerks.pdf",
POLLS: "https://soswy.state.wy.us/Elections/PollPlace/Default.aspx"}
}
# Now do some dynamic setup in the table
for k, v in URLS_BY_STATE.items():
if ABS not in v:
v[ABS] = absentee_ballot(k)
if REGDL not in v:
v[REGDL] = reg_deadline(k)
if ABROAD not in v:
v[ABROAD] = abroad(k)
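# Minimal usage sketch: look up the links for one state. All names below are defined
# in this module; the comments show what each entry holds after the loop above runs.
if __name__ == "__main__":
    info = URLS_BY_STATE[States.GEORGIA]
    print(info[CODE])    # "GA"
    print(info[REG])     # state voter-registration lookup
    print(info[ABS])     # absentee-ballot guide (filled in dynamically above)
    print(info[ABROAD])  # votefromabroad.org page built from the state code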
|
python
|
from azure.mgmt.keyvault import KeyVaultManagementClient
from azure.keyvault import KeyVaultClient
from common_util import CommonUtil
from credentials.credentials_provider import ResourceCredentialsProvider
import logging as log
from azure.mgmt.keyvault.v2016_10_01.models import AccessPolicyUpdateKind, VaultAccessPolicyProperties, \
AccessPolicyEntry, Permissions, KeyPermissions, SecretPermissions
class KeyVaultUtil(CommonUtil):
def __init__(self, credentialsProvider):
assert isinstance(credentialsProvider, ResourceCredentialsProvider)
super(KeyVaultUtil, self).__init__(credentialsProvider)
self.__credentialsProvider = credentialsProvider
def setKeyVaultPolicy(self):
subscriptionList = self.getSubscriptions()
for subscription in subscriptionList:
subscriptionId = subscription.subscription_id
keyVaultList = self.__listKeyVaults(subscriptionId)
log.info("Found " + str(len(keyVaultList)) + " KeyVaults")
for keyVault in keyVaultList:
keyVaultClient = self.getKeyVaultManagementClient(subscriptionId)
keyPermissionList = [KeyPermissions.list]
secretPermissionList = [SecretPermissions.list]
permissions = Permissions(keys=keyPermissionList, secrets=secretPermissionList)
tenantId = self.__credentialsProvider.getConfig().getTenantId()
servicePrincipalId = self.getServicePrincipalId()
if servicePrincipalId is None:
raise Exception("Error Fetching service Principal Id")
appId = self.getAppId()
if appId is None:
raise Exception("Error Fetching service App Id")
accessPolicyEntry = AccessPolicyEntry(tenant_id=tenantId, object_id=servicePrincipalId,
application_id=None, permissions=permissions)
vaultAccessProperties = VaultAccessPolicyProperties(access_policies=[accessPolicyEntry])
accessPolicyKind = AccessPolicyUpdateKind.add
accessPolicy = keyVaultClient.vaults.update_access_policy(keyVault.getResourceGroupName(),
keyVault.getKeyVaultName(), accessPolicyKind,
vaultAccessProperties)
log.info("Assigned access policy permissions to KeyVault: " + keyVault.getKeyVaultName()
+ " Resource Group: " + keyVault.getResourceGroupName())
def __listKeyVaults(self, subscriptionId):
keyVaultClient = KeyVaultManagementClient(self.__credentialsProvider.getManagementCredentials(), subscriptionId)
keyVaultList = []
for keyVault in keyVaultClient.vaults.list(raw=True):
keyVaultList.append(KeyVaultParsed(keyVault.id))
return keyVaultList
class KeyVaultParsed(object):
def __init__(self, id):
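# An Azure Key Vault resource ID has the form
# /subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.KeyVault/vaults/{name},
# so segment 4 is the resource group name and segment 8 is the vault name.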
self.__resourceGroup = id.split("/")[4]
self.__keyVaultName = id.split("/")[8]
def getResourceGroupName(self):
return self.__resourceGroup
def getKeyVaultName(self):
return self.__keyVaultName
|
python
|
import numpy as np
from numpy.linalg import norm
from MeshFEM import sparse_matrices
def preamble(obj, xeval, perturb, etype, fixedVars = []):
if (xeval is None): xeval = obj.getVars()
if (perturb is None): perturb = np.random.uniform(low=-1,high=1, size=obj.numVars())
if (etype is None): etype = obj.__class__.EnergyType.Full
xold = obj.getVars()
perturb = np.copy(perturb)
perturb[fixedVars] = 0.0
return (xold, xeval, perturb, etype)
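# fdGrad: central finite-difference estimate of the directional derivative of the
# energy along `perturb`, i.e. (E(x + eps*p) - E(x - eps*p)) / (2*eps).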
def fdGrad(obj, fd_eps, xeval = None, perturb = None, etype = None, fixedVars = []):
xold, xeval, perturb, etype = preamble(obj, xeval, perturb, etype, fixedVars)
def evalAt(x):
obj.setVars(x)
val = obj.energy(etype)
return val
fd_delta_E = (evalAt(xeval + perturb * fd_eps) - evalAt(xeval - perturb * fd_eps)) / (2 * fd_eps)
obj.setVars(xold)
return fd_delta_E
def validateGrad(obj, fd_eps = 1e-6, xeval = None, perturb = None, etype = None, fixedVars = []):
xold, xeval, perturb, etype = preamble(obj, xeval, perturb, etype, fixedVars)
obj.setVars(xeval)
g = obj.gradient(etype)
analytic_delta_E = g.dot(perturb)
fd_delta_E = fdGrad(obj, fd_eps, xeval, perturb, etype, fixedVars)
return (fd_delta_E, analytic_delta_E)
def validateHessian(obj, fd_eps = 1e-6, xeval = None, perturb = None, etype = None, fixedVars = []):
xold, xeval, perturb, etype = preamble(obj, xeval, perturb, etype, fixedVars)
def gradAt(x):
obj.setVars(x)
val = obj.gradient(etype)
return val
obj.setVars(xeval)
h = obj.hessian(etype)
fd_delta_grad = (gradAt(xeval + perturb * fd_eps) - gradAt(xeval - perturb * fd_eps)) / (2 * fd_eps)
analytic_delta_grad = h.apply(perturb)
obj.setVars(xold)
return (norm(analytic_delta_grad - fd_delta_grad) / norm(fd_delta_grad), fd_delta_grad, analytic_delta_grad)
def gradConvergence(obj, perturb=None, energyType=None, fixedVars = []):
epsilons = np.logspace(-9, -3, 100)
errors = []
if (energyType is None): energyType = obj.EnergyType.Full
if (perturb is None): perturb = np.random.uniform(-1, 1, size=obj.numVars())
for eps in epsilons:
fd, an = validateGrad(obj, etype=energyType, perturb=perturb, fd_eps=eps, fixedVars = fixedVars)
err = np.abs(an - fd) / np.abs(an)
errors.append(err)
return (epsilons, errors, an)
def gradConvergencePlot(obj, perturb=None, energyType=None, fixedVars = []):
from matplotlib import pyplot as plt
eps, errors, ignore = gradConvergence(obj, perturb, energyType, fixedVars)
plt.title('Directional derivative fd test for gradient')
plt.ylabel('Relative error')
plt.xlabel('Step size')
plt.loglog(eps, errors)
plt.grid()
def hessConvergence(obj, perturb=None, energyType=None, fixedVars = []):
epsilons = np.logspace(-9, -3, 100)
errors = []
if (energyType is None): energyType = obj.EnergyType.Full
if (perturb is None): perturb = np.random.uniform(-1, 1, size=obj.numVars())
for eps in epsilons:
err, fd, an = validateHessian(obj, etype=energyType, perturb=perturb, fd_eps=eps, fixedVars = fixedVars)
errors.append(err)
return (epsilons, errors, an)
def hessConvergencePlot(obj, perturb=None, energyType=None, fixedVars = []):
from matplotlib import pyplot as plt
eps, errors, ignore = hessConvergence(obj, perturb, energyType, fixedVars)
plt.title('Directional derivative fd test for Hessian')
plt.ylabel('Relative error')
plt.xlabel('Step size')
plt.loglog(eps, errors)
plt.grid()
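# Minimal usage sketch (assumes `sim` exposes getVars/setVars, numVars, energy,
# gradient, hessian and an EnergyType enum, as the functions above expect):
#
#   fd, an = validateGrad(sim, fd_eps=1e-7)               # compare directional derivatives
#   err, fd_dg, an_dg = validateHessian(sim, fd_eps=1e-7)  # compare Hessian-vector products
#   gradConvergencePlot(sim)   # relative error vs. step size on a log-log plot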
|
python
|
from functools import partial
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.crypto import get_random_string
generate_random_string = partial(
get_random_string,
length=50,
allowed_chars='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789!@#$%^&*(-_=+)')
def hub_credentials(hub_url):
"""A callback that returns no credentials, for anonymous
subscriptions. Meant to be overridden if developers need to
authenticate with certain hubs"""
return
def get_hub_credentials(hub_url):
creds_path = getattr(settings, 'PUSH_CREDENTIALS',
'django_push.subscriber.utils.hub_credentials')
creds_path, creds_function = creds_path.rsplit('.', 1)
creds_module = import_module(creds_path)
return getattr(creds_module, creds_function)(hub_url)
def get_domain():
if hasattr(settings, 'PUSH_DOMAIN'):
return settings.PUSH_DOMAIN
elif 'django.contrib.sites' in settings.INSTALLED_APPS:
from django.contrib.sites.models import Site
return Site.objects.get_current().domain
raise ImproperlyConfigured(
"Unable to deterermine the site's host. Either use "
"django.contrib.sites and set SITE_ID in your settings or "
"set PUSH_DOMAIN to your site's domain.")
|
python
|
from setuptools import setup
from os import path
# Read the contents of the README file
cwd = path.abspath(path.dirname(__file__))
with open(path.join(cwd, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="pysvglib",
version="0.3.2",
description="SVG drawing library",
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Natural Language :: English',
'Operating System :: OS Independent',
'Topic :: Multimedia :: Graphics',
],
keywords='svg graphics',
url='https://github.com/gbingersoll/pysvglib',
author='Greg Ingersoll',
author_email='[email protected]',
license='MIT',
packages=['svg'],
extras_require={
'dev': ['pytest', 'pycodestyle', 'setuptools', 'wheel']
}
)
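# The optional development dependencies declared above can be installed with:
#   pip install -e .[dev]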
|
python
|
from argparse import ArgumentParser
import numpy as np
import py360convert
import os
import cv2
import os.path as osp
from typing import Union
from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot, ret_result
from mmseg.core.evaluation import get_palette
from PIL import Image
import mmcv
from equilib import Equi2Equi
def main():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--palette',
default='cityscapes',
help='Color palette used for segmentation map')
args = parser.parse_args()
# build the model from a config file and a checkpoint file
model = init_segmentor(args.config, args.checkpoint, device=args.device)
res_width, res_height = 224, 224
#img_path = './demo/OFtest/000000.png'
img_path = './demo/1013_take_009/000000.png'
orig_img = np.array(Image.open(img_path))
height, width, _ = orig_img.shape
# Initialize equi2equi
equi2equi = Equi2Equi(
w_out=width,
h_out=height,
sampling_method="default",
mode="bilinear",
)
def preprocess(
img: Union[np.ndarray, Image.Image],
is_cv2: bool = False,
) -> np.ndarray:
"""Preprocesses image"""
if isinstance(img, np.ndarray) and is_cv2:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if isinstance(img, Image.Image):
# Sometimes images are RGBA
img = img.convert("RGB")
img = np.asarray(img)
assert len(img.shape) == 3, "input must be dim=3"
assert img.shape[-1] == 3, "input must be HWC"
img = np.transpose(img, (2, 0, 1))
return img
rot_45 = {
"roll": 0, #
"pitch": -np.pi / 6, # vertical
"yaw": 0, # horizontal
}
rot_minus45 = {
"roll": 0, #
"pitch": np.pi / 6, # vertical
"yaw": 0, # horizontal
}
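# Note: despite the "_45" names, these rotations pitch the view by +/- 30 degrees (pi/6).
# The loop below processes each frame at three yaw offsets (120 degrees apart) and at the
# two extra pitches, segments every view, rotates the results back, and ORs the extracted
# silhouettes together so that distortion or seams in any single view are filled in by the others.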
for j in range(564, 1258):
# test a single image
#img_path = './demo/OFtest/{:06d}.png'.format(j)
img_path = './demo/1013_take_009/{:06d}.png'.format(j)
orig_img = np.array(Image.open(img_path))
OR_im = np.zeros((height, width)).astype(np.uint8)
# scroll in 120-degree steps (three yaw positions)
num_rot = 360 // 120
for i in range(num_rot):
# scroll image
im_scroll = np.roll(orig_img, width // num_rot * i , axis=1)
src_img = Image.fromarray(im_scroll)
src_img = preprocess(src_img)
# change pitch
img_45 = equi2equi(
src=src_img,
rot=rot_45,
)
img_minus45 = equi2equi(
src=src_img,
rot=rot_minus45,
)
src_img = np.transpose(src_img, (1, 2, 0))
src_img = Image.fromarray(src_img)
img_45 = np.transpose(img_45, (1, 2, 0))
img_45 = Image.fromarray(img_45)
img_minus45 = np.transpose(img_minus45, (1, 2, 0))
img_minus45 = Image.fromarray(img_minus45)
src_img_path = os.getcwd() + "/demo/tmp/src_img.png"
src_img.save(src_img_path)
img_45_path = os.getcwd() + "/demo/tmp/img_45.png"
img_45.save(img_45_path)
img_minus45_path = os.getcwd() + "/demo/tmp/img_minus45.png"
img_minus45.save(img_minus45_path)
# segmentation
result_src = inference_segmentor(model, src_img_path)
result_45 = inference_segmentor(model, img_45_path)
result_minus45 = inference_segmentor(model,img_minus45_path)
src_img = ret_result(model, args.img, result_src, get_palette(args.palette))
img_45 = ret_result(model, args.img, result_45, get_palette(args.palette))
img_minus45 = ret_result(model, args.img, result_minus45, get_palette(args.palette))
tmp_src_img = Image.fromarray(src_img)
tmp_img_45 = Image.fromarray(img_45)
tmp_img_minus45 = Image.fromarray(img_minus45)
src_img_path = os.getcwd() + "/demo/tmp/src_img_seg.png"
tmp_src_img.save(src_img_path)
img_45_path = os.getcwd() + "/demo/tmp/img_45_seg.png"
tmp_img_45.save(img_45_path)
img_minus45_path = os.getcwd() + "/demo/tmp/img_minus45_seg.png"
tmp_img_minus45.save(img_minus45_path)
img_45 = np.stack((img_45,) * 3, -1)
img_45 = preprocess(img_45)
#img_45 = np.transpose(img_45, (2, 0, 1))
img_minus45 = np.stack((img_minus45,) * 3, -1)
img_minus45 = preprocess(img_minus45)
#img_minus45 = np.transpose(img_minus45, (2, 0, 1))
# change pitch
img_45 = equi2equi(
src=img_45,
rot=rot_minus45,
)
img_minus45 = equi2equi(
src=img_minus45,
rot=rot_45,
)
img_45 = np.transpose(img_45, (1, 2, 0))
img_minus45 = np.transpose(img_minus45, (1, 2, 0))
tmp_src_img = Image.fromarray(src_img)
tmp_img_45 = Image.fromarray(img_45)
tmp_img_minus45 = Image.fromarray(img_minus45)
src_img_path = os.getcwd() + "/demo/tmp/src_img_seg_ori.png"
tmp_src_img.save(src_img_path)
img_45_path = os.getcwd() + "/demo/tmp/img_45_seg_ori.png"
tmp_img_45.save(img_45_path)
img_minus45_path = os.getcwd() + "/demo/tmp/img_minus45_seg_ori.png"
tmp_img_minus45.save(img_minus45_path)
src_img = np.roll(src_img, -width // num_rot * i, axis=1).astype(np.uint8)
img_45 = np.roll(img_45, -width // num_rot * i, axis=1).astype(np.uint8)[:, :, 0]
img_minus45 = np.roll(img_minus45, -width // num_rot * i, axis=1).astype(np.uint8)[:,:,0]
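# Silhouette extraction: keep the largest contour; if it touches the left/right image
# border (the equirectangular seam), also draw the second-largest contour and prefer
# that result when it spans both borders.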
external = np.zeros(src_img.shape).astype(src_img.dtype)
retval, im_th = cv2.threshold(src_img, 127, 255, cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(im_th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if contours:
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cv2.drawContours(external, contours, max_index, 255, -1)
src_img = external
if any(external[:, 0]) or any(external[:, -1]):
external2 = external.copy()
areas[max_index] = 0
max_index = np.argmax(areas)
cv2.drawContours(external2, contours, max_index, 255, -1)
if any(external2[:, 0]) and any(external2[:, -1]):
src_img = external2
external = np.zeros(src_img.shape).astype(src_img.dtype)
retval, im_th = cv2.threshold(img_45, 127, 255, cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(im_th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if contours:
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cv2.drawContours(external, contours, max_index, 255, -1)
img_45 = external
if any(external[:, 0]) or any(external[:, -1]):
external2 = external.copy()
areas[max_index] = 0
max_index = np.argmax(areas)
cv2.drawContours(external2, contours, max_index, 255, -1)
if any(external2[:, 0]) and any(external2[:, -1]):
img_45 = external2
external = np.zeros(src_img.shape).astype(src_img.dtype)
retval, im_th = cv2.threshold(img_minus45, 127, 255, cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(im_th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if contours:
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cv2.drawContours(external, contours, max_index, 255, -1)
img_minus45 = external
if any(external[:, 0]) or any(external[:, -1]):
external2 = external.copy()
areas[max_index] = 0
max_index = np.argmax(areas)
cv2.drawContours(external2, contours, max_index, 255, -1)
if any(external2[:, 0]) and any(external2[:, -1]):
img_minus45 = external2
tmp_src_img = Image.fromarray(src_img)
tmp_img_45 = Image.fromarray(img_45)
tmp_img_minus45 = Image.fromarray(img_minus45)
src_img_path = os.getcwd() + "/demo/tmp/src_img_sil.png"
tmp_src_img.save(src_img_path)
img_45_path = os.getcwd() + "/demo/tmp/img_45_seg_sil.png"
tmp_img_45.save(img_45_path)
img_minus45_path = os.getcwd() + "/demo/tmp/img_minus45_sil.png"
tmp_img_minus45.save(img_minus45_path)
OR_im = cv2.bitwise_or(OR_im, src_img)
OR_im = cv2.bitwise_or(OR_im, img_45)
OR_im = cv2.bitwise_or(OR_im, img_minus45)
ORim_path = os.getcwd() + "/demo/tmp/OR_im.png"
out = Image.fromarray(OR_im)
out.save(ORim_path)
external = np.zeros(OR_im.shape).astype(OR_im.dtype)
retval, im_th = cv2.threshold(OR_im, 127, 255, cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(im_th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if contours:
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cv2.drawContours(external, contours, max_index, 255, -1)
OR_im = external
if any(OR_im[:, 0]) or any(OR_im[:, -1]):
external2 = OR_im.copy()
areas[max_index] = 0
max_index = np.argmax(areas)
cv2.drawContours(external2, contours, max_index, 255, -1)
if any(external2[:, 0]) and any(external2[:, -1]):
OR_im = external2
# save result
out = Image.fromarray(OR_im)
out.save('./demo/tmp/extract.png')
#out.show()
out2 = out.resize((res_width, res_height))
#out2.save('./demo/OFtest_equilib2/{:06d}.png'.format(j))
out2.save('./demo/1013_take_009_equilib3/{:06d}.png'.format(j))
#img2.save('./demo/1_e2c_col.png'.format(i))
print('{:06d}.png saved'.format(j))
if __name__ == '__main__':
main()
|
python
|
import json
import requests
import logging
import hashlib
import time
from fake_useragent import UserAgent
from uuid import uuid4
from .camera import EzvizCamera
# from pyezviz.camera import EzvizCamera
COOKIE_NAME = "sessionId"
CAMERA_DEVICE_CATEGORY = "IPC"
API_BASE_URI = "https://apiieu.ezvizlife.com"
API_ENDPOINT_LOGIN = "/v3/users/login"
API_ENDPOINT_CLOUDDEVICES = "/api/cloud/v2/cloudDevices/getAll"
API_ENDPOINT_PAGELIST = "/v3/userdevices/v1/devices/pagelist"
API_ENDPOINT_DEVICES = "/v3/devices/"
API_ENDPOINT_SWITCH_STATUS = '/api/device/switchStatus'
API_ENDPOINT_PTZCONTROL = "/ptzControl"
API_ENDPOINT_ALARM_SOUND = "/alarm/sound"
API_ENDPOINT_DATA_REPORT = "/api/other/data/report"
API_ENDPOINT_DETECTION_SENSIBILITY = "/api/device/configAlgorithm"
API_ENDPOINT_DETECTION_SENSIBILITY_GET = "/api/device/queryAlgorithmConfig"
LOGIN_URL = API_BASE_URI + API_ENDPOINT_LOGIN
CLOUDDEVICES_URL = API_BASE_URI + API_ENDPOINT_CLOUDDEVICES
DEVICES_URL = API_BASE_URI + API_ENDPOINT_DEVICES
PAGELIST_URL = API_BASE_URI + API_ENDPOINT_PAGELIST
DATA_REPORT_URL = API_BASE_URI + API_ENDPOINT_DATA_REPORT
SWITCH_STATUS_URL = API_BASE_URI + API_ENDPOINT_SWITCH_STATUS
DETECTION_SENSIBILITY_URL = API_BASE_URI + API_ENDPOINT_DETECTION_SENSIBILITY
DETECTION_SENSIBILITY_GET_URL = API_BASE_URI + API_ENDPOINT_DETECTION_SENSIBILITY_GET
DEFAULT_TIMEOUT = 10
MAX_RETRIES = 3
# Assumed value: the privacy switch type used by _switch_devices_privacy below is not
# defined anywhere in this file; 7 is a placeholder and should be verified against the API.
TYPE_PRIVACY_MODE = 7
class PyEzvizError(Exception):
pass
class EzvizClient(object):
def __init__(self, account, password, session=None, sessionId=None, timeout=None, cloud=None, connection=None):
"""Initialize the client object."""
self.account = account
self.password = password
# self._user_id = None
# self._user_reference = None
self._session = session
self._sessionId = sessionId
self._data = {}
self._timeout = timeout
self._CLOUD = cloud
self._CONNECTION = connection
def _login(self):
"""Login to Ezviz' API."""
# Ezviz API sends md5 of password
m = hashlib.md5()
m.update(self.password.encode('utf-8'))
md5pass = m.hexdigest()
payload = {"account": self.account, "password": md5pass, "featureCode": "92c579faa0902cbfcfcc4fc004ef67e7"}
try:
req = self._session.post(LOGIN_URL,
data=payload,
headers={"Content-Type": "application/x-www-form-urlencoded",
"clientType": "1",
"customNo": "1000001"},
timeout=self._timeout)
except OSError:
raise PyEzvizError("Can not login to API")
if req.status_code == 400:
raise PyEzvizError("Login error: Please check your username/password: %s ", str(req.text))
# let's parse the answer, session is in {.."loginSession":{"sessionId":"xxx...}
try:
response_json = req.json()
sessionId = str(response_json["loginSession"]["sessionId"])
if not sessionId:
raise PyEzvizError("Login error: Please check your username/password: %s ", str(req.text))
self._sessionId = sessionId
except (OSError, json.decoder.JSONDecodeError) as e:
raise PyEzvizError("Impossible to decode response: \nResponse was: [%s] %s", str(e), str(req.status_code), str(req.text))
return True
def _get_pagelist(self, filter=None, json_key=None, max_retries=0):
"""Get data from pagelist API."""
if max_retries > MAX_RETRIES:
raise PyEzvizError("Can't gather proper data. Max retries exceeded.")
if filter is None:
raise PyEzvizError("Trying to call get_pagelist without filter")
try:
req = self._session.get(PAGELIST_URL,
params={'filter': filter},
headers={ 'sessionId': self._sessionId},
timeout=self._timeout)
except OSError as e:
raise PyEzvizError("Could not access Ezviz' API: " + str(e))
if req.status_code == 401:
# session is wrong, need to relogin
self.login()
logging.info("Got 401, relogging (max retries: %s)",str(max_retries))
return self._get_pagelist(filter, json_key, max_retries+1)
if req.text == "":
raise PyEzvizError("No data")
try:
json_output = req.json()
except (OSError, json.decoder.JSONDecodeError) as e:
raise PyEzvizError("Impossible to decode response: " + str(e) + "\nResponse was: " + str(req.text))
if json_key is None:
json_result = json_output
else:
json_result = json_output[json_key]
if not json_result:
raise PyEzvizError("Impossible to load the devices, here is the returned response: %s ", str(req.text))
return json_result
def _switch_status(self, serial, status_type, enable, max_retries=0):
"""Switch status on a device"""
try:
req = self._session.post(SWITCH_STATUS_URL,
data={ 'sessionId': self._sessionId,
'enable': enable,
'serial': serial,
'channel': '0',
'netType' : 'WIFI',
'clientType': '1',
'type': status_type},
timeout=self._timeout)
if req.status_code == 401:
# session is wrong, need to relogin
self.login()
logging.info("Got 401, relogging (max retries: %s)",str(max_retries))
return self._switch_status(serial, status_type, enable, max_retries+1)
response_json = req.json()
if response_json['resultCode'] != '0':
raise PyEzvizError("Could not set the switch, maybe a permission issue ?: Got %s : %s)",str(req.status_code), str(req.text))
return False
except OSError as e:
raise PyEzvizError("Could not access Ezviz' API: " + str(e))
return True
def _switch_devices_privacy(self, enable=0):
"""Switch privacy status on ALL devices (batch)"""
# enable=1 means privacy is ON
# get all devices
devices = self._get_devices()
# foreach, launch a switchstatus for the proper serial
for idx, device in enumerate(devices):
serial = devices[idx]['serial']
self._switch_status(serial, TYPE_PRIVACY_MODE, enable)
return True
def load_cameras(self):
"""Load and return all cameras objects"""
# get all devices
devices = self.get_DEVICE()
cameras = []
# foreach, launch a switchstatus for the proper serial
for idx, device in enumerate(devices):
if devices[idx]['deviceCategory'] == CAMERA_DEVICE_CATEGORY:
camera = EzvizCamera(self, device['deviceSerial'])
camera.load()
cameras.append(camera.status())
return cameras
def ptzControl(self, command, serial, action, speed=5, max_retries=0):
"""PTZ Control by API."""
if max_retries > MAX_RETRIES:
raise PyEzvizError("Can't gather proper data. Max retries exceeded.")
if command == None:
raise PyEzvizError("Trying to call ptzControl without command")
if action == None:
raise PyEzvizError("Trying to call ptzControl without action")
try:
req = self._session.put(DEVICES_URL + serial + API_ENDPOINT_PTZCONTROL,
data={'command': command,
'action': action,
'channelNo': "1",
'speed': speed,
'uuid': str(uuid4()),
'serial': serial},
headers={ 'sessionId': self._sessionId,
'clientType': "1"},
timeout=self._timeout)
except OSError as e:
raise PyEzvizError("Could not access Ezviz' API: " + str(e))
if req.status_code == 401:
# session is wrong, need to re-log-in
self.login()
logging.info("Got 401, relogging (max retries: %s)",str(max_retries))
return self.ptzControl(command, serial, action, speed, max_retries+1)
def login(self):
"""Set http session."""
if self._sessionId is None:
self._session = requests.session()
# adding fake user-agent header
self._session.headers.update({'User-agent': str(UserAgent().random)})
return self._login()
def data_report(self, serial, enable=1, max_retries=0):
"""Enable alarm notifications."""
if max_retries > MAX_RETRIES:
raise PyEzvizError("Can't gather proper data. Max retries exceeded.")
# operationType = 2 if disable, and 1 if enable
operationType = 2 - int(enable)
print(f"enable: {enable}, operationType: {operationType}")
try:
req = self._session.post(DATA_REPORT_URL,
data={ 'clientType': '1',
'infoDetail': json.dumps({
"operationType" : int(operationType),
"detail" : '0',
"deviceSerial" : serial + ",2"
}, separators=(',',':')),
'infoType': '3',
'netType': 'WIFI',
'reportData': None,
'requestType': '0',
'sessionId': self._sessionId
},
timeout=self._timeout)
except OSError as e:
raise PyEzvizError("Could not access Ezviz' API: " + str(e))
if req.status_code == 401:
# session is wrong, need to re-log-in
self.login()
logging.info("Got 401, relogging (max retries: %s)",str(max_retries))
return self.data_report(serial, enable, max_retries+1)
return True
# soundtype: 0 = normal, 1 = intensive, 2 = disabled ... don't ask me why...
def detection_sensibility(self, serial, sensibility=3, max_retries=0):
"""Enable alarm notifications."""
if max_retries > MAX_RETRIES:
raise PyEzvizError("Can't gather proper data. Max retries exceeded.")
if sensibility not in [0,1,2,3,4,5,6]:
raise PyEzvizError("Unproper sensibility (should be within 1 to 6).")
try:
req = self._session.post(DETECTION_SENSIBILITY_URL,
data={ 'subSerial' : serial,
'type': '0',
'sessionId': self._sessionId,
'value': sensibility,
},
timeout=self._timeout)
except OSError as e:
raise PyEzvizError("Could not access Ezviz' API: " + str(e))
if req.status_code == 401:
# session is wrong, need to re-log-in
self.login()
logging.info("Got 401, relogging (max retries: %s)",str(max_retries))
return self.detection_sensibility(serial, sensibility, max_retries+1)
return True
def get_detection_sensibility(self, serial, max_retries=0):
"""Enable alarm notifications."""
if max_retries > MAX_RETRIES:
raise PyEzvizError("Can't gather proper data. Max retries exceeded.")
try:
req = self._session.post(DETECTION_SENSIBILITY_GET_URL,
data={ 'subSerial' : serial,
'sessionId': self._sessionId,
'clientType': 1
},
timeout=self._timeout)
except OSError as e:
raise PyEzvizError("Could not access Ezviz' API: " + str(e))
if req.status_code == 401:
# session is wrong, need to re-log-in
self.login()
logging.info("Got 401, relogging (max retries: %s)",str(max_retries))
return self.get_detection_sensibility(serial, max_retries+1)
elif req.status_code != 200:
raise PyEzvizError("Could not get detection sensibility: Got %s : %s)",str(req.status_code), str(req.text))
response_json = req.json()
if response_json['resultCode'] != '0':
# raise PyEzvizError("Could not get detection sensibility: Got %s : %s)",str(req.status_code), str(req.text))
return 'Unknown'
else:
return response_json['algorithmConfig']['algorithmList'][0]['value']
def alarm_sound(self, serial, soundType, enable=1, max_retries=0):
"""Enable alarm sound by API."""
if max_retries > MAX_RETRIES:
raise PyEzvizError("Can't gather proper data. Max retries exceeded.")
if soundType not in [0,1,2]:
raise PyEzvizError("Invalid soundType, should be 0,1,2: " + str(soundType))
try:
req = self._session.put(DEVICES_URL + serial + API_ENDPOINT_ALARM_SOUND,
data={ 'enable': enable,
'soundType': soundType,
'voiceId': '0',
'deviceSerial': serial
},
headers={ 'sessionId': self._sessionId},
timeout=self._timeout)
except OSError as e:
raise PyEzvizError("Could not access Ezviz' API: " + str(e))
if req.status_code == 401:
# session is wrong, need to re-log-in
self.login()
logging.info("Got 401, relogging (max retries: %s)",str(max_retries))
return self.alarm_sound(serial, soundType, enable, max_retries+1)
elif req.status_code != 200:
logging.error("Got %s : %s)",str(req.status_code), str(req.text))
return True
def switch_devices_privacy(self,enable=0):
"""Switch status on all devices."""
return self._switch_devices_privacy(enable)
def switch_status(self, serial, status_type, enable=0):
"""Switch status of a device."""
return self._switch_status(serial, status_type, enable)
def get_PAGE_LIST(self, max_retries=0):
return self._get_pagelist(filter='CLOUD,TIME_PLAN,CONNECTION,SWITCH,STATUS,WIFI,STATUS_EXT,NODISTURB,P2P,TTS,KMS,HIDDNS', json_key=None)
def get_DEVICE(self, max_retries=0):
return self._get_pagelist(filter='CLOUD',json_key='deviceInfos')
def get_CONNECTION(self, max_retries=0):
return self._get_pagelist(filter='CONNECTION',json_key='connectionInfos')
def get_STATUS(self, max_retries=0):
return self._get_pagelist(filter='STATUS',json_key='statusInfos')
def get_SWITCH(self, max_retries=0):
return self._get_pagelist(filter='SWITCH',json_key='switchStatusInfos')
def get_WIFI(self, max_retries=0):
return self._get_pagelist(filter='WIFI',json_key='wifiInfos')
def get_NODISTURB(self, max_retries=0):
return self._get_pagelist(filter='NODISTURB',json_key='alarmNodisturbInfos')
def get_P2P(self, max_retries=0):
return self._get_pagelist(filter='P2P',json_key='p2pInfos')
def get_KMS(self, max_retries=0):
return self._get_pagelist(filter='KMS',json_key='kmsInfos')
def get_TIME_PLAN(self, max_retries=0):
return self._get_pagelist(filter='TIME_PLAN',json_key='timePlanInfos')
def close_session(self):
"""Close current session."""
self._session.close()
self._session = None
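# Minimal usage sketch (account and password are placeholders):
#
#   client = EzvizClient("account", "password")
#   client.login()
#   cameras = client.load_cameras()
#   client.close_session()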
|
python
|
# coding: utf-8
import os
import sh
import logging
import shutil
import tempfile
from unittest import TestCase
from nose.tools import nottest
class UncanningTest(TestCase):
""" Let's try to uncan a new project and run tests to see everything is ok
Estimated running time 260s"""
def setUp(self):
""" Prepare for tests """
self.tmp_dir_src = tempfile.mkdtemp(prefix='tinned_django')
self.tmp_dir_env = tempfile.mkdtemp(prefix='tinned_django.env')
def tearDown(self):
""" Clean up dirs after myself """
shutil.rmtree(self.tmp_dir_src)
shutil.rmtree(self.tmp_dir_env)
@nottest
def uncan_it(self):
template_path = os.path.abspath('./tinned_django')
managepy = sh.Command('django-admin.py')
managepy('startproject', 'test_project', self.tmp_dir_src,
'--extension', '.py,.gitignore', '--template', template_path)
@nottest
def create_virtualenv(self):
sh.virtualenv(self.tmp_dir_env, python='python2.7')
pip = sh.Command(os.path.join(self.tmp_dir_env, 'bin/pip'))
reqs_file = os.path.join(self.tmp_dir_src, 'requirements.txt')
print('Installing virtualenv...')
for line in pip.install(requirement=reqs_file, _iter=True):
print(line)
@nottest
def launch_project_tests(self):
sh.cd(self.tmp_dir_src)
python = sh.Command(os.path.join(self.tmp_dir_env, 'bin/python'))
print(python(os.path.join(self.tmp_dir_src, 'runtests.py'), verbosity=2))
def test_sanity(self):
""" Let's try to uncan our project """
logging.basicConfig()
self.uncan_it()
test_templating_path = os.path.join(self.tmp_dir_src, 'manage.py')
test_gitignore_path = os.path.join(self.tmp_dir_src, '.gitignore')
self.assertIn('test_project.settings', open(test_templating_path).read())
self.assertIn('/test_project/local_settings.py', open(test_gitignore_path).read())
self.create_virtualenv()
try:
self.launch_project_tests()
except sh.ErrorReturnCode as e:
print(e.stderr)
exit(1)
self.assert_('No error code raised')
|
python
|
"""
Solution to https://adventofcode.com/2018/day/4
"""
from pathlib import Path
# path from the root of the project
INPUT_FILE = Path.cwd() / "2018" / "dec4.txt"
def part1() -> int:
sorted_lines = sorted(line for line in INPUT_FILE.read_text().split("\n") if line)
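# TODO: the rest of the solution is missing; sorted_lines is built but not used yet,
# so part1 currently returns None.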
if __name__ == "__main__":
print(part1())
|
python
|
# -*- coding: utf-8 -*-
from numpy import cos, exp, sin
from ....Classes.Arc1 import Arc1
from ....Classes.Segment import Segment
from ....Classes.SurfLine import SurfLine
def build_geometry(self, alpha=0, delta=0, is_simplified=False):
"""Compute the curve (Segment) needed to plot the Hole.
The ending point of a curve is the starting point of the next curve in the
list
Parameters
----------
self : HoleM53
A HoleM53 object
alpha : float
Angle to rotate the slot (Default value = 0) [rad]
delta : complex
Complex to translate the slot (Default value = 0)
is_simplified : bool
True to avoid line superposition
Returns
-------
surf_list: list
List of Magnet Surface and Air Surface on the slot
"""
if self.get_is_stator(): # check if the slot is on the stator
st = "_Stator"
else:
st = "_Rotor"
# Get all the points
point_dict = self._comp_point_coordinate()
Z1 = point_dict["Z1"]
Z2 = point_dict["Z2"]
Z3 = point_dict["Z3"]
Z4 = point_dict["Z4"]
Z5 = point_dict["Z5"]
Z6 = point_dict["Z6"]
Z7 = point_dict["Z7"]
Z8 = point_dict["Z8"]
Z9 = point_dict["Z9"]
Z10 = point_dict["Z10"]
Z11 = point_dict["Z11"]
Z1s = point_dict["Z1s"]
Z2s = point_dict["Z2s"]
Z3s = point_dict["Z3s"]
Z4s = point_dict["Z4s"]
Z5s = point_dict["Z5s"]
Z6s = point_dict["Z6s"]
Z7s = point_dict["Z7s"]
Z8s = point_dict["Z8s"]
Z9s = point_dict["Z9s"]
Z10s = point_dict["Z10s"]
Z11s = point_dict["Z11s"]
Rext = self.get_Rext()
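# Build every candidate surface (S1 to S12); the relevant subset is selected at the
# end depending on which magnets are present and whether W1 > 0.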
# Air surface with magnet_0
curve_list = list()
curve_list.append(Segment(Z1, Z2))
curve_list.append(Segment(Z2, Z10))
curve_list.append(Segment(Z10, Z11))
curve_list.append(
Arc1(begin=Z11, end=Z1, radius=-Rext + self.H1, is_trigo_direction=False)
)
point_ref = (Z1 + Z2 + Z10 + Z11) / 4
S1 = SurfLine(line_list=curve_list, label="Hole" + st, point_ref=point_ref)
# magnet_0 surface
curve_list = list()
if is_simplified:
curve_list.append(Segment(Z5, Z9))
curve_list.append(Segment(Z2, Z10))
else:
curve_list.append(Segment(Z3, Z4))
curve_list.append(Segment(Z4, Z9))
curve_list.append(Segment(Z9, Z10))
curve_list.append(Segment(Z10, Z3))
point_ref = (Z3 + Z4 + Z9 + Z10) / 4
# Defining type of magnetization of the magnet
if self.magnet_0:
if self.magnet_0.type_magnetization == 0:
type_mag = "_Radial"
else:
type_mag = "_Parallel"
else:
type_mag = "None"
magnet_label = "HoleMagnet" + st + type_mag + "_N_R0_T0_S0"
S2 = SurfLine(line_list=curve_list, label=magnet_label, point_ref=point_ref)
# Air surface with magnet_0 and W1 > 0
curve_list = list()
if self.W2 > 0:
curve_list.append(Segment(Z5, Z6))
curve_list.append(Segment(Z6, Z7))
curve_list.append(Segment(Z7, Z8))
if self.W2 > 0:
curve_list.append(Segment(Z8, Z9))
curve_list.append(Segment(Z9, Z5))
point_ref = (Z5 + Z6 + Z7 + Z8 + Z9) / 5
else:
curve_list.append(Segment(Z8, Z6))
point_ref = (Z6 + Z7 + Z8) / 3
S3 = SurfLine(line_list=curve_list, label="Hole" + st, point_ref=point_ref)
# Air surface with magnet_1
curve_list = list()
curve_list.append(Segment(Z1s, Z2s))
curve_list.append(Segment(Z2s, Z10s))
curve_list.append(Segment(Z10s, Z11s))
curve_list.append(Arc1(Z11s, Z1s, Rext - self.H1, is_trigo_direction=True))
point_ref = (Z1s + Z2s + Z10s + Z11s) / 4
S4 = SurfLine(line_list=curve_list, label="Hole" + st, point_ref=point_ref)
# magnet_1 surface
curve_list = list()
if is_simplified:
curve_list.append(Segment(Z5s, Z9s))
curve_list.append(Segment(Z2s, Z10s))
else:
curve_list.append(Segment(Z3s, Z4s))
curve_list.append(Segment(Z4s, Z9s))
curve_list.append(Segment(Z9s, Z10s))
curve_list.append(Segment(Z10s, Z3s))
point_ref = (Z3s + Z4s + Z9s + Z10s) / 4
# Defining type of magnetization of the magnet
if self.magnet_1:
if self.magnet_1.type_magnetization == 0:
type_mag = "_Radial"
else:
type_mag = "_Parallel"
else:
type_mag = "None"
magnet_label = "HoleMagnet" + st + type_mag + "_N_R0_T1_S0"
S5 = SurfLine(line_list=curve_list, label=magnet_label, point_ref=point_ref)
# Air surface with magnet_1 and W1 > 0
curve_list = list()
if self.W2 > 0:
curve_list.append(Segment(Z5s, Z6s))
curve_list.append(Segment(Z6s, Z7s))
curve_list.append(Segment(Z7s, Z8s))
if self.W2 > 0:
curve_list.append(Segment(Z8s, Z9s))
curve_list.append(Segment(Z9s, Z5s))
point_ref = (Z5s + Z6s + Z7s + Z8s + Z9s) / 5
else:
curve_list.append(Segment(Z8s, Z6s))
point_ref = (Z6s + Z7s + Z8s) / 3
S6 = SurfLine(line_list=curve_list, label="Hole" + st, point_ref=point_ref)
# Air with both magnet and W1 = 0
curve_list = list()
if self.W2 > 0:
curve_list.append(Segment(Z5, Z6))
curve_list.append(Segment(Z6, Z6s))
if self.W2 > 0:
curve_list.append(Segment(Z6s, Z5s))
curve_list.append(Segment(Z5s, Z9s))
if self.W2 > 0:
curve_list.append(Segment(Z9s, Z8s))
curve_list.append(Segment(Z8s, Z9))
curve_list.append(Segment(Z9, Z5))
point_ref = (Z6 + Z6s + Z8) / 3
S7 = SurfLine(line_list=curve_list, label="Hole" + st, point_ref=point_ref)
# first hole without magnet_0 and W1 > 0
curve_list = list()
curve_list.append(Segment(Z1, Z2))
if self.H3 > 0:
curve_list.append(Segment(Z2, Z3))
curve_list.append(Segment(Z3, Z4))
if self.H3 > 0:
curve_list.append(Segment(Z4, Z5))
if self.W2 > 0:
curve_list.append(Segment(Z5, Z6))
curve_list.append(Segment(Z6, Z7))
curve_list.append(Segment(Z7, Z8))
curve_list.append(Segment(Z8, Z11))
curve_list.append(Arc1(Z11, Z1, -Rext + self.H1, is_trigo_direction=False))
point_ref = (Z3 + Z4 + Z9 + Z10) / 4
S8 = SurfLine(line_list=curve_list, label="Hole" + st, point_ref=point_ref)
# second hole without magnet_1 and W1 > 0
curve_list = list()
curve_list.append(Segment(Z1s, Z2s))
if self.H3 > 0:
curve_list.append(Segment(Z2s, Z3s))
curve_list.append(Segment(Z3s, Z4s))
if self.H3 > 0:
curve_list.append(Segment(Z4s, Z5s))
if self.W2 > 0:
curve_list.append(Segment(Z5s, Z6s))
curve_list.append(Segment(Z6s, Z7s))
curve_list.append(Segment(Z7s, Z8s))
curve_list.append(Segment(Z8s, Z11s))
curve_list.append(Arc1(Z11s, Z1s, -Rext + self.H1, is_trigo_direction=False))
point_ref = (Z3s + Z4s + Z9s + Z10s) / 4
S9 = SurfLine(line_list=curve_list, label="Hole" + st, point_ref=point_ref)
# No magnet_1 and W1 = 0
curve_list = list()
curve_list.append(Segment(Z1s, Z2s))
if self.H3 > 0:
curve_list.append(Segment(Z2s, Z3s))
curve_list.append(Segment(Z3s, Z4s))
if self.H3 > 0:
curve_list.append(Segment(Z4s, Z5s))
if self.W2 > 0:
curve_list.append(Segment(Z5s, Z6s))
curve_list.append(Segment(Z6s, Z6))
if self.W2 > 0:
curve_list.append(Segment(Z6, Z5))
curve_list.append(Segment(Z5, Z9))
if self.W2 > 0:
curve_list.append(Segment(Z9, Z8))
curve_list.append(Segment(Z8s, Z11s))
curve_list.append(Arc1(Z11s, Z1s, -Rext + self.H1, is_trigo_direction=False))
point_ref = (Z3s + Z4s + Z9s + Z10s) / 4
S10 = SurfLine(line_list=curve_list, label="Hole" + st, point_ref=point_ref)
# No magnet_0 and W1 = 0
curve_list = list()
curve_list.append(Segment(Z1, Z2))
if self.H3 > 0:
curve_list.append(Segment(Z2, Z3))
curve_list.append(Segment(Z3, Z4))
if self.H3 > 0:
curve_list.append(Segment(Z4, Z5))
if self.W2 > 0:
curve_list.append(Segment(Z5, Z6))
curve_list.append(Segment(Z6, Z6s))
if self.W2 > 0:
curve_list.append(Segment(Z6s, Z5s))
curve_list.append(Segment(Z5s, Z9s))
if self.W2 > 0:
curve_list.append(Segment(Z9s, Z8s))
curve_list.append(Segment(Z8, Z11))
curve_list.append(Arc1(Z11, Z1, -Rext + self.H1, is_trigo_direction=False))
point_ref = (Z3 + Z4 + Z9 + Z10) / 4
S11 = SurfLine(line_list=curve_list, label="Hole" + st, point_ref=point_ref)
# No magnet and W1 = 0
curve_list = list()
curve_list.append(Arc1(Z1, Z11, Rext - self.H1, is_trigo_direction=True))
curve_list.append(Segment(Z11, Z8))
curve_list.append(Segment(Z8, Z11s))
curve_list.append(Arc1(Z11s, Z1s, Rext - self.H1, is_trigo_direction=True))
curve_list.append(Segment(Z1s, Z2s))
if self.H3 > 0:
curve_list.append(Segment(Z2s, Z3s))
curve_list.append(Segment(Z3s, Z4s))
if self.H3 > 0:
curve_list.append(Segment(Z4s, Z5s))
if self.W2 > 0:
curve_list.append(Segment(Z5s, Z6s))
curve_list.append(Segment(Z6s, Z6))
if self.W2 > 0:
curve_list.append(Segment(Z6, Z5))
if self.H3 > 0:
curve_list.append(Segment(Z5, Z4))
curve_list.append(Segment(Z4, Z3))
if self.H3 > 0:
curve_list.append(Segment(Z3, Z2))
curve_list.append(Segment(Z2, Z1))
point_ref = (Z6 + Z8 + Z6s) / 3
S12 = SurfLine(line_list=curve_list, label="Hole" + st, point_ref=point_ref)
# Create the surface list by selecting the correct ones
if self.magnet_0 and self.magnet_1 and self.W1 > 0:
S1.label = S1.label + "_R0_T0_S0" # Hole
S3.label = S3.label + "_R0_T1_S0" # Hole
S6.label = S6.label + "_R0_T2_S0" # Hole
S4.label = S4.label + "_R0_T3_S0" # Hole
surf_list = [S1, S2, S3, S6, S5, S4]
elif self.magnet_0 and self.magnet_1 and self.W1 == 0:
S1.label = S1.label + "_R0_T0_S0" # Hole
S7.label = S7.label + "_R0_T1_S0" # Hole
S4.label = S4.label + "_R0_T2_S0" # Hole
surf_list = [S1, S2, S7, S5, S4]
elif self.magnet_0 and not self.magnet_1 and self.W1 > 0:
S1.label = S1.label + "_R0_T0_S0" # Hole
S3.label = S3.label + "_R0_T1_S0" # Hole
S9.label = S9.label + "_R0_T2_S0" # Hole
surf_list = [S1, S2, S3, S9]
elif self.magnet_0 and not self.magnet_1 and self.W1 == 0:
S1.label = S1.label + "_R0_T0_S0" # Hole
S10.label = S10.label + "_R0_T1_S0" # Hole
surf_list = [S1, S2, S10]
elif not self.magnet_0 and self.magnet_1 and self.W1 > 0:
S8.label = S8.label + "_R0_T0_S0" # Hole
S6.label = S6.label + "_R0_T1_S0" # Hole
S4.label = S4.label + "_R0_T2_S0" # Hole
surf_list = [S8, S6, S5, S4]
elif not self.magnet_0 and self.magnet_1 and self.W1 == 0:
S11.label = S11.label + "_R0_T0_S0" # Hole
S4.label = S4.label + "_R0_T2_S0" # Hole
surf_list = [S11, S5, S4]
elif not self.magnet_0 and not self.magnet_1 and self.W1 > 0:
S8.label = S8.label + "_R0_T0_S0" # Hole
S9.label = S9.label + "_R0_T1_S0" # Hole
surf_list = [S8, S9]
elif not self.magnet_0 and not self.magnet_1 and self.W1 == 0:
S12.label = S12.label + "_R0_T0_S0" # Hole
surf_list = [S12]
# Apply the transformation
for surf in surf_list:
surf.rotate(alpha)
surf.translate(delta)
return surf_list
|
python
|
import pydantic
import os
import yaml
import re
import typing
@pydantic.dataclasses.dataclass(frozen=True, order=True)
class RegexTestCase:
text: pydantic.constr()
matches: typing.Optional[typing.List[str]] = None
def run(self, regex):
""" evaluate the test case against the pattern """
actual = regex.match(self.text)
link = regex.get_regexr_debug_link()
msg = f"{self.text} match of {regex.pattern} != {self.matches}: {link}"
if self.matches is None:
assert actual is None, msg
elif len(self.matches) == 1:
assert self.matches[0] == actual.group(0), msg
else:
for i in range(len(self.matches)):
assert self.matches[i] == actual.group(i + 1), msg
@pydantic.dataclasses.dataclass()
class RegularExpression:
pattern: pydantic.constr(min_length=2)
description: pydantic.constr(min_length=3)
test_cases: typing.List[RegexTestCase]
@classmethod
def from_yaml(cls, expression_name: str, folder: str = None):
environment_path = os.environ.get("SAFE_REGEX_PATH")
if folder:
working_folder = folder
elif environment_path:
working_folder = environment_path
else:
working_folder = os.getcwd()
file_path = os.path.join(working_folder, f"{expression_name}.re.yaml")
with open(file_path, "r") as yaml_file:
yaml_data = yaml.safe_load(yaml_file)
return cls(**yaml_data)
def __post_init_post_parse__(self):
self.regex = re.compile(self.pattern)
self.flags = self.regex.flags
self.groups = self.regex.groups
self.groupindex = self.regex.groupindex
class Config:
extra = "forbid"
def test(self):
for test_case in self.test_cases:
test_case.run(self)
def get_regexr_debug_link(self) -> str:
import urllib.parse
match = [tc.text for tc in self.test_cases if tc.matches is not None]
not_match = [tc.text for tc in self.test_cases if tc.matches is None]
tests = "These should match\n{}\nThese should not match\n{}".format(
"\n".join(sorted(match)),
"\n".join(sorted(not_match)),
)
params = {"expression": f"/{self.pattern}/gms", "text": tests}
encoded_params = urllib.parse.urlencode(params)
return f"https://regexr.com/?{encoded_params}"
"""
pass through to re.Pattern
"""
def search(self, *args, **kwargs):
return self.regex.search(*args, **kwargs)
def match(self, *args, **kwargs):
return self.regex.match(*args, **kwargs)
def fullmatch(self, *args, **kwargs):
return self.regex.fullmatch(*args, **kwargs)
def split(self, *args, **kwargs):
return self.regex.split(*args, **kwargs)
def findall(self, *args, **kwargs):
return self.regex.findall(*args, **kwargs)
def finditer(self, *args, **kwargs):
return self.regex.finditer(*args, **kwargs)
def sub(self, *args, **kwargs):
return self.regex.sub(*args, **kwargs)
def subn(self, *args, **kwargs):
return self.regex.subn(*args, **kwargs)
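# Example layout for a "<expression_name>.re.yaml" file consumed by from_yaml above
# (a sketch; the pattern and test strings are made up for illustration):
#
#   pattern: '^(\d{4})-(\d{2})$'
#   description: 'year-month stamps'
#   test_cases:
#     - text: '2021-06'
#       matches: ['2021', '06']
#     - text: 'not a date'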
|
python
|
#!/usr/bin/env python3
__author__ = "Leon Wetzel"
__copyright__ = "Copyright 2021, Leon Wetzel"
__credits__ = ["Leon Wetzel"]
__license__ = "MIT"
__version__ = "1.0.1"
__maintainer__ = "Leon Wetzel"
__email__ = "[email protected]"
__status__ = "Production"
import praw
from nltk import sent_tokenize
def main():
with open("reddit_secret.txt", "r", encoding='utf-8') as F:
secret = F.read()
with open("reddit_id.txt", "r", encoding='utf-8') as F:
identifier = F.read()
with open("reddit_user_id.txt", "r", encoding='utf-8') as F:
user_id = F.read()
reddit = praw.Reddit(
client_id=identifier,
client_secret=secret,
user_agent="Ashleigh Dev Team",
)
subject = reddit.redditor(name=user_id)
comments = [comment.body for comment
in subject.comments.new(limit=None)]
sentences_per_comment = [sent_tokenize(comment)
for comment in comments]
sentences = [s.replace("\n", "").strip()
for comment_sentences in sentences_per_comment
for s in comment_sentences]
with open("quips.txt", "w", encoding='utf-8') as F:
for sentence in sentences:
F.write(f"{sentence}\n")
if __name__ == '__main__':
main()
|
python
|
# coding: utf-8
# # Download TCGA Pan-Cancer Datasets from the UCSC Xena Browser
#
# This notebook downloads TCGA datasets for Project Cognoma. The file contents (text) remain unmodified, but files are given extensions and bzip2 compressed.
#
# [See here](https://genome-cancer.soe.ucsc.edu/proj/site/xena/datapages/?cohort=TCGA%20Pan-Cancer%20%28PANCAN%29 "Xena: cohort: TCGA Pan-Cancer (PANCAN)") for all TCGA Pan-Cancer datasets on Xena.
# In[1]:
import os
import bz2
from urllib.request import urlretrieve
# In[2]:
def bzip2_compress(path, keep=False):
"""
Compress a file using bzip2 compression.
Designed to mirror the functionality of the
`bzip2 --compress $PATH` shell command.
`keep` specifies whether to keep the uncompressed file after compression.
"""
with open(path, 'rb') as reader, bz2.open(path + '.bz2', 'wb') as writer:
writer.writelines(reader)
if not keep:
os.remove(path)
# Documentation for the TCGA Pan-Cancer files from the Xena browser:
#
# + [`HiSeqV2`](https://genome-cancer.soe.ucsc.edu/proj/site/xena/datapages/?dataset=TCGA.PANCAN.sampleMap/HiSeqV2&host=https://tcga.xenahubs.net)
# + [`PANCAN_clinicalMatrix`](https://genome-cancer.soe.ucsc.edu/proj/site/xena/datapages/?dataset=TCGA.PANCAN.sampleMap/PANCAN_clinicalMatrix&host=https://tcga.xenahubs.net)
# + [`PANCAN_mutation`](https://genome-cancer.soe.ucsc.edu/proj/site/xena/datapages/?dataset=TCGA.PANCAN.sampleMap/PANCAN_mutation&host=https://tcga.xenahubs.net)
# In[3]:
base_url = 'https://tcga.xenahubs.net/download/TCGA.PANCAN.sampleMap/'
names = [
'PANCAN_clinicalMatrix',
'PANCAN_mutation',
'HiSeqV2',
]
# In[4]:
# Download metadata
os.makedirs('download', exist_ok=True)
for name in names:
url = base_url + name + '.json'
path = os.path.join('download', name + '.json')
urlretrieve(url, path)
# In[5]:
# Download data
for name in names:
url = base_url + name
path = os.path.join('download', name + '.tsv')
urlretrieve(url, path)
bzip2_compress(path)
|
python
|
def palindromeRearranging(inputString):
characters = {}
error_count = 0
for character in inputString:
if character not in characters:
characters[character] = 1
else:
characters[character] += 1
# a palindrome rearrangement exists when at most one character count is odd
for value in characters.values():
if value % 2 != 0:
error_count += 1
return error_count <= 1
print(palindromeRearranging("aaabb"))
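# Equivalent sketch using collections.Counter (assumption: same rule, i.e. the string
# can be rearranged into a palindrome iff at most one character count is odd):
#
#     from collections import Counter
#     def palindrome_rearranging(s):
#         return sum(c % 2 for c in Counter(s).values()) <= 1
#
#     palindrome_rearranging("aaabb")  # True
#     palindrome_rearranging("aabc")   # False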
|
python
|
from .mock_plugin import MockPlugin
__all__ = ['MockPlugin']
|
python
|
# encoding: utf-8
import os.path as osp
from .bases import BaseImageDataset
class target_test(BaseImageDataset):
"""
target_test: only contains camera ID, no class ID information
Dataset statistics:
"""
dataset_dir = 'target_test'
def __init__(self, root='./example/data/challenge_datasets', verbose=True, **kwargs):
super(target_test, self).__init__()
self.dataset_dir = osp.join(root, self.dataset_dir)
self.query_dir = osp.join(self.dataset_dir, 'image_query/')
self.gallery_dir = osp.join(self.dataset_dir, 'image_gallery/')
self._check_before_run()
self.query = self._process_dir(self.query_dir, 'index_test_query.txt')
self.gallery = self._process_dir(self.gallery_dir, 'index_test_gallery.txt')
if verbose:
print("=> target_validation loaded")
self.print_dataset_statistics_validation(self.query, self.gallery)
def _check_before_run(self):
"""Check if all files are available before going deeper"""
if not osp.exists(self.dataset_dir):
raise RuntimeError("'{}' is not available".format(self.dataset_dir))
if not osp.exists(self.query_dir):
raise RuntimeError("'{}' is not available".format(self.query_dir))
if not osp.exists(self.gallery_dir):
raise RuntimeError("'{}' is not available".format(self.gallery_dir))
def _process_dir(self, dir_path, images_doc):
image_list = osp.join(self.dataset_dir, images_doc)
with open(image_list) as f:
info = f.readlines() # each line: image_name image_id
dataset = []
for element in info:
image_name, image_id = element.split(' ')[0], element.split(' ')[1].strip()
pid = 0 # the target test set has no labels, so set ID 0 for all images
dataset.append((osp.join(dir_path, image_name), pid, image_id))
return dataset
|
python
|
#!/usr/bin/python
from bs4 import BeautifulSoup
import urllib2
import csv
import codecs
linksList = []
#f = csv.writer(open("worksURI.csv", "w"))
#f.writerow(["Name", "Link"])
f = codecs.open("alllinkstrial.txt", encoding='utf-8', mode='w+')
#'http://www.isfdb.org/cgi-bin/ea.cgi?12578 ',
target_url=['http://www.isfdb.org/cgi-bin/ea.cgi?48861']
for thing in target_url:
code = urllib2.urlopen(thing)
soup = BeautifulSoup(code.read(), 'html.parser')
#this finds all html tags with <a> and a href value, so all the links.
for link in soup.find_all('a', href=True):
names = link.contents[0]
fullLink = link.attrs['href']
#this sets the links as rows and writes them to the text file
row=names+","+fullLink+"\n"
f.write(row)
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 16 21:11:25 2019
@author: Jorge
"""
import tkinter as tk
from tkinter import ttk
import crearcapasconv
from threading import Thread
import sys
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
from tkinter import filedialog as fd
from tkinter import messagebox as mb
import sklearn
import CNN
import menu
class Display(tk.Frame):
def __init__(self,parent=0):
tk.Frame.__init__(self,parent)
self.output = tk.Text(self, width=80, height=15)
self.output.pack(padx = 30, pady = 5,)
sys.stdout = self
self.pack()
def flush(self):
pass
def write(self, txt):
self.output.insert(tk.END,str(txt))
self.output.see("end")
self.update_idletasks()
# Function that creates a window for entering the parameters needed to build a convolutional network
def Ventana_convolucional(ventana_seleccion,X_train,Y_train,X_test,Y_test,ventana_inicio):
ventana_convolucion = tk.Toplevel(ventana_seleccion)
ventana_convolucion.geometry('725x600+500+200')
# Insert the menu
menu.menu(ventana_convolucion,ventana_inicio)
# Hide the previous window
ventana_seleccion.withdraw()
# Title
labeltitulo = ttk.Label(ventana_convolucion,text = "Parámetros necesarios para la red Convolucional",
foreground = "#054FAA",font=("Arial Bold", 15))
labeltitulo.pack(pady=10)
lframe = ttk.Frame(ventana_convolucion)
lframe.pack()
# ------------------------ data input ---------------------------------
# Batch size
tamlot = tk.IntVar()
lbtamlote = ttk.Label(lframe,text = "Tamaño lote: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbtamlote.grid(column=0, row=0 ,pady=5,sticky=tk.W)
etamlot = ttk.Entry(lframe,width=5, textvariable = tamlot)
etamlot.grid(column=1, row=0,pady=5,sticky=tk.E)
# Optimizer
opt =tk.StringVar()
lbopt = ttk.Label(lframe, text="Optimizador: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbopt.grid(column=0, row=1,pady=5,sticky=tk.W)
cbopt=ttk.Combobox(lframe,width=9,state="readonly",textvariable = opt)
cbopt["values"] = ["SGD", "RMSprop","Adam","Adagrad"]
cbopt.grid(column = 1 ,row = 1,pady=5,columnspan=2)
cbopt.current(0)
# Validation split
pv = tk.DoubleVar()
pv.set(0.2)
lbpv = ttk.Label(lframe,text = "Proporción de Validación :",
foreground = "#054FAA",font=("Arial Bold", 12))
lbpv.grid(column=0, row=2 ,pady=5,sticky=tk.W)
epv = ttk.Entry(lframe,width=5, textvariable = pv)
epv.grid(column=1, row=2,pady=5,sticky=tk.E)
# Number of convolutional layers
ncon = tk.IntVar()
lbncon = ttk.Label(lframe,text = "Número capas Convolucionales :",
foreground = "#054FAA",font=("Arial Bold", 12))
lbncon.grid(column=0, row=3 ,pady=5,sticky=tk.W)
encon = ttk.Entry(lframe,width=5, textvariable = ncon)
encon.grid(column=1, row=3,pady=5,sticky=tk.E)
# Number of fully connected layers
ncfc = tk.IntVar()
lbncfc = ttk.Label(lframe,text = "Número capas completamente conectadas :",
foreground = "#054FAA",font=("Arial Bold", 12))
lbncfc.grid(column=0, row=4 ,pady=5,sticky=tk.W)
encfc = ttk.Entry(lframe,width=5, textvariable = ncfc)
encfc.grid(column=1, row=4,pady=5,sticky=tk.E)
# Loss function
fl =tk.StringVar()
lbfl = ttk.Label(lframe, text="Función Loss: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbfl.grid(column=0, row=5,pady=5,sticky=tk.W)
cbfl=ttk.Combobox(lframe,width=21,state="readonly",textvariable = fl)
cbfl["values"] = ["kullback_leibler_divergence","mean_squared_error", "categorical_hinge",
"categorical_crossentropy","binary_crossentropy","poisson","cosine_proximity"]
cbfl.grid(column = 1 ,row = 5,pady=5,columnspan=2,sticky=tk.E)
cbfl.current(3)
# Stopping method
labeltitulo1 = ttk.Label(ventana_convolucion,text = "Método de parada",
foreground = "#054FAA",font=("Arial Bold", 15))
labeltitulo1.pack(pady=10)
lframe1 = ttk.Frame(ventana_convolucion)
lframe1.pack()
mp=tk.IntVar()
bat1= ttk.Radiobutton(lframe1, value=0,variable=mp)
bat1.grid(column=0, row=0)
# Number of iterations before stopping
nui=tk.IntVar()
lbnui = ttk.Label(lframe1, text="Número de iteraciones: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbnui.grid(column=1, row=0,pady=5,sticky=tk.W)
enui = ttk.Entry(lframe1,width=5, textvariable = nui)
enui.grid(column=2, row=0,pady=5,sticky=tk.E)
bat2 = ttk.Radiobutton(lframe1, value=1,variable=mp)
bat2.grid(column=0, row=1)
lbparada = ttk.Label(lframe1, text="Parada temprana: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbparada.grid(column = 1, row = 1,sticky=tk.W )
# Metric to monitor for early stopping
lbcon = ttk.Label(lframe1, text=" Parámetro a controlar: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbcon.grid(column = 1, row = 2,pady=5,sticky=tk.W )
con =tk.StringVar()
cbcon=ttk.Combobox(lframe1,width=9,state="readonly",textvariable = con)
cbcon["values"] = ["loss","val_loss", "acc","val_acc"]
cbcon.grid(column = 2 ,row = 2,pady=5,sticky=tk.E)
cbcon.current(0)
# Minimum delta
delt =tk.DoubleVar()
delt.set(0.001)
lbdelt = ttk.Label(lframe1, text=" Delta min: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbdelt.grid(column=1, row=3,pady=5,sticky=tk.W)
edelt = ttk.Entry(lframe1,width=5, textvariable = delt)
edelt.grid(column=2, row=3,pady=5,sticky=tk.E)
# Patience before stopping
pat =tk.IntVar()
pat.set(3)
lbpat = ttk.Label(lframe1, text=" Paciencia: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbpat.grid(column=1, row=4,pady=5,sticky=tk.W)
epat = ttk.Entry(lframe1,width=5, textvariable = pat)
epat.grid(column=2, row=4,pady=5,sticky=tk.E)
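# The three fields above (metric to monitor, minimum delta, patience) map directly
# onto a Keras EarlyStopping callback. Illustrative sketch only -- the real training
# code lives in the CNN module, and the exact wiring there may differ:
#
#     from keras.callbacks import EarlyStopping
#     early_stop = EarlyStopping(monitor=con.get(),     # e.g. "val_loss"
#                                min_delta=delt.get(),  # e.g. 0.001
#                                patience=pat.get())    # e.g. 3
#     model.fit(x, y, callbacks=[early_stop])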
# Function that defines the model
def crearmodelo():
global NO,AC,BA,DR,NF,TF,RE,PAS,PO,TAMPOL,PASPOL,ACON,DRC,numero_capas_conv,numero_capas_fc
numero_capas_conv = int(ncon.get())
numero_capas_fc = int(ncfc.get())
NF,TF,RE,PAS,PO,TAMPOL,PASPOL,ACON,DRC,NO,AC,BA,DR = crearcapasconv.capas(numero_capas_conv,
numero_capas_fc, ventana_convolucion)
btnmodelo = ttk.Button(ventana_convolucion, text = "Crear modelo",style='my.TButton', command=crearmodelo)
btnmodelo.pack(pady=40)
lframe2 = ttk.Frame(ventana_convolucion)
lframe2.pack(side= "bottom")
# Function that trains the convolutional network
def entrenar():
lote = tamlot.get()
optimizador = opt.get()
prop_val = pv.get()
numero_capas_convolucionales = int(ncon.get())
numero_capas_fullcon = int(ncfc.get())
loss = fl.get()
parada = mp.get()
iteraciones = nui.get()
control = con.get()
delta = delt.get()
paciencia = pat.get()
# Input validation checks
if lote == 0:
mb.showerror("Error", "Variable tamaño del lote = 0 ")
return
if prop_val == 0:
mb.showerror("Error", "El algoritmo necesita una parte del conjunto de entrenamiento para su validación ")
return
if prop_val > 1:
mb.showerror("Error", "Proporción de validación no válida ")
return
if numero_capas_convolucionales == 0:
mb.showerror("Error", "Variable numero de capas convolucionales = 0 ")
return
if numero_capas_fullcon == 0:
mb.showerror("Error", "Variable numero de capas completamente conectadas = 0 ")
return
if parada == 0 and iteraciones==0:
mb.showerror("Error", "No se ha indicado el número de iteraciones requeridas ")
return
if parada == 1 and delta==0.0:
mb.showerror("Error", "No se ha indicado el mínimo delta para controlar la evolución ")
return
while True:
try:
NF
break
except NameError:
mb.showerror("Error", "No se ha creado el modelo, haga click en crear modelo ")
return
for i in range(numero_capas_convolucionales) :
if NF[i].get()==0:
mb.showerror("Error", "Número de filtros = 0 ")
return
for i in range(numero_capas_convolucionales) :
if TF[i].get()==0:
mb.showerror("Error", "Tamaño de filtro = 0 ")
return
for i in range(numero_capas_convolucionales) :
if PAS[i].get()==0:
mb.showerror("Error", "Número de pasos = 0 ")
return
for i in range(numero_capas_convolucionales) :
if TAMPOL[i].get()==0:
mb.showerror("Error", "Tamaño de Pooling = 0 ")
return
for i in range(numero_capas_convolucionales) :
if PASPOL[i].get()==0:
mb.showerror("Error", "Pasos de pooling = 0 ")
return
for i in range(numero_capas_convolucionales) :
if DRC[i].get()> 1:
mb.showerror("Error", "Dropout no válido ")
return
for i in range(numero_capas_fullcon) :
if NO[i].get()==0:
mb.showerror("Error", "No es posible tener capas con 0 neuronas, asegurese de haber creado el modelo correctamente ")
return
for i in range(numero_capas_fullcon) :
if DR[i].get() > 1:
mb.showerror("Error", "Valor Dropout no válido ")
return
# Window where training progress will be displayed
ventana_display = tk.Toplevel(ventana_convolucion)
labeltitulo1 = ttk.Label(ventana_display,text = "Entrenamiento",
foreground = "#054FAA",font=("Arial Bold", 15))
labeltitulo1.pack(pady=5)
# Function that plots the training history
def plot():
ventana_plot = tk.Toplevel(ventana_convolucion)
ventana_plot.geometry('900x600')
f = Figure(figsize = (5,5),dpi = 100)
a = f.add_subplot(121)
b = f.add_subplot(122)
# Summarize and plot the accuracy history
a.plot(entrenamiento.history['acc'])
a.plot(entrenamiento.history['val_acc'])
a.set_title('Precisión del modelo')
a.set_ylabel('Precisión')
a.set_xlabel('Iteraciones')
a.legend(['Entrenamiento', 'Validación'], loc='upper left')
# summarize history for loss
b.plot(entrenamiento.history['loss'])
b.plot(entrenamiento.history['val_loss'])
b.set_title('Loss del modelo')
b.set_ylabel('Loss')
b.set_xlabel('Iteraciones')
b.legend(['Entrenamiento', 'Validación'], loc='upper left')
canvas1 = FigureCanvasTkAgg(f,ventana_plot)
canvas1.get_tk_widget().pack(side = tk.TOP,fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas1,ventana_plot)
toolbar.update()
canvas1._tkcanvas.pack(side = tk.TOP,fill = tk.BOTH, expand = True)
def guardarcompl():
nombrearch=fd.asksaveasfilename(initialdir = "/",title = "Guardar como",defaultextension = 'h5')
model.save(nombrearch)
mb.showinfo("Información", "Los datos fueron guardados.")
def guardarpesos():
nombrearch=fd.asksaveasfilename(initialdir = "/",title = "Guardar como",defaultextension = 'h5')
model.save_weights(nombrearch)
mb.showinfo("Información", "Los datos fueron guardados.")
def atras():
ventana_display.destroy()
framebotones = ttk.Frame(ventana_display)
framebotones.pack(side= "bottom")
btnguardarcompl = ttk.Button(framebotones, text="Modelo completo",
command=guardarcompl,style='my.TButton',width = 15)
btnguardarcompl.grid(row = 0, column = 0, padx = 10, pady = 5,sticky=tk.W)
btnguardarpesos = ttk.Button(framebotones, text="Pesos",
command=guardarpesos,style='my.TButton',width = 15)
btnguardarpesos.grid(row = 0, column = 1, padx = 10, pady = 5,sticky=tk.W)
btnplot = ttk.Button(framebotones, text="Plot",
command=plot,style='my.TButton',width = 15)
btnplot.grid(row = 1, column = 0, padx = 10, pady = 5,sticky=tk.W)
btnatras = ttk.Button(framebotones, text="Atrás",
command=atras,style='my.TButton',width = 15)
btnatras.grid(row = 1, column = 1, padx = 10, pady = 5,sticky=tk.W)
def pantalla():
Display(ventana_display)
def run():
global model, entrenamiento
while True:
try:
model, entrenamiento = CNN.cnn(ventana_convolucion,ventana_display,X_train,Y_train,X_test,Y_test,
lote,optimizador,prop_val,numero_capas_convolucionales,numero_capas_fullcon,loss,
parada,iteraciones,control,delta,paciencia, NF,TF,RE,PAS,PO,TAMPOL,PASPOL,ACON,DRC,NO,AC,BA,DR)
break
except tk.TclError:
mb.showerror("Error desconocido", "Por favor vuelva a intentarlo ")
ventana_display.destroy()
return
except RuntimeError:
mb.showerror("Error desconocido", "Por favor reinicie la aplicación ")
ventana_display.destroy()
return
except sklearn.metrics.classification.UndefinedMetricWarning:
mb.showerror("Error ", "Algo salió mal con los datos, reinicie la aplicación y vuelva a intentarlo ")
ventana_display.destroy()
return
t1=Thread(target=pantalla)
t2=Thread(target=run)
t1.start()
t2.start()
btntrain = ttk.Button(lframe2, text = "Entrenar",style='my.TButton', command=entrenar)
btntrain.grid(row = 0, column = 1, padx = 20, pady=15)
def atras():
ventana_convolucion.destroy()
ventana_seleccion.deiconify()
btnatras = ttk.Button(lframe2, text = "Atras",style='my.TButton', command=atras)
btnatras.grid(row=0,column=0, padx = 20, pady=15)
|
python
|
#!/usr/bin/env python
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('classic')
fig = plt.figure()
population_age = [22,55,62,45,21,22,34,42,42,4,2,102,95,85,55,110,120,70,65,55,111,115,80,75,65,54,44,43,42,48]
bins = [0,10,20,30,40,50,60,70,80,90,100]
plt.hist(population_age, bins, histtype='bar', rwidth=0.8)
plt.xlabel('age groups')
plt.ylabel('Number of people')
plt.title('Histogram')
fig.savefig('/home/tmp/histo001.png')
|
python
|
# -----------------------------------------------------------------------------
# Copyright © 2014, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The Crisis Mapping Toolkit (CMT) v1 platform is licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# -----------------------------------------------------------------------------
import logging
logging.basicConfig(level=logging.ERROR)
try:
import cmt.ee_authenticate
except:
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
import cmt.ee_authenticate
import matplotlib
#matplotlib.use('tkagg')
import sys
import os
import ee
import cmt.domain
import cmt.mapclient_qt
import cmt.util.gui_util
'''
GUI related utilities too small for their own file
'''
def visualizeDomain(domain, show=True):
'''Draw all the sensors and ground truth from a domain'''
cmt.mapclient_qt.centerMap(domain.center[0], domain.center[1], 11)
for s in domain.sensor_list:
cmt.mapclient_qt.addToMap(*s.visualize(show=show))
if domain.ground_truth is not None:
cmt.mapclient_qt.addToMap(domain.ground_truth.mask(domain.ground_truth), {}, 'Ground Truth', False)
|
python
|
from django.shortcuts import render,redirect
from django.http import Http404
from django.contrib.auth.models import User
from .models import Project,Profile
from .forms import ProjectForm,ProfileForm,VoteForm
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializer import ProfileSerializer,ProjectSerializer
# Create your views here.
def home(request):
all_projects = Project.fetch_all_images()
return render(request,"Awards/index.html",{"all_images":all_projects})
@login_required(login_url='/accounts/login/')
def new_project(request):
current_user = request.user
if request.method == 'POST':
form = ProjectForm(request.POST,request.FILES)
if form.is_valid():
user_image = form.save(commit=False)
user_image.user = current_user
user_image.save()
return redirect('home')
else:
form = ProjectForm()
return render(request,"Awards/new_project.html",{"form":form})
@login_required(login_url='/accounts/login/')
def new_profile(request):
current_user = request.user
if request.method == 'POST':
form = ProfileForm(request.POST, request.FILES)
if form.is_valid():
profile = form.save(commit=False)
profile.prof_user = current_user
profile.profile_Id = request.user.id
profile.save()
return redirect('profile')
else:
form = ProfileForm()
return render(request, 'profile/new_profile.html', {"form": form})
@login_required(login_url='/accounts/login/')
def profile_edit(request):
current_user = request.user
if request.method == 'POST':
logged_user = Profile.objects.get(prof_user=request.user)
form = ProfileForm(request.POST, request.FILES, instance=logged_user)
if form.is_valid():
form.save()
return redirect('profile')
else:
form = ProfileForm()
return render(request,'profile/edit_profile.html',{'form':form})
@login_required(login_url='/accounts/login/')
def profile(request):
current_user = request.user
projects = Project.objects.filter(user = current_user)
try:
prof = Profile.objects.get(prof_user=current_user)
except ObjectDoesNotExist:
return redirect('new_profile')
return render(request,'profile/profile.html',{'profile':prof,'projects':projects})
def search_project(request):
if 'project' in request.GET and request.GET ["project"]:
search_term = request.GET.get("project")
searched_projects = Project.search_project_by_title(search_term)
message = f'{search_term}'
return render(request, 'search/search.html', {"message":message, "projects":searched_projects})
else:
message = "No search results yet!"
return render (request, 'search/search.html', {"message": message})
@login_required(login_url='/accounts/login/')
def project_review(request,project_id):
try:
single_project = Project.get_single_project(project_id)
average_score = round(((single_project.design + single_project.usability + single_project.content)/3),2)
if request.method == 'POST':
vote_form = VoteForm(request.POST)
if vote_form.is_valid():
single_project.vote_submissions+=1
if single_project.design == 0:
single_project.design = int(request.POST['design'])
else:
single_project.design = (single_project.design + int(request.POST['design']))/2
if single_project.usability == 0:
single_project.usability = int(request.POST['usability'])
else:
single_project.usability = (single_project.usability + int(request.POST['usability']))/2
if single_project.content == 0:
single_project.content = int(request.POST['content'])
else:
single_project.content = (single_project.content + int(request.POST['content']))/2
single_project.save()
return redirect('project_review',project_id)
else:
vote_form = VoteForm()
except Exception as e:
raise Http404()
return render(request,'Awards/project_review.html',{"vote_form":vote_form,"single_project":single_project,"average_score":average_score})
class ProfileList(APIView):
def get(self,request,format=None):
complete_profile = Profile.objects.all()
serializers = ProfileSerializer(complete_profile, many=True)
return Response(serializers.data)
class ProjectList(APIView):
def get(self,request,format=None):
projects = Project.objects.all()
serializers = ProjectSerializer(projects, many=True)
return Response(serializers.data)
|
python
|
from torchvision import datasets, transforms
def imagenet_transformer():
return transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
# standard ImageNet channel statistics
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
def cifar_transformer():
return transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
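# Usage sketch (illustrative): the transformers plug straight into the torchvision
# dataset constructors, e.g.
#
#     train_set = datasets.CIFAR10(root="./data", train=True, download=True,
#                                  transform=cifar_transformer())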
|
python
|
import base64
import json
import logging
from io import BytesIO
from urllib.parse import urljoin
import feedparser
from flask_babel import lazy_gettext as _
from PIL import Image
from authentication_document import AuthenticationDocument
from model import Hyperlink
from opds import OPDSCatalog
from problem_details import (
ERROR_RETRIEVING_DOCUMENT,
INTEGRATION_DOCUMENT_NOT_FOUND,
INVALID_CONTACT_URI,
INVALID_INTEGRATION_DOCUMENT,
LIBRARY_ALREADY_IN_PRODUCTION,
TIMEOUT,
)
from util.http import HTTP, RequestTimedOut
from util.problem_detail import ProblemDetail
class LibraryRegistrar(object):
"""Encapsulates the logic of the library registration process."""
def __init__(self, _db, do_get=HTTP.debuggable_get):
self._db = _db
self.do_get = do_get
self.log = logging.getLogger("Library registrar")
def reregister(self, library):
"""Re-register the given Library by fetching its authentication
document and updating its record appropriately.
This process will not be as thorough as one initiated manually
by the library administrator, but it can be used to
automatically keep us up to date on minor changes to a
library's description, logo, etc.
:param library: A Library.
:return: A ProblemDetail if there's a problem. Otherwise, None.
"""
result = self.register(library, library.library_stage)
if isinstance(result, ProblemDetail):
return result
# The return value may include new settings for contact
# hyperlinks, but we will not be changing any Hyperlink
# objects, since that might result in emails being sent out
# unexpectedly. The library admin must explicitly re-register
# for that to happen.
#
# Basically, we don't actually use any of the items returned
# by register() -- only the controller uses that stuff.
return None
def register(self, library, library_stage):
"""Register the given Library with this registry, if possible.
:param library: A Library to register or re-register.
:param library_stage: The library administrator's proposed value for
Library.library_stage.
:return: A ProblemDetail if there's a problem. Otherwise, a 2-tuple
(auth_document, new_hyperlinks).
`auth_document` is an AuthenticationDocument corresponding to
the library's authentication document, as found at auth_url.
`new_hyperlinks` is a list of Hyperlinks
that ought to be created for registration to be complete.
"""
hyperlinks_to_create = []
auth_url = library.authentication_url
auth_response = self._make_request(
auth_url,
auth_url,
_("No Authentication For OPDS document present at %(url)s", url=auth_url),
_("Timeout retrieving auth document %(url)s", url=auth_url),
_("Error retrieving auth document %(url)s", url=auth_url),
)
if isinstance(auth_response, ProblemDetail):
return auth_response
try:
auth_document = AuthenticationDocument.from_string(
self._db, auth_response.content
)
except Exception as e:
self.log.error(
"Registration of %s failed: invalid auth document.",
auth_url,
exc_info=e,
)
return INVALID_INTEGRATION_DOCUMENT
failure_detail = None
if not auth_document.id:
failure_detail = _("The OPDS authentication document is missing an id.")
if not auth_document.title:
failure_detail = _("The OPDS authentication document is missing a title.")
if auth_document.root:
opds_url = auth_document.root["href"]
else:
failure_detail = _(
"The OPDS authentication document is missing a 'start' link to the root OPDS feed."
)
if auth_document.id != auth_response.url:
failure_detail = _(
"The OPDS authentication document's id (%(id)s) doesn't match its url (%(url)s).",
id=auth_document.id,
url=auth_response.url,
)
if failure_detail:
self.log.error("Registration of %s failed: %s", auth_url, failure_detail)
return INVALID_INTEGRATION_DOCUMENT.detailed(failure_detail)
# Make sure the authentication document includes a way for
# patrons to get help or file a copyright complaint. These
# links must be stored in the database as Hyperlink objects.
links = auth_document.links or []
for rel, problem_title in [
("help", "Invalid or missing patron support email address"),
(
Hyperlink.COPYRIGHT_DESIGNATED_AGENT_REL,
"Invalid or missing copyright designated agent email address",
),
]:
uris = self._locate_email_addresses(rel, links, problem_title)
if isinstance(uris, ProblemDetail):
return uris
hyperlinks_to_create.append((rel, uris))
# Cross-check the opds_url to make sure it links back to the
# authentication document.
opds_response = self._make_request(
auth_url,
opds_url,
_("No OPDS root document present at %(url)s", url=opds_url),
_("Timeout retrieving OPDS root document at %(url)s", url=opds_url),
_("Error retrieving OPDS root document at %(url)s", url=opds_url),
allow_401=True,
)
if isinstance(opds_response, ProblemDetail):
return opds_response
content_type = opds_response.headers.get("Content-Type")
failure_detail = None
if opds_response.status_code == 401:
# This is only acceptable if the server returned a copy of
# the Authentication For OPDS document we just got.
if content_type != AuthenticationDocument.MEDIA_TYPE:
failure_detail = _(
"401 response at %(url)s did not yield an Authentication For OPDS document",
url=opds_url,
)
elif not self.opds_response_links_to_auth_document(opds_response, auth_url):
failure_detail = _(
"Authentication For OPDS document guarding %(opds_url)s does not match the one at %(auth_url)s",
opds_url=opds_url,
auth_url=auth_url,
)
elif content_type not in (OPDSCatalog.OPDS_TYPE, OPDSCatalog.OPDS_1_TYPE):
failure_detail = _(
"Supposed root document at %(url)s is not an OPDS document",
url=opds_url,
)
elif not self.opds_response_links_to_auth_document(opds_response, auth_url):
failure_detail = _(
"OPDS root document at %(opds_url)s does not link back to authentication document %(auth_url)s",
opds_url=opds_url,
auth_url=auth_url,
)
if failure_detail:
self.log.error("Registration of %s failed: %s", auth_url, failure_detail)
return INVALID_INTEGRATION_DOCUMENT.detailed(failure_detail)
auth_url = auth_response.url
try:
library.library_stage = library_stage
except ValueError:
return LIBRARY_ALREADY_IN_PRODUCTION
library.name = auth_document.title
if auth_document.website:
url = auth_document.website.get("href")
if url:
url = urljoin(opds_url, url)
library.web_url = url
else:
library.web_url = None
if auth_document.logo:
library.logo = auth_document.logo
elif auth_document.logo_link:
url = auth_document.logo_link.get("href")
if url:
url = urljoin(opds_url, url)
logo_response = self.do_get(url, stream=True)
try:
image = Image.open(logo_response.raw)
except Exception:
image_url = auth_document.logo_link.get("href")
self.log.error(
"Registration of %s failed: could not read logo image %s",
auth_url,
image_url,
)
return INVALID_INTEGRATION_DOCUMENT.detailed(
_("Could not read logo image %(image_url)s", image_url=image_url)
)
# Convert to PNG.
buffer = BytesIO()
image.save(buffer, format="PNG")
b64 = base64.b64encode(buffer.getvalue()).decode("utf8")
type = logo_response.headers.get(
"Content-Type"
) or auth_document.logo_link.get("type")
if type:
library.logo = "data:%s;base64,%s" % (type, b64)
else:
library.logo = None
problem = auth_document.update_library(library)
if problem:
self.log.error(
"Registration of %s failed: problem during registration: %s/%s/%s/%s",
auth_url,
problem.uri,
problem.title,
problem.detail,
problem.debug_message,
)
return problem
return auth_document, hyperlinks_to_create
def _make_request(
self, registration_url, url, on_404, on_timeout, on_exception, allow_401=False
):
allowed_codes = ["2xx", "3xx", 404]
if allow_401:
allowed_codes.append(401)
try:
response = self.do_get(
url, allowed_response_codes=allowed_codes, timeout=30
)
# We only allowed 404 above so that we could return a more
# specific problem detail document if it happened.
if response.status_code == 404:
return INTEGRATION_DOCUMENT_NOT_FOUND.detailed(on_404)
if not allow_401 and response.status_code == 401:
self.log.error(
"Registration of %s failed: %s is behind authentication gateway",
registration_url,
url,
)
return ERROR_RETRIEVING_DOCUMENT.detailed(
_("%(url)s is behind an authentication gateway", url=url)
)
except RequestTimedOut as e:
self.log.error(
"Registration of %s failed: timeout retrieving %s",
registration_url,
url,
exc_info=e,
)
return TIMEOUT.detailed(on_timeout)
except Exception as e:
self.log.error(
"Registration of %s failed: error retrieving %s",
registration_url,
url,
exc_info=e,
)
return ERROR_RETRIEVING_DOCUMENT.detailed(on_exception)
return response
@classmethod
def opds_response_links(cls, response, rel):
"""Find all the links in the given response for the given
link relation.
"""
# Look in the response itself for a Link header.
links = []
link = response.links.get(rel)
if link:
links.append(link.get("url"))
media_type = response.headers.get("Content-Type")
if media_type == OPDSCatalog.OPDS_TYPE:
# Parse as OPDS 2.
catalog = json.loads(response.content)
links = []
for k, v in catalog.get("links", {}).items():
if k == rel:
links.append(v.get("href"))
elif media_type == OPDSCatalog.OPDS_1_TYPE:
# Parse as OPDS 1.
feed = feedparser.parse(response.content)
for link in feed.get("feed", {}).get("links", []):
if link.get("rel") == rel:
links.append(link.get("href"))
elif media_type == AuthenticationDocument.MEDIA_TYPE:
document = json.loads(response.content)
if isinstance(document, dict):
links.append(document.get("id"))
return [urljoin(response.url, url) for url in links if url]
@classmethod
def opds_response_links_to_auth_document(cls, opds_response, auth_url):
"""Verify that the given response links to the given URL as its
Authentication For OPDS document.
The link might happen in the `Link` header or in the body of
an OPDS feed.
"""
links = []
try:
links = cls.opds_response_links(
opds_response, AuthenticationDocument.AUTHENTICATION_DOCUMENT_REL
)
except ValueError:
# The response itself is malformed.
return False
return auth_url in links
@classmethod
def _locate_email_addresses(cls, rel, links, problem_title):
"""Find one or more email addresses in a list of links, all with
a given `rel`.
:param rel: The rel for this type of link.
:param links: A list of dictionaries with keys 'rel' and 'href'
:param problem_title: The title to use in a ProblemDetail if no
valid links are found.
:return: Either a list of candidate links or a customized ProblemDetail.
"""
candidates = []
for link in links:
if link.get("rel") != rel:
# Wrong kind of link.
continue
uri = link.get("href")
value = cls._required_email_address(uri, problem_title)
if isinstance(value, str):
candidates.append(value)
# There were no relevant links.
if not candidates:
problem = INVALID_CONTACT_URI.detailed(
"No valid mailto: links found with rel=%s" % rel
)
problem.title = problem_title
return problem
return candidates
@classmethod
def _required_email_address(cls, uri, problem_title):
"""Verify that `uri` is a mailto: URI.
:return: Either a mailto: URI or a customized ProblemDetail.
"""
problem = None
on_error = INVALID_CONTACT_URI
if not uri:
problem = on_error.detailed("No email address was provided")
elif not uri.startswith("mailto:"):
problem = on_error.detailed(
_("URI must start with 'mailto:' (got: %s)") % uri
)
if problem:
problem.title = problem_title
return problem
return uri
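# Usage sketch (illustrative, names assumed): given a database session `_db` and a
# Library record, a registration attempt and its two possible outcomes look roughly like
#
#     registrar = LibraryRegistrar(_db)
#     result = registrar.register(library, library_stage)
#     if isinstance(result, ProblemDetail):
#         ...  # surface the problem document to the caller
#     else:
#         auth_document, hyperlinks_to_create = result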
|
python
|
#!/usr/bin/env python
# Copyright 2005-2009,2011 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
import glob
import os
import shutil
import sys
import subprocess
from distutils.core import setup, Command
from distutils.command.clean import clean as distutils_clean
from distutils.command.sdist import sdist as distutils_sdist
class clean(distutils_clean):
def run(self):
# In addition to what the normal clean run does, remove pyc
# and pyo and backup files from the source tree.
distutils_clean.run(self)
def should_remove(filename):
if (filename.lower()[-4:] in [".pyc", ".pyo"] or
filename.endswith("~") or
(filename.startswith("#") and filename.endswith("#"))):
return True
else:
return False
for pathname, dirs, files in os.walk(os.path.dirname(__file__)):
for filename in filter(should_remove, files):
try:
os.unlink(os.path.join(pathname, filename))
except EnvironmentError, err:
print str(err)
try:
os.unlink("MANIFEST")
except OSError:
pass
for base in ["coverage", "build", "dist"]:
path = os.path.join(os.path.dirname(__file__), base)
if os.path.isdir(path):
shutil.rmtree(path)
class sdist(distutils_sdist):
def run(self):
self.run_command("test")
distutils_sdist.run(self)
# make sure MANIFEST.in includes all tracked files
if subprocess.call(["hg", "status"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) == 0:
# contains the packaged files after run() is finished
included_files = self.filelist.files
process = subprocess.Popen(["hg", "locate"],
stdout=subprocess.PIPE)
out, err = process.communicate()
assert process.returncode == 0
tracked_files = out.splitlines()
for ignore in [".hgignore", ".hgtags"]:
tracked_files.remove(ignore)
assert not set(tracked_files) - set(included_files), \
"Not all tracked files included in tarball, update MANIFEST.in"
class build_sphinx(Command):
description = "build sphinx documentation"
user_options = [
("build-dir=", "d", "build directory"),
]
def initialize_options(self):
self.build_dir = None
def finalize_options(self):
self.build_dir = self.build_dir or "build"
def run(self):
docs = "docs"
target = os.path.join(self.build_dir, "sphinx")
self.spawn(["sphinx-build", "-b", "html", "-n", docs, target])
class test_cmd(Command):
description = "run automated tests"
user_options = [
("to-run=", None, "list of tests to run (default all)"),
("quick", None, "don't run slow mmap-failing tests"),
]
def initialize_options(self):
self.to_run = []
self.quick = False
def finalize_options(self):
if self.to_run:
self.to_run = self.to_run.split(",")
def run(self):
import tests
if tests.unit(self.to_run, self.quick):
raise SystemExit("Test failures are listed above.")
class coverage_cmd(Command):
description = "generate test coverage data"
user_options = [
("quick", None, "don't run slow mmap-failing tests"),
]
def initialize_options(self):
self.quick = None
def finalize_options(self):
self.quick = bool(self.quick)
def run(self):
import trace
tracer = trace.Trace(
count=True, trace=False,
ignoredirs=[sys.prefix, sys.exec_prefix])
def run_tests():
import mutagen
import mutagen._util
reload(mutagen._util)
reload(mutagen)
cmd = self.reinitialize_command("test")
cmd.quick = self.quick
cmd.ensure_finalized()
cmd.run()
tracer.runfunc(run_tests)
results = tracer.results()
coverage = os.path.join(os.path.dirname(__file__), "coverage")
results.write_results(show_missing=True, coverdir=coverage)
map(os.unlink, glob.glob(os.path.join(coverage, "[!m]*.cover")))
try:
os.unlink(os.path.join(coverage, "..setup.cover"))
except OSError:
pass
total_lines = 0
bad_lines = 0
for filename in glob.glob(os.path.join(coverage, "*.cover")):
lines = file(filename, "rU").readlines()
total_lines += len(lines)
bad_lines += len(
[line for line in lines if
(line.startswith(">>>>>>") and
"finally:" not in line and '"""' not in line)])
pct = 100.0 * (total_lines - bad_lines) / float(total_lines)
print "Coverage data written to", coverage, "(%d/%d, %0.2f%%)" % (
total_lines - bad_lines, total_lines, pct)
if pct < 98.66:
raise SystemExit(
"Coverage percentage went down; write more tests.")
if pct > 98.7:
raise SystemExit("Coverage percentage went up; change setup.py.")
if os.name == "posix":
data_files = [('share/man/man1', glob.glob("man/*.1"))]
else:
data_files = []
if __name__ == "__main__":
from mutagen import version_string
cmd_classes = {
"clean": clean,
"test": test_cmd,
"coverage": coverage_cmd,
"sdist": sdist,
"build_sphinx": build_sphinx,
}
setup(cmdclass=cmd_classes,
name="mutagen", version=version_string,
url="http://code.google.com/p/mutagen/",
description="read and write audio tags for many formats",
author="Michael Urman",
author_email="[email protected]",
license="GNU GPL v2",
classifiers=[
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Topic :: Multimedia :: Sound/Audio',
],
packages=["mutagen"],
data_files=data_files,
scripts=glob.glob("tools/m*[!~]"),
long_description="""\
Mutagen is a Python module to handle audio metadata. It supports ASF,
FLAC, M4A, Monkey's Audio, MP3, Musepack, Ogg FLAC, Ogg Speex, Ogg
Theora, Ogg Vorbis, True Audio, WavPack and OptimFROG audio files. All
versions of ID3v2 are supported, and all standard ID3v2.4 frames are
parsed. It can read Xing headers to accurately calculate the bitrate
and length of MP3s. ID3 and APEv2 tags can be edited regardless of
audio format. It can also manipulate Ogg streams on an individual
packet/page level.
"""
)
|
python
|
# -*- coding: utf-8 -*-
# :Project: pglast -- DO NOT EDIT: automatically extracted from pg_trigger.h @ 13-2.0.6-0-ga248206
# :Author: Lele Gaifax <[email protected]>
# :License: GNU General Public License version 3 or later
# :Copyright: © 2017-2021 Lele Gaifax
#
from enum import Enum, IntEnum, IntFlag, auto
try:
from enum import StrEnum
except ImportError:
# Python < 3.11
class StrEnum(str, Enum):
pass
# #define-ed constants
TRIGGER_TYPE_ROW = 1 << 0
TRIGGER_TYPE_BEFORE = 1 << 1
TRIGGER_TYPE_INSERT = 1 << 2
TRIGGER_TYPE_DELETE = 1 << 3
TRIGGER_TYPE_UPDATE = 1 << 4
TRIGGER_TYPE_TRUNCATE = 1 << 5
TRIGGER_TYPE_INSTEAD = 1 << 6
TRIGGER_TYPE_STATEMENT = 0
TRIGGER_TYPE_AFTER = 0
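# Illustrative note: these constants are bit flags and combine with bitwise OR, so a
# row-level BEFORE INSERT trigger is described by
#
#     TRIGGER_TYPE_ROW | TRIGGER_TYPE_BEFORE | TRIGGER_TYPE_INSERT  # == 0b111 == 7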
|
python
|
import ops
import ops.cmd
import ops.env
import ops.cmd.safetychecks
VALID_OPTIONS = ['all', 'permanent', 'cached']
class PasswordDumpCommand(ops.cmd.DszCommand, ):
def __init__(self, plugin='passworddump', **optdict):
ops.cmd.DszCommand.__init__(self, plugin, **optdict)
def validateInput(self):
truecount = 0
for optkey in self.optdict:
optval = self.optdict[optkey]
if (type(optval) is not bool):
try:
optval = bool(optval)
self.optdict[optkey] = optval
except:
return False
if optval:
truecount += 1
if (truecount > 1):
return False
return True
def mySafetyCheck(self):
if not self.validateInput():
return (False, 'Invalid options: at most one of %s may be enabled' % ', '.join(VALID_OPTIONS))
if ops.env.get('OPS_NOINJECT').upper() == 'TRUE':
return (False, 'OPS_NOINJECT is set to TRUE, you should probably not run passworddump')
return (True, '')
ops.cmd.command_classes['passworddump'] = PasswordDumpCommand
ops.cmd.aliasoptions['passworddump'] = VALID_OPTIONS
ops.cmd.safetychecks.addSafetyHandler('passworddump', 'ops.cmd.passworddump.mySafetyCheck')
|
python
|
"""An App Template based on Bootstrap with a header, sidebar and main section"""
import pathlib
import awesome_panel.express as pnx
import panel as pn
from awesome_panel.express.assets import SCROLLBAR_PANEL_EXPRESS_CSS
BOOTSTRAP_DASHBOARD_CSS = pathlib.Path(__file__).parent / "bootstrap_dashboard.css"
BOOTSTRAP_DASHBOARD_TEMPLATE = pathlib.Path(__file__).parent / "bootstrap_dashboard.html"
HEADER_HEIGHT = 58
SIDEBAR_WIDTH = 200
# Hack to make dynamically adding plotly work:
# See https://github.com/holoviz/panel/issues/840
pn.extension("plotly")
class BootstrapDashboardTemplate(pn.Template):
"""A Basic App Template"""
def __init__(
self,
app_title: str = "App Name",
app_url="#",
):
pn.config.raw_css.append(BOOTSTRAP_DASHBOARD_CSS.read_text(encoding="utf8"))
pn.config.raw_css.append(SCROLLBAR_PANEL_EXPRESS_CSS.read_text(encoding="utf8"))
pnx.bootstrap.extend()
pnx.fontawesome.extend()
template = BOOTSTRAP_DASHBOARD_TEMPLATE.read_text(encoding="utf8")
app_title = pn.Row(
pn.pane.Markdown(
f"[{app_title}]({app_url})",
css_classes=["app-title"],
),
width=SIDEBAR_WIDTH,
sizing_mode="stretch_height",
)
header = pn.Row(
app_title,
pn.layout.HSpacer(),
sizing_mode="stretch_width",
height=HEADER_HEIGHT,
)
top_spacer = pn.layout.HSpacer(height=15)
self.header = header
self.sidebar = pn.Column(
top_spacer,
height_policy="max",
width=SIDEBAR_WIDTH,
)
self.main = pn.Column(
sizing_mode="stretch_width",
margin=(
25,
50,
25,
50,
),
)
items = {
"header": header,
"sidebar": self.sidebar,
"main": self.main,
}
super().__init__(
template=template,
items=items,
)
|
python
|
import codecs
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import itertools
def plot_confusion_matrix(cm,
target_names,
title='Confusion matrix',
prefix="",
cmap=None,
normalize=True,
save_dir="."
):
"""
given a sklearn confusion matrix (cm), make a nice plot
Arguments
---------
cm: confusion matrix from sklearn.metrics.confusion_matrix
target_names: given classification classes such as [0, 1, 2]
the class names, for example: ['high', 'medium', 'low']
title: the text to display at the top of the matrix
cmap: the gradient of the values displayed from matplotlib.pyplot.cm
see http://matplotlib.org/examples/color/colormaps_reference.html
plt.get_cmap('jet') or plt.cm.Blues
normalize: If False, plot the raw numbers
If True, plot the proportions
save_dir: parent directory in which to save the images
Usage
-----
plot_confusion_matrix(cm = cm, # confusion matrix created by
# sklearn.metrics.confusion_matrix
normalize = True, # show proportions
target_names = y_labels_vals, # list of names of the classes
title = best_estimator_name) # title of graph
Citation
---------
http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
This function was modified slightly by the QUIPP development team.
"""
accuracy = np.trace(cm) / float(np.sum(cm))
misclass = 1 - accuracy
if cmap is None:
cmap = plt.get_cmap('Blues')
plt.figure(figsize=(10, 10))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title, size=24)
cbar = plt.colorbar(fraction=0.03)
cbar.ax.tick_params(labelsize=24)
if target_names is not None:
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, size=20, rotation=90)
plt.yticks(tick_marks, target_names, size=20)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
plt.text(j, i, "{:0.4f}".format(cm[i, j]),
horizontalalignment="center",
fontsize=22,
color="white" if cm[i, j] > thresh else "black")
else:
plt.text(j, i, "{:,}".format(cm[i, j]),
horizontalalignment="center",
fontsize=22,
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label', size=28)
plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass), size=28)
plt.ylim(len(cm)-0.5, -0.5)
figpath = f"{prefix}_{title}_confusion_matrix.png"
save_path = os.path.join(save_dir, figpath)
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
plt.savefig(save_path, format="PNG", bbox_inches = "tight")
return os.path.abspath(save_path)
def plot_util_confusion_matrix(confusion_dict_path, method_names=None,
prefix="", normalize=False, save_dir="."):
dict_r = codecs.open(confusion_dict_path, 'r', encoding='utf-8').read()
confusion_dict = json.loads(dict_r)
if isinstance(method_names, str):
method_names = [method_names]
if method_names is None:
method_names = list(confusion_dict.keys())
plt_names = []
for method_name in method_names:
if method_name not in confusion_dict:
print(confusion_dict.keys())
raise ValueError(f"Method name: {method_name} is not in the dictionary.")
title = method_name
cm = np.array(confusion_dict[method_name]["conf_matrix"])
target_names = confusion_dict[method_name]["target_names"]
plt_name = plot_confusion_matrix(cm,
target_names=target_names,
normalize=normalize,
title=title,
prefix=prefix,
save_dir=save_dir
)
plt_names.append(plt_name)
return plt_names
|
python
|
class DataFilePath:
def __init__(self):
self.dataDir = '../data/citydata/season_1/'
return
def getOrderDir_Train(self):
return self.dataDir + 'training_data/order_data/'
def getOrderDir_Test1(self):
return self.dataDir + 'test_set_1/order_data/'
def getTest1Dir(self):
return self.dataDir + 'test_set_1/'
def getTest2Dir(self):
return self.dataDir + 'test_set_2/'
def getTrainDir(self):
return self.dataDir + 'training_data/'
def getGapCsv_Train(self):
return self.getOrderDir_Train() + self.getGapFilename()
def getGapCsv_Test1(self):
return self.getOrderDir_Test1() + self.getGapFilename()
def getTestset1Readme(self):
return self.dataDir + 'test_set_1/read_me_1.txt'
def getTestset2Readme(self):
return self.dataDir + 'test_set_2/read_me_2.txt'
def getGapFilename(self):
return "temp/gap.csv"
def getGapPredictionFileName(self):
return 'gap_prediction.csv'
def getPrevGapFileName(self):
return "temp/prevgap.df.pickle"
def get_dir_name(self, data_dir):
return data_dir.split('/')[-2]
g_singletonDataFilePath = DataFilePath()
|
python
|
from __future__ import print_function
import argparse
import logging
import os
import warnings
import torch.nn as nn
import torch.utils.data
from torch.utils.data import SubsetRandomSampler
from torch.utils.tensorboard import SummaryWriter
from Colorization import utils
from Multi_label_classification.dataset.dataset_big_earth_torch_mlc import BigEarthDatasetTorchMLC
from Multi_label_classification.job_config import set_params
from Multi_label_classification.metrics.metric import metrics_def
from Multi_label_classification.models.Ensemble import EnsembleModel
from Multi_label_classification.models.ResnetMLC import ResNetMLC
from Multi_label_classification.test import test
warnings.filterwarnings("ignore")
os.environ["OMP_NUM_THREADS"] = "1"
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
def main(args):
# enable cuda if available
args.cuda = args.cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
# READ JSON CONFIG FILE
assert os.path.isfile(args.json_config_file), "No json configuration file found at {}".format(args.json_config_file)
params = utils.Params(args.json_config_file)
# for change params related to job-id
params = set_params(params, args.id_optim)
# set the torch seed
torch.manual_seed(params.seed)
# initialize summary writer; every folder is saved inside runs
writer = SummaryWriter(params.path_nas + params.log_dir + '/runs/')
# create dir for log file
if not os.path.exists(params.path_nas + params.log_dir):
os.makedirs(params.path_nas + params.log_dir)
# save the json config file of the model
params.save(os.path.join(params.path_nas + params.log_dir, "params.json"))
# Set the logger
utils.set_logger(os.path.join(params.path_nas + params.log_dir, "log"))
# DATASET
# Torch version
big_earth = BigEarthDatasetTorchMLC(csv_path=params.dataset, random_seed=params.seed, bands_indices=params.bands,
img_size=params.img_size, n_samples=params.dataset_nsamples)
# Split
train_idx, val_idx, test_idx = big_earth.split_dataset(params.test_split, params.val_split)
test_sampler = SubsetRandomSampler(test_idx)
# define the loader
test_loader = torch.utils.data.DataLoader(big_earth, batch_size=params.batch_size,
sampler=test_sampler, num_workers=params.num_workers)
# MODELS definition for Ensemble
model_rgb = ResNetMLC(in_channels=3, out_cls=params.out_cls, resnet_version=params.resnet_version,
pretrained=0, colorization=0)
model_colorization = ResNetMLC(in_channels=9, out_cls=params.out_cls, resnet_version=params.resnet_version,
pretrained=0, colorization=1)
checkpoint = torch.load(args.rgb_checkpoint)
model_rgb.load_state_dict(checkpoint['state_dict'], strict=False)
checkpoint = torch.load(args.spectral_checkpoint)
model_colorization.load_state_dict(checkpoint['state_dict'], strict=False)
model = EnsembleModel(model_rgb=model_rgb, model_colorization=model_colorization, device=device)
# CUDA
model.to(device)
# loss for multilabel classification
loss_fn = nn.MultiLabelSoftMarginLoss()
# METRICS
metrics = metrics_def
logging.info("Starting final test with ensemble model...")
test(model=model, test_loader=test_loader, loss_fn=loss_fn,
device=device, metrics=metrics)
# CLOSE THE WRITER
writer.close()
if __name__ == '__main__':
# command line arguments
parser = argparse.ArgumentParser(description='multi_label_classification')
parser.add_argument('--cuda', action='store_true', default=True, help='enables CUDA training')
parser.add_argument('--json_config_file', default='Multi_label_classification/config/configuration.json', help='name of the json config file')
parser.add_argument('--id_optim', default=0, type=int, help='id_optim parameter')
parser.add_argument('--rgb_checkpoint', type=str, default=None, help='specify the rgb checkpoint path', required=True)
parser.add_argument('--spectral_checkpoint', type=str, default=None, help='specify the spectral checkpoint path', required=True)
# read the args
args = parser.parse_args()
main(args)
|
python
|
from django.shortcuts import render
from django.http import Http404
from django.views.generic.edit import UpdateView
from django.views.generic import ListView, View
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.utils.decorators import method_decorator
import logging
from ..models.projects import Project
from ..models.authors import Author
from ..forms import AuthorForm
from .permission_helpers import PermissionOnObjectViewMixin
# logger for this file
logger = logging.getLogger(__name__)
class AuthorListView(ListView):
"""A generic view of the authors in a list"""
paginate_by = 10
template_name = "code_doc/authors/author_list.html"
context_object_name = "authors"
model = Author
def detail_author(request, author_id):
try:
author = Author.objects.get(pk=author_id)
except Author.DoesNotExist:
raise Http404
project_list = Project.objects.filter(authors=author)
coauthor_list = (
Author.objects.filter(project__in=project_list).distinct().exclude(pk=author_id)
)
return render(
request,
"code_doc/authors/author_details.html",
{
"project_list": project_list,
"author": author,
"user": request.user,
"coauthor_list": coauthor_list,
},
)
class AuthorUpdateView(PermissionOnObjectViewMixin, UpdateView):
"""View for editing information about an Author
.. note:: in order to be able to edit an Author, the user should have the
'code_doc.author_edit' permission on the Author object.
"""
form_class = AuthorForm
model = Author
permissions_on_object = ("code_doc.author_edit",)
permissions_object_getter = "get_author_from_request"
template_name = "code_doc/authors/author_edit.html"
pk_url_kwarg = "author_id"
def get_author_from_request(self, request, *args, **kwargs):
# TODO check if needed
try:
return Author.objects.get(pk=kwargs["author_id"])
except Author.DoesNotExist:
logger.warning(
"[AuthorUpdateView] non existent Author with id %s", kwargs["author_id"]
)
return None
class MaintainerProfileView(View):
"""Manages the views associated to the maintainers"""
@method_decorator(login_required)
def get(self, request, maintainer_id):
try:
maintainer = User.objects.get(pk=maintainer_id)
except User.DoesNotExist:
raise Http404
projects = Project.objects.filter(administrators=maintainer)
return render(
request,
"code_doc/maintainer_details.html",
{"projects": projects, "maintainer": maintainer},
)
@method_decorator(login_required)
def post(self, request):
pass
|
python
|
from src.traces.traces import main as traces_main
import pandas as pd
def main():
db_path = '/Users/mossad/personal_projects/AL-public/src/crawler/crawled_kaggle.db'
traces_path = '/Users/mossad/personal_projects/AL-public/src/traces/extracted-traces.pkl'
clean_traces_path = '/Users/mossad/personal_projects/AL-public/src/traces/clean-traces.pkl'
# language = 'IPython Notebook'
language = 'Python'
traces_main(db_path, traces_path, language)
tr = pd.read_pickle(traces_path)
print()
if __name__ == '__main__':
main()
|
python
|
# -*- coding: UTF-8 -*-
# Copyright 2012-2017 Luc Saffre
#
# License: BSD (see file COPYING for details)
"""Defines the :class:`Page` model.
"""
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import pgettext_lazy
from django.utils.translation import get_language
from lino.api import dd, rt
from etgen.html import E, tostring_pretty
from lino.core.renderer import add_user_language
from lino import mixins
from django.conf import settings
from .utils import render_node
#~ class PageType(dbutils.BabelNamed,mixins.PrintableType,outbox.MailableType):
#~ templates_group = 'pages/Page'
#~ class Meta:
#~ verbose_name = _("Page Type")
#~ verbose_name_plural = _("Page Types")
#~ remark = models.TextField(verbose_name=_("Remark"),blank=True)
#~ def __unicode__(self):
#~ return self.name
#~ class PageTypes(dd.Table):
#~ """
#~ Displays all rows of :class:`PageType`.
#~ """
#~ model = 'pages.PageType'
#~ column_names = 'name build_method template *'
#~ order_by = ["name"]
#~ detail_layout = """
#~ id name
#~ build_method template email_template attach_to_email
#~ remark:60x5
#~ pages.PagesByType
#~ """
class Page(mixins.Referrable, mixins.Hierarchical, mixins.Sequenced):
class Meta:
verbose_name = _("Node")
verbose_name_plural = _("Nodes")
title = dd.BabelCharField(_("Title"), max_length=200, blank=True)
body = dd.BabelTextField(_("Body"), blank=True, format='plain')
raw_html = models.BooleanField(_("raw html"), default=False)
def get_absolute_url(self, **kwargs):
if self.ref:
if self.ref != 'index':
return dd.plugins.pages.build_plain_url(
self.ref, **kwargs)
return dd.plugins.pages.build_plain_url(**kwargs)
def get_sidebar_caption(self):
if self.title:
return dd.babelattr(self, 'title')
if self.ref == 'index':
return unicode(_('Home'))
if self.ref:
return self.ref
return str(self.id)
#~ if self.ref or self.parent:
#~ return self.ref
#~ return unicode(_('Home'))
def get_sidebar_item(self, request, other):
kw = dict()
add_user_language(kw, request)
url = self.get_absolute_url(**kw)
a = E.a(self.get_sidebar_caption(), href=url)
if self == other:
return E.li(a, **{'class':'active'})
return E.li(a)
def get_sidebar_html(self, request):
items = []
#~ loop over top-level nodes
for n in Page.objects.filter(parent__isnull=True).order_by('seqno'):
#~ items += [li for li in n.get_sidebar_items(request,self)]
items.append(n.get_sidebar_item(request, self))
if self.is_parented(n):
children = []
for ch in n.children.order_by('seqno'):
children.append(ch.get_sidebar_item(request, self))
if len(children):
items.append(E.ul(*children, **{'class':'nav nav-list'}))
e = E.ul(*items, **{'class':'nav nav-list'})
return tostring_pretty(e)
def get_sidebar_menu(self, request):
qs = Page.objects.filter(parent__isnull=True)
#~ qs = self.children.all()
yield ('/', 'index', str(_('Home')))
#~ yield ('/downloads/', 'downloads', 'Downloads')
#~ yield ('/about', 'about', 'About')
#~ if qs is not None:
for obj in qs:
if obj.ref and obj.title:
yield ('/' + obj.ref, obj.ref, dd.babelattr(obj, 'title'))
#~ else:
#~ yield ('/','index',obj.title)
#~ class PageDetail(dd.FormLayout):
#~ main = """
#~ ref title type:25
#~ project id user:10 language:8 build_time
#~ left right
#~ """
#~ left = """
# ~ # abstract:60x5
#~ body:60x20
#~ """
#~ right="""
#~ outbox.MailsByController
#~ postings.PostingsByController
#~ """
class PageDetail(dd.DetailLayout):
main = """
ref parent seqno
title
body
"""
class Pages(dd.Table):
model = 'pages.Page'
detail_layout = PageDetail()
column_names = "ref title *"
#~ column_names = "ref language title user type project *"
order_by = ["ref"]
#~ class MyPages(ByUser,Pages):
#~ required = dict(user_groups='office')
#~ column_names = "modified title type project *"
#~ label = _("My pages")
#~ order_by = ["-modified"]
#~ class PagesByType(Pages):
#~ master_key = 'type'
#~ column_names = "title user *"
#~ order_by = ["-modified"]
#~ if settings.SITE.project_model:
#~ class PagesByProject(Pages):
#~ master_key = 'project'
#~ column_names = "type title user *"
#~ order_by = ["-modified"]
def create_page(**kw):
#~ logger.info("20121219 create_page(%r)",kw['ref'])
return Page(**kw)
def lookup(ref, *args, **kw):
return Page.get_by_ref(ref, *args, **kw)
def get_all_pages():
return Page.objects.all()
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from annotator import annotation
from annotator import document
from h._compat import text_type
class Annotation(annotation.Annotation):
@property
def uri(self):
"""Return this annotation's URI or an empty string.
The uri is escaped and safe to be rendered.
The uri is a Markup object so it won't be double-escaped.
"""
uri_ = self.get("uri")
if uri_:
# Convert non-string URIs into strings.
# If the URI is already a unicode string this will do nothing.
# We're assuming that URI cannot be a byte string.
return text_type(uri_)
else:
return ""
@property
def parent(self):
"""
Return the thread parent of this annotation, if it exists.
"""
if 'references' not in self:
return None
if not isinstance(self['references'], list):
return None
if not self['references']:
return None
return Annotation.fetch(self['references'][-1])
@property
def target_links(self):
"""A list of the URLs to this annotation's targets."""
links = []
targets = self.get("target")
if isinstance(targets, list):
for target in targets:
if not isinstance(target, dict):
continue
source = target.get("source")
if source is None:
continue
links.append(source)
return links
@property
def document(self):
return self.get("document", {})
class Document(document.Document):
__analysis__ = {}
|
python
|
import os
import string
from file2quiz import utils, reader
def convert_quiz(input_dir, output_dir, file_format, save_files=False, *args, **kwargs):
print(f'##############################################################')
print(f'### QUIZ CONVERTER')
print(f'##############################################################\n')
# Get files
files = utils.get_files(input_dir, extensions={'json'})
# Create quizzes folder
convert_dir = os.path.join(output_dir, f"quizzes/{file_format}")
if save_files:
utils.create_folder(convert_dir, empty_folder=True)
# Set format
FILE_FORMATS = {"text": "txt", "anki": "txt"}
file_format = str(file_format).lower().strip().replace('.', '') # parse formats
output_ext = FILE_FORMATS.get(file_format, None)
# Fall back to plain text when the requested format is unknown
if output_ext is None:
print(f'\t- [ERROR] No method to save "{file_format}" files (fallback to "txt")')
file_format = "text"
output_ext = "txt"
# Convert quizzes
fquizzes = []
total_questions = 0
total_answers = 0
for i, filename in enumerate(files, 1):
tail, basedir = utils.get_tail(filename)
fname, ext = utils.get_fname(filename)
print("")
print(f'==============================================================')
print(f'[INFO] ({i}/{len(files)}) Converting quiz to "{file_format}": "{tail}"')
print(f'==============================================================')
# Read file
quiz = reader.read_json(filename)
solutions = sum([1 for q_id, q in quiz.items() if q.get('correct_answer') is not None])
total_answers += solutions
total_questions += len(quiz)
try:
fquiz = _convert_quiz(quiz, file_format, *args, **kwargs)
except ValueError as e:
print(f'\t- [ERROR] {e}. Skipping quiz "{tail}"')
continue
# Add formatted quizzes
fquizzes.append((fquiz, filename))
# Show info
if len(fquiz.strip()) == 0:
print(f"\t- [WARNING] No quiz were found ({tail})")
print(f"\t- [INFO] Conversion done! {len(quiz)} questions were found; {solutions} with solutions. ({tail})")
# Save quizzes
if save_files:
print(f"\t- [INFO] Saving file... ({tail}.txt)")
reader.save_txt(fquiz, os.path.join(convert_dir, f"{fname}.{output_ext}"))
# Check result
if not fquizzes:
print("\t- [WARNING] No quiz was converted successfully")
print("")
print("--------------------------------------------------------------")
print("SUMMARY")
print("--------------------------------------------------------------")
print(f"- [INFO] Quizzes converted: {len(fquizzes)}")
print(f"- [INFO] Questions found: {total_questions} (with solutions: {total_answers})")
print("--------------------------------------------------------------\n\n")
return fquizzes
def _convert_quiz(quiz, file_format, *args, **kwargs):
# Select format
if file_format == "anki":
return quiz2anki(quiz)
else: # Fallback to txt
return quiz2txt(quiz, *args, **kwargs)
def pdf2image(filename, savepath, dpi=300, img_format="tiff", **kwargs):
# This requires: ImageMagick
cmd = f'convert -density {dpi} "{filename}" -depth 8 -strip -background white -alpha off "{savepath}/page-%0d.{img_format}"'
os.system(cmd)
def image2text(filename, savepath, lang="eng", dpi=300, psm=3, oem=3, **kwargs):
# This requires: Tesseract
# Tesseract needs the save path without the extensions
basedir, tail = os.path.split(savepath)
fname, ext = os.path.splitext(tail)
# Run command
#sub_cmds = 'tessedit_char_whitelist="0123456789 abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYñÑçÇáéíóúÁÉíÓÚüÜ()¿?,;.:/-\"\'ºª%-+Ø=<>*"'
cmd = f'tesseract "{filename}" "{basedir}/{fname}" -l {lang} --dpi {dpi} --psm {psm} --oem {oem} letters' #-c {sub_cmds}
os.system(cmd)
def quiz2anki(quiz, **kwargs):
text = ""
# Sort questions by key
keys = sorted(quiz.keys(), key=utils.tokenize)
for i, id_question in enumerate(keys):
question = quiz[id_question]
# Check that there is a correct answer
if question.get('correct_answer') is None:
raise ValueError("No correct answer was given.")
# Format fields
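# The row built below is tab-separated: the question text, the 1-based index of the
# correct answer, then every answer option. For example, question "3. Capital?" with
# answers ["Madrid", "Paris"] and correct_answer == 1 becomes:
#   "3. Capital?\t2\tMadrid\tParis"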
fields = ["{}. {}".format(id_question, question['question']), str(int(question['correct_answer'])+1)] + question['answers']
text += "{}\n".format("\t".join(fields))
return text.strip()
def quiz2txt(quiz, show_answers, answer_table=False, **kwargs):
txt = ""
txt_answers = ""
# Sort questions by key
keys = sorted(quiz.keys(), key=utils.tokenize)
for i, id_question in enumerate(keys):
question = quiz[id_question]
# Format question
txt += "{}. {}\n".format(id_question, question['question'])
# Format answers
for j, ans in enumerate(question['answers']):
marker = ""
ans_id = string.ascii_lowercase[j].lower()
# Show correct answer?
if show_answers:
if j == question.get("correct_answer"): # correct answer
if answer_table:
txt_answers += f"{id_question} - {ans_id}\n"
else:
marker = "*"
txt += "{}{}) {}\n".format(marker, ans_id, ans)
txt += "\n"
# Add answer table at the end of the file if requested
if show_answers and answer_table:
txt += "\n\n\n=========\n\n\n" + txt_answers
return txt.strip()
def json2text(path, *args, **kwargs):
texts = []
files = utils.get_files(path, extensions="json")
for filename in files:
fname, extension = os.path.splitext(os.path.basename(filename))
# Load quiz and text
quiz = reader.read_json(filename)
quiz_txt = quiz2txt(quiz, *args, **kwargs)
texts.append((fname, quiz_txt))
return texts
def _pdf2word(filename, savepath, word_client=None):
try:
import win32com.client
import pywintypes
except ImportError as e:
raise ImportError("'pywin32' missing. You need to install it manually (only Windows): pip install pywin32")
# Create a Word client if there isn't any
if not word_client:
# Load word client
word_client = win32com.client.Dispatch("Word.Application")
word_client.visible = 0
try:
# Open word file
wb = word_client.Documents.Open(filename)
# File format for .docx
# https://docs.microsoft.com/en-us/office/vba/api/word.wdsaveformat
wb.SaveAs2(savepath, FileFormat=16)
wb.Close()
except pywintypes.com_error as e:
print(f"- [ERROR] There was an error converting the PDF file to DOCX. Skipping file. ({e})")
def pdf2word(input_dir, output_dir):
try:
import win32com.client
except ImportError as e:
raise ImportError("'pywin32' missing. You need to install it manually (only Windows): pip install pywin32")
# Get files
files = utils.get_files(input_dir, extensions={"pdf"})
# Create output dir
output_dir = os.path.join(output_dir, "docx-word")
utils.create_folder(output_dir, empty_folder=True)
# Load word client
word_client = win32com.client.Dispatch("Word.Application")
word_client.visible = 0
# Walk through files
for i, filename in enumerate(files, 1):
# Parse path
tail, basedir = utils.get_tail(filename)
fname, ext = utils.get_fname(filename)
print(f"#{i}. Converting *.pdf to *.docx ({tail})")
# Create save path
savepath = os.path.abspath(os.path.join(output_dir, f"{fname}.docx"))
# Convert pdf
_pdf2word(filename, savepath, word_client)
|
python
|
# Copyright (c) 2021 by xfangfang. All Rights Reserved.
#
# Using potplayer as DLNA media renderer
#
# Macast Metadata
# <macast.title>PotPlayer Renderer</macast.title>
# <macast.renderer>PotplayerRenderer</macast.renderer>
# <macast.platform>win32</macast.platform>
# <macast.version>0.4</macast.version>
# <macast.host_version>0.7</macast.host_version>
# <macast.author>xfangfang</macast.author>
# <macast.desc>PotPlayer support for Macast. This is a simple plugin that only supports play and stop.</macast.desc>
import subprocess
import threading
import time
import cherrypy
from macast import gui
from macast.renderer import Renderer
from macast.utils import SETTING_DIR
POTPLAYER_PATH = r'"C:\Program Files\DAUM\PotPlayer\PotPlayermini64.exe"'
subtitle = None
class PotplayerRenderer(Renderer):
def __init__(self):
super(PotplayerRenderer, self).__init__()
self.start_position = 0
self.position_thread_running = True
self.position_thread = threading.Thread(target=self.position_tick, daemon=True)
self.position_thread.start()
# a thread is started here to increase the playback position once per second
# to simulate that the media is playing.
def position_tick(self):
while self.position_thread_running:
time.sleep(1)
self.start_position += 1
sec = self.start_position
position = '%d:%02d:%02d' % (sec // 3600, (sec % 3600) // 60, sec % 60)
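# e.g. 3661 seconds -> '1:01:01' (hours unpadded, minutes and seconds zero-padded)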
self.set_state_position(position)
def set_media_stop(self):
subprocess.Popen(['taskkill', '/f', '/im', 'PotPlayerMini64.exe']).communicate()
self.set_state_transport('STOPPED')
cherrypy.engine.publish('renderer_av_stop')
def start_player(self, url):
try:
if subtitle is None:
subprocess.call('{} "{}"'.format(POTPLAYER_PATH, url))
else:
subprocess.call('{} "{}" /sub="{}"'.format(POTPLAYER_PATH, url, subtitle))
except Exception as e:
print(e)
self.set_media_stop()
cherrypy.engine.publish('app_notify', "Error", str(e))
def set_media_url(self, url, start=0):
self.set_media_stop()
self.start_position = 0
threading.Thread(target=self.start_player, daemon=True, kwargs={'url': url}).start()
self.set_state_transport("PLAYING")
cherrypy.engine.publish('renderer_av_uri', url)
def stop(self):
super(PotplayerRenderer, self).stop()
self.set_media_stop()
print("PotPlayer stop")
def start(self):
super(PotplayerRenderer, self).start()
print("PotPlayer start")
if __name__ == '__main__':
gui(PotplayerRenderer())
# or using cli to disable taskbar menu
# cli(PotplayerRenderer())
else:
import os
if os.path.exists(SETTING_DIR):
subtitle = os.path.join(SETTING_DIR, r"macast.ass")
if not os.path.exists(subtitle):
subtitle = None
|
python
|
import numpy as np
from metod_alg import objective_functions as mt_obj
from metod_alg import metod_algorithm_functions as mt_alg
def test_1():
"""
Test for mt_alg.forward_tracking() - check that when flag=True,
track is updated.
"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
point = np.random.uniform(0, 1, (d, ))
const_forward = 1.1
forward_tol = 1000000000
step = 0.0001
grad = g(point, *func_args)
f_old = f(np.copy(point), *func_args)
f_new = f(np.copy(point) - step * grad, *func_args)
assert(f_old > f_new)
track, flag = (mt_alg.forward_tracking(
point, step, f_old, f_new, grad,
const_forward, forward_tol, f,
func_args))
assert(flag == True)
assert(track[0][0] == 0)
for j in range(1, len(track)):
assert(track[j][0] == step)
step = step * const_forward
if j < len(track) - 1:
assert(track[j][1] < track[j-1][1])
else:
assert(track[j][1] > track[j-1][1])
def test_2():
"""
Test for mt_alg.forward_tracking() - check for flag=False.
"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
point = np.random.uniform(0, 1, (d, ))
const_forward = 1.1
forward_tol = 0.001
step = 0.0001
grad = g(point, *func_args)
f_old = f(np.copy(point), *func_args)
f_new = f(np.copy(point) - step * grad, *func_args)
assert(f_old > f_new)
track, flag = (mt_alg.forward_tracking(
point, step, f_old, f_new, grad,
const_forward, forward_tol, f,
func_args))
assert(flag == False)
for j in range(1, len(track)):
assert(track[j][0] == step)
step = step * const_forward
assert(track[j][1] < track[j-1][1])
def test_3():
"""Test for mt_alg.backward_tracking() - back_tol is met"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
point = np.random.uniform(0, 1, (d, ))
const_back = 0.9
back_tol = 0.4
step = 0.9
grad = g(point, *func_args)
f_old = f(np.copy(point), *func_args)
f_new = f(np.copy(point) - step * grad, *func_args)
assert(f_old < f_new)
track = (mt_alg.backward_tracking
(point, step, f_old, f_new,
grad, const_back, back_tol,
f, func_args))
assert(track[0][0] == 0)
assert(track[0][1] == f_old)
assert(track[1][0] == step)
assert(track[1][1] == f_new)
def test_4():
"""Test for mt_alg.backward_tracking() - back tol is not met"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
point = np.random.uniform(0, 1, (d, ))
const_back = 0.9
back_tol = 0.000001
step = 1
grad = g(point, *func_args)
f_old = f(np.copy(point), *func_args)
f_new = f(np.copy(point) - step * grad, *func_args)
assert(f_old < f_new)
track = (mt_alg.backward_tracking
(point, step, f_old, f_new,
grad, const_back, back_tol,
f, func_args))
assert(track[0][0] == 0)
assert(track[0][1] == f_old)
for j in range(1, len(track)):
assert(track[j][0] == step)
step = step * const_back
if j < len(track) - 1:
assert(track[0][1] < track[j][1])
else:
assert(track[j][1] < track[0][1])
def test_5():
"""Checks computation in mt_alg.compute_coeffs()"""
track_y = np.array([100, 200, 50])
track_t = np.array([0, 1, 0.5])
opt_t = mt_alg.compute_coeffs(track_y, track_t)
OLS_polyfit = np.polyfit(track_t, track_y, 2)
check = -OLS_polyfit[1] / (2 * OLS_polyfit[0])
assert(np.all(np.round(check, 5) == np.round(opt_t, 5)))
def test_6():
"""
Test for mt_alg.combine_tracking() - check that correct step size is
returned when forward_tol is not met.
"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
point = np.random.uniform(0, 1, (d, ))
const_back = 0.9
const_forward = 1.1
forward_tol = 1000000000
back_tol = 0.000001
step = 0.0001
f_old = f(np.copy(point), *func_args)
grad = g(point, *func_args)
opt_t = (mt_alg.combine_tracking
(point, f_old, grad, step,
const_back, back_tol,
const_forward, forward_tol,
f, func_args))
assert(opt_t >= 0)
upd_point = point - opt_t * grad
assert(f(upd_point, *func_args) < f_old)
def test_7():
"""
Test for mt_alg.combine_tracking() - check that correct step size is
returned, when forward_tol is met.
"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
point = np.random.uniform(0, 1, (d, ))
const_back = 0.9
const_forward = 1.1
forward_tol = 0.001
back_tol = 0.000001
step = 0.0001
grad = g(point, *func_args)
f_old = f(np.copy(point), *func_args)
opt_t = (mt_alg.combine_tracking
(point, f_old, grad, step,
const_back, back_tol,
const_forward, forward_tol,
f, func_args))
assert(opt_t >= 0)
upd_point = point - opt_t * grad
assert(f(upd_point, *func_args) < f_old)
def test_8():
"""
Test for mt_alg.combine_tracking() - check that correct step size is
returned, when back_tol is met.
"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
point = np.random.uniform(0, 1, (d, ))
const_back = 0.9
const_forward = 1.1
back_tol = 0.4
forward_tol = 100000000
step = 0.9
grad = g(point, *func_args)
f_old = f(np.copy(point), *func_args)
opt_t = (mt_alg.combine_tracking
(point, f_old, grad, step,
const_back, back_tol,
const_forward, forward_tol,
f, func_args))
assert(opt_t == 0)
upd_point = point - opt_t * grad
assert(f(upd_point, *func_args) == f_old)
def test_9():
"""
Test for mt_alg.combine_tracking() - check that correct step size is
returned, when back_tol is not met.
"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
point = np.random.uniform(0, 1, (d, ))
const_back = 0.9
const_forward = 1.1
back_tol = 0.000001
forward_tol = 100000000
step = 1
grad = g(point, *func_args)
f_old = f(np.copy(point), *func_args)
opt_t = (mt_alg.combine_tracking
(point, f_old, grad, step,
const_back, back_tol,
const_forward, forward_tol,
f, func_args))
assert(opt_t >= 0)
upd_point = point - opt_t * grad
assert(f(upd_point, *func_args) < f_old)
def test_10():
"""Test that mt_alg.arrange_track_y_t produces expected outputs"""
track = np.array([[0, 100],
[1, 80],
[2, 160],
[4, 40],
[8, 20],
[16, 90]])
track_method = 'Forward'
track_y, track_t = mt_alg.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([40, 20, 90])))
assert(np.all(track_t == np.array([4, 8, 16])))
def test_11():
"""Test that mt_alg.arrange_track_y_t produces expected outputs"""
track = np.array([[0, 100],
[1, 80],
[2, 70],
[4, 90]])
track_method = 'Forward'
track_y, track_t = mt_alg.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([80, 70, 90])))
assert(np.all(track_t == np.array([1, 2, 4])))
def test_12():
"""Test that mt_alg.arrange_track_y_t produces expected outputs"""
track = np.array([[0, 100],
[1, 120],
[0.5, 110],
[0.25, 90]])
track_method = 'Backward'
track_y, track_t = mt_alg.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([100, 90, 110])))
assert(np.all(track_t == np.array([0, 0.25, 0.5])))
def test_13():
"""
Test for mt_alg.check_func_val_coeffs() when func_val < track_y[1].
"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
step = 0.00001
point = np.array([0.5525204, 0.8256308, 0.5034502, 0.68755988,
0.75954891, 0.64230399, 0.38500431, 0.0801039,
0.80748984, 0.81147401])
grad = g(point, *func_args)
f_old = f(np.copy(point), *func_args)
f_new = f(np.copy(point) - step * grad, *func_args)
assert(f_old > f_new)
forward_tol = 100000000
const_forward = 1.1
track_method = 'Forward'
track, flag = (mt_alg.forward_tracking
(point, step, f_old, f_new, grad,
const_forward, forward_tol, f, func_args))
opt_t = mt_alg.check_func_val_coeffs(track, track_method, point, grad, f,
func_args)
assert(f(point - opt_t * grad, *func_args) < np.min(track[:, 1]))
def test_14():
"""
Test for mt_alg.check_func_val_coeffs() when func_val > track_y[1].
"""
np.random.seed(34272212)
f = mt_obj.sog_function
g = mt_obj.sog_gradient
d = 20
P = 10
lambda_1 = 1
lambda_2 = 4
sigma_sq = 0.8
store_x0, matrix_combined, store_c = (mt_obj.function_parameters_sog
(P, d, lambda_1, lambda_2))
func_args = P, sigma_sq, store_x0, matrix_combined, store_c
point = np.random.uniform(0,1,(d,))
f_old = f(point, *func_args)
grad = g(point, *func_args)
step = 0.1
const_forward = 1.5
forward_tol = 1000000000
f_new = f(np.copy(point) - step * grad, *func_args)
assert(f_old > f_new)
track_method = 'Forward'
track, flag = (mt_alg.forward_tracking
(point, step, f_old, f_new, grad,
const_forward, forward_tol, f, func_args))
opt_t = mt_alg.check_func_val_coeffs(track, track_method, point, grad, f,
func_args)
pos = np.argmin(track[:, 1])
step_length = track[pos][0]
assert(step_length == opt_t)
|
python
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from logging import getLogger
import os
import sys
from .main import main as main_main
from .. import CondaError
from .._vendor.auxlib.ish import dals
log = getLogger(__name__)
def pip_installed_post_parse_hook(args, p):
if args.cmd not in ('init', 'info'):
raise CondaError(dals("""
Conda has not been initialized.
To enable full conda functionality, please run 'conda init'.
For additional information, see 'conda init --help'.
"""))
def main(*args, **kwargs):
os.environ[str('CONDA_PIP_UNINITIALIZED')] = str('true')
kwargs['post_parse_hook'] = pip_installed_post_parse_hook
return main_main(*args, **kwargs)
if __name__ == '__main__':
sys.exit(main())
|
python
|
with open('input') as fp:
grid = [line[:-1] for line in fp]
grid_width = len(grid[-1])
x, y = (0, 0)
n_trees = 0
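# The map repeats infinitely to the right, so only the column is wrapped with
# x % grid_width (e.g. for a 31-column grid, x == 33 reads column 2); the slope
# is "right 3, down 1" per step.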
while y < len(grid):
if grid[y][x % grid_width] == '#':
n_trees += 1
x += 3
y += 1
print(n_trees, "trees")
|
python
|
from __future__ import absolute_import
import sys, os
from subprocess import Popen, PIPE, check_output
import socket
from ..seqToolManager import seqToolManager, FeatureComputerException
from .windowHHblits import WindowHHblits
from utils import myMakeDir, tryToRemove #utils is at the root of the package
class HHBlitsManager(seqToolManager):
BAD_SCORE_CONSERVATION="-1048576"
def __init__(self, seqsManager, outPath, winSize,
hhBlitsCMD_template="%(hhBlitsBinPath)s/hhblits -i %(fastaInFname)s -n 4 -d %(hhblitsDB)s "+
"-oa3m %(aligsName)s -cpu %(psiBlastNThrs)d -ohhm %(profileNameRaw)s -o /dev/null"):
seqToolManager.__init__(self, seqsManager, outPath, winSize)
'''
@param seqsManager: ..manageSeqs.seqsManager.SeqsManager
@param outPath: str. path where hhblits results will be saved
@param winSize: int. The size of sliding window
'''
# The following attributes are inherited from ../../Config:
# self.hhBlitsBinPath
# self.hhBlitsDB
# self.psiBlastNThrs
self.hhBlitsOut= myMakeDir(self.outPath,"hhBlits")
self.hhBlitsRaw= myMakeDir(self.hhBlitsOut,"rawHhBlits")
self.hhBlitsProc= myMakeDir(self.hhBlitsOut,"procHhBlits")
self.hhBlitsPathWindowed= myMakeDir(self.hhBlitsOut,"windowedHhBlits/wSize"+str(winSize))
self.hhBlitsCMD_template= hhBlitsCMD_template
def getFinalPath(self):
'''
returns path where final results (win hhBlits) are saved
@return self.hhBlitsPathWindowed: str
'''
return self.hhBlitsPathWindowed
def getFNames(self, prefixExtended):
'''
Returns a dict that contains the fnames that will be used by hhblits
@param prefixExtended. prefix for output fnames. They are formed as follows: prefix+chainType+chainId+b/u
@return Dict {"psiblast":(psiblastOutName, ), "pssm":(pssmOutNameRaw, pssmOutNameProc), "pssmWindow":(pssmWindowedOutName,)}
Processed pssm and pssmWindow are the ones that will be used for classification.
'''
hhBlitsAligName= os.path.join( self.hhBlitsRaw, prefixExtended+".a3m")
rawHhblits= os.path.join( self.hhBlitsRaw, prefixExtended+".ohhm")
procHhblits= os.path.join(self.hhBlitsProc, prefixExtended+".ohhm")
hhblitsWindowedOutName= os.path.join(self.hhBlitsPathWindowed, prefixExtended+".wsize"+str(self.winSize)+".ohhm")
fNames= { "hhBlitsAligName":(hhBlitsAligName,),"hhBlitsProfiles":(rawHhblits, procHhblits),
"hhBlitsProfilesWindow":(hhblitsWindowedOutName,)}
return fNames
def compute(self, prefixExtended):
'''
Computes hhblits for the sequence associated with prefixExtended as an unambiguous id and included in
self.seqsManager
@param prefixExtended: str. unambiguous id of the sequence that will be the prefix of output names. Must be included
in self.seqsManager
@return (aligsName, profileNameProc, winProfileOutName)
aligsName: str
profileNameProc: str
winProfileOutName: str
'''
prefix, chainType, chainId, __= prefixExtended.split("_")
seqStr, fastaInFname= self.seqsManager.getSeq(chainType, chainId)
fNames= self.getFNames( prefixExtended)
aligsName= fNames["hhBlitsAligName"][0]
profileNameRaw, profileNameProc= fNames["hhBlitsProfiles"]
winProfileOutName= fNames["hhBlitsProfilesWindow"][0]
if self.checkAlreayComputed(prefixExtended):
print("hhblits already computed for %s"%prefixExtended)
return aligsName, profileNameProc, winProfileOutName
# run hhblits
print("lauching hhblits over %s"%prefixExtended)
self.launchHhblits( fastaInFname, aligsName, profileNameRaw)
#Process the hhblits profile
self.processHhblits( seqStr, prefixExtended, profileNameRaw, profileNameProc)
#Compute windows
self.makeWindowedPSSMHhblits( profileNameProc, winProfileOutName)
return aligsName, profileNameProc, winProfileOutName
def launchHhblits(self, fastaInFname, aligsName, profileNameRaw):
'''
Launches hhblits command with fastaInFname as input file, aligsName as the output file that will
contain the aligments and profileNameRaw as the output file that will contain the profile.
@param fastaInFname: str. Path to fasta file where sequence is saved
@param aligsName: str. Path to results file where aligments will be saved
@param profileNameRaw: str. Path to results file where profile will be saved
'''
if os.path.isfile(profileNameRaw) and int(check_output('wc -l {}'.format(profileNameRaw), shell=True).split()[0])> 11:
print("hhblits raw files alredy computed")
return
hhBlitsBinPath= self.hhBlitsBinPath
hhblitsDB= self.hhBlitsDB
psiBlastNThrs= self.psiBlastNThrs if socket.gethostname()!="servet" else 1
hhblitsCMD = self.hhBlitsCMD_template%locals()
hhblitsCMD.replace("_*", "_\\*")
print(hhblitsCMD)
process= Popen( hhblitsCMD, shell=True, stdout=PIPE, stderr=PIPE)
processOut= process.communicate()
#Check for failure
if len(processOut[1])>0 and "Error" in processOut[1]: # "No hits found" errors are dealt with in processHhblits
print("Error computing hhblits. Caught stdin/stderr:\n",processOut[0],processOut[1])
raise FeatureComputerException("hhblits was not able to compute profile")
def processHhblits(self, seq, prefixExtended, profileNameRaw, profileNameProc):
'''
Reads hhblits profile output file and writes another one with tabulated format, headers and
some error checking.
@param: seq: str. Sequence of the chain
@param prefixExtended: str. unambiguous id of the sequence that will be the prefix of output names
@param profileNameRaw: str. Path to profiles results
@param profileNameProc: str. Path where formated results will be saved.
'''
try:
hhBlitsData = self.loadHhblits(profileNameRaw)
except IOError:
hhBlitsData= [ " ".join([HHBlitsManager.BAD_SCORE_CONSERVATION for i in range(31)]) for i in range(len(seq))]
prefix, chainType, chainId, __= prefixExtended.split("_")
try:
outFile= open(profileNameProc,"w")
outFile.write("chainId seqIndex structResId resName "+ "hhblits "*31+"\n")
for i, (hhBlitsArrayJoined,letter) in enumerate(zip(hhBlitsData,seq)):
structIndex= self.seqsManager.seqToStructIndex(chainType, chainId, i, asString= True)
if self.filterOutLabels and structIndex[-1].isalpha():
continue
outFile.write("%s %d %s %s "%(chainId, i, structIndex, letter)+ hhBlitsArrayJoined +"\n")
outFile.close()
except (KeyboardInterrupt, Exception):
print("Exception happend computing %s"%profileNameProc)
tryToRemove(profileNameProc)
raise
def loadHhblits(self, fname):
'''
Loads a hhblits profile file
@param fname: str. Path to hhblits profile file.
@return list of strings. ["row0_hhblits_values","row1_hhblits_values"...]
'''
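# The parser below assumes the .ohhm profile lists each residue over three lines after
# the '#' header block: a 23-token line (residue, index, 20 emission scores, trailing
# index) followed by a line of transition/Neff values and a blank separator; '*'
# (no score) entries are mapped to -1.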
scores=[]
begin=False
count=0
with open(fname) as f:
for line in f:
if line.startswith("#"):
begin=True
continue
if begin==True:
count+=1
if count==4:
break
for i,line in enumerate(f):
if line.startswith("//"):
break
lineArray= line.split()
nElems= len(lineArray)
if i%3==0:
lineArray= lineArray[2:]
if nElems!=23:
raise ValueError("Bad format in hhblits file %s"%fname)
scores.append(lineArray)
elif i%3==1:
scores[-1]+= lineArray
scores[-1]= " ".join([ elem if elem!="*" else "-1" for elem in scores[-1] ])
return scores
def makeWindowedPSSMHhblits(self, profileNameProc, winProfileOutName):
'''
Computes sliding windows for a given profileNameProc.
@param profileNameProc: str. Path to processed hhblits profile file
@param winProfileOutName: str. Path to windowed results.
'''
try:
WindowHHblits(self.winSize).compute(profileNameProc, winProfileOutName)
except (KeyboardInterrupt, Exception):
print("Exception happend computing %s"%winProfileOutName)
tryToRemove(winProfileOutName)
raise
def test():
fname="/home/rsanchez/Tesis/rriPredMethod/dependencies/bioinformaticTools/hh-Tools/seqExample.ohhm"
from computeFeatures.seqStep.manageSeqs.seqsManager import SeqsManager
seqManag= SeqsManager("rFname", "lFname", computedFeatsRootDir= ".")
hhblitsObj= HHBlitsManager( seqsManager= seqManag, outPath=".", winSize=11)
hhblitsObj.loadHhblits(fname)
if __name__=="__main__":
test()
|
python
|
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def removeElements(self, head: ListNode, val: int) -> ListNode:
# edge case - first remove matching nodes at the head of the list
if not head:
return head
while head.val == val:
head = head.next
if not head:
return head
prev, curr = head, head.next
while curr:
if curr.val == val:
prev.next = curr.next
else:
prev = prev.next
curr = curr.next
return head
if __name__ == "__main__":
a = ListNode(1)
b = ListNode(2)
c = ListNode(3)
d = ListNode(2)
e = ListNode(5)
a.next = b
b.next = c
c.next = d
d.next = e
head = Solution().removeElements(a, 1)
while head:
print(head.val)
head = head.next
|
python
|
# Export individual functions
# Copy
from .scopy import scopy
from .dcopy import dcopy
from .ccopy import ccopy
from .zcopy import zcopy
# Swap
from .sswap import sswap
from .dswap import dswap
from .cswap import cswap
from .zswap import zswap
# Scaling
from .sscal import sscal
from .dscal import dscal
from .cscal import cscal
from .csscal import csscal
from .zscal import zscal
from .zdscal import zdscal
# Scaling plus vector
from .saxpy import saxpy
from .daxpy import daxpy
from .caxpy import caxpy
from .zaxpy import zaxpy
# Absolute values of components
from .scabs1 import scabs1
from .dcabs1 import dcabs1
# Sum of absolute values
from .sasum import sasum
from .dasum import dasum
# Sum of absolute values of components
from .scasum import scasum
from .dzasum import dzasum
# Dot products
from .sdot import sdot
from .dsdot import dsdot
from .ddot import ddot
from .cdotu import cdotu
from .zdotu import zdotu
# Complex dot products
from .cdotc import cdotc
from .zdotc import zdotc
# Dot product plus scalar
from .sdsdot import sdsdot
# Euclidean norm
from .snrm2 import snrm2
from .dnrm2 import dnrm2
from .scnrm2 import scnrm2
from .dznrm2 import dznrm2
|
python
|
import torch
import trtorch
precision = 'fp16'
ssd_model = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd', model_math=precision)
input_shapes = [1, 3, 300, 300]
model = ssd_model.eval().cuda()
scripted_model = torch.jit.script(model)
compile_settings = {
"input_shapes": [input_shapes],
"op_precision": torch.float16
}
trt_ts_module = trtorch.compile(scripted_model, compile_settings)
torch.jit.save(trt_ts_module, 'ssd.jit')
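# Minimal usage sketch (assuming a CUDA device is available): the saved TorchScript
# module can be reloaded and called like any scripted model; with op_precision set to
# torch.float16 the input tensor may need to be cast to half precision, e.g.:
#   trt_model = torch.jit.load('ssd.jit')
#   detections = trt_model(torch.randn(1, 3, 300, 300).half().cuda())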
|
python
|
# Main file for the PwnedCheck distribution.
# This file retrieves the password, calls the check_pwd module, and checks
# whether the password has been breached against the database at
# https://haveibeenpwned.com/Passwords
import sys
import logging
from getpass import getpass
from checkpwd.check_pwd import check_pwd
logging.basicConfig(level=logging.INFO, format="[%(levelname)s] %(message)s")
log = logging.getLogger(__name__)
def splash():
"""Splash `pwnedcheck` logo and information."""
print(" _____________________________________________________ ")
print("| ____ _ _ ____ _ |")
print("| / ___| |__ ___ ___| | _| _ \__ ____| | |")
print("| | | | '_ \ / _ \/ __| |/ / |_) \ \ /\ / / _` | |")
print("| | |___| | | | __/ (__| <| __/ \ V V / (_| | |")
print("| \____|_| |_|\___|\___|_|\_\_| \_/\_/ \__,_| |")
print("| |")
print("| Author: Tanjona R. Rabemananjara |")
print("| URL: https://radonirinaunimi.github.io/pwnd-check/ |")
print("|_____________________________________________________|")
def main():
"""Function that fetchs the password given by the user from the command
line using `getpass`. The password is then checked on `HaveIBeenPwned`.
"""
splash()
print("\nEnter your password below.")
pwd = getpass()
try:
# Check the pwd and add the values to some variables
hashed_pwd, nb_match = check_pwd(pwd)
# Print the result
if nb_match:
print(f"The password occurs {nb_match} times (hash: {hashed_pwd})")
else:
print("Your password was not found")
except UnicodeError as errormsg:
log.warning(f"Your password could not be checked: {errormsg}")
|
python
|
"""
================================
Symbolic Aggregate approXimation
================================
Binning continuous data into intervals can be seen as an approximation that
reduces noise and captures the trend of a time series. The Symbolic Aggregate
approXimation (SAX) algorithm bins continuous time series into intervals,
transforming independently each time series (a sequence of floats) into a
sequence of symbols, usually letters. This example illustrates the
transformation.
It is implemented as
:class:`pyts.approximation.SymbolicAggregateApproximation`.
"""
# Author: Johann Faouzi <[email protected]>
# License: BSD-3-Clause
import numpy as np
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
from scipy.stats import norm
from pyts.approximation import SymbolicAggregateApproximation
# Parameters
n_samples, n_timestamps = 100, 24
# Toy dataset
rng = np.random.RandomState(41)
X = rng.randn(n_samples, n_timestamps)
# SAX transformation
n_bins = 3
sax = SymbolicAggregateApproximation(n_bins=n_bins, strategy='normal')
X_sax = sax.fit_transform(X)
# Compute gaussian bins
bins = norm.ppf(np.linspace(0, 1, n_bins + 1)[1:-1])
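# With the 'normal' strategy the bin edges are standard-normal quantiles, so each of
# the n_bins symbols is roughly equally likely for standardized data; for n_bins = 3
# the two inner edges are approximately -0.43 and 0.43.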
# Show the results for the first time series
bottom_bool = np.r_[True, X_sax[0, 1:] > X_sax[0, :-1]]
plt.figure(figsize=(6, 4))
plt.plot(X[0], 'o--', label='Original')
for x, y, s, bottom in zip(range(n_timestamps), X[0], X_sax[0], bottom_bool):
va = 'bottom' if bottom else 'top'
plt.text(x, y, s, ha='center', va=va, fontsize=14, color='#ff7f0e')
plt.hlines(bins, 0, n_timestamps, color='g', linestyles='--', linewidth=0.5)
sax_legend = mlines.Line2D([], [], color='#ff7f0e', marker='*',
label='SAX - {0} bins'.format(n_bins))
first_legend = plt.legend(handles=[sax_legend], fontsize=8, loc=(0.76, 0.86))
ax = plt.gca().add_artist(first_legend)
plt.legend(loc=(0.81, 0.93), fontsize=8)
plt.xlabel('Time', fontsize=14)
plt.title('Symbolic Aggregate approXimation', fontsize=16)
plt.show()
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-10 01:54
from __future__ import unicode_literals
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cxp_v1', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Download',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(storage=django.core.files.storage.FileSystemStorage(location=b'/home/ubuntu/Django/sessionfiles/protected'), upload_to='download')),
],
),
]
|
python
|
# -*- coding: utf-8 -*-
import pytest
from click.testing import CliRunner
from jak.app import main as jak
import jak.crypto_services as cs
@pytest.fixture
def runner():
return CliRunner()
def test_empty(runner):
result = runner.invoke(jak)
assert result.exit_code == 0
assert not result.exception
@pytest.mark.parametrize('version_flag', ['--version', '-v'])
def test_version(runner, version_flag):
result = runner.invoke(jak, [version_flag])
assert not result.exception
assert result.exit_code == 0
assert '(Troubled Toddler)' in result.output.strip()
@pytest.mark.parametrize('cmd, filepath', [
('encrypt', 'filethatdoesnotexist'),
('decrypt', 'filethatdoesnotexist2')])
def test_file_not_found(runner, cmd, filepath):
result = runner.invoke(jak, [cmd, filepath, '-k', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'])
assert 'find the file:' in result.output
def test_encrypt_smoke(runner, tmpdir):
"""This one has proven to be an absolute godsend for finding
weirdness, especially between python versions."""
plaintext_secret = tmpdir.join("secret.txt")
plaintext_secret.write('secret')
runner.invoke(jak,
['encrypt',
plaintext_secret.strpath,
'--key',
'f40ec5d3ef66166720b24b3f8716c2c31ffc6b45295ff72024a45d90e5fddb56'])
assert cs.ENCRYPTED_BY_HEADER in plaintext_secret.read()
def test_decrypt_smoke(runner, tmpdir, monkeypatch):
ciphertext_secret = tmpdir.join("secret.txt")
# This test was leaking backup files.
# The cause was the decorator "attach_jwd" which would
# force the filesystem back into the real world with os.getcwd().
# My attempt at patching os.getcwd had unintended side effects, so instead
# I patched the helper function to force its return to be the file's location.
def mock_getjwd():
return ciphertext_secret.dirpath().strpath
import jak.helpers as jakh
monkeypatch.setattr(jakh, "get_jak_working_directory", mock_getjwd)
ciphertext_secret.write('''- - - Encrypted by jak - - -
SkFLLTAwMHM0jlOUIaTUeVwbfS459sfDJ1SUW9_3wFFcm2rCxTnLvy1N-Ndb
O7t2Vcol566PnyniPGn9IadqwWFNykZdaycRJG7aL8P4pZnb4gnJcp08OLwR
LiFC7wcITbo6l3Q7Lw==''')
runner.invoke(jak,
['decrypt',
ciphertext_secret.strpath,
'--key',
'f40ec5d3ef66166720b24b3f8716c2c31ffc6b45295ff72024a45d90e5fddb56'])
result = ciphertext_secret.read()
assert cs.ENCRYPTED_BY_HEADER not in result
assert result.strip('\n') == 'attack at dawn'
|
python
|
try:
import matplotlib.pyplot as plt
import fuzzycorr.prepro as pp
from pathlib import Path
import numpy as np
import gdal
except ImportError:
print('ImportError: Missing fundamental packages (required: matplotlib, fuzzycorr, pathlib, numpy, gdal).')
cur_dir = Path.cwd()
Path(cur_dir / "rasters").mkdir(exist_ok=True)
raster_meas = pp.PreProCategorization(str(cur_dir / 'rasters') + '/' + 'vali_meas_2013_res5_clipped.tif')
raster_sim = pp.PreProCategorization(str(cur_dir / 'rasters') + '/' + 'vali_hydro_FT_manual_2013_res5_clipped.tif')
n_classes = 12
nb_classes = np.insert(raster_meas.nb_classes(n_classes), 0, -np.inf, axis=0)
nb_classes[-1] = np.inf
raster_meas.categorize_raster(nb_classes, map_out=str(cur_dir / 'rasters') + '/' + 'vali_meas_class_nbreaks.tif', save_ascii=False)
raster_sim.categorize_raster(nb_classes, map_out=str(cur_dir / 'rasters') + '/' + 'vali_hydro_FT_manual_class_nbreaks.tif', save_ascii=False)
|
python
|
"""Support for Vallox ventilation units."""
from __future__ import annotations
from dataclasses import dataclass, field
import ipaddress
import logging
from typing import Any, NamedTuple
from uuid import UUID
from vallox_websocket_api import PROFILE as VALLOX_PROFILE, Vallox
from vallox_websocket_api.exceptions import ValloxApiException
from vallox_websocket_api.vallox import get_uuid as calculate_uuid
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_HOST, CONF_NAME, Platform
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import ConfigType, StateType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
DEFAULT_FAN_SPEED_AWAY,
DEFAULT_FAN_SPEED_BOOST,
DEFAULT_FAN_SPEED_HOME,
DEFAULT_NAME,
DOMAIN,
METRIC_KEY_PROFILE_FAN_SPEED_AWAY,
METRIC_KEY_PROFILE_FAN_SPEED_BOOST,
METRIC_KEY_PROFILE_FAN_SPEED_HOME,
STATE_SCAN_INTERVAL,
STR_TO_VALLOX_PROFILE_SETTABLE,
)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): vol.All(ipaddress.ip_address, cv.string),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
},
),
extra=vol.ALLOW_EXTRA,
)
PLATFORMS: list[str] = [
Platform.SENSOR,
Platform.FAN,
Platform.BINARY_SENSOR,
]
ATTR_PROFILE = "profile"
ATTR_PROFILE_FAN_SPEED = "fan_speed"
SERVICE_SCHEMA_SET_PROFILE = vol.Schema(
{
vol.Required(ATTR_PROFILE): vol.All(
cv.string, vol.In(STR_TO_VALLOX_PROFILE_SETTABLE)
)
}
)
SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED = vol.Schema(
{
vol.Required(ATTR_PROFILE_FAN_SPEED): vol.All(
vol.Coerce(int), vol.Clamp(min=0, max=100)
)
}
)
class ServiceMethodDetails(NamedTuple):
"""Details for SERVICE_TO_METHOD mapping."""
method: str
schema: vol.Schema
SERVICE_SET_PROFILE = "set_profile"
SERVICE_SET_PROFILE_FAN_SPEED_HOME = "set_profile_fan_speed_home"
SERVICE_SET_PROFILE_FAN_SPEED_AWAY = "set_profile_fan_speed_away"
SERVICE_SET_PROFILE_FAN_SPEED_BOOST = "set_profile_fan_speed_boost"
SERVICE_TO_METHOD = {
SERVICE_SET_PROFILE: ServiceMethodDetails(
method="async_set_profile",
schema=SERVICE_SCHEMA_SET_PROFILE,
),
SERVICE_SET_PROFILE_FAN_SPEED_HOME: ServiceMethodDetails(
method="async_set_profile_fan_speed_home",
schema=SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
),
SERVICE_SET_PROFILE_FAN_SPEED_AWAY: ServiceMethodDetails(
method="async_set_profile_fan_speed_away",
schema=SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
),
SERVICE_SET_PROFILE_FAN_SPEED_BOOST: ServiceMethodDetails(
method="async_set_profile_fan_speed_boost",
schema=SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
),
}
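# async_handle (defined on ValloxServiceHandler below) looks up the incoming service
# call in this mapping and forwards the call data as keyword arguments to the named
# coroutine.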
@dataclass
class ValloxState:
"""Describes the current state of the unit."""
metric_cache: dict[str, Any] = field(default_factory=dict)
profile: VALLOX_PROFILE = VALLOX_PROFILE.NONE
def get_metric(self, metric_key: str) -> StateType:
"""Return cached state value."""
if (value := self.metric_cache.get(metric_key)) is None:
return None
if not isinstance(value, (str, int, float)):
return None
return value
def get_uuid(self) -> UUID | None:
"""Return cached UUID value."""
uuid = calculate_uuid(self.metric_cache)
if not isinstance(uuid, UUID):
raise ValueError
return uuid
class ValloxDataUpdateCoordinator(DataUpdateCoordinator):
"""The DataUpdateCoordinator for Vallox."""
data: ValloxState
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the integration from configuration.yaml (DEPRECATED)."""
if DOMAIN not in config:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config[DOMAIN],
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up the client and boot the platforms."""
host = entry.data[CONF_HOST]
name = entry.data[CONF_NAME]
client = Vallox(host)
async def async_update_data() -> ValloxState:
"""Fetch state update."""
_LOGGER.debug("Updating Vallox state cache")
try:
metric_cache = await client.fetch_metrics()
profile = await client.get_profile()
except (OSError, ValloxApiException) as err:
raise UpdateFailed("Error during state cache update") from err
return ValloxState(metric_cache, profile)
coordinator = ValloxDataUpdateCoordinator(
hass,
_LOGGER,
name=f"{name} DataUpdateCoordinator",
update_interval=STATE_SCAN_INTERVAL,
update_method=async_update_data,
)
await coordinator.async_config_entry_first_refresh()
service_handler = ValloxServiceHandler(client, coordinator)
for vallox_service, service_details in SERVICE_TO_METHOD.items():
hass.services.async_register(
DOMAIN,
vallox_service,
service_handler.async_handle,
schema=service_details.schema,
)
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = {
"client": client,
"coordinator": coordinator,
"name": name,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
hass.data[DOMAIN].pop(entry.entry_id)
if hass.data[DOMAIN]:
return unload_ok
for service in SERVICE_TO_METHOD:
hass.services.async_remove(DOMAIN, service)
return unload_ok
class ValloxServiceHandler:
"""Services implementation."""
def __init__(
self, client: Vallox, coordinator: DataUpdateCoordinator[ValloxState]
) -> None:
"""Initialize the proxy."""
self._client = client
self._coordinator = coordinator
async def async_set_profile(self, profile: str = "Home") -> bool:
"""Set the ventilation profile."""
_LOGGER.debug("Setting ventilation profile to: %s", profile)
_LOGGER.warning(
"Attention: The service 'vallox.set_profile' is superseded by the "
"'fan.set_preset_mode' service. It will be removed in the future, please migrate to "
"'fan.set_preset_mode' to prevent breakage"
)
try:
await self._client.set_profile(STR_TO_VALLOX_PROFILE_SETTABLE[profile])
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting ventilation profile: %s", err)
return False
async def async_set_profile_fan_speed_home(
self, fan_speed: int = DEFAULT_FAN_SPEED_HOME
) -> bool:
"""Set the fan speed in percent for the Home profile."""
_LOGGER.debug("Setting Home fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_HOME: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Home profile: %s", err)
return False
async def async_set_profile_fan_speed_away(
self, fan_speed: int = DEFAULT_FAN_SPEED_AWAY
) -> bool:
"""Set the fan speed in percent for the Away profile."""
_LOGGER.debug("Setting Away fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_AWAY: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Away profile: %s", err)
return False
async def async_set_profile_fan_speed_boost(
self, fan_speed: int = DEFAULT_FAN_SPEED_BOOST
) -> bool:
"""Set the fan speed in percent for the Boost profile."""
_LOGGER.debug("Setting Boost fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_BOOST: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Boost profile: %s", err)
return False
async def async_handle(self, call: ServiceCall) -> None:
"""Dispatch a service call."""
service_details = SERVICE_TO_METHOD.get(call.service)
params = call.data.copy()
if service_details is None:
return
if not hasattr(self, service_details.method):
_LOGGER.error("Service not implemented: %s", service_details.method)
return
result = await getattr(self, service_details.method)(**params)
# This state change affects other entities like sensors. Force an immediate update that can
# be observed by all parties involved.
if result:
await self._coordinator.async_request_refresh()
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from math import isnan
from typing import Any, List, Optional, TYPE_CHECKING, Union
from iceberg.exceptions import ValidationException
from .expression import (Expression,
Operation)
from .literals import (BaseLiteral,
Literals)
from .term import BoundTerm, UnboundTerm
from ..types import TypeID
if TYPE_CHECKING:
from iceberg.api import StructLike
class Predicate(Expression):
def __init__(self, op: Operation, term: Union[BoundTerm, UnboundTerm]):
if term is None:
raise ValueError("Term cannot be None")
self.op: Operation = op
self.term: Union[BoundTerm, UnboundTerm] = term
@property
def ref(self):
return self.term.ref
@property
def lit(self):
raise NotImplementedError("Not Implemented for base class")
def __eq__(self, other):
if id(self) == id(other):
return True
elif other is None or not isinstance(other, Predicate):
return False
return self.op == other.op and self.ref == other.ref and self.lit == other.lit
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "Predicate({},{},{})".format(self.op, self.ref, self.lit)
def __str__(self):
if self.op == Operation.IS_NULL:
return "is_null({})".format(self.ref)
elif self.op == Operation.NOT_NULL:
return "not_null({})".format(self.ref)
elif self.op == Operation.LT:
return "less_than({})".format(self.ref)
elif self.op == Operation.LT_EQ:
return "less_than_equal({})".format(self.ref)
elif self.op == Operation.GT:
return "greater_than({})".format(self.ref)
elif self.op == Operation.GT_EQ:
return "greater_than_equal({})".format(self.ref)
elif self.op == Operation.EQ:
return "equal({})".format(self.ref)
elif self.op == Operation.NOT_EQ:
return "not_equal({})".format(self.ref)
else:
return "invalid predicate: operation = {}".format(self.op)
class BoundPredicate(Predicate):
def __init__(self, op: Operation, term: BoundTerm, lit: BaseLiteral = None, literals: List[BaseLiteral] = None,
is_unary_predicate: bool = False, is_literal_predicate: bool = False,
is_set_predicate: bool = False):
self.is_unary_predicate = is_unary_predicate
self.is_literal_predicate = is_literal_predicate
self.is_set_predicate = is_set_predicate
super(BoundPredicate, self).__init__(op, term)
ValidationException.check(sum([is_unary_predicate, is_literal_predicate, is_set_predicate]) == 1,
"Only a single predicate type may be set: %s=%s, %s=%s, %s=%s",
("is_unary_predicate", is_unary_predicate,
"is_literal_predicate", is_literal_predicate,
"is_set_predicate", is_set_predicate))
self._literals: Optional[List[BaseLiteral]] = None
if self.is_unary_predicate:
ValidationException.check(lit is None, "Unary Predicates may not have a literal", ())
elif self.is_literal_predicate:
ValidationException.check(lit is not None, "Literal Predicates must have a literal set", ())
self._literals = [lit] # type: ignore
elif self.is_set_predicate:
ValidationException.check(literals is not None, "Set Predicates must have literals set", ())
self._literals = literals
else:
raise ValueError(f"Unable to instantiate {op} -> (lit={lit}, literal={literals}")
@property
def lit(self) -> Optional[BaseLiteral]:
if self._literals is None or len(self._literals) == 0:
return None
return self._literals[0]
def eval(self, struct: StructLike) -> bool:
ValidationException.check(isinstance(self.term, BoundTerm), "Term must be bound to eval: %s", (self.term))
return self.test(self.term.eval(struct)) # type: ignore
def test(self, struct: StructLike = None, value: Any = None) -> bool:
ValidationException.check(struct is None or value is None, "Either struct or value must be none", ())
if struct is not None:
ValidationException.check(isinstance(self.term, BoundTerm), "Term must be bound to eval: %s", (self.term))
return self.test(value=self.term.eval(struct)) # type: ignore
else:
if self.is_unary_predicate:
return self.test_unary_predicate(value)
elif self.is_literal_predicate:
return self.test_literal_predicate(value)
else:
return self.test_set_predicate(value)
def test_unary_predicate(self, value: Any) -> bool:
if self.op == Operation.IS_NULL:
return value is None
elif self.op == Operation.NOT_NULL:
return value is not None
elif self.op == Operation.IS_NAN:
return isnan(value)
elif self.op == Operation.NOT_NAN:
return not isnan(value)
else:
raise ValueError(f"{self.op} is not a valid unary predicate")
def test_literal_predicate(self, value: Any) -> bool:
if self.lit is None:
raise ValidationException("Literal must not be none", ())
if self.op == Operation.LT:
return value < self.lit.value
elif self.op == Operation.LT_EQ:
return value <= self.lit.value
elif self.op == Operation.GT:
return value > self.lit.value
elif self.op == Operation.GT_EQ:
return value >= self.lit.value
elif self.op == Operation.EQ:
return value == self.lit.value
elif self.op == Operation.NOT_EQ:
return value != self.lit.value
else:
raise ValueError(f"{self.op} is not a valid literal predicate")
def test_set_predicate(self, value: Any) -> bool:
if self._literals is None:
raise ValidationException("Literals must not be none", ())
if self.op == Operation.IN:
return value in self._literals
elif self.op == Operation.NOT_IN:
return value not in self._literals
else:
raise ValueError(f"{self.op} is not a valid set predicate")
class UnboundPredicate(Predicate):
def __init__(self, op, term, value=None, lit=None, values=None, literals=None):
self._literals = None
num_set_args = sum([1 for x in [value, lit, values, literals] if x is not None])
if num_set_args > 1:
raise ValueError(f"Only one of value={value}, lit={lit}, values={values}, literals={literals} may be set")
super(UnboundPredicate, self).__init__(op, term)
if isinstance(value, BaseLiteral):
lit = value
value = None
if value is not None:
self._literals = [Literals.from_(value)]
elif lit is not None:
self._literals = [lit]
elif values is not None:
self._literals = list(map(Literals.from_, values))
elif literals is not None:
self._literals = literals
@property
def literals(self):
return self._literals
@property
def lit(self):
if self.op in [Operation.IN, Operation.NOT_IN]:
raise ValueError(f"{self.op} predicate cannot return a literal")
return None if self.literals is None else self.literals[0]
def negate(self):
return UnboundPredicate(self.op.negate(), self.term, literals=self.literals)
def bind(self, struct, case_sensitive=True):
bound = self.term.bind(struct, case_sensitive=case_sensitive)
if self.literals is None:
return self.bind_unary_operation(bound)
elif self.op in [Operation.IN, Operation.NOT_IN]:
return self.bind_in_operation(bound)
return self.bind_literal_operation(bound)
def bind_unary_operation(self, bound_term: BoundTerm) -> BoundPredicate:
from .expressions import Expressions
if self.op == Operation.IS_NULL:
if bound_term.ref.field.is_required:
return Expressions.always_false()
return BoundPredicate(Operation.IS_NULL, bound_term, is_unary_predicate=True)
elif self.op == Operation.NOT_NULL:
if bound_term.ref.field.is_required:
return Expressions.always_true()
return BoundPredicate(Operation.NOT_NULL, bound_term, is_unary_predicate=True)
elif self.op in [Operation.IS_NAN, Operation.NOT_NAN]:
if not self.floating_type(bound_term.ref.type.type_id):
raise ValidationException(f"{self.op} cannot be used with a non-floating column", ())
return BoundPredicate(self.op, bound_term, is_unary_predicate=True)
raise ValidationException(f"Operation must be in [IS_NULL, NOT_NULL, IS_NAN, NOT_NAN] was:{self.op}", ())
def bind_in_operation(self, bound_term):
from .expressions import Expressions
def convert_literal(lit):
            converted = lit.to(bound_term.type)
ValidationException.check(converted is not None,
"Invalid Value for conversion to type %s: %s (%s)",
(bound_term.type, lit, lit.__class__.__name__))
return converted
        converted_literals = list(filter(lambda x: x != Literals.above_max() and x != Literals.below_min(),
                                         [convert_literal(lit) for lit in self.literals]))
        if len(converted_literals) == 0:
            return Expressions.always_true() if self.op == Operation.NOT_IN else Expressions.always_false()
        literal_set = set(converted_literals)
        if len(literal_set) == 1:
            only_literal = next(iter(literal_set))
            if self.op == Operation.IN:
                return BoundPredicate(Operation.EQ, bound_term, only_literal)
            elif self.op == Operation.NOT_IN:
                return BoundPredicate(Operation.NOT_EQ, bound_term, only_literal)
            else:
                raise ValidationException("Operation must be IN or NOT_IN", ())
return BoundPredicate(self.op, bound_term, literals=literal_set, is_set_predicate=True)
def bind_literal_operation(self, bound_term):
from .expressions import Expressions
lit = self.lit.to(bound_term.type)
ValidationException.check(lit is not None,
"Invalid Value for conversion to type %s: %s (%s)",
(bound_term.type, self.lit, self.lit.__class__.__name__))
if lit == Literals.above_max():
if self.op in [Operation.LT, Operation.LT_EQ, Operation.NOT_EQ]:
return Expressions.always_true()
elif self.op in [Operation.GT, Operation.GT_EQ, Operation.EQ]:
return Expressions.always_false()
elif lit == Literals.below_min():
if self.op in [Operation.LT, Operation.LT_EQ, Operation.NOT_EQ]:
return Expressions.always_false()
elif self.op in [Operation.GT, Operation.GT_EQ, Operation.EQ]:
return Expressions.always_true()
return BoundPredicate(self.op, bound_term, lit=lit, is_literal_predicate=True)
@staticmethod
def floating_type(type_id: TypeID) -> bool:
return type_id in [TypeID.FLOAT, TypeID.DOUBLE]
|
python
|
import numpy as np # type: ignore
import pandas as pd # type: ignore
# imported ML models from scikit-learn
from sklearn.model_selection import (ShuffleSplit, StratifiedShuffleSplit, # type: ignore
TimeSeriesSplit, cross_val_score) # type: ignore
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis # type: ignore
from sklearn.ensemble import (BaggingRegressor, ExtraTreesRegressor, # type: ignore
RandomForestClassifier, ExtraTreesClassifier, # type: ignore
AdaBoostRegressor, AdaBoostClassifier) # type: ignore
from sklearn.linear_model import LinearRegression, LogisticRegression, RidgeCV # type: ignore
from sklearn.svm import LinearSVC, SVR, LinearSVR # type: ignore
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier # type: ignore
# imported specialized tree models from scikit-garden
# from skgarden import RandomForestQuantileRegressor
# helper functions
from ..utils import print_static_rmse, print_dynamic_rmse
import pdb
def run_ensemble_model(X, Y, modeltype='Regression', scoring='', verbose=0):
"""
    Quickly builds and runs multiple models for a clean data set (numeric features only).
"""
seed = 99
if len(X) <= 100000 or X.shape[1] < 50:
NUMS = 50
FOLDS = 3
else:
NUMS = 20
FOLDS = 5
## create Voting models
estimators = []
if modeltype == 'Regression':
if scoring == '':
scoring = 'neg_mean_squared_error'
scv = ShuffleSplit(n_splits=FOLDS, random_state=seed)
model5 = LinearRegression()
results1 = cross_val_score(model5, X, Y, cv=scv, scoring=scoring)
estimators.append(('Linear Model', model5, np.sqrt(abs(results1.mean()))))
model6 = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(
min_samples_leaf=2, max_depth=1, random_state=seed),
n_estimators=NUMS, random_state=seed)
results2 = cross_val_score(model6, X, Y, cv=scv, scoring=scoring)
estimators.append(('Boosting', model6, np.sqrt(abs(results2.mean()))))
model7 = RidgeCV(alphas=np.logspace(-10, -1, 50), cv=scv)
results3 = cross_val_score(model7, X, Y, cv=scv, scoring=scoring)
estimators.append(('Linear Regularization', model7, np.sqrt(abs(results3.mean()))))
## Create an ensemble model ####
# estimators_list = [(tuples[0], tuples[1]) for tuples in estimators] # unused
ensemble = BaggingRegressor(DecisionTreeRegressor(random_state=seed),
n_estimators=NUMS, random_state=seed)
results4 = cross_val_score(ensemble, X, Y, cv=scv, scoring=scoring)
estimators.append(('Bagging', ensemble, np.sqrt(abs(results4.mean()))))
if verbose == 1:
print('\nLinear Model = %0.4f \nBoosting = %0.4f\nRegularization = %0.4f \nBagging = %0.4f' %(
np.sqrt(abs(results1.mean()))/Y.std(), np.sqrt(abs(results2.mean()))/Y.std(),
np.sqrt(abs(results3.mean()))/Y.std(), np.sqrt(abs(results4.mean()))/Y.std()))
besttype = sorted(estimators, key=lambda x: x[2], reverse=False)[0][0]
bestmodel = sorted(estimators, key=lambda x: x[2], reverse=False)[0][1]
bestscore = sorted(estimators, key=lambda x: x[2], reverse=False)[0][2]/Y.std()
if verbose == 1:
print(' Best Model = %s with %0.2f Normalized RMSE score\n' %(besttype,bestscore))
    elif modeltype in ('TimeSeries', 'Time Series', 'Time_Series'):
#### This section is for Time Series Models only ####
if scoring == '':
scoring = 'neg_mean_squared_error'
tscv = TimeSeriesSplit(n_splits=FOLDS)
model5 = SVR(C=0.1, kernel='rbf', degree=2)
results1 = cross_val_score(model5, X, Y, cv=tscv, scoring=scoring)
estimators.append(('SVR', model5, np.sqrt(abs(results1.mean()))))
model6 = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(
min_samples_leaf=2, max_depth=1, random_state=seed),
n_estimators=NUMS, random_state=seed)
results2 = cross_val_score(model6, X, Y, cv=tscv, scoring=scoring)
        estimators.append(('Boosting', model6, np.sqrt(abs(results2.mean()))))
model7 = LinearSVR(random_state=seed)
results3 = cross_val_score(model7, X, Y, cv=tscv, scoring=scoring)
estimators.append(('LinearSVR', model7, np.sqrt(abs(results3.mean()))))
## Create an ensemble model ####
# estimators_list = [(tuples[0], tuples[1]) for tuples in estimators] # unused
ensemble = BaggingRegressor(DecisionTreeRegressor(random_state=seed),
n_estimators=NUMS, random_state=seed)
results4 = cross_val_score(ensemble, X, Y, cv=tscv, scoring=scoring)
estimators.append(('Bagging', ensemble, np.sqrt(abs(results4.mean()))))
print('Running multiple models...')
if verbose == 1:
print(' Instance Based = %0.4f \n Boosting = %0.4f\n Linear Model = %0.4f \n Bagging = %0.4f' %(
np.sqrt(abs(results1.mean()))/Y.std(), np.sqrt(abs(results2.mean()))/Y.std(),
np.sqrt(abs(results3.mean()))/Y.std(), np.sqrt(abs(results4.mean()))/Y.std()))
besttype = sorted(estimators, key=lambda x: x[2], reverse=False)[0][0]
bestmodel = sorted(estimators, key=lambda x: x[2], reverse=False)[0][1]
bestscore = sorted(estimators, key=lambda x: x[2], reverse=False)[0][2]/Y.std()
if verbose == 1:
print('Best Model = %s with %0.2f Normalized RMSE score\n' % (besttype, bestscore))
print('Model Results:')
else:
if scoring == '':
scoring = 'f1'
scv = StratifiedShuffleSplit(n_splits=FOLDS, random_state=seed)
model5 = LogisticRegression(random_state=seed)
results1 = cross_val_score(model5, X, Y, cv=scv, scoring=scoring)
estimators.append(('Logistic Regression', model5, abs(results1.mean())))
model6 = LinearDiscriminantAnalysis()
results2 = cross_val_score(model6, X, Y, cv=scv, scoring=scoring)
estimators.append(('Linear Discriminant', model6, abs(results2.mean())))
model7 = ExtraTreesClassifier(n_estimators=NUMS, min_samples_leaf=2, random_state=seed)
results3 = cross_val_score(model7, X, Y, cv=scv, scoring=scoring)
estimators.append(('Bagging', model7, abs(results3.mean())))
## Create an ensemble model ####
# estimators_list = [(tuples[0], tuples[1]) for tuples in estimators] # unused
ensemble = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(
random_state=seed, max_depth=1, min_samples_leaf=2),
n_estimators=NUMS, random_state=seed)
results4 = cross_val_score(ensemble, X, Y, cv=scv, scoring=scoring)
estimators.append(('Boosting', ensemble, abs(results4.mean())))
if verbose == 1:
print('\nLogistic Regression = %0.4f \nLinear Discriminant = %0.4f \nBagging = %0.4f \nBoosting = %0.4f' %
(abs(results1.mean()), abs(results2.mean()), abs(results3.mean()), abs(results4.mean())))
besttype = sorted(estimators, key=lambda x: x[2], reverse=True)[0][0]
bestmodel = sorted(estimators, key=lambda x: x[2], reverse=True)[0][1]
bestscore = sorted(estimators, key=lambda x: x[2], reverse=True)[0][2]
if verbose == 1:
print(' Best Model = %s with %0.2f %s score\n' % (besttype, bestscore, scoring))
return bestmodel, bestscore, besttype
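# --- Hedged usage sketch (illustrative only, not part of the original module) ---
# Minimal example of calling run_ensemble_model on a synthetic regression problem.
# The demo data below is made up; the function only assumes a numeric X with a
# .shape attribute and a numeric Y with a .std() method.
#
# from sklearn.datasets import make_regression
# X_demo, y_demo = make_regression(n_samples=500, n_features=8, noise=0.5, random_state=0)
# X_demo, y_demo = pd.DataFrame(X_demo), pd.Series(y_demo)
# best_model, best_score, best_type = run_ensemble_model(X_demo, y_demo,
#                                                        modeltype='Regression', verbose=1)
# print(best_type, best_score)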
|
python
|
#!/usr/bin/env python
__author__ = 'Florian Hase'
#=======================================================================
from DatabaseHandler.PickleWriters.db_writer import DB_Writer
|
python
|
#
# @lc app=leetcode id=383 lang=python
#
# [383] Ransom Note
#
# https://leetcode.com/problems/ransom-note/description/
#
# algorithms
# Easy (49.29%)
# Total Accepted: 107.4K
# Total Submissions: 216.8K
# Testcase Example: '"a"\n"b"'
#
#
# Given an arbitrary ransom note string and another string containing letters
# from all the magazines, write a function that will return true if the ransom
# note can be constructed from the magazines ; otherwise, it will return
# false.
#
#
# Each letter in the magazine string can only be used once in your ransom
# note.
#
#
# Note:
# You may assume that both strings contain only lowercase letters.
#
#
#
# canConstruct("a", "b") -> false
# canConstruct("aa", "ab") -> false
# canConstruct("aa", "aab") -> true
#
#
#
class Solution(object):
def canConstruct(self, ransomNote, magazine):
"""
:type ransomNote: str
:type magazine: str
:rtype: bool
"""
magazine = list(magazine)
for i in ransomNote:
if i in magazine:
magazine.remove(i)
else:
return False
return True
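# --- Hedged alternative sketch (added for illustration, not part of the original
# submission): the same check in O(len(ransomNote) + len(magazine)) time using
# collections.Counter instead of repeated list.remove() calls.
#
# from collections import Counter
#
# def can_construct_counter(ransom_note, magazine):
#     need = Counter(ransom_note)
#     have = Counter(magazine)
#     return all(have[ch] >= cnt for ch, cnt in need.items())
#
# assert can_construct_counter("aa", "aab") is True
# assert can_construct_counter("aa", "ab") is False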
|
python
|
"""
Guidelines from whitehouse.gov/openingamerica/
SYMPTOMS:
- Downward Trajectory of Flu-like illnesses
AND
- Downward Trajectory of Covid symptoms 14 day period
CASES:
- Downward Trajectory of documented cases within 14 day period
OR
- Downward Trajectory of positive tests within 14 days
(flat or increasing volume of tests)
HOSPITALS:
- Treat all patients WITHOUT crisis care
- Robust testing program in place including antibody testing
Data is collected daily at 9PM from Johns Hopkins University
- https://github.com/CSSEGISandData/COVID-19
- Data is assumed to be accurate. Confirmed cases include presumptive
positive cases
- A positive test counts as an active case
- Only data from the contiguous 48 states, Alaska, Hawaii, and D.C. is parsed
and calculated. Territories are not included in this calculation.
"""
import requests, time, io, pandas, numpy, json, smtplib, ssl, csv, schedule
from datetime import datetime, date, timedelta
from ftplib import FTP
import firebase_admin
from firebase_admin import credentials, firestore
from state_code import state_codes
from hospital_capacity import hospital_capacity
class DataCollect():
"""
Main data collection class.
Contains methods used to collect and aggregate data.
"""
def __init__(self):
self.data_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/{}.csv"
self.set_data()
print("Script is now running. Waiting to collect and upload data...")
self.upload_data_or_email()
schedule.every().day.at("00:01").do(self.set_data)
schedule.every().day.at("00:02").do(self.upload_data_or_email)
self.run_automated_tasks()
def set_data(self):
"""
Sets variables (to be run every night at midnight)
"""
self.data = self.get_data()
self.state_cases = self.get_cases_trend()
self.state_hospitals = self.get_hospital_capacity()
self.state_tests = self.get_test_trend()
def get_data(self):
"""
        Requests data from the Johns Hopkins University database for the most
        recent day it can find, then for every day back to 14 days prior
        (in order to calculate the general trajectory)
"""
today_date = date.today()
data = None
while True:
str__today_date = today_date.strftime("%m-%d-%Y")
print("Attempting to download data from %s..." % str__today_date)
request = requests.get(self.data_url.format(str__today_date))
#If we have data for today, use that
if request != None and request.status_code == 200:
data = pandas.read_csv(io.StringIO(request.content.decode('utf-8')))
data['date'] = str__today_date
self.most_recent_date = str__today_date
prior_date = today_date - timedelta(days=1)
#Get data from every day between 14 days ago and today
while prior_date != today_date - timedelta(days=15):
str__prior_date = prior_date.strftime("%m-%d-%Y")
prior_request = requests.get(self.data_url.format(str__prior_date))
print("Attempting to download data from %s..." % str__prior_date)
                    if prior_request is not None and prior_request.status_code == 200:
prior_data = pandas.read_csv(io.StringIO(prior_request.content.decode('utf-8')))
prior_data['date'] = str__prior_date
data = pandas.concat([data, prior_data], ignore_index=True)
else:
print("Couldn't find data for %s, skipping..." % str__prior_date)
prior_date = prior_date - timedelta(days=1)
break
else:
print("Couldn't find data for %s, attempting previous day..." % str__today_date)
today_date = today_date - timedelta(days=1)
#Convert all dates to datetime
data['date'] = pandas.to_datetime(data['date'])
#Convert dataframe to all lowercase
data.columns = data.columns.str.lower()
data = data.applymap(lambda s:s.lower() if type(s) == str else s)
#Reverse dataframe (so the latest day is at the top)
data = data.iloc[::-1]
print("Data retrieved!")
return data
def get_cases_trend(self):
"""
Calculates which states meet the guideline:
"Downward Trajectory of documented cases within 14 day period"
Grabs active cases for each state over the 14 day period and calculates
slope of those points.
        If the slope is negative, then it meets the guideline.
        If the slope is positive, then it does not meet the guideline.
"""
state_cases = {}
for state in state_codes.keys():
state_data = self.data.loc[self.data['province_state'] == state]
df = pandas.DataFrame(state_data, columns=['active'])
df = df.reset_index(drop=True)
state_cases[state] = self.is_downward_trend(df, 'active')
print("States that meet active case guidelines: \n{}".format(json.dumps(state_cases, indent=2)))
return state_cases
def get_hospital_capacity(self):
"""
Calculates which states meet the guideline:
"Treat all patients WITHOUT crisis care"
Grabs the active cases for each state and calculates
if that number is <= the total hospital capacity of the state.
If the number is <= the capacity, then it meets the guideline.
If the number is > the capacity, then it does not meet the guideline.
"""
state_hospitals = {}
for state in state_codes.keys():
state_data = self.data.loc[self.data['province_state'] == state]
current_active = state_data.iloc[-1]
state_code = state_codes[state]
state_hospitals[state] = bool(current_active['active'] <= hospital_capacity[state_code])
print("States that meet hospital guidelines: \n{}".format(json.dumps(state_hospitals, indent=2)))
return state_hospitals
def get_test_trend(self):
"""
Calculates which states meet the guideline:
"Downward Trajectory of positive tests within 14 days
(flat or increasing volume of tests)"
Grabs the number of people tested and active cases per state and
calculates the slope of both lines.
If (active cases <= 0 && tests >= 0), then it meets the guideline.
If the above expression is not true, then it does not meet the guideline.
"""
state_tests = {}
for state in state_codes.keys():
state_data = self.data.loc[self.data['province_state'] == state]
df = pandas.DataFrame(state_data, columns=['people_tested', 'active'])
df = df.reset_index(drop=True)
state_tests[state] = (self.is_downward_trend(df, 'active') & self.is_upward_trend(df, 'people_tested'))
return state_tests
def get_case_info(self, state):
"""
Returns information about cases over the 14 day period for a given
state. That info is:
- Net change in new cases
- Cases at beginning of 14 days
- Cases at end of 14 days
- Number of people tested
"""
case_info = {}
state_data = self.data.loc[self.data['province_state'] == state.lower()]
df = pandas.DataFrame(state_data, columns=['active', 'confirmed', 'people_tested'])
df = df.reset_index(drop=True)
case_info["beginning"] = df.iloc[0]['active']
case_info["end"] = df.iloc[-1]['active']
case_info["net"] = df.iloc[-1]['active'] - df.iloc[0]['active']
case_info["total"] = df.iloc[-1]['confirmed']
case_info["total_tests"] = df.iloc[-1]['people_tested']
return case_info
def get_slope(self, data, column):
"""
Calculates the slope of a graph given a list of data
"""
slope_data = data.apply(lambda x: numpy.polyfit(data.index, x, 1)[0])
return slope_data[column]
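    # Hedged worked example (illustrative only): for a strictly decreasing column,
    # numpy.polyfit returns a negative slope, e.g.
    #   df = pandas.DataFrame({'active': [30, 20, 10]})
    #   numpy.polyfit(df.index, df['active'], 1)[0]  -> -10.0
    # so is_downward_trend() below returns True for that column.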
def is_downward_trend(self, data, column):
"""
Determines if the trend is downward (slope is negative)
"""
slope = self.get_slope(data, column)
        return slope <= 0
def is_upward_trend(self, data, column):
"""
Determines if the trend is upward (slope is positive)
"""
slope = self.get_slope(data, column)
        return slope >= 0
def compile_data(self):
"""
        Compiles all data into one dict and uploads it to Firestore for the
        frontend to parse
"""
data = { "info": {} }
for state in state_codes.keys():
case_info = self.get_case_info(state)
data["info"][state] = {}
data["info"][state]["state_code"] = state_codes[state]
data["info"][state]["total_hospital_capacity"] = hospital_capacity[state_codes[state]]
data["info"][state]["downward_cases"] = self.state_cases[state]
data["info"][state]["enough_hospital_capacity"] = self.state_hospitals[state]
data["info"][state]["beginning_cases"] = case_info["beginning"]
data["info"][state]["end_cases"] = case_info["end"]
data["info"][state]["net_case_change"] = case_info["net"]
data["info"][state]["total_cases"] = case_info["total"]
data["info"][state]["enough_tests"] = self.state_tests[state]
data["info"][state]["total_tests"] = case_info["total_tests"]
data["info"][state]["should_open"] = (self.state_cases[state] & self.state_hospitals[state] & self.state_tests[state])
data["info"][state]["most_recent_date"] = self.most_recent_date
print("Successfully parsed data into json file. Result:\n")
print(json.dumps(data, indent=2))
#Upload file to firebase
        creds = credentials.Certificate('credentials.json')
        if not firebase_admin._apps:
            # initialize only once; this method runs daily via schedule
            firebase_admin.initialize_app(creds, { 'databaseURL': 'https://should-my-state-open.firebaseio.com' })
try:
database = firestore.client()
collection = database.collection('data')
data['createdAt'] = datetime.now()
collection.document().create(data)
except Exception as e:
print(f"An exception Occurred:\n {e}")
self.exception_message = e
return False
return True
def send_error_email(self):
"""
Sends an error email to [email protected] using provided
credentials if something goes wrong
"""
email_user = ""
email_password = ""
with open('email_credentials', 'r') as email_credentials:
csv_reader = csv.reader(email_credentials, delimiter=',')
for row in csv_reader:
email_user = row[0]
email_password = row[1]
message = """\
Subject: WEB ERROR shouldmystateopen.com
WEB ERROR OCCURRED @ https://shouldmystateopen.com (FTP UPLOAD)
Date: {date} | Time: {time}
Exception message as follows:
{error}
""".format(date=date.today().strftime("%m-%d-%Y"),
time=datetime.now().strftime("%H:%M:%S"),
error=self.exception_message if self.exception_message else "No message provided."
)
ssl._create_default_https_context = ssl._create_unverified_context
with smtplib.SMTP_SSL("smtp.gmail.com", 465) as email_server:
try:
email_server.login(email_user, email_password)
email_server.sendmail(email_user, "[email protected]", message)
self.exception_message = None
except Exception as e:
print("Couldn't send email...\n{}".format(e))
finally:
email_server.quit()
def upload_data_or_email(self):
"""
        Attempts to upload the compiled data to Firestore; sends an error email if the upload fails
"""
if not self.compile_data():
self.send_error_email()
def run_automated_tasks(self):
"""
Runs scheduled tasks
"""
while True:
schedule.run_pending()
time.sleep(1)
collect = DataCollect()
|
python
|
from pymodm import connect, MongoModel, fields
from pymodm.base.fields import MongoBaseField
import pymongo
from datetime import datetime as dt
db_server = "mongodb+srv://AtlasUser:[email protected]"
db_server += ".mongodb.net/ECGServer?retryWrites=true&w=majority"
mongodb_server = connect(db_server)
class Patient(MongoModel):
# Medical Record Number
# Patient Name
# ECG Images as b64 string
# Heart Rate Data
# Datetime timestamps as strftime strings
# Medical Images as b64 string
MRN = fields.IntegerField(primary_key=True)
patient_name = fields.CharField()
ECG_trace = fields.ListField(fields.CharField())
heart_rate = fields.ListField(fields.IntegerField())
receipt_timestamps = fields.ListField(fields.CharField())
medical_image = fields.ListField(fields.CharField())
class PatientTest(MongoModel):
MRN = fields.IntegerField(primary_key=True)
patient_name = fields.CharField()
ECG_trace = fields.ListField(fields.CharField())
heart_rate = fields.ListField(fields.IntegerField())
receipt_timestamps = fields.ListField(fields.CharField())
medical_image = fields.ListField(fields.CharField())
def get_database():
"""Simply returns the mongodb_server object
Returns:
mongodb_server object
"""
return mongodb_server
def clean_database():
"""Deletes all contents of the Patient database
"""
Patient.objects.raw({}).delete()
# from PIL import Image
# # Testing Patient class & Database Connection
# import image_toolbox as tb
# x = PatientTest()
# x.MRN = 1
# x.patient_name = "Anuj Som"
# x.ECG_trace.append(tb.file_to_b64("images/test_image.png"))
# x.heart_rate.append(60)
# x.receipt_timestamps.append(dt.now().strftime("%Y-%m-%d %H:%M:%S"))
# x.save()
|
python
|
from django.urls import path
from account.views import (
index, profile,
LOGIN, LOGOUT, REGISTER,
activate, change_password, update_profile, change_profile_pic
)
from account.api import (
check_username_existing, get_users
)
app_name = 'account'
urlpatterns = [
# API URLs
path('api/check_username/', check_username_existing, name='username_existing'),
path('api/get_users/', get_users, name='get_users'),
# View URLs
path('', index, name='home'),
path('profile/<int:pk>/', profile, name='profile'),
path('login/', LOGIN, name='login'),
path('logout/', LOGOUT, name='logout'),
path('register/', REGISTER, name='register'),
path('update-password/', change_password, name='update-password'),
path('update-profile/', update_profile, name='update-profile'),
path('update-profile-pic', change_profile_pic, name='update-profile-pic'),
# Functional URLs
path('activate/<uidb64>/<token>', activate, name='activate'),
]
|
python
|
import msgpack_numpy
import os
import torch
from collections import defaultdict
from typing import List
import lmdb
import magnum as mn
import numpy as np
from torch.utils.data import Dataset
from tqdm import tqdm
import habitat
from habitat import logger
from habitat.datasets.utils import VocabDict
from habitat.tasks.pickplace.pickplace import RearrangementEpisode
class ObservationsDict(dict):
def pin_memory(self):
for k, v in self.items():
self[k] = v.pin_memory()
return self
def collate_fn(batch):
"""Each sample in batch: (
obs,
prev_actions,
oracle_actions,
inflec_weight,
)
"""
def _pad_helper(t, max_len, fill_val=0):
pad_amount = max_len - t.size(0)
if pad_amount == 0:
return t
pad = torch.full_like(t[0:1], fill_val).expand(pad_amount, *t.size()[1:])
return torch.cat([t, pad], dim=0)
transposed = list(zip(*batch))
observations_batch = list(transposed[1])
next_actions_batch = list(transposed[2])
prev_actions_batch = list(transposed[3])
weights_batch = list(transposed[4])
B = len(prev_actions_batch)
new_observations_batch = defaultdict(list)
for sensor in observations_batch[0]:
for bid in range(B):
new_observations_batch[sensor].append(observations_batch[bid][sensor])
observations_batch = new_observations_batch
max_traj_len = max(ele.size(0) for ele in prev_actions_batch)
for bid in range(B):
for sensor in observations_batch:
observations_batch[sensor][bid] = _pad_helper(
observations_batch[sensor][bid], max_traj_len, fill_val=1.0
)
next_actions_batch[bid] = _pad_helper(next_actions_batch[bid], max_traj_len)
prev_actions_batch[bid] = _pad_helper(prev_actions_batch[bid], max_traj_len)
weights_batch[bid] = _pad_helper(weights_batch[bid], max_traj_len)
for sensor in observations_batch:
observations_batch[sensor] = torch.stack(observations_batch[sensor], dim=1)
next_actions_batch = torch.stack(next_actions_batch, dim=1)
prev_actions_batch = torch.stack(prev_actions_batch, dim=1)
weights_batch = torch.stack(weights_batch, dim=1)
not_done_masks = torch.ones_like(next_actions_batch, dtype=torch.float)
not_done_masks[0] = 0
observations_batch = ObservationsDict(observations_batch)
return (
observations_batch,
prev_actions_batch,
not_done_masks,
next_actions_batch,
weights_batch,
)
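# --- Hedged usage sketch (illustrative only, not part of the original file) ---
# collate_fn above is meant to be handed to a PyTorch DataLoader so that
# variable-length trajectories are padded to a common length per batch. The
# dataset and batch size below are placeholders.
#
# from torch.utils.data import DataLoader
# loader = DataLoader(
#     dataset,            # e.g. a PickPlaceDataset instance (defined below)
#     batch_size=4,
#     shuffle=True,
#     collate_fn=collate_fn,
#     pin_memory=True,    # cooperates with ObservationsDict.pin_memory()
# )
# for obs, prev_actions, not_done_masks, next_actions, weights in loader:
#     ...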
class PickPlaceDataset(Dataset):
"""Pytorch dataset for object rearrangement task for each episode"""
def __init__(self, config, content_scenes=["*"], mode="train", use_iw=False, inflection_weight_coef=1.0):
"""
Args:
env (habitat.Env): Habitat environment
config: Config
mode: 'train'/'val'
"""
scene_split_name = "train"
if content_scenes[0] != "*":
scene_split_name = "_".join(content_scenes)
self.config = config.TASK_CONFIG
self.dataset_path = config.DATASET_PATH.format(split=mode, scene_split=scene_split_name)
self.config.defrost()
self.config.DATASET.CONTENT_SCENES = content_scenes
self.config.freeze()
self.resolution = [self.config.SIMULATOR.RGB_SENSOR.WIDTH, self.config.SIMULATOR.RGB_SENSOR.HEIGHT]
self.possible_actions = config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS
self.total_actions = 0
self.inflections = 0
self.inflection_weight_coef = inflection_weight_coef
if use_iw:
self.inflec_weight = torch.tensor([1.0, inflection_weight_coef])
else:
self.inflec_weight = torch.tensor([1.0, 1.0])
if not self.cache_exists():
"""
for each scene > load scene in memory > save frames for each
episode corresponding to that scene
"""
self.env = habitat.Env(config=self.config)
self.episodes = self.env._dataset.episodes
self.instruction_vocab = self.env._dataset.instruction_vocab
logger.info(
"Dataset cache not found. Saving rgb, seg, depth scene images"
)
logger.info(
"Number of {} episodes: {}".format(mode, len(self.episodes))
)
self.scene_ids = []
self.scene_episode_dict = {}
# dict for storing list of episodes for each scene
for episode in self.episodes:
if episode.scene_id not in self.scene_ids:
self.scene_ids.append(episode.scene_id)
self.scene_episode_dict[episode.scene_id] = [episode]
else:
self.scene_episode_dict[episode.scene_id].append(episode)
self.lmdb_env = lmdb.open(
self.dataset_path,
map_size=int(2e12),
writemap=True,
)
self.count = 0
for scene in tqdm(list(self.scene_episode_dict.keys())):
for episode in tqdm(self.scene_episode_dict[scene]):
self.load_scene(scene, episode)
state_index_queue = []
try:
# TODO: Consider alternative for shortest_paths
state_index_queue.extend(range(0, len(episode.reference_replay) - 1))
except AttributeError as e:
logger.error(e)
self.save_frames(state_index_queue, episode)
print("Inflection weight coef: {}, N: {}, nI: {}".format(self.total_actions / self.inflections, self.total_actions, self.inflections))
logger.info("Rearrangement database ready!")
self.env.close()
else:
logger.info("Dataset cache found.")
self.lmdb_env = lmdb.open(
self.dataset_path,
readonly=True,
lock=False,
)
self.dataset_length = int(self.lmdb_env.begin().stat()["entries"] / 4)
self.lmdb_env.close()
self.lmdb_env = None
def save_frames(
self, state_index_queue: List[int], episode: RearrangementEpisode
) -> None:
r"""
        Writes rgb, depth and instruction observations plus action and weight
        arrays for one episode to LMDB.
"""
next_actions = []
prev_actions = []
observations = {
"rgb": [],
"depth": [],
"instruction": [],
}
reference_replay = episode.reference_replay
instruction = episode.instruction
print("Replay len: {}".format(len(reference_replay)))
for state_index in state_index_queue:
instruction_tokens = np.array(instruction.instruction_tokens)
state = reference_replay[state_index]
position = state.agent_state.position
rotation = state.agent_state.rotation
object_states = state.object_states
sensor_states = state.agent_state.sensor_data
observation = self.env.sim.get_observations_at(
position, rotation, sensor_states, object_states
)
next_state = reference_replay[state_index + 1]
next_action = self.possible_actions.index(next_state.action)
prev_state = reference_replay[state_index]
prev_action = self.possible_actions.index(prev_state.action)
observations["depth"].append(observation["depth"])
observations["rgb"].append(observation["rgb"])
observations["instruction"].append(instruction_tokens)
next_actions.append(next_action)
prev_actions.append(prev_action)
oracle_actions = np.array(next_actions)
inflection_weights = np.concatenate(([1], oracle_actions[1:] != oracle_actions[:-1]))
self.total_actions += inflection_weights.shape[0]
self.inflections += np.sum(inflection_weights)
inflection_weights = self.inflec_weight[torch.from_numpy(inflection_weights)].numpy()
sample_key = "{0:0=6d}".format(self.count)
with self.lmdb_env.begin(write=True) as txn:
txn.put((sample_key + "_obs").encode(), msgpack_numpy.packb(observations, use_bin_type=True))
txn.put((sample_key + "_next_action").encode(), np.array(next_actions).tobytes())
txn.put((sample_key + "_prev_action").encode(), np.array(prev_actions).tobytes())
txn.put((sample_key + "_weights").encode(), inflection_weights.tobytes())
self.count += 1
# images_to_video(images=obs_list, output_dir="demos", video_name="dummy_{}".format(self.count))
def cache_exists(self) -> bool:
if os.path.exists(self.dataset_path):
if os.listdir(self.dataset_path):
return True
else:
os.makedirs(self.dataset_path)
return False
def get_vocab_dict(self) -> VocabDict:
r"""Returns Instruction VocabDicts"""
return self.instruction_vocab
def load_scene(self, scene, episode) -> None:
self.config.defrost()
self.config.SIMULATOR.SCENE = scene
self.config.SIMULATOR.objects = episode.objects
self.config.freeze()
self.env.sim.reconfigure(self.config.SIMULATOR)
def __len__(self) -> int:
return self.dataset_length
def __getitem__(self, idx: int):
r"""Returns batches to trainer.
batch: (rgb, depth, seg)
"""
if self.lmdb_env is None:
self.lmdb_env = lmdb.open(
self.dataset_path,
map_size=int(2e12),
writemap=True,
)
self.lmdb_txn = self.lmdb_env.begin()
self.lmdb_cursor = self.lmdb_txn.cursor()
height, width = int(self.resolution[0]), int(self.resolution[1])
obs_idx = "{0:0=6d}_obs".format(idx)
observations_binary = self.lmdb_cursor.get(obs_idx.encode())
observations = msgpack_numpy.unpackb(observations_binary, raw=False)
for k, v in observations.items():
obs = np.array(observations[k])
observations[k] = torch.from_numpy(obs)
next_action_idx = "{0:0=6d}_next_action".format(idx)
next_action_binary = self.lmdb_cursor.get(next_action_idx.encode())
next_action = np.frombuffer(next_action_binary, dtype="int")
next_action = torch.from_numpy(np.copy(next_action))
prev_action_idx = "{0:0=6d}_prev_action".format(idx)
prev_action_binary = self.lmdb_cursor.get(prev_action_idx.encode())
prev_action = np.frombuffer(prev_action_binary, dtype="int")
prev_action = torch.from_numpy(np.copy(prev_action))
weight_idx = "{0:0=6d}_weights".format(idx)
weight_binary = self.lmdb_cursor.get(weight_idx.encode())
weight = np.frombuffer(weight_binary, dtype="float32")
weight = torch.from_numpy(np.copy(weight))
weight = torch.where(weight != 1.0, self.inflection_weight_coef, 1.0)
return idx, observations, next_action, prev_action, weight
|
python
|
from visualization import plot_binary_grid # used to show results
from wrapper import multi_image # import segmenter for multiple images
if __name__ == "__main__":
# Apply segmenter to default test images.
print("Classifying images...")
masks = multi_image() # uses default test image
print("Complete.")
# Show results.
plot_binary_grid(masks)
|
python
|
# from the sqlalchemy package we can import Column, String, Integer, Date, Sequence
from sqlalchemy import Column, String, Integer, Date, Sequence
# exception classes referenced in the except clauses below
from sqlalchemy.exc import ArgumentError, UnboundExecutionError
# imports Config class from config module
from config import Config
# exception handling using try/except around the FeatureRequestApp class definition.
try:
    # FeatureRequestApp is the class mapped to the 'FeatureRequestApp' table; the required columns are declared as class attributes.
class FeatureRequestApp(Config.base):
"""Simple database model with required columns and table name."""
# A class using Declarative needs a __tablename__ attribute, and one Column which is a primary key
__tablename__ = 'FeatureRequestApp'
featureId = Column('featureId', Integer, Sequence('feature_id_seq'),unique=True,primary_key=True)
title = Column(String(250),unique=True)
description = Column(String(1000))
client = Column(String(100))
clientPriority = Column(Integer())
targetDate = Column(Date())
productArea = Column(String(100))
# __init__ is a special method in Python classes, it is the constructor method for a class
# __init__ is called when ever an object of the class is constructed.
def __init__(self, title, description, client, clientpriority, targetdate, productarea):
self.title = title
self.description = description
self.client = client
self.clientPriority = clientpriority
self.targetDate = targetdate
self.productArea = productarea
    # The declarative_base() base class contains a MetaData object where newly defined Table objects are collected.
    # This object is accessed for MetaData-specific operations, such as issuing CREATE statements for all tables.
Config.base.metadata.create_all(Config.db)
except ArgumentError as argexp:
print('Missing connection string or primary key', argexp)
except UnboundExecutionError as unexp:
print('SQL was attempted without a database connection to execute it on', unexp)
except IndexError as indexerror:
print('Missing Table Name', indexerror)
except TypeError as typeerror:
print('Check Params', typeerror)
except TimeoutError as timeout:
print('Connection TimedOut', timeout)
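# --- Hedged usage sketch (illustrative only, not part of the original module) ---
# Assuming Config.db is the SQLAlchemy Engine passed to create_all() above, a
# session can be opened to insert one feature request. The field values are
# placeholders for demonstration.
#
# from sqlalchemy.orm import sessionmaker
# from datetime import date
#
# Session = sessionmaker(bind=Config.db)
# session = Session()
# session.add(FeatureRequestApp(
#     title='Example request',
#     description='Demo row',
#     client='Client A',
#     clientpriority=1,
#     targetdate=date(2020, 1, 1),
#     productarea='Billing',
# ))
# session.commit()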
|
python
|
from typing import List
import config
import datetime
from email.mime.text import MIMEText
from html.parser import HTMLParser
import email.utils as utils
import logging
import queue
import re
import sys
import threading
from time import strftime
import socket
import feedparser
import yaml
from imap_wrapper import ImapWrapper
class FilterError(IOError):
pass
class TranslationException(Exception):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def item_message_id(feed, item):
msgid = item.get('id', item.link)
if not msgid:
msgid = feed.Name + " / " + item.title + " AT " + item.get('date', 'No date')
msgid = msgid.replace(' ', '_')
msgid = re.sub('[^\x00-\x7f]', '_', msgid)
return msgid
def rss_item_to_email(item, feed):
# Cribbing things from StackOverflow is fun. :)
def strip_html(dat):
class TagStripper(HTMLParser):
def __init__(self):
super().__init__()
self.convert_charrefs = True
self.texts = []
def handle_data(self, t):
self.texts.append(t)
def result(self):
return ''.join(self.texts)
ts = TagStripper()
ts.feed(dat)
ts.close()
return ts.result()
try:
text = '<p>Item Link: <a href="%s">%s</a></p>' % (item.link, item.link)
if 'summary' in item:
text = text + "<br>" + item.summary
email = MIMEText(text, "html")
email['Subject'] = feed.format_subject(subject=strip_html(item.title))
email['From'] = item.get('author', '(Author Not Provided)')
email['Message-Id'] = item_message_id(feed, item)
if 'published' in item:
date = item.published
date_parts = item.published_parsed
elif 'updated' in item:
date = item.updated
date_parts = item.updated_parsed
elif 'created' in item:
date = item.created
date_parts = item.created_parsed
else:
date = None
date_parts = datetime.datetime.now().timetuple()
if date_parts is None:
date_parts = utils.parsedate(strip_html(date))
# RSS feeds may contain parsable dates that aren't allowed in email.
if not (date_parts is None):
date = strftime("%A, %b %d %Y %H:%M:%S %Z", date_parts)
email['Date'] = strip_html(date)
return email
except Exception as e:
raise TranslationException(item) from e
class FeedItem:
def __init__(self, feed, rss_item):
self.feed = feed
self.rss_item = rss_item
self.email = rss_item_to_email(rss_item, feed)
self.message_id = self.email['Message-Id']
class FeedConfig:
def __init__(self, dat, *parent_configs):
def _extract_setting(name):
for obj in [dat, *parent_configs]:
if name in obj:
return obj[name]
raise IndexError(f'Cannot find config value for {name}')
self.Name = dat['Name']
self.URL = dat['URL']
self.folder_template = _extract_setting('FolderTemplate')
self.subject_template = _extract_setting('SubjectTemplate')
def __repr__(self):
return ("{ Name: %s; URL: %s; Folder: %s; Subject: %s }" % (self.Name, self.URL, self.folder_template, self.subject_template))
def quoted_folder(self):
return self.folder_template.format(name=self.Name)
def format_subject(self, subject):
return self.subject_template.format(name=self.Name, subject=subject)
def fetch_feed_items(feed):
l = logging.getLogger(__name__)
l.info("Fetching feed %s", feed.URL)
content = feedparser.parse(feed.URL)
l.info("Done fetching feed %s", feed.URL)
if content.bozo:
l.warning("Feed %s had bozo set for '%s'", feed.URL, content.bozo_exception)
for item in content.entries:
yield FeedItem(feed, item)
def parse_configs(configs):
l = logging.getLogger(__name__)
feed_configs : List[FeedConfig] = []
app_config = {'FolderTemplate': config.feed_folder_template, 'SubjectTemplate': config.subject_template}
for dat in configs:
parent_config = app_config
l.debug("Config data: %s", dat)
for item in filter(lambda p: p != None, yaml.safe_load_all(dat)):
if 'Configuration' in item and 'Items' not in item:
l.debug("Config item: %s", dat)
parent_config = item['Configuration']
elif 'Configuration' in item and 'Items' in item:
parent = item['Configuration']
for feed in item['Items']:
feed_configs.append(FeedConfig(feed, parent, parent_config))
elif 'Items' in item:
for feed in item['Items']:
feed_configs.append(FeedConfig(feed, parent_config))
else:
feed_configs.append(FeedConfig(item, parent_config))
return feed_configs
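# --- Hedged example (illustrative only) of the YAML shape parse_configs accepts.
# Each YAML document may carry a 'Configuration' mapping (overriding
# FolderTemplate / SubjectTemplate for later documents), an 'Items' list of
# feeds, or both. The feed name and URL below are placeholders.
#
#   ---
#   Configuration:
#     FolderTemplate: "RSS/{name}"
#     SubjectTemplate: "[{name}] {subject}"
#   ---
#   Items:
#     - Name: Example Feed
#       URL: https://example.com/feed.xml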
class RssIMAP:
def __init__(self):
pass
def connect_imap(self, hostname, username, password, **kwargs):
self._W = ImapWrapper(hostname, username, password, **kwargs)
self._W.ensure_folder(config.config_mailbox)
def config_data_from_imap(self):
# Don't be lazy about this.
ret = []
for msg in self._W.fetch_messages(config.config_mailbox, 'SUBJECT', 'rss-imap', 'NOT', 'DELETED'):
if msg.is_multipart():
for part in msg.get_payload():
name = part.get_param('Name', '(none)')
if 'Folders' in name:
ret.append(part.get_payload(None, True).decode('UTF-8'))
elif name == '(none)' and part.get_content_type() == 'text/plain':
ret.append(part.get_payload(None, True).decode('UTF-8'))
else:
ret.append(msg.get_payload())
return ret
def get_feed_config_from_imap(self):
the_data = self.config_data_from_imap()
return parse_configs(the_data)
def filter_items(self, folder, items):
"""Filter a list of items to only those that do not exist on the server."""
try:
have_ids = self._W.check_folder_for_message_ids(folder, [item.message_id for item in items])
except:
l = logging.getLogger(__name__)
l.exception("Exception while checking existing items in %s", folder)
try:
have_ids = self._W.check_folder_for_message_ids(folder, [item.message_id for item in items])
except:
l.exception("Second exception while checking existing items in %s; skipping.", folder)
return []
want_items = []
for item in items:
if not (item.message_id.encode('utf-8') in have_ids):
want_items.append(item)
return want_items
def save_item_to_imap(self, item):
l = logging.getLogger(__name__)
l.info('New item "%s" for feed "%s", with message_id "%s"', item.email['Subject'], item.feed.Name, item.message_id)
self._W.append(item.feed.quoted_folder(), item.email)
def save_items_to_imap(self, items):
for item in items:
self.save_item_to_imap(item)
def disconnect(self):
self._W.logout()
if __name__ == '__main__':
config.configure_logging()
# The default is to just hang forever if one of
# the RSS feed servers isn't responding.
socket.setdefaulttimeout(10)
x = RssIMAP()
x.connect_imap(config.hostname, config.username, config.password)
feeds = x.get_feed_config_from_imap()
todo = queue.Queue()
producer_threads = []
def producer(feed):
l = logging.getLogger(__name__)
items = list(fetch_feed_items(feed))
if len(items) > 0:
todo.put((feed, items))
def consumer():
l = logging.getLogger(__name__)
while True:
(feed, items) = todo.get()
            if items is None:
                break
            l.info("Filtering %d items from feed %s", len(items), feed.URL)
            filtered = x.filter_items(feed.quoted_folder(), items)
            l.info("Done filtering feed %s", feed.URL)
            if len(filtered) == 0:
                continue
x.save_items_to_imap(filtered)
l.info("Done saving %d new items from feed %s", len(filtered), feed.URL)
consumer_thread = threading.Thread(target=consumer, name="Consumer")
consumer_thread.start()
for feed in feeds:
thread = threading.Thread(target=producer, name=f"Fetch {feed.URL}", args=(feed,))
thread.start()
producer_threads.append(thread)
for producer in producer_threads:
producer.join()
todo.put((None, None))
consumer_thread.join()
x.disconnect()
|
python
|
from flask.views import MethodView
class APIView(MethodView):
api_version = None
path = None
    @classmethod
    def get_path(cls):
        if cls.path:
            return cls.path
        elif cls.__name__.endswith('View'):
            return cls.__name__[:-4].lower()
        else:
            return cls.__name__
    @classmethod
    def get_rule(cls):
        if cls.api_version is None:
            raise RuntimeError("An API version is required")
        return '/v{}/{}'.format(cls.api_version, cls.get_path())
    @classmethod
    def add_rule_to_app(cls, app, prefix=None):
        rule = cls.get_rule()
        app.add_url_rule(
            (prefix or '') + rule,
            view_func=cls.as_view(rule.strip('/').replace('/', '_').lower())
        )
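# --- Hedged usage sketch (illustrative only, not part of the original module) ---
# A subclass only needs to set api_version (and optionally path); the URL rule
# is derived from the class name. 'StatusView' and the Flask app below are
# placeholders.
#
# from flask import Flask, jsonify
#
# class StatusView(APIView):
#     api_version = 1
#
#     def get(self):
#         return jsonify(ok=True)
#
# app = Flask(__name__)
# StatusView.add_rule_to_app(app)   # registers GET /v1/status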
|
python
|
'''
- Tests for the application factory
'''
from app import create_app
from fastapi import FastAPI
from fastapi.testclient import TestClient
from pytest import fixture
@fixture
def client():
    '''FastAPI test client.'''
app = create_app()
return TestClient(app)
def test_create_app(client):
assert isinstance(create_app(), FastAPI)
def test_home_deve_retornar_200(client):
response = client.get('/')
assert response.status_code == 200
def test_home_deve_retornar_ola_regis(client):
    # response follows the requests library API.
response = client.get('/')
assert response.json() == {'message': 'Ola Regis'}
def test_pessoas_deve_retornar_200_quando_chamar_com_eduardo(client):
response = client.get('/pessoa/eduardo')
assert response.status_code == 200
def test_pessoas_deve_retornar_chamou_eduardo_quando_chamar_com_eduardo(client):
response = client.get('/pessoa/eduardo')
assert response.json() == {'message': 'Você chamou eduardo'}
# def test_busca_por_id_deve_retornar_404(client):
# response = client.get('/id/42')
# assert response.status_code == 404
def test_busca_por_id_1_deve_retornar_404(client):
response = client.get('/id/1')
assert response.status_code == 404
def test_busca_por_id_1_deve_retornar_nao_tem_1(client):
response = client.get('/id/1')
assert response.json() == {'detail': 'Não tem 1'}
def test_busca_por_id_2_deve_retornar_200(client):
response = client.get('/id/2')
assert response.status_code == 200
def test_busca_por_id_2_deve_retornar_regis(client):
response = client.get('/id/2')
assert response.json() == {'name': 'regis'}
def test_inserir_usuario_no_banco_deve_retornar_201(client):
user = {
'id': 1,
'nome': 'Regis',
'idade': 42,
'email': '[email protected]',
}
response = client.post('/inserir/', json=user)
assert response.status_code == 201
def test_inserir_entidade_não_processável_retorna_422(client):
user = {
'nome': 'Regis',
'email': '[email protected]',
}
response = client.post('/inserir/', json=user)
assert response.status_code == 422
def test_pessoas_deve_retornar_200(client):
response = client.get('/pessoas')
assert response.status_code == 200
def test_pessoas_deve_retornar_lista_de_pessoas(client):
response = client.get('/pessoas')
pessoas = [
{"id": 1, "nome": "Regis", "idade": 42, "email": "[email protected]"}
]
assert response.json() == pessoas
def test_get_pessoas_deve_retornar_200(client):
response = client.get('/pessoas/1')
assert response.status_code == 200
def test_get_pessoas_deve_retornar_um_dict(client):
response = client.get('/pessoas/1')
pessoa = {"id": 1, "nome": "Regis", "idade": 42, "email": "[email protected]"}
assert response.json() == pessoa
def test_pessoas_add_deve_retornar_201(client):
pessoa = {"id": 1, "nome": "Regis", "idade": 42, "email": "[email protected]"}
response = client.post('/pessoas/add/', json=pessoa)
assert response.status_code == 201
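# --- Hedged sketch (illustrative only) of the application factory these tests
# assume. Only the '/' and '/id/{id}' routes are shown; the names and data are
# inferred from the assertions above, not taken from the real app module.
#
# from fastapi import FastAPI, HTTPException
#
# def create_app() -> FastAPI:
#     app = FastAPI()
#
#     @app.get('/')
#     def home():
#         return {'message': 'Ola Regis'}
#
#     @app.get('/id/{pessoa_id}')
#     def busca_por_id(pessoa_id: int):
#         if pessoa_id == 1:
#             raise HTTPException(status_code=404, detail='Não tem 1')
#         return {'name': 'regis'}
#
#     return app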
|
python
|
"""A module for evaluating policies."""
import os
import json
import pandas as pd
import matplotlib.pyplot as plt
from tf_agents.environments.tf_py_environment import TFPyEnvironment
from lake_monster.environment.environment import LakeMonsterEnvironment
from lake_monster.environment.variations import MultiMonsterEnvironment, JumpingEnvironment
from lake_monster import configs
def evaluate_episode(policy, env_params):
"""Use naive while loop to evaluate policy in single episode."""
if 'n_monsters' in env_params:
env = MultiMonsterEnvironment
elif 'is_jumping' in env_params:
env = JumpingEnvironment
else:
env = LakeMonsterEnvironment
py_env = env(**env_params)
tf_env = TFPyEnvironment(py_env)
ts = tf_env.reset()
n_steps = 0
while not ts.is_last():
action = policy.action(ts)
ts = tf_env.step(action.action)
n_steps += 1
reward = ts.reward.numpy().item()
return reward, n_steps * py_env.step_size
def probe_policy_const_steps(policy, env_params):
"""Determine the maximum monster speed at which policy can succeed."""
highest_speed_with_success = 0.0
n_steps_at_success = 0
n_consecutive_fails = 0
current_monster_speed = 0.0
delta = 1.0
while True:
print('.', end='', flush=True)
current_monster_speed += delta
env_params['monster_speed'] = current_monster_speed
env_params['use_mini_rewards'] = False
reward, n_steps = evaluate_episode(policy, env_params)
reward = round(reward)
if reward not in [0, 1]:
raise ValueError(f'Strange reward. Reward encountered: {reward}')
if reward == 0:
n_consecutive_fails += 1
else:
highest_speed_with_success = current_monster_speed
n_steps_at_success = n_steps
n_consecutive_fails = 0
if n_consecutive_fails == 3:
if delta < 0.001 - 1e-6: # tolerance
print('')
return highest_speed_with_success, n_steps_at_success
delta *= 0.5
current_monster_speed = highest_speed_with_success
n_consecutive_fails = 0
def probe_policy(policy, env_params):
"""Call probe_policy at different step_sizes."""
current_step_size = env_params['step_size']
result = {'monster_speed': 0.0, 'step_size': 0.0, 'n_env_steps': 0}
for multiplier in [1/16, 1/8, 1/4, 1/2, 1]:
step_size = multiplier * current_step_size
env_params['step_size'] = step_size
monster_speed, n_env_steps = probe_policy_const_steps(policy, env_params)
if monster_speed > result['monster_speed']:
result['monster_speed'] = monster_speed
result['step_size'] = step_size
result['n_env_steps'] = n_env_steps
return result
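# --- Hedged usage sketch (illustrative only) ---
# env_params is the keyword dict for the environment constructor; the keys this
# module reads or sets are 'monster_speed', 'step_size', 'use_mini_rewards', and
# the variant selectors 'n_monsters' / 'is_jumping'. A saved policy could be
# probed roughly like this (the path and values are placeholders):
#
# import tensorflow as tf
# policy = tf.saved_model.load('path/to/saved_policy')
# result = probe_policy(policy, {'step_size': 0.1})
# print(result)  # {'monster_speed': ..., 'step_size': ..., 'n_env_steps': ...}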
def result_df():
"""Return DataFrame of monster speed data in results.json."""
with open(configs.RESULTS_PATH) as f:
data = json.load(f)
dfs = []
params = {}
for uid in data:
params[uid] = data[uid]['params']
results = data[uid]['results']
if results:
df = pd.DataFrame(results)
df = df.set_index('n_episode', drop=True)
df = df.drop(['step_size', 'n_env_steps'], axis=1)
df = df.rename(columns={'monster_speed': uid})
dfs.append(df)
return pd.concat(dfs, axis=1), params
def plot_results(policies=None):
"""Plot evaluation monter speeds over training."""
df, _ = result_df()
if policies:
df = df[policies]
df = df[df.index <= 600_000]
df = df.rolling(25).mean()
plt.figure(figsize=(12, 8))
df.plot(legend=False, ax=plt.gca())
plt.xlabel('episode number')
plt.ylabel('monster speed')
plt.title('Smoothed evaluation scores over training')
# plt.legend(loc='lower right', fontsize='xx-small')
plt.grid()
save_path = os.path.join(configs.ASSETS_DIR, 'results.png')
plt.savefig(save_path, dpi=300)
plt.show()
def print_strongest_policies():
"""Print a markdown table showing agent parameters and evaluation results."""
df, params = result_df()
shortened_names = {
'n_actions': 'n act',
'initial_step_size': 'init step',
'initial_monster_speed': 'init speed',
'timeout_factor': 'timeout',
'fc_layer_params': 'layers',
'dropout_layer_params': 'dropout',
'learning_rate': 'learn rate',
'epsilon_greedy': 'epsilon',
'n_step_update': 'update',
'use_categorical': 'categorical',
'use_step_schedule': 'schedule'
}
params_df = []
for p in df.columns:
# printing out high speed policies separately from markdown
episode = df[p].idxmax()
speed = df[p][episode]
if speed > 4.3:
print(speed, p + '-' + str(episode))
results = {}
results['max speed'] = round(speed, 3)
results['avg speed'] = round(df[p].mean(), 3)
for k, v in shortened_names.items():
results[v] = params[p][k]
if results['categorical']:
results['dropout'] = 'None'
if results['dropout'] is None: # getting None to appear in markdown
results['dropout'] = 'None'
params_df.append(results)
params_df = pd.DataFrame(params_df)
params_df = params_df.sort_values(by='max speed', axis=0, ascending=False)
print(params_df.to_markdown(index=False))
if __name__ == '__main__':
print_strongest_policies()
plot_results()
|
python
|
import numpy as np
from numpy import exp,dot,full,cos,sin,real,imag,power,pi,log,sqrt,roll,linspace,arange,transpose,pad,complex128 as c128, float32 as f32, float64 as f64
from numba import njit,jit,complex128 as nbc128, void
import os
os.environ['NUMEXPR_MAX_THREADS'] = '16'
os.environ['NUMEXPR_NUM_THREADS'] = '8'
import numexpr as ne
from mesh import RectMesh3D,RectMesh2D
import optics
from misc import timeit, overlap, normalize,printProgressBar, overlap_nonu, norm_nonu,resize
### to do ###
## performance
# maybe adaptive z stepping
# get a better refinement criterion -- now weighting partially by 2nd deriv. still could use some work
# compute r = 1 and r=/=1 points separately? -- I TRIED IT -- WHY IS THIS SLOWER
# more efficient ways to store arrays with many repeated values -- some sort of sparse-like data structure?
# optimize tri_solve_vec : maybe try out dask (parallelize) -- WHY IS THIS ALSO SLOWER
#ignore shifting of IOR arrays in trimats calc?
## readability
# actually add doc strings
# combine some functions into "remesh" and "recompute" functions
# remove unused functions
# move all the eval strings somewhere else (together)
def genc(shape):
return np.empty(shape,dtype=c128,order='F')
def genf(shape):
    return np.empty(shape,dtype=f64,order='F')
@njit(void(nbc128[:,:],nbc128[:,:],nbc128[:,:],nbc128[:,:],nbc128[:,:],nbc128[:,:]))
def tri_solve_vec(a,b,c,r,g,u):
    '''Apply Thomas' method for simultaneously solving a set of tridiagonal systems. a, b, c, and r are matrices
    (N rows) where each column corresponds to a separate system.'''
N = a.shape[0]
beta = b[0]
u[0] = r[0]/beta
for j in range(1,N):
g[j] = c[j-1]/beta
beta = b[j] - a[j]*g[j]
u[j] = (r[j] - a[j]*u[j-1])/beta
for j in range(N-1):
k = N-2-j
u[k] = u[k] - g[k+1]*u[k+1]
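# --- Hedged sanity-check sketch (illustrative only, not part of the original
# module): tri_solve_vec solves A u = r column-by-column for tridiagonal A with
# sub-diagonal a, main diagonal b and super-diagonal c (a[0] and c[-1] unused).
# The tiny system below can be compared against numpy.linalg.solve.
#
# N = 4
# a = np.zeros((N, 1), dtype=c128); a[1:, 0] = -1.0
# b = np.full((N, 1), 4.0, dtype=c128)
# c = np.zeros((N, 1), dtype=c128); c[:-1, 0] = -1.0
# r = np.arange(1, N + 1, dtype=c128).reshape(N, 1)
# g, u = np.empty_like(r), np.empty_like(r)
# tri_solve_vec(a, b, c, r, g, u)
# A = np.diag(b[:, 0]) + np.diag(a[1:, 0], -1) + np.diag(c[:-1, 0], 1)
# assert np.allclose(u[:, 0], np.linalg.solve(A, r[:, 0]))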
class Prop3D:
'''beam propagator. employs finite-differences beam propagation with PML as the boundary condition. works on an adaptive mesh'''
def __init__(self,wl0,mesh:RectMesh3D,optical_system:optics.OpticSys,n0):
xymesh = mesh.xy
self.wl0 = wl0
self.k0 = k0 = 2.*pi/wl0
self.k02 = k02 = k0*k0
self.mesh = mesh
self.n0 = n0
self.sig = sig = -2.j*k0*n0/mesh.dz
self.field = None
self.optical_system = optical_system
self.optical_system.set_sampling(xymesh)
self.nb2 = nb2 = optical_system.nb2
self.n02 = n02 = n0*n0
## things that will be set during computation
self.xgrid_cor_facs = [[]]*3
self.ygrid_cor_facs = [[]]*3
self.xgrid_cor_mask = []
self.ygrid_cor_mask = []
## precomputing some stuff ##
Rx,Tupx,Tdox,Ry,Tupy,Tdoy = self.calculate_PML_mats()
dx02 = mesh.xy.dx0**2
dy02 = mesh.xy.dy0**2
K = k02*(nb2-n02)
n02 = power(n0,2)
## coeff matrices of tridiagonal system, updated periodically
self._a0x = None
self._b0x = None
self._c0x = None
self._a0y = None
self._b0y = None
self._c0y = None
self.a0x_ = None
self.b0x_ = None
self.c0x_ = None
self.a0y_ = None
self.b0y_ = None
self.c0y_ = None
## same as above but in PML zone
self._apmlx = sig/12. - 0.5/dx02*Tdox - K/48.
self._bpmlx = 5./6.*sig + Rx/dx02 - 5./24. * K
self._cpmlx = sig/12. - 0.5/dx02*Tupx - K/48.
self.apmlx_ = sig/12. + 0.5/dx02*Tdox + K/48.
self.bpmlx_ = 5./6.*sig - Rx/dx02 + 5./24. * K
self.cpmlx_ = sig/12. + 0.5/dx02*Tupx + K/48.
self._apmly = sig/12. - 0.5/dy02*Tdoy - K/48.
self._bpmly = 5./6.*sig + Ry/dy02 - 5./24. * K
self._cpmly = sig/12. - 0.5/dy02*Tupy - K/48.
self.apmly_ = sig/12. + 0.5/dy02*Tdoy + K/48.
self.bpmly_ = 5./6.*sig - Ry/dy02 + 5./24. * K
self.cpmly_ = sig/12. + 0.5/dy02*Tupy + K/48.
self.half_dz = mesh.dz/2.
self.power = np.empty((mesh.zres,))
self.totalpower = np.empty((mesh.zres,))
def allocate_mats(self):
sx,sy = self.mesh.xy.xg.shape,self.mesh.xy.yg.T.shape
_trimatsx = (genc(sx),genc(sx),genc(sx))
_trimatsy = (genc(sy),genc(sy),genc(sy))
rmatx,rmaty = genc(sx),genc(sy)
gx = genc(sx)
gy = genc(sy)
fill = self.nb2*self.k02
IORsq__ = np.full(sx,fill,dtype=f64)
_IORsq_ = np.full(sx,fill,dtype=f64)
__IORsq = np.full(sx,fill,dtype=f64)
return _trimatsx,rmatx,gx,_trimatsy,rmaty,gy,IORsq__,_IORsq_,__IORsq
def check_z_inv(self):
return self.optical_system.z_invariant
def set_IORsq(self,out,z,xg=None,yg=None):
#premultiply by k02 so we don't have to keep doing it later
self.optical_system.set_IORsq(out,z,xg,yg,coeff=self.k02)
def calculate_PML_mats(self):
        '''As per the textbook <Beam Propagation Method for Design of Optical Waveguide Devices>,
        calculate the matrices R, T_j+1, and T_j-1 in the PML zone. We assume that
        the PML's refractive index will be constant, equal to the background index.
'''
m = self.mesh
xy = m.xy
xverts = xy.pvert_xa
sdox = m.sigmax(xverts-xy.dx0)
sx = m.sigmax(xverts)
supx = m.sigmax(xverts+xy.dx0)
yverts = xy.pvert_ya
sdoy = m.sigmay(yverts-xy.dy0)
sy = m.sigmay(yverts)
supy = m.sigmay(yverts+xy.dy0)
Qdox = 1./(1.+1.j*sdox*self.nb2)
Qx = 1./(1.+1.j*sx*self.nb2)
Qupx = 1./(1.+1.j*supx*self.nb2)
Tupx = 0.5 * Qx * (Qx+Qupx)
Tdox = 0.5 * Qx * (Qx+Qdox)
Rx = 0.25 * Qx * (Qdox+2*Qx+Qupx)
Qdoy = 1./(1.+1.j*sdoy*self.nb2)
Qy = 1./(1.+1.j*sy*self.nb2)
Qupy = 1./(1.+1.j*supy*self.nb2)
Tupy= 0.5 * Qy * (Qy+Qupy)
Tdoy = 0.5 * Qy * (Qy+Qdoy)
Ry = 0.25 * Qy * (Qdoy+2*Qy+Qupy)
return (Rx,Tupx,Tdox,Ry,Tupy,Tdoy)
def update_grid_cor_facs(self,which='x'):
xy = self.mesh.xy
ix = xy.cvert_ix
if which=='x':
r = xy.rxa[ix]
self.xgrid_cor_imask = np.where(r[1:-1]!=1)[0]
else:
r = xy.rya[ix]
self.ygrid_cor_imask = np.where(r[1:-1]!=1)[0]
r2 = r*r
R1 = (r2 + r -1)/(6*r*(r+1))
R2 = (r2 + 3*r + 1)/(6*r)
R3 = (-r2 + r + 1)/(6*(r+1))
## alternative values from paper
#R1 = (3*r2 - 3*r + 1)/ (6*r*(r+1))
#R2 = (-r2 + 7*r - 1)/(6*r)
#R3 = (r2 - 3*r + 3)/(6*(r+1))
if which=='x':
self.xgrid_cor_facs[0] = R1
self.xgrid_cor_facs[1] = R2
self.xgrid_cor_facs[2] = R3
else:
self.ygrid_cor_facs[0] = R1
self.ygrid_cor_facs[1] = R2
self.ygrid_cor_facs[2] = R3
def precomp_trimats(self,which='x'):
ix = self.mesh.xy.cvert_ix
s = self.sig
nu0 = -self.k02*self.n02
eval1 = "s*r3 - 1/(r+1)/(d*d) - 0.25*r3*n"
eval2 = "s*r2 + 1/r/(d*d) - 0.25*r2*n"
eval3 = "s*r1 - 1/r/(r+1)/(d*d) - 0.25*r1*n"
if which == 'x':
R1,R2,R3 = self.xgrid_cor_facs
r = self.mesh.xy.rxa[ix]
dla = self.mesh.xy.dxa[ix]
self._a0x = ne.evaluate(eval1,local_dict={"s":s,"r3":R3[1:,None],"r":r[1:,None],"d":dla[1:,None],"n":nu0})
self._b0x = ne.evaluate(eval2,local_dict={"s":s,"r2":R2[:,None],"r":r[:,None],"d":dla[:,None],"n":nu0})
self._c0x = ne.evaluate(eval3,local_dict={"s":s,"r1":R1[:-1,None],"r":r[:-1,None],"d":dla[:-1,None],"n":nu0})
else:
R1,R2,R3 = self.ygrid_cor_facs
r = self.mesh.xy.rya[ix]
dla = self.mesh.xy.dya[ix]
self._a0y = ne.evaluate(eval1,local_dict={"s":s,"r3":R3[1:,None],"r":r[1:,None],"d":dla[1:,None],"n":nu0})
self._b0y = ne.evaluate(eval2,local_dict={"s":s,"r2":R2[:,None],"r":r[:,None],"d":dla[:,None],"n":nu0})
self._c0y = ne.evaluate(eval3,local_dict={"s":s,"r1":R1[:-1,None],"r":r[:-1,None],"d":dla[:-1,None],"n":nu0})
def _trimats(self,out,IORsq,which='x'):
''' calculate the tridiagonal matrices in the computational zone '''
ix = self.mesh.xy.cvert_ix
_IORsq = IORsq[ix]
if which == 'x':
R1,R2,R3 = self.xgrid_cor_facs
r = self.mesh.xy.rxa[ix]
dla = self.mesh.xy.dxa[ix]
a,b,c = self._a0x,self._b0x,self._c0x
else:
R1,R2,R3 = self.ygrid_cor_facs
r = self.mesh.xy.rya[ix]
dla = self.mesh.xy.dya[ix]
a,b,c = self._a0y,self._b0y,self._c0y
_a,_b,_c = out
s = self.sig
eval1 = "a - 0.25*r3*n"
eval2 = "b - 0.25*r2*n"
eval3 = "c - 0.25*r1*n"
ne.evaluate(eval1,local_dict={"a":a,"r3":R3[1:,None],"n":_IORsq[:-1]},out=_a[ix][1:])
ne.evaluate(eval2,local_dict={"b":b,"r2":R2[:,None],"n":_IORsq},out=_b[ix])
ne.evaluate(eval3,local_dict={"c":c,"r1":R1[:-1,None],"n":_IORsq[1:]},out=_c[ix][:-1])
_a[ix][0] = s*R3[0] - 1. / ((r[0]+1) * dla[0]*dla[0]) - 0.25*R3[0]*(_IORsq[0]-self.n02*self.k02)
_c[ix][-1] = s*R1[-1] - 1/r[-1]/(r[-1]+1)/(dla[-1]*dla[-1]) - 0.25*R1[-1]*(_IORsq[-1]-self.n02*self.k02)
def rmat_pmlcorrect(self,_rmat,u,which='x'):
if which == 'x':
apml,bpml,cpml = self.apmlx_,self.bpmlx_,self.cpmlx_
else:
apml,bpml,cpml = self.apmly_,self.bpmly_,self.cpmly_
pix = self.mesh.xy.pvert_ix
temp = np.empty_like(_rmat[pix])
temp[1:-1] = apml[1:-1,None]*u[pix-1][1:-1] + bpml[1:-1,None]*u[pix][1:-1] + cpml[1:-1,None]*u[pix+1][1:-1]
temp[0] = bpml[0]*u[0] + cpml[0]*u[1]
temp[-1] = apml[-1]*u[-2] + bpml[-1]*u[-1]
_rmat[pix] = temp
def rmat(self,_rmat,u,IORsq,which='x'):
ix = self.mesh.xy.cvert_ix
_IORsq = IORsq[ix]
s = self.sig
if which == 'x':
R1,R2,R3 = self.xgrid_cor_facs
dla = self.mesh.xy.dxa[ix]
r = self.mesh.xy.rxa[ix]
a,b,c = self.a0x_,self.b0x_,self.c0x_
else:
R1,R2,R3 = self.ygrid_cor_facs
dla = self.mesh.xy.dya[ix]
r = self.mesh.xy.rya[ix]
a,b,c = self.a0y_,self.b0y_,self.c0y_
N = self.n02*self.k02
m = np.s_[1:-1,None]
_dict = {"a":a,"b":b,"c":c,"u1":u[ix][:-2],"u2":u[ix][1:-1],"u3":u[ix][2:],"n1":_IORsq[:-2],"n2":_IORsq[1:-1],"n3":_IORsq[2:],"r3":R3[m],"r2":R2[m],"r1":R1[m] }
_eval = "(a+0.25*r3*n1)*u1 + (b+0.25*r2*n2)*u2 + (c+0.25*r1*n3)*u3"
ne.evaluate(_eval,local_dict=_dict,out=_rmat[ix][1:-1])
_rmat[ix][0] = (s*R2[0] - 1/(r[0]*dla[0]**2 ) + 0.25*R2[0]*(_IORsq[0]-N))*u[0] + (s*R1[0] + 1/r[0]/(r[0]+1)/dla[0]**2 + 0.25*R1[0] * (_IORsq[1]-N) )*u[1]
_rmat[ix][-1] = (s*R3[-1] + 1. / ((r[-1]+1) * dla[-1]**2) + 0.25*R3[-1]*(_IORsq[-2]-N))*u[-2] + (s*R2[-1] - 1/(r[-1]*dla[-1]**2) + 0.25*R2[-1]*(_IORsq[-1]-N))*u[-1]
def rmat_precomp(self,which='x'):
ix = self.mesh.xy.cvert_ix
s = self.sig
n0 = -self.k02 * self.n02
m = np.s_[1:-1,None]
eval1="(s*r3+1/(r+1)/(d*d)+0.25*r3*n)"
eval2="(s*r2-1/r/(d*d)+0.25*r2*n)"
eval3="(s*r1+1/r/(r+1)/(d*d) + 0.25*r1*n)"
if which == 'x':
R1,R2,R3 = self.xgrid_cor_facs
r = self.mesh.xy.rxa[ix]
dla = self.mesh.xy.dxa[ix]
_dict = {"s":s,"r3":R3[m],"r":r[m],"d":dla[m],"n":n0,"r2":R2[m],"r1":R1[m]}
self.a0x_ = ne.evaluate(eval1,local_dict=_dict)
self.b0x_ = ne.evaluate(eval2,local_dict=_dict)
self.c0x_ = ne.evaluate(eval3,local_dict=_dict)
else:
R1,R2,R3 = self.ygrid_cor_facs
r = self.mesh.xy.rya[ix]
dla = self.mesh.xy.dya[ix]
_dict = {"s":s,"r3":R3[m],"r":r[m],"d":dla[m],"n":n0,"r2":R2[m],"r1":R1[m]}
self.a0y_ = ne.evaluate(eval1,local_dict=_dict)
self.b0y_ = ne.evaluate(eval2,local_dict=_dict)
self.c0y_ = ne.evaluate(eval3,local_dict=_dict)
def _pmlcorrect(self,_trimats,which='x'):
ix = self.mesh.xy.pvert_ix
_a,_b,_c = _trimats
if which=='x':
_a[ix] = self._apmlx[:,None]
_b[ix] = self._bpmlx[:,None]
_c[ix] = self._cpmlx[:,None]
else:
_a[ix] = self._apmly[:,None]
_b[ix] = self._bpmly[:,None]
_c[ix] = self._cpmly[:,None]
@timeit
def prop2end(self,_u,xyslice=None,zslice=None,u1_func=None,writeto=None,ref_val=5.e-6,remesh_every=20,dynamic_n0 = False,fplanewidth=0):
mesh = self.mesh
PML = mesh.PML
if not (xyslice is None and zslice is None):
za_keep = mesh.za[zslice]
if type(za_keep) == np.ndarray:
minz, maxz = za_keep[0],za_keep[-1]
shape = (len(za_keep),*mesh.xg[xyslice].shape)
else:
raise Exception('zslice must select a numpy array of z values; other slicing is not implemented')
self.field = np.zeros(shape,dtype=c128)
#pull xy mesh
xy = mesh.xy
dx,dy = xy.dx0,xy.dy0
if fplanewidth == 0:
xa_in = np.linspace(-mesh.xw/2,mesh.xw/2,xy.shape0_comp[0])
ya_in = np.linspace(-mesh.yw/2,mesh.yw/2,xy.shape0_comp[1])
else:
xa_in = np.linspace(-fplanewidth/2,fplanewidth/2,xy.shape0_comp[0])
ya_in = np.linspace(-fplanewidth/2,fplanewidth/2,xy.shape0_comp[1])
dx0 = xa_in[1]-xa_in[0]
dy0 = ya_in[1]-ya_in[0]
# u can either be a field or a function that generates a field.
# the latter option allows for coarse base grids to be used
# without being penalized by forcing the use of a low resolution
# launch field
if type(_u) is np.ndarray:
_power = overlap(_u,_u)
print('input power: ',_power)
# normalize the field, preserving the input power. accounts for grid resolution
normalize(_u,weight=dx0*dy0,normval=_power)
#resample the field onto the smaller xy mesh (in the smaller mesh's computation zone)
u0 = xy.resample_complex(_u,xa_in,ya_in,xy.xa[PML:-PML],xy.ya[PML:-PML])
_power2 = overlap(u0,u0,dx*dy)
#now we pad w/ zeros to extend it into the PML zone
u0 = np.pad(u0,((PML,PML),(PML,PML)))
#initial mesh refinement
xy.refine_base(u0,ref_val)
weights = xy.get_weights()
#now resample the field onto the smaller *non-uniform* xy mesh
u = xy.resample_complex(_u,xa_in,ya_in,xy.xa[PML:-PML],xy.ya[PML:-PML])
u = np.pad(u,((PML,PML),(PML,PML)))
#renormalize to correct for the slight power change introduced by resampling (measured ~0.1% for psflo; worth rechecking)
norm_nonu(u,weights,_power2)
elif callable(_u):
# must be of the form u(x,y)
u0 = _u(xy.xg,xy.yg)
_power = overlap(u0,u0)
print('input power: ',_power)
# normalize the field, preserving the input power. accounts for grid resolution
normalize(u0,weight=dx0*dy0,normval=_power)
# do an initial mesh refinement
xy.refine_base(u0,ref_val)
# compute the field on the nonuniform grid
u = norm_nonu(_u(xy.xg,xy.yg),xy.get_weights(),_power)
else:
raise Exception("unsupported type for argument u in prop2end()")
counter = 0
total_iters = self.mesh.zres
print("propagating field...")
__z = 0
z__ = 0
#step 0 setup
self.update_grid_cor_facs('x')
self.update_grid_cor_facs('y')
# initial array allocation
_trimatsx,rmatx,gx,_trimatsy,rmaty,gy,IORsq__,_IORsq_,__IORsq = self.allocate_mats()
self.precomp_trimats('x')
self.precomp_trimats('y')
self.rmat_precomp('x')
self.rmat_precomp('y')
self._pmlcorrect(_trimatsx,'x')
self._pmlcorrect(_trimatsy,'y')
#get the current IOR dist
self.set_IORsq(IORsq__,z__)
#plt.figure(frameon=False)
#plt.imshow(xy.get_base_field(IORsq__))
#plt.show()
print("initial shape: ",xy.shape)
for i in range(total_iters):
if i%20 == 0:
printProgressBar(i,total_iters-1)
u0 = xy.get_base_field(u)
u0c = np.conj(u0)
weights = xy.get_weights()
## Total power monitor ##
self.totalpower[i] = overlap_nonu(u,u,weights)
#print(self.totalpower[i])
## Other monitors ##
if u1_func is not None:
lp = norm_nonu(u1_func(xy.xg,xy.yg),weights)
self.power[i] = power(overlap_nonu(u,lp,weights),2)
_z_ = z__ + mesh.half_dz
__z = z__ + mesh.dz
if self.field is not None and (minz<=__z<=maxz):
ix0,ix1,ix2,ix3 = mesh.get_loc()
mid = int(u0.shape[1]/2)
self.field[counter][ix0:ix1+1] = u0[:,mid] ## FIX ##
counter+=1
#avoid remeshing on step 0
if (i+1)%remesh_every== 0:
## update the effective index
if dynamic_n0:
#update the effective index
base = xy.get_base_field(IORsq__)
self.n02 = xy.dx0*xy.dy0*np.real(np.sum(u0c*u0*base))/self.k02
oldxm,oldxM = xy.xm,xy.xM
oldym,oldyM = xy.ym,xy.yM
oldxw,oldyw = xy.xw,xy.yw
new_xw,new_yw = oldxw,oldyw
#expand the grid if necessary
if mesh.xwfunc is not None:
new_xw = mesh.xwfunc(__z)
if mesh.ywfunc is not None:
new_yw = mesh.ywfunc(__z)
new_xw, new_yw = xy.snapto(new_xw,new_yw)
xy.reinit(new_xw,new_yw) #set grid back to base res with new dims
if (xy.xw > oldxw or xy.yw > oldyw):
#now we need to pad u,u0 with zeros to make sure it matches the new space
xpad = int((xy.shape0[0]-u0.shape[0])/2)
ypad = int((xy.shape0[1]-u0.shape[1])/2)
u = np.pad(u,((xpad,xpad),(ypad,ypad)))
u0 = np.pad(u0,((xpad,xpad),(ypad,ypad)))
#pad coord arrays to do interpolation
xy.xa_last = np.hstack( ( np.linspace(xy.xm,oldxm-dx,xpad) , xy.xa_last , np.linspace(oldxM + dx, xy.xM,xpad) ) )
xy.ya_last = np.hstack( ( np.linspace(xy.ym,oldym-dy,ypad) , xy.ya_last , np.linspace(oldyM + dy, xy.yM,ypad) ) )
#subdivide into nonuniform grid
xy.refine_base(u0,ref_val)
#interp the field to the new grid
u = xy.resample_complex(u)
#give the grid to the optical sys obj so it can compute IORs
self.optical_system.set_sampling(xy)
#compute nonuniform grid correction factors R_i
self.update_grid_cor_facs('x')
self.update_grid_cor_facs('y')
# grid size has changed, so now we need to reallocate arrays for at least the next remesh_every iters
_trimatsx,rmatx,gx,_trimatsy,rmaty,gy,IORsq__,_IORsq_,__IORsq = self.allocate_mats()
#get the current IOR dist
self.set_IORsq(IORsq__,z__)
#precompute things that will be reused
self.precomp_trimats('x')
self.precomp_trimats('y')
self.rmat_precomp('x')
self.rmat_precomp('y')
self._pmlcorrect(_trimatsx,'x')
self._pmlcorrect(_trimatsy,'y')
self.set_IORsq(_IORsq_,_z_,)
self.set_IORsq(__IORsq,__z)
self.rmat(rmatx,u,IORsq__,'x')
self.rmat_pmlcorrect(rmatx,u,'x')
self._trimats(_trimatsx,_IORsq_,'x')
self._trimats(_trimatsy,__IORsq.T,'y')
tri_solve_vec(_trimatsx[0],_trimatsx[1],_trimatsx[2],rmatx,gx,u)
self.rmat(rmaty,u.T,_IORsq_.T,'y')
self.rmat_pmlcorrect(rmaty,u.T,'y')
tri_solve_vec(_trimatsy[0],_trimatsy[1],_trimatsy[2],rmaty,gy,u.T)
z__ = __z
if (i+2)%remesh_every != 0:
IORsq__[:,:] = __IORsq
print("final total power",self.totalpower[-1])
if writeto:
np.save(writeto,self.field)
return u,u0
@timeit
def prop2end_uniform(self,u,xyslice=None,zslice=None,u1_func=None,writeto=None,dynamic_n0 = False,fplanewidth=0):
mesh = self.mesh
PML = mesh.PML
if not (xyslice is None and zslice is None):
za_keep = mesh.za[zslice]
if type(za_keep) == np.ndarray:
minz, maxz = za_keep[0],za_keep[-1]
shape = (len(za_keep),*mesh.xg[xyslice].shape)
else:
raise Exception('zslice must select a numpy array of z values; other slicing is not implemented')
self.field = np.zeros(shape,dtype=c128)
if fplanewidth == 0:
xa_in = np.linspace(-mesh.xw/2,mesh.xw/2,u.shape[0])
ya_in = np.linspace(-mesh.yw/2,mesh.yw/2,u.shape[1])
else:
xa_in = np.linspace(-fplanewidth/2,fplanewidth/2,u.shape[0])
ya_in = np.linspace(-fplanewidth/2,fplanewidth/2,u.shape[1])
dx0 = xa_in[1]-xa_in[0]
dy0 = ya_in[1]-ya_in[0]
_power = overlap(u,u)
print('input power: ',_power)
# normalize the field, preserving the input power. accounts for grid resolution
normalize(u,weight=dx0*dy0,normval=_power)
__z = 0
#pull xy mesh
xy = mesh.xy
dx,dy = xy.dx0,xy.dy0
#resample the field onto the smaller xy mesh (in the smaller mesh's computation zone)
u0 = xy.resample_complex(u,xa_in,ya_in,xy.xa[PML:-PML],xy.ya[PML:-PML])
_power2 = overlap(u0,u0,dx*dy)
#now we pad w/ zeros to extend it into the PML zone
u0 = np.pad(u0,((PML,PML),(PML,PML)))
counter = 0
total_iters = self.mesh.zres
print("propagating field...")
z__ = 0
#step 0 setup
self.update_grid_cor_facs('x')
self.update_grid_cor_facs('y')
# initial array allocation
_trimatsx,rmatx,gx,_trimatsy,rmaty,gy,IORsq__,_IORsq_,__IORsq = self.allocate_mats()
self.precomp_trimats('x')
self.precomp_trimats('y')
self.rmat_precomp('x')
self.rmat_precomp('y')
self._pmlcorrect(_trimatsx,'x')
self._pmlcorrect(_trimatsy,'y')
#get the current IOR dist
self.set_IORsq(IORsq__,z__)
weights = xy.get_weights()
print("initial shape: ",xy.shape)
for i in range(total_iters):
if i%20 == 0:
printProgressBar(i,total_iters-1)
## Total power monitor ##
self.totalpower[i] = overlap_nonu(u0,u0,weights)
## Other monitors ##
if u1_func is not None:
lp = norm_nonu(u1_func(xy.xg,xy.yg),weights)
self.power[i] = power(overlap_nonu(u0,lp,weights),2)
_z_ = z__ + mesh.half_dz
__z = z__ + mesh.dz
if self.field is not None and (minz<=__z<=maxz):
ix0,ix1,ix2,ix3 = mesh.get_loc()
mid = int(u0.shape[1]/2)
self.field[counter][ix0:ix1+1] = u0[:,mid] ## FIX ##
counter+=1
self.set_IORsq(_IORsq_,_z_,)
self.set_IORsq(__IORsq,__z)
self.rmat(rmatx,u0,IORsq__,'x')
self.rmat_pmlcorrect(rmatx,u0,'x')
self._trimats(_trimatsx,_IORsq_,'x')
self._trimats(_trimatsy,__IORsq.T,'y')
tri_solve_vec(_trimatsx[0],_trimatsx[1],_trimatsx[2],rmatx,gx,u0)
self.rmat(rmaty,u0.T,_IORsq_.T,'y')
self.rmat_pmlcorrect(rmaty,u0.T,'y')
tri_solve_vec(_trimatsy[0],_trimatsy[1],_trimatsy[2],rmaty,gy,u0.T)
z__ = __z
IORsq__[:,:] = __IORsq
print("final total power",self.totalpower[-1])
if writeto:
np.save(writeto,self.field)
return u0
|
python
|
#!/usr/bin/env python
# Copyright (c) 2014 Spotify AB.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import logging
import uuid
# Set up some basic logging.
logging.basicConfig(level = logging.INFO, format = "[%(asctime)s] [%(levelname)s] %(message)s")
try:
from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeError, RuntimeInconsistency
from kazoo.handlers.threading import TimeoutError
except ImportError:
logging.error("This script uses Kazoo Python libraries to work with Zookeeper")
logging.error("You can install them by typing 'sudo easy_install kazoo' in your console")
# Let the original exception propagate, because sometimes it's not working for different
# reasons, like package conflicts or whatever else (Python packaging is weird), so it is
# a good idea to let the user see the actual exception message.
raise
DESCRIPTION = """
Bootstraps a new Helios cluster.
Bootstrapping is done by populating ZooKeeper with the basic data structures required
by Helios to function properly. The script cannot be used on a NONEMPTY ZooKeeper
cluster unless --force is given.
"""
def main():
parser = argparse.ArgumentParser(description = DESCRIPTION)
parser.add_argument("hosts", metavar = "<zookeeper-endpoint>", type = str,
nargs = "+", help = "Zookeeper node endpoints to connect to")
parser.add_argument("--timeout", dest = "timeout", action = "store", type = int,
default = 30, help = "Zookeeper connection timeout")
parser.add_argument("--force", dest = "force", action = "store_true",
help = "Bootstrap even when Zookeeper is not empty")
option = parser.parse_args()
logging.debug("Using %s as a Zookeeper connection string" % option.hosts)
client = KazooClient(hosts = ",".join(option.hosts))
try:
client.start(timeout = option.timeout)
except TimeoutError as e:
logging.error("Timed out while connecting to Zookeeper")
return 1
status = bootstrap(client, str(uuid.uuid4()), option.force)
# If the client is not stopped, it will hang forever maintaining the connection.
client.stop()
return status
def bootstrap(client, cluster_id, force):
nodes = [
"/config",
"/config/id",
"/config/id/%s" % cluster_id
]
transaction = client.transaction()
# Version is not important here. If any of these nodes exist, just stop doing anything and
# report the error to avoid messing things up.
[transaction.check(node, version = -1) for node in nodes]
# Operation results are either True if the given node exists, or an exception of NoNodeError,
# RuntimeInconsistency or RolledBackError type if the previous (1) or following (2) operation
# has failed. We want all results to be NoNodeError or RuntimeInconsistency (which means the node
# existence check wasn't performed because the node's parent is not there).
types = NoNodeError, RuntimeInconsistency
nodes_missing = [isinstance(result, types) for result in transaction.commit()]
if not force and not all(nodes_missing):
logging.error("Aborting, some nodes already exist: %s" %
", ".join(nodes[idx] for idx, missing in enumerate(nodes_missing) if not missing)
)
return 1
transaction = client.transaction()
# Filter the node list so that only the missing nodes are in it.
nodes = [node for idx, node in enumerate(nodes) if nodes_missing[idx]]
# TODO: Might be a good idea to set ACLs here so that these structural nodes are protected from
# accidental deletions, but allow children modifications.
[transaction.create(node) for node in nodes]
# Operation results are either a string representing the created path or an exception object we
# don't really care about.
nodes_created = [result == nodes[idx] for idx, result in enumerate(transaction.commit())]
if not all(nodes_created):
logging.error("Aborting, couldn't create some nodes: %s" %
", ".join(nodes[idx] for idx, created in enumerate(nodes_created) if not created)
)
return 1
logging.info("Cluster has been successfully bootstrapped, cluster id is: %s" % cluster_id)
if __name__ == "__main__":
exit(main())
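# A minimal invocation sketch (the endpoints and timeout below are illustrative,
# not values required by this script): one or more ZooKeeper endpoints are passed
# positionally and joined into a single Kazoo connection string by main().
#
#   ./bootstrap.py zk1.example.net:2181 zk2.example.net:2181 --timeout 10
#
# Add --force to bootstrap even when ZooKeeper already contains some of the nodes.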
|
python
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from maro.backends.frame import FrameBase, FrameNode
from .port import Port
from .vessel import gen_vessel_definition
from .matrix import gen_matrix
def gen_ecr_frame(port_num: int, vessel_num: int, stop_nums: tuple, snapshots_num: int):
"""Define and generate ecr frame
Args:
port_num (int): number of ports
vessel_num (int): number of vessels
stop_nums (tuple): past stops number and future stop number
"""
vessel_cls = gen_vessel_definition(stop_nums)
matrix_cls = gen_matrix(port_num, vessel_num)
class EcrFrame(FrameBase):
"""Our ecr frame that contains vessels, ports, and a general matrix"""
vessels = FrameNode(vessel_cls, vessel_num)
ports = FrameNode(Port, port_num)
matrix = FrameNode(matrix_cls, 1)
def __init__(self):
super().__init__(enable_snapshot=True, total_snapshot=snapshots_num)
return EcrFrame()
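# A minimal usage sketch (all counts below are placeholders, not values required
# by this module):
#
#   frame = gen_ecr_frame(port_num=22, vessel_num=46, stop_nums=(4, 3), snapshots_num=100)
#
# The returned frame holds `port_num` port nodes, `vessel_num` vessel nodes and a
# single matrix node, with snapshotting enabled for `snapshots_num` snapshots.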
|
python
|
"""Provide functionality to handle package settings."""
import logging
from enum import Enum
from os import makedirs
from os.path import isdir
from urllib.error import URLError
from urllib.parse import urlparse
from urllib.request import Request, urlopen
from dynaconf import Validator, settings
LOGGER = logging.getLogger('standardlog')
class SettingKeys(Enum):
"""Holds all required setting keys with description."""
OPENEO_VERSION = "OPENEO_VERSION"
"""The OpenEO version running - in the 'version url format'.
This is used for version urls only! So depending on how you set up version urls the format can change. E.g. v1.0
"""
AIRFLOW_HOST = "AIRFLOW_HOST"
"""The complete url to the Apache Airflow webserver as a string.
If you are running the provided docker setup use: http://airflow-webserver:8080.
"""
AIRFLOW_OUTPUT = "AIRFLOW_OUTPUT"
"""The path on the Airflow worker where data output is written to.
This path does not need to exist in the jobs service! It is only needed to write a correct dag for a job as the
absolute paths of the output directories - on the airflow worker (!) - are also written in the dag.
If you are running inside docker this path has to be inside the corresponding airflow worker container. E.g.
/data_out
"""
AIRFLOW_DAGS = "AIRFLOW_DAGS"
"""The path a folder where all dag files will be stored.
If you are running in docker the path needs to be inside the container. E.g.: /usr/src/dags
"""
SYNC_DEL_DELAY = "SYNC_DEL_DELAY"
"""Delay after which to delete sync-jobs output.
It must be at least as long as the timeouts of gunicorn and nginx. E.g. 300
"""
SYNC_RESULTS_FOLDER = "SYNC_RESULTS_FOLDER"
"""The path to the sync-results folder.
The content of this folder is also mounted to the gateway to simplify data transfer.
If you are running in docker the path needs to be inside the container. E.g.: /usr/src/sync-results
"""
WEKEO_STORAGE = "WEKEO_STORAGE"
"""The path where files downloaded via the WEkEO HDA API will be available
on the VM, where the processing engine (e.g. Airflow) executes jobs.
e.g. /usr/local/airflow/wekeo_storage
"""
# Connection to RabbitMQ
RABBIT_HOST = "RABBIT_HOST"
"""The host name of the RabbitMQ - e.g. `rabbitmq`.
If you are running in docker this is the hostname of the container!
"""
RABBIT_PORT = "RABBIT_PORT"
"""The port on which the RabbitMQ is running - e.g. `5672`.
If you are running in docker and the capabilities container is in the same network as the RabbitMQ this is the port
inside the docker network NOT the exposed one!
"""
RABBIT_USER = "RABBIT_USER"
"""The username to authenticate on the RabbitMQ - e.g. `rabbitmq`."""
RABBIT_PASSWORD = "RABBIT_PASSWORD" # noqa S105
"""The password to authenticate with the given user on the RabbitMQ."""
# Jobs Database
DB_USER = "DB_USER"
"""Database username for the jobs database."""
DB_PASSWORD = "DB_PASSWORD" # noqa S105 - not a hardcoded password only the parameter name!
"""Database user password for the jobs database matching the provided user name."""
DB_HOST = "DB_HOST"
"""Host where the jobs database is running."""
DB_PORT = "DB_PORT"
"""Port where the jobs database is running."""
DB_NAME = "DB_NAME"
"""Database name of the jobs database."""
# Additional
LOG_DIR = "LOG_DIR"
"""The path to the directory where log files should be saved.
If you are running in docker this is the path inside the docker container! E.g. `/usr/src/logs`
In case you want to persist the logs a volume or a local folder needs to be mounted into the specified location.
"""
class SettingValidationUtils:
"""Provides a set of utility functions to validated settings."""
def check_create_folder(self, folder_path: str) -> bool:
"""Create the given folder path if it does not exist, always returns True."""
if not isdir(folder_path):
makedirs(folder_path)
return True
def check_positive_int(self, value: int) -> bool:
"""Return a boolean whether a given value is a positive integer."""
return isinstance(value, int) and value > 0
def check_parse_url(self, url: str) -> bool:
"""Return a boolean whether the url could be parsed.
This is useful if a setting holding a url may not be reachable at the time of setting validation. Then this
method at least validates that a valid url is provided. E.g. the gateway will most probably not be reachable
when bringing up microservices.
"""
result = urlparse(url)
return all([result.scheme, result.netloc])
def check_url_is_reachable(self, url: str) -> bool:
"""Return a boolean whether a connection to a given url could be created."""
try:
if url.lower().startswith('http'):
req = Request(url)
with urlopen(req) as resp: # noqa
return resp.status == 200
else:
return False
except URLError:
return False
def initialise_settings() -> None:
"""Configure and validates settings.
This method is called when starting the microservice to ensure all configuration settings are properly provided.
Raises:
:class:`~dynaconf.validator.ValidationError`: A setting is not valid.
"""
not_doc = Validator("ENV_FOR_DYNACONF", is_not_in=["documentation"])
not_doc_unittest = Validator("ENV_FOR_DYNACONF", is_not_in=["documentation", "unittest"])
settings.configure(ENVVAR_PREFIX_FOR_DYNACONF="OEO")
utils = SettingValidationUtils()
settings.validators.register(
Validator(SettingKeys.OPENEO_VERSION.value, must_exist=True, when=not_doc),
Validator(SettingKeys.AIRFLOW_HOST.value, must_exist=True, condition=utils.check_parse_url,
when=(not_doc_unittest & not_doc)),
Validator(SettingKeys.AIRFLOW_OUTPUT.value, must_exist=True, when=not_doc),
Validator(SettingKeys.AIRFLOW_DAGS.value, must_exist=True, condition=utils.check_create_folder, when=not_doc),
Validator(SettingKeys.SYNC_DEL_DELAY.value, must_exist=True, is_type_of=int, condition=utils.check_positive_int,
when=not_doc),
Validator(SettingKeys.SYNC_RESULTS_FOLDER.value, must_exist=True, condition=utils.check_create_folder,
when=not_doc),
Validator(SettingKeys.WEKEO_STORAGE.value, default="", when=not_doc_unittest),
Validator(SettingKeys.RABBIT_HOST.value, must_exist=True, when=not_doc_unittest),
Validator(SettingKeys.RABBIT_PORT.value, must_exist=True, is_type_of=int, when=not_doc_unittest),
Validator(SettingKeys.RABBIT_USER.value, must_exist=True, when=not_doc_unittest),
Validator(SettingKeys.RABBIT_PASSWORD.value, must_exist=True, when=not_doc_unittest),
Validator(SettingKeys.DB_USER.value, must_exist=True, when=not_doc_unittest),
Validator(SettingKeys.DB_PASSWORD.value, must_exist=True, when=not_doc_unittest),
Validator(SettingKeys.DB_HOST.value, must_exist=True, when=not_doc_unittest),
Validator(SettingKeys.DB_PORT.value, must_exist=True, is_type_of=int, when=not_doc_unittest),
Validator(SettingKeys.DB_NAME.value, must_exist=True, when=not_doc_unittest),
Validator(SettingKeys.LOG_DIR.value, must_exist=True, condition=utils.check_create_folder,
when=not_doc_unittest),
)
settings.validators.validate()
LOGGER.info("Settings validated")
|
python
|
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module for experimental sonnet functions and classes.
This file contains functions and classes that are being tested until they're
either removed or promoted into the wider sonnet library.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
import weakref
import tensorflow as tf
from tensorflow.python.ops import variable_scope as variable_scope_ops
def reuse_vars(method):
"""Wraps an arbitrary method so it does variable sharing.
This decorator creates variables the first time it calls `method`, and reuses
them for subsequent calls. The object that calls `method` provides a
`tf.VariableScope`, either as a `variable_scope` attribute or as the return
value of an `_enter_variable_scope()` method.
The first time the wrapped method is invoked, it enters the caller's
`tf.VariableScope` with `reuse=False`. On all subsequent calls it enters the
same variable scope with `reuse=True`.
Variables are created in the context of the `tf.VariableScope` provided by the
caller object. Ops are created with an additional `tf.name_scope()`, which
adds a scope for the wrapped method name. For example:
```python
class MyModule(object):
def __init__(self, name):
with tf.variable_scope(name) as variable_scope:
self.variable_scope = variable_scope
@snt.experimental.reuse_vars
def add_x(self, tensor):
x = tf.get_variable("x", shape=tensor.get_shape())
return tensor + x
module = MyModule("my_module_name")
input_tensor = tf.zeros(shape=(5,))
# This creates the variable "my_module_name/x:0"
# and op "my_module_name/add_x/add:0"
output = module.add_x(input_tensor)
```
Args:
method: The method to wrap.
Returns:
The wrapped method.
"""
initialized_variable_scopes = weakref.WeakKeyDictionary()
# Ensure that the argument passed in is really a method by checking that the
# first positional argument to it is "self".
arg_spec = inspect.getargspec(method)
is_method = arg_spec.args and arg_spec.args[0] == "self"
if not is_method:
raise TypeError("reuse_vars can only be used with methods.")
@functools.wraps(method)
def wrapper(*args, **kwargs):
"""Calls `method` with a variable scope whose reuse flag is set correctly.
The first time the wrapper is called it creates a
`(tf.Graph, tf.VariableScope)` key and checks it for membership in
`initialized_variable_scopes`. The check is `False` if and only if this is
the first time the wrapper has been called with the key, otherwise the
check is `True`. The result of this check is used as the `reuse` flag for
entering the provided variable scope before calling `method`.
Here are two examples of how to use the reuse_vars decorator.
1. Decorate an arbitrary instance method with a `variable_scope` attribute:
```python
class Reusable(object):
def __init__(self, name):
with tf.variable_scope(name) as vs:
self.variable_scope = vs
@snt.experimental.reuse_vars
def add_a(self, input_tensor):
a = tf.get_variable("a", shape=input_tensor.get_shape())
return a + input_tensor
obj = Reusable("reusable")
x = tf.constant(5.0)
out1 = obj.add_a(x)
out2 = obj.add_a(x)
# out1 == out2
```
2. Decorating a snt.AbstractModule instance method:
```python
class ReusableModule(snt.AbstractModule):
@snt.experimental.reuse_vars
def add_a(self, input_tensor):
a = tf.get_variable("a", shape=input_tensor.get_shape())
return a + input_tensor
# We don't need @snt.experimental.reuse_vars here because _build is
# wrapped by `tf.make_template` inside `snt.AbstractModule`.
def _build(self, input_tensor):
b = tf.get_variable("b", shape=input_tensor.get_shape())
return b + self.add_a(input_tensor)
obj = ReusableModule("reusable_module")
x = tf.constant(5.0)
out1 = obj(x)
out2 = obj(x)
# out1 == out2
```
Args:
*args: The positional arguments (Tensors) passed to the wrapped method.
**kwargs: The keyword arguments passed to the wrapped method.
Returns:
Output of the wrapped method.
Raises:
ValueError: If no variable scope is provided or if `method` is a method
and a variable_scope keyword argument is also provided.
"""
obj = args[0]
def default_context_manager(reuse=None):
variable_scope = obj.variable_scope
return tf.variable_scope(variable_scope, reuse=reuse)
variable_scope_context_manager = getattr(obj, "_enter_variable_scope",
default_context_manager)
graph = tf.get_default_graph()
if graph not in initialized_variable_scopes:
initialized_variable_scopes[graph] = set([])
initialized_variable_scopes_for_graph = initialized_variable_scopes[graph]
# Temporarily enter the variable scope to capture it
with variable_scope_context_manager() as tmp_variable_scope:
variable_scope = tmp_variable_scope
reuse = variable_scope.name in initialized_variable_scopes_for_graph
# Enter the pure variable scope with reuse correctly set
with variable_scope_ops._pure_variable_scope( # pylint:disable=protected-access
variable_scope, reuse=reuse) as pure_variable_scope:
# Force tf.name_scope to treat variable_scope.original_name_scope as
# an "absolute" scope name so we can re-enter it.
name_scope = variable_scope.original_name_scope
if name_scope[-1] != "/":
name_scope += "/"
with tf.name_scope(name_scope):
with tf.name_scope(method.__name__):
out_ops = method(*args, **kwargs)
initialized_variable_scopes_for_graph.add(pure_variable_scope.name)
return out_ops
return wrapper
|
python
|
#!/usr/bin/env python
"""
(C) 2007-2019 1024jp
"""
import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
_flags = (cv2.CALIB_ZERO_TANGENT_DIST |
cv2.CALIB_FIX_K3
)
class Undistorter:
def __init__(self, camera_matrix, dist_coeffs, rvecs, tvecs, image_size,
new_camera_matrix=None):
self.camera_matrix = camera_matrix
self.dist_coeffs = dist_coeffs
self.rvecs = rvecs
self.tvecs = tvecs
self.image_size = image_size
if new_camera_matrix is not None:
self.new_camera_matrix = new_camera_matrix
else:
self.__get_new_camera_matrix()
@classmethod
def init(cls, image_points, dest_points, image_size):
dest_points = [(x, y, 0) for x, y, z in dest_points]
_, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(
[np.float32([dest_points])],
[np.float32([image_points])],
image_size, None, None, flags=_flags)
return cls(camera_matrix, dist_coeffs, rvecs, tvecs, image_size)
@classmethod
def load(cls, f):
return pickle.load(f)
def save(self, f):
pickle.dump(self, f)
def calibrate_points(self, points):
dest = cv2.undistortPoints(np.array([points]), self.camera_matrix,
self.dist_coeffs,
P=self.new_camera_matrix)
return np.squeeze(dest)
def undistort_image(self, image):
return cv2.undistort(image, self.camera_matrix, self.dist_coeffs,
newCameraMatrix=self.new_camera_matrix)
def show_map(self):
interval = 200
size = self.image_size
w, h = np.meshgrid(range(0, size[0], interval),
range(0, size[1], interval))
points = np.vstack((w.flatten(), h.flatten())).T.astype('float32')
new_points = self.calibrate_points(points)
plt.scatter(points[:, 0], points[:, 1], 20, 'b', alpha=.5)
plt.scatter(new_points[:, 0], new_points[:, 1], 20, 'r', alpha=.5)
plt.axes().set_aspect('equal', 'datalim')
plt.show()
def __get_new_camera_matrix(self):
self.new_camera_matrix = cv2.getOptimalNewCameraMatrix(
self.camera_matrix, self.dist_coeffs, self.image_size, 0)[0]
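# A minimal usage sketch (point lists, image size and file names are placeholders):
#
#   und = Undistorter.init(image_points, dest_points, (1920, 1080))
#   with open('calibration.pickle', 'wb') as f:
#       und.save(f)
#   ...
#   with open('calibration.pickle', 'rb') as f:
#       und = Undistorter.load(f)
#   straightened = und.undistort_image(cv2.imread('frame.png'))
#   und.show_map()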
|
python
|
#!/usr/bin/env python
names = ["Amy", "Bob", "Cathy", "David", "Eric"]
for x in names:
print("Hello " + x)
|
python
|
# coding: utf-8
from __future__ import unicode_literals
import pytest
from django.contrib.auth.models import AnonymousUser
from mock import Mock
from saleor.cart import decorators
from saleor.cart.models import Cart
from saleor.checkout.core import Checkout
from saleor.discount.models import Voucher
from saleor.product.models import Product, ProductVariant, Stock
from saleor.shipping.models import ShippingMethod
from saleor.userprofile.models import Address, User
@pytest.fixture
def cart(db): # pylint: disable=W0613
return Cart.objects.create()
@pytest.fixture
def customer_user(db): # pylint: disable=W0613
return User.objects.create_user('[email protected]', 'password')
@pytest.fixture
def request_cart(cart, monkeypatch):
monkeypatch.setattr(
decorators, 'get_cart_from_request',
lambda request, create=False: cart)
return cart
@pytest.fixture
def request_cart_with_item(product_in_stock, request_cart):
variant = product_in_stock.variants.get()
# Prepare some data
request_cart.add(variant)
return request_cart
@pytest.fixture()
def admin_user(db): # pylint: disable=W0613
"""A Django admin user.
"""
return User.objects.create_superuser('[email protected]', 'password')
@pytest.fixture()
def admin_client(admin_user):
"""A Django test client logged in as an admin user."""
from django.test.client import Client
client = Client()
client.login(username=admin_user.email, password='password')
return client
@pytest.fixture()
def authorized_client(client, customer_user):
client.login(username=customer_user.email, password='password')
return client
@pytest.fixture
def billing_address(db): # pylint: disable=W0613
return Address.objects.create(
first_name='John', last_name='Doe',
company_name='Mirumee Software',
street_address_1='Tęczowa 7',
city='Wrocław',
postal_code='53-601',
country='PL')
@pytest.fixture
def shipping_method(db): # pylint: disable=W0613
shipping_method = ShippingMethod.objects.create(name='DHL')
shipping_method.price_per_country.create(price=10)
return shipping_method
@pytest.fixture
def product_in_stock(db): # pylint: disable=W0613
product = Product.objects.create(
name='Test product', price=10, weight=1)
variant = ProductVariant.objects.create(product=product, sku='123')
Stock.objects.create(
variant=variant, cost_price=1, quantity=5, quantity_allocated=5,
location='Warehouse 1')
Stock.objects.create(
variant=variant, cost_price=100, quantity=5, quantity_allocated=5,
location='Warehouse 2')
Stock.objects.create(
variant=variant, cost_price=10, quantity=5, quantity_allocated=0,
location='Warehouse 3')
return product
@pytest.fixture
def anonymous_checkout():
return Checkout(Mock(), AnonymousUser(), 'tracking_code')
@pytest.fixture
def voucher(db): # pylint: disable=W0613
return Voucher.objects.create(code='mirumee', discount_value=20)
|
python
|
#!/usr/bin/env python
from re import sub
from sys import argv,exit
from os import path,getenv
from glob import glob
import argparse
parser = argparse.ArgumentParser(description='make forest')
parser.add_argument('--region',metavar='region',type=str,default=None)
toProcess = parser.parse_args().region
argv=[]
import ROOT as root
from PandaCore.Tools.Misc import *
from PandaCore.Tools.Load import *
import PandaCore.Tools.Functions # kinematics
import PandaAnalysis.Monotop.CombinedSelection as sel
Load('PandaAnalysisFlat','LimitTreeBuilder')
baseDir = getenv('PANDA_FLATDIR')
lumi = 36560
factory = root.LimitTreeBuilder()
if toProcess:
factory.SetOutFile(baseDir+'/limits/limitForest_%s.root'%toProcess)
else:
factory.SetOutFile(baseDir+'/limits/limitForest_all.root')
def dataCut(basecut,trigger):
return tAND(trigger,basecut)
treelist = []
def getTree(fpath):
global treelist
fIn = root.TFile(baseDir+fpath+'.root')
tIn = fIn.Get('events')
treelist.append(tIn)
return tIn,fIn
def enable(regionName):
if toProcess:
return (toProcess==regionName)
else:
return True
def shiftBtags(label,tree,varmap,cut,baseweight):
ps = []
for shift in ['BUp','BDown','MUp','MDown']:
for cent in ['sf_btag','sf_sjbtag']:
shiftedlabel = '_'
if 'sj' in cent:
shiftedlabel += 'sj'
if 'B' in shift:
shiftedlabel += 'btag'
else:
shiftedlabel += 'mistag'
if 'Up' in shift:
shiftedlabel += 'Up'
else:
shiftedlabel += 'Down'
weight = sel.weights[baseweight+'_'+cent+shift]%lumi
shiftedProcess = root.Process(label,tree,varmap,cut,weight)
shiftedProcess.syst = shiftedlabel
ps.append(shiftedProcess)
return ps
# input
tZll,fZll = getTree('ZJets')
tZvv,fZvv = getTree('ZtoNuNu')
tWlv,fWlv = getTree('WJets')
tPho,fPho = getTree('GJets')
tTTbar,fTT = getTree('TTbar')
tVV,fVV = getTree('Diboson')
tQCD,fQCD = getTree('QCD')
tST,fST = getTree('SingleTop')
tMET,fMET = getTree('MET')
tSingleEle,fSEle = getTree('SingleElectron')
tSinglePho,fSPho = getTree('SinglePhoton')
#tSig,fSig = getTree('monotop-nr-v3-1700-100_med-1700_dm-100') # this is just a sample point
tAllSig = {}; fAllSig = {}
if enable('signal'):
signalFiles = glob(baseDir+'/Vector*root')
for f in signalFiles:
fname = f.split('/')[-1].replace('.root','')
signame = fname
replacements = {
'Vector_MonoTop_NLO_Mphi-':'',
'_gSM-0p25_gDM-1p0_13TeV-madgraph':'',
'_Mchi-':'_',
}
for k,v in replacements.iteritems():
signame = signame.replace(k,v)
tAllSig[signame],fAllSig[signame] = getTree(fname)
factory.cd()
regions = {}
processes = {}
vms = {}
for region_type,met_type,phi_type in [('signal','pfmet','pfmetphi'),
('w','pfUWmag','pfUWphi'),
('z','pfUZmag','pfUZphi'),
('a','pfUAmag','pfUAphi')]:
vms[region_type] = root.VariableMap()
vms[region_type].AddVar('met',met_type)
# vms[region_type].AddVar('metphi',phi_type)
vms[region_type].AddVar('genBosonPt','genBosonPt')
# vms[region_type].AddVar('genBosonPhi','genBosonPhi')
for x in ['fj1Tau32','top_ecf_bdt']:
vms[region_type].AddVar(x,x)
# test region
if enable('test'):
regions['test'] = root.Region('test')
cut = sel.cuts['signal']
weight = sel.weights['signal']%lumi
processes['test'] = [
root.Process('Data',tMET,vms['signal'],dataCut(cut,sel.triggers['met']),'1'),
root.Process('Diboson',tVV,vms['signal'],cut,weight),
]
btag_shifts = []
for p in processes['test']:
if p.name=='Data':
continue
btag_shifts += shiftBtags(p.name,p.GetInput(),vms['signal'],cut,'signal')
processes['test'] += btag_shifts
for p in processes['test']:
regions['test'].AddProcess(p)
factory.AddRegion(regions['test'])
# signal region
if enable('signal'):
regions['signal'] = root.Region('signal')
cut = sel.cuts['signal']
weight = sel.weights['signal']%lumi
vm = vms['signal']
processes['signal'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
# root.Process('signal',tSig,vm,cut,weight),
]
for signame,tsig in tAllSig.iteritems():
processes['signal'].append( root.Process(signame,tsig,vm,cut,weight) )
btag_shifts = []
for p in processes['signal']:
if p.name=='Data':
continue
btag_shifts += shiftBtags(p.name,p.GetInput(),vm,cut,'signal')
#btag_shifts += shiftCSV(p.name,p.GetInput(),vm,cut,'signal')
processes['signal'] += btag_shifts
for p in processes['signal']:
regions['signal'].AddProcess(p)
factory.AddRegion(regions['signal'])
#singlemuonw
if enable('singlemuonw'):
regions['singlemuonw'] = root.Region('singlemuonw')
cut = sel.cuts['singlemuonw']
weight = sel.weights['singlemuonw']%lumi
vm = vms['w']
processes['singlemuonw'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
btag_shifts = []
for p in processes['singlemuonw']:
if p.name=='Data':
continue
btag_shifts += shiftBtags(p.name,p.GetInput(),vm,cut,'singlemuonw')
#btag_shifts += shiftCSV(p.name,p.GetInput(),vm,cut,'singlemuonw')
processes['singlemuonw'] += btag_shifts
for p in processes['singlemuonw']:
regions['singlemuonw'].AddProcess(p)
factory.AddRegion(regions['singlemuonw'])
#singleelectronw
if enable('singleelectronw'):
regions['singleelectronw'] = root.Region('singleelectronw')
cut = sel.cuts['singleelectronw']
weight = sel.weights['singleelectronw']%lumi
vm = vms['w']
processes['singleelectronw'] = [
root.Process('Data',tSingleEle,vm,dataCut(cut,sel.triggers['ele']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
btag_shifts = []
for p in processes['singleelectronw']:
if p.name=='Data':
continue
btag_shifts += shiftBtags(p.name,p.GetInput(),vm,cut,'singleelectronw')
#btag_shifts += shiftCSV(p.name,p.GetInput(),vm,cut,'singleelectronw')
processes['singleelectronw'] += btag_shifts
for p in processes['singleelectronw']:
regions['singleelectronw'].AddProcess(p)
factory.AddRegion(regions['singleelectronw'])
#singlemuontop
if enable('singlemuontop'):
regions['singlemuontop'] = root.Region('singlemuontop')
cut = sel.cuts['singlemuontop']
weight = sel.weights['singlemuontop']%lumi
vm = vms['w']
processes['singlemuontop'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
btag_shifts = []
for p in processes['singlemuontop']:
if p.name=='Data':
continue
btag_shifts += shiftBtags(p.name,p.GetInput(),vm,cut,'singlemuontop')
#btag_shifts += shiftCSV(p.name,p.GetInput(),vm,cut,'singlemuontop')
processes['singlemuontop'] += btag_shifts
for p in processes['singlemuontop']:
regions['singlemuontop'].AddProcess(p)
factory.AddRegion(regions['singlemuontop'])
#singleelectrontop
if enable('singleelectrontop'):
regions['singleelectrontop'] = root.Region('singleelectrontop')
cut = sel.cuts['singleelectrontop']
weight = sel.weights['singleelectrontop']%lumi
vm = vms['w']
processes['singleelectrontop'] = [
root.Process('Data',tSingleEle,vm,dataCut(cut,sel.triggers['ele']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
btag_shifts = []
for p in processes['singleelectrontop']:
if p.name=='Data':
continue
btag_shifts += shiftBtags(p.name,p.GetInput(),vm,cut,'singleelectrontop')
#btag_shifts += shiftCSV(p.name,p.GetInput(),vm,cut,'singleelectrontop')
processes['singleelectrontop'] += btag_shifts
for p in processes['singleelectrontop']:
regions['singleelectrontop'].AddProcess(p)
factory.AddRegion(regions['singleelectrontop'])
#dimuon
if enable('dimuon'):
regions['dimuon'] = root.Region('dimuon')
cut = sel.cuts['dimuon']
weight = sel.weights['dimuon']%lumi
vm = vms['z']
processes['dimuon'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
for p in processes['dimuon']:
regions['dimuon'].AddProcess(p)
factory.AddRegion(regions['dimuon'])
#dielectron
if enable('dielectron'):
regions['dielectron'] = root.Region('dielectron')
cut = sel.cuts['dielectron']
weight = sel.weights['dielectron']%lumi
vm = vms['z']
processes['dielectron'] = [
root.Process('Data',tSingleEle,vm,dataCut(cut,sel.triggers['ele']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
for p in processes['dielectron']:
regions['dielectron'].AddProcess(p)
factory.AddRegion(regions['dielectron'])
#photon
if enable('photon'):
regions['photon'] = root.Region('photon')
cut = sel.cuts['photon']
weight = sel.weights['photon']%lumi
vm = vms['a']
processes['photon'] = [
root.Process('Data',tSinglePho,vm,dataCut(cut,sel.triggers['pho']),'1'),
root.Process('Pho',tPho,vm,cut,weight),
root.Process('QCD',tSinglePho,vm,dataCut(cut,sel.triggers['pho']),'sf_phoPurity'),
]
for p in processes['photon']:
regions['photon'].AddProcess(p)
factory.AddRegion(regions['photon'])
PInfo('makeLimitForest','Starting '+str(toProcess))
factory.Run()
PInfo('makeLimitForest','Finishing '+str(toProcess))
factory.Output()
PInfo('makeLimitForest','Outputted '+str(toProcess))
|
python
|
from tensorflow.keras import initializers
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
def build_subnet(output_filters, bias_initializer='zeros', name=None):
"""构建功能子网络.
Args:
output_filters: int,
功能子网络输出层的神经元数量.
bias_initializer: str or tf.keras.initializers.Initializer instance, default='zeros',
网络层偏置项初始化器.
name: (可选) str, default=None, 功能子网络名称.
Return:
tf.keras.models.Sequential, 功能子网络实例.
"""
model = Sequential(name=name)
model.add(layers.InputLayer(input_shape=[None, None, 256])) # input features are the FPN feature maps at each level.
_kernel_initializer = initializers.RandomNormal(mean=0.0, stddev=0.01) # Gaussian initialization.
for _ in range(4):
model.add(layers.Conv2D(filters=256,
kernel_size=[3, 3],
strides=(1, 1),
padding='same',
kernel_initializer=_kernel_initializer))
model.add(layers.ReLU())
model.add(layers.Conv2D(filters=output_filters,
kernel_size=[3, 3],
strides=(1, 1),
padding='same',
kernel_initializer=_kernel_initializer,
bias_initializer=bias_initializer))
return model
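# A minimal usage sketch; the anchor and class counts follow the common RetinaNet
# head layout and are assumptions here, not values enforced by this module.
if __name__ == '__main__':
    num_anchors, num_classes = 9, 80
    # Classification head: one score per anchor per class.
    cls_subnet = build_subnet(num_anchors * num_classes, name='classification_subnet')
    # Box regression head: four offsets per anchor.
    box_subnet = build_subnet(num_anchors * 4, name='box_regression_subnet')
    cls_subnet.summary()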
|
python
|
from .seld import *
|
python
|
import numpy as np
class UnionFind:
def __init__(self, n):
self.n = n
self.parent = np.arange(n)
self.rank = np.zeros(n, dtype=np.int32)
self.csize = np.ones(n, dtype=np.int32)
def find(self, u):
v = u
while u != self.parent[u]:
u = self.parent[u]
while v != self.parent[v]:
t = self.parent[v]
self.parent[v] = u
v = t
return u
def union(self, u, v):
u = self.find(u)
v = self.find(v)
if u != v:
if self.rank[u] < self.rank[v]:
self.parent[u] = v
self.csize[v] += self.csize[u]
else:
self.parent[v] = u
self.csize[u] += self.csize[v]
if self.rank[u] == self.rank[v]:
self.rank[u] += 1
def is_same_set(self, u, v):
return self.find(u) == self.find(v)
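# A minimal usage sketch (element indices are illustrative):
if __name__ == '__main__':
    uf = UnionFind(5)
    uf.union(0, 1)
    uf.union(3, 4)
    print(uf.is_same_set(0, 1))   # True
    print(uf.is_same_set(1, 3))   # False
    print(uf.csize[uf.find(0)])   # 2 -- size of the component containing element 0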
|
python
|
import logging.config
from .Camera import Camera
import time
from io import BytesIO
from PIL import Image
from dateutil import zoneinfo
timezone = zoneinfo.get_zonefile_instance().get("Australia/Canberra")
try:
logging.config.fileConfig("/etc/eyepi/logging.ini")
except:
pass
try:
import picamera
import picamera.array
except Exception as e:
logging.error("Couldnt import picamera module, no picamera camera support: {}".format(str(e)))
pass
class PiCamera(Camera):
"""
Picamera extension to the Camera abstract class.
"""
@classmethod
def stream_thread(cls):
"""
Streaming thread member.
uses :func:`picamera.PiCamera.capture_continuous` to stream data from the rpi camera video port.
:func:`time.sleep` added to rate limit a little bit.
"""
import picamera
print("start thread")
try:
with picamera.PiCamera() as camera:
# camera setup
camera.resolution = (640, 480)
# camera.hflip = True
# camera.vflip = True
# let camera warm up
camera.start_preview()
time.sleep(2)
stream = BytesIO()
for foo in camera.capture_continuous(stream, 'jpeg',
use_video_port=True):
# store frame
stream.seek(0)
cls._frame = stream.read()
# reset stream for next frame
stream.seek(0)
stream.truncate()
# if no client has asked for a frame in the
# last second, stop the thread
time.sleep(0.01)
if time.time() - cls._last_access > 1:
break
except Exception as e:
print("Couldnt acquire camera")
print("Closing Thread")
cls._thread = None
def set_camera_settings(self, camera):
"""
Sets the camera resolution to the max resolution.
If the config provides camera/width and camera/height, attempts to set the resolution to that.
If the config provides camera/iso, attempts to set the ISO to that.
If the config provides camera/shutter_speed, attempts to set the shutter speed to that.
:param picamera.PiCamera camera: picamera camera instance to modify
"""
try:
camera.resolution = camera.MAX_RESOLUTION
if type(self.config) is dict:
if hasattr(self, "width") and hasattr(self, "height"):
camera.resolution = (int(self.width),
int(self.height))
if "width" in self.config and "height" in self.config:
camera.resolution = (int(self.config['width']),
int(self.config['height']))
camera.shutter_speed = getattr(self, "shutter_speed", camera.shutter_speed)
camera.iso = getattr(self, "iso", camera.iso)
else:
if self.config.has_option("camera", "width") and self.config.has_option("camera", "height"):
camera.resolution = (self.config.getint("camera", "width"),
self.config.getint("camera", "height"))
if self.config.has_option("camera", "shutter_speed"):
camera.shutter_speed = self.config.getfloat("camera", "shutter_speed")
if self.config.has_option("camera", "iso"):
camera.iso = self.config.getint("camera", "iso")
except Exception as e:
self.logger.error("error setting picamera settings: {}".format(str(e)))
def capture_image(self, filename: str = None):
"""
Captures image using the Raspberry Pi Camera Module, at either max resolution, or resolution
specified in the config file.
Writes images to disk using :func:`encode_write_image`, so it should write out to all supported image formats
automatically.
:param filename: image filename without extension
:return: list of written files if filename is specified, otherwise None (the captured image is kept in self._image).
:rtype: list or None
"""
st = time.time()
try:
with picamera.PiCamera() as camera:
with picamera.array.PiRGBArray(camera) as output:
time.sleep(2) # Camera warm-up time
self.set_camera_settings(camera)
time.sleep(0.2)
# self._image = numpy.empty((camera.resolution[1], camera.resolution[0], 3), dtype=numpy.uint8)
camera.capture(output, 'rgb')
# self._image = output.array
self._image = Image.fromarray(output.array)
# self._image = cv2.cvtColor(self._image, cv2.COLOR_BGR2RGB)
if filename:
filenames = self.encode_write_image(self._image, filename)
self.logger.debug("Took {0:.2f}s to capture".format(time.time() - st))
return filenames
else:
self.logger.debug("Took {0:.2f}s to capture".format(time.time() - st))
except Exception as e:
self.logger.critical("EPIC FAIL, trying other method. {}".format(str(e)))
return None
return None
|
python
|
# Build a simple login page
from tkinter import *
root = Tk()
root.title("登陆页面")
msg = "欢迎进入海绵宝宝系统"
sseGif = PhotoImage(file="../img/hmbb1.gif")
logo = Label(root,image=sseGif,text=msg, compound=BOTTOM)
logo.grid(row=0,column=0,columnspan=2,padx=10,pady=10)
accountL = Label(root,text="Account")
accountL.grid(row=1)
pwdL = Label(root,text="Password")
pwdL.grid(row=2)
accountE = Entry(root)
accountE.grid(row=1,column=1)
accountE.insert(0,"海绵宝宝") # insert the default text into the entry at position 0
pwdE = Entry(root,show="*")
pwdE.grid(row=2,column=1,pady=10)
pwdE.insert(0,"hmbb") # insert the default text into the entry at position 0
# LOGIN QUIT
def printInfo():
print("Account: %s\nPassword: %s" %(accountE.get(), pwdE.get()))
accountE.delete(0,END) # delete the entry contents from position 0 to END
pwdE.delete(0,END) # delete the entry contents from position 0 to END
loginbtn = Button(root,text="Login",command=printInfo)
loginbtn.grid(row=3,column=0,sticky=W,padx=10,pady=10)
quitbtn = Button(root,text="Quit",command=root.quit)
quitbtn.grid(row=3,column=1,sticky=W,padx=10,pady=10)
root.mainloop()
#86
|
python
|
from setuptools import setup
setup(
name='YinPortfolioManagement',
author='Yiqiao Yin',
version='1.0.0',
description="This package uses Long Short-Term Memory (LSTM) to forecast a stock price that user enters.",
packages=['YinCapital_forecast']
)
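# A minimal usage sketch (assumes a YinCapital_forecast/ package directory sits
# next to this setup.py):
#
#   pip install .
#   # or build a source distribution first:
#   python setup.py sdist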
|
python
|
import analysis
#Create histogram
analysis.histogram()
#Create scatterplot
analysis.scatterplot("sepal_length","sepal_width")
analysis.scatterplot("petal_length","petal_width")
analysis.pair_plot()
#Create summary.txt
analysis.writeToAFile()
|
python
|
import os
from apscheduler.schedulers.blocking import BlockingScheduler
sched = BlockingScheduler()
@sched.scheduled_job('cron', hour=9)
def scheduled_job():
os.system("python manage.py runbot")
sched.start()
|
python
|
from django.db import models
from i18nfield.fields import I18nCharField
from pretalx.common.mixins import LogMixin
from pretalx.common.urls import EventUrls
class Track(LogMixin, models.Model):
event = models.ForeignKey(
to='event.Event', on_delete=models.PROTECT, related_name='tracks'
)
name = I18nCharField(max_length=200)
color = models.CharField(max_length=7)
class urls(EventUrls):
base = edit = '{self.event.cfp.urls.tracks}{self.pk}'
delete = '{base}delete'
def __str__(self) -> str:
return str(self.name)
|
python
|
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy import Column, Integer, String, Boolean, BigInteger, Text, ForeignKey, Float
from sqlalchemy.orm import relationship
#from sqlalchemy_views import CreateView, DropView
# connection.execute("CREATE TABLE `jobs` (`clock` BIGINT(11),`jobid` VARCHAR(50),`type` TEXT,`username` TEXT,`status` INT,`data` TEXT,`error` TEXT,`reqid` TEXT, PRIMARY KEY(jobid))")
# connection.execute("CREATE TABLE `data_raw` (`clock` BIGINT(11), PRIMARY KEY(clock))")
# connection.execute("CREATE TABLE `data_finalized` (`clock` BIGINT(11), PRIMARY KEY(clock))")
# connection.execute("CREATE TABLE `data_processed` (`clock` BIGINT(11), PRIMARY KEY(clock))")
# connection.execute("CREATE TABLE `data_other` (`clock` BIGINT(11), PRIMARY KEY(clock))")
# connection.execute(text("INSERT INTO `daqbroker_settings`.`databases` VALUES(:dbname,'0')"),dbname=newdbname)
daqbroker_database = declarative_base()
#connection.execute("CREATE TABLE `instruments` ( `Name` text NOT NULL, `instid` int(11) NOT NULL, `active` int(11) NOT NULL, `description` text NOT NULL, `username` text NOT NULL, `email` text NOT NULL, `insttype` int(11) NOT NULL,`log` text, PRIMARY KEY (`instid`))")
class instruments(daqbroker_database):
__tablename__ = "instruments"
Name = Column(String(50))
instid = Column(Integer, primary_key=True)
active = Column(Boolean)
description = Column(Text)
username = Column(Text)
email = Column(Text)
insttype = Column(Integer)
log = Column(Text)
sources = relationship("instmeta", backref="meta", cascade="all, delete, delete-orphan", order_by="instmeta.metaid")
#connection.execute("CREATE TABLE `instmeta` (`clock` bigint(11) NOT NULL DEFAULT '0', `name` varchar(50) NOT NULL, `metaid` int(11) NOT NULL DEFAULT '0', `instid` int(11) NOT NULL DEFAULT '0', `type` int(11) DEFAULT '0', `node` varchar(50) NOT NULL, `remarks` text, `sentRequest` tinyint(1) DEFAULT '0', `lastAction` bigint(11) NOT NULL DEFAULT '0', `lasterrortime` bigint(11) NOT NULL DEFAULT '0', `lasterror` text, `lockSync` tinyint(1) DEFAULT '0', PRIMARY KEY (`instid`,`metaid`,`name`))")
class instmeta(daqbroker_database):
__tablename__ = "instmeta"
clock = Column(BigInteger)
name = Column(String(50))
metaid = Column(Integer, primary_key=True)
instrument_id = Column(Integer, ForeignKey('instruments.instid'))
type = Column(Integer)
node = Column(String(50))
remarks = Column(Text)
sentRequest = Column(Boolean)
lastAction = Column(BigInteger)
lasterrortime = Column(BigInteger)
lasterror = Column(Text)
lockSync = Column(Boolean)
channels = relationship("channels", backref="chann", cascade="all, delete, delete-orphan", order_by="channels.channelid")
parsing = relationship("parsing", backref="metaParse", cascade="all, delete, delete-orphan")
#connection.execute("CREATE TABLE `channels` (`Name` text NOT NULL, `channelid` int(11) NOT NULL, `channeltype` int(11) NOT NULL, `valuetype` int(11) NOT NULL DEFAULT '0', `units` text NOT NULL, `instid` int(11) NOT NULL, `description` text NOT NULL, `active` int(11) NOT NULL, `remarks` text NOT NULL, `metaid` INT, `lastclock` BIGINT(11) NOT NULL DEFAULT 0, `lastValue` text, `fileorder` int(11) DEFAULT 0,`alias` text NOT NULL,`firstClock` BIGINT(11) DEFAULT 10000000000000, PRIMARY KEY (channelid,metaid,instid))")
class channels(daqbroker_database):
__tablename__ = "channels"
Name = Column(Text)
channelid = Column(Integer, primary_key=True)
channeltype = Column(Integer)
valuetype = Column(Integer)
units = Column(Text)
description = Column(Text)
active = Column(Boolean)
remarks = Column(Text)
metaid = Column(Integer, ForeignKey('instmeta.metaid'))
lastclock = Column(BigInteger)
lastValue = Column(Text)
firstClock = Column(BigInteger)
fileorder = Column(Text)
alias = Column(Text)
#chann = relationship("instmeta", back_populates="channels")
#connection.execute("CREATE TABLE `parsing` (clock BIGINT(11),lastAction BIGINT(11), `metaid` INT(11) , `instid` INT(11), `type` INT(11), `locked` INT(11), `forcelock` BOOLEAN DEFAULT '0', `remarks` MEDIUMTEXT, PRIMARY KEY (metaid,instid))")
class parsing(daqbroker_database):
__tablename__ = "parsing"
clock = Column(BigInteger)
metaid = Column(Integer, ForeignKey('instmeta.metaid'), primary_key=True)
type = Column(Integer)
locked = Column(Boolean)
forcelock = Column(Boolean)
remarks = Column(Text)
#metaParse = relationship("instmeta", back_populates="parsing")
#connection.execute("CREATE TABLE `plots` ( `plotname` varchar(200) NOT NULL, `plotid` int(11) NOT NULL, `channelids` text NOT NULL, `plottype` int(11) NOT NULL, `adminPlot` int(11) NOT NULL, `active` int(11) NOT NULL, `remarks` text NOT NULL, PRIMARY KEY (`plotname`,`plotid`))")
class plots(daqbroker_database):
__tablename__ = "plots"
plotname = Column(String(200))
plotid = Column(Integer, primary_key=True)
channelids = Column(Text)
plottype = Column(Integer)
adminPlot = Column(Boolean)
active = Column(Boolean)
remarks = Column(Text)
#connection.execute("CREATE TABLE `plotcomments` (`clock` BIGINT(11),`plotid` INT,`channelid` INT,`comment` TEXT,`author` TEXT,`remarks` TEXT, PRIMARY KEY(clock,plotid,channelid))")
class plotcomments(daqbroker_database):
__tablename__ = "plotcomments"
clock = Column(BigInteger, primary_key=True)
plotid = Column(Integer, primary_key=True)
channelid = Column(Integer, primary_key=True)
comment = Column(Text)
author = Column(Text)
remarks = Column(Text)
#connection.execute("CREATE TABLE `layouts` (`Name` varchar(50) NOT NULL,`layoutid` int(11) NOT NULL,`plots` text NOT NULL,`format` text NOT NULL,PRIMARY KEY (`layoutid`) USING BTREE,UNIQUE KEY `Name` (`Name`))")
class layouts(daqbroker_database):
__tablename__ = "layouts"
Name = Column(String(200))
layoutid = Column(Integer, primary_key=True)
plots = Column(Text)
format = Column(Text)
plottype = Column(Integer)
adminPlot = Column(Boolean)
active = Column(Boolean)
remarks = Column(Text)
#connection.execute("CREATE TABLE `collections` (`Name` VARCHAR(30),`channels` TEXT,`remarks` TEXT, PRIMARY KEY(Name))")
class collections(daqbroker_database):
__tablename__ = "collections"
Name = Column(String(200), primary_key=True)
channels = Column(Text)
remarks = Column(Text)
#connection.execute("CREATE TABLE `runs` (`clock` BIGINT(11),`lastUpdate` BIGINT(11),`isLinked` INT(11),`linkRemarks` TEXT,`linkType` INT(11),`runlistRemarks` TEXT, PRIMARY KEY (clock))")
class runs(daqbroker_database):
__tablename__ = "runs"
clock = Column(BigInteger, primary_key=True)
lastUpdate = Column(BigInteger)
isLinked = Column(Boolean)
linkRemarks = Column(Text)
linkType = Column(Integer)
runlistRemarks = Column(Text)
#connection.execute("CREATE TABLE `runlist` (`start` BIGINT(11),`end` BIGINT(11),`run` VARCHAR(50),`summary` LONGTEXT,`comments` TEXT,`active` INT, PRIMARY KEY(run))")
class runlist(daqbroker_database):
__tablename__ = "runlist"
start = Column(BigInteger)
end = Column(BigInteger)
run = Column(String(20), primary_key=True)
lastUpdate = Column(BigInteger)
summary = Column(Text)
comments = Column(Text)
active = Column(Boolean)
#connection.execute("CREATE TABLE `subscribers` (`email` VARCHAR(100), PRIMARY KEY(email))")
class subscribers(daqbroker_database):
__tablename__ = "subscribers"
email = Column(String(200), primary_key=True)
class instTable(object):
def __init__(self, cols):
for key in cols:
setattr(self, key, cols[key])
def __repr__(self):
return "<instTable class>"
def createInstrumentTable(iname, cols, isNew):
attrDictData = {'__tablename__': iname + '_data', 'clock': Column(BigInteger, primary_key=True)} # For raw data
attrDictCustom = {
'__tablename__': iname + '_custom',
'clock': Column(
BigInteger,
primary_key=True)} # For custom (processed) data
for col in cols:
if col["type"] == 1:
attrDictData[col["name"]] = Column(Float)
if col["type"] == 2:
attrDictData[col["name"]] = Column(Text)
if col["type"] == 3:
attrDictCustom[col["name"]] = Column(Float)
#tableClassData = type (instTable, (attrDictData)
if not isNew:
tableClassData = type(iname + '_data', (instTable,), attrDictData)
tableClassCustom = type(iname + '_custom', (instTable,), attrDictCustom)
else:
tableClassData = type(iname + '_data', (daqbroker_database,), attrDictData)
tableClassCustom = type(iname + '_custom', (daqbroker_database,), attrDictCustom)
return (tableClassData,tableClassCustom)
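# A minimal usage sketch of createInstrumentTable (not called by default; the instrument
# name and column spec below are hypothetical). `cols` is assumed to be a list of dicts
# with "name" and "type", where type 1 is a raw float column, 2 a raw text column and
# 3 a processed float column, matching the branches above.
def _example_create_instrument_table():
    example_cols = [
        {"name": "temperature", "type": 1},      # raw float column on <iname>_data
        {"name": "comment", "type": 2},          # raw text column on <iname>_data
        {"name": "temperature_avg", "type": 3},  # processed float column on <iname>_custom
    ]
    data_cls, custom_cls = createInstrumentTable("example_inst", example_cols, isNew=True)
    return data_cls, custom_cls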
def dropTable(tableName, engine, is_view):
tablesDrop = []
tablesDropKeys = []
for table in daqbroker_database.metadata.tables.keys():
if table == tableName:
tablesDrop.append(daqbroker_database.metadata.tables[table])
tablesDropKeys.append(table)
if is_view:
for table in tablesDrop:
engine.execute(DropView(table, if_exists=True))
else:
daqbroker_database.metadata.drop_all(engine, tables=tablesDrop)
for table in tablesDropKeys:
daqbroker_database.metadata.remove(daqbroker_database.metadata.tables[table])
def newMetaData():
global daqbroker_database
daqbroker_database.metadata.clear()
daqbroker_database = declarative_base()
instruments = type('instruments', (daqbroker_database,), dict(
__tablename__="instruments",
Name=Column(String(50)),
instid=Column(Integer, primary_key=True),
active=Column(Boolean),
description=Column(Text),
username=Column(Text),
email=Column(Text),
insttype=Column(Integer),
log=Column(Text),
sources=relationship("instmeta", backref="meta", cascade="all, delete, delete-orphan", order_by="instmeta.metaid"))
)
instmeta = type('instmeta', (daqbroker_database,), dict(
__tablename__="instmeta",
clock=Column(BigInteger),
name=Column(String(50)),
metaid=Column(Integer, primary_key=True),
instrument_id=Column(Integer, ForeignKey('instruments.instid')),
type=Column(Integer),
node=Column(String(50)),
remarks=Column(Text),
sentRequest=Column(Boolean),
lastAction=Column(BigInteger),
lasterrortime=Column(BigInteger),
lasterror=Column(Text),
lockSync=Column(Boolean),
channels=relationship("channels", backref="chann", cascade="all, delete, delete-orphan", order_by="channels.channelid"),
parsing=relationship("parsing", backref="metaParse", cascade="all, delete, delete-orphan"))
)
channels = type('channels', (daqbroker_database,), dict(
__tablename__="channels",
Name = Column(Text),
channelid = Column(Integer, primary_key=True),
channeltype = Column(Integer),
valuetype = Column(Integer),
units = Column(Text),
description = Column(Text),
active = Column(Boolean),
remarks = Column(Text),
metaid = Column(Integer, ForeignKey('instmeta.metaid')),
lastclock = Column(BigInteger),
lastValue = Column(Text),
firstClock = Column(BigInteger),
fileorder = Column(Text),
alias = Column(Text)
))
parsing = type('parsing', (daqbroker_database,), dict(
__tablename__="parsing",
clock = Column(BigInteger),
metaid = Column(Integer, ForeignKey('instmeta.metaid'), primary_key=True),
type = Column(Integer),
locked = Column(Boolean),
forcelock = Column(Boolean),
remarks = Column(Text)
))
plots = type('plots', (daqbroker_database,), dict(
__tablename__="plots",
plotname = Column(String(200)),
plotid = Column(Integer, primary_key=True),
channelids = Column(Text),
plottype = Column(Integer),
adminPlot = Column(Boolean),
active = Column(Boolean),
remarks = Column(Text)
))
plotcomments = type('plotcomments', (daqbroker_database,), dict(
__tablename__="plotcomments",
clock = Column(BigInteger, primary_key=True),
plotid = Column(Integer, primary_key=True),
channelid = Column(Integer, primary_key=True),
comment = Column(Text),
author = Column(Text),
remarks = Column(Text),
))
layouts = type('layouts', (daqbroker_database,), dict(
__tablename__="layouts",
Name = Column(String(200)),
layoutid = Column(Integer, primary_key=True),
plots = Column(Text),
format = Column(Text),
plottype = Column(Integer),
adminPlot = Column(Boolean),
active = Column(Boolean),
remarks = Column(Text)
))
runs = type('runs', (daqbroker_database,), dict(
__tablename__="runs",
clock = Column(BigInteger, primary_key=True),
lastUpdate = Column(BigInteger),
isLinked = Column(Boolean),
linkRemarks = Column(Text),
linkType = Column(Integer),
runlistRemarks = Column(Text)
))
runlist = type('runlist', (daqbroker_database,), dict(
__tablename__="runlist",
start = Column(BigInteger),
end = Column(BigInteger),
run = Column(String(20), primary_key=True),
lastUpdate = Column(BigInteger),
summary = Column(Text),
comments = Column(Text),
active = Column(Boolean)
))
collections = type('collections', (daqbroker_database,), dict(
__tablename__="collections",
Name = Column(String(200), primary_key=True),
channels = Column(Text),
remarks = Column(Text)
))
    subscribers = type('subscribers', (daqbroker_database,), dict(
        __tablename__="subscribers",
        email=Column(String(200), primary_key=True)
    ))
return daqbroker_database
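# Usage sketch (assumption: a SQLAlchemy Engine named `engine` already exists):
# rebuild the declarative metadata from scratch and emit the core tables, e.g.
#   base = newMetaData()
#   base.metadata.create_all(engine)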
|
python
|
# -*- coding: UTF-8 -*-
import os
import sys
import time
import caffe
import numpy as np
from timer import Timer
from db_helper import DBHelper
import matplotlib.pyplot as plt
from caffe.proto import caffe_pb2
from google.protobuf import text_format
# Load LabelMap file.
def get_labelmap(labelmap_path):
labelmap_file = labelmap_path
file = open(labelmap_file, 'r')
labelmap = caffe_pb2.LabelMap()
text_format.Merge(str(file.read()), labelmap)
return labelmap
# get labelnames
def get_labelname(labelmap, labels):
num_labels = len(labelmap.item)
labelnames = []
if type(labels) is not list:
labels = [labels]
for label in labels:
found = False
for i in xrange(0, num_labels):
if label == labelmap.item[i].label:
found = True
labelnames.append(labelmap.item[i].display_name)
break
assert found == True
return labelnames
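# Illustrative sketch (hypothetical labelmap contents): if the labelmap has an item with
# label == 7 and display_name == 'car', then
#   get_labelname(labelmap, [7.0, 7]) -> ['car', 'car']
# Labels coming from the detector are floats, but the equality check still matches the
# integer labels stored in the prototxt.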
# Preprocess the input: Caffe works with images in BGR channel order, while matplotlib uses RGB,
# and Caffe expects pixel values in [0, 255] whereas matplotlib uses [0, 1].
def input_process(net):
    # Define the transformer that converts input images into the net's 'data' blob format
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))  # move the channel axis first (H x W x C -> C x H x W)
    transformer.set_mean('data', np.array([104, 117, 123]))  # subtract the mean pixel to reduce the effect of noise
    transformer.set_raw_scale('data', 255)  # rescale values from the [0, 1] range to [0, 255]
    transformer.set_channel_swap('data', (2, 1, 0))  # swap channels from RGB to BGR
    image_resize = 300
    net.blobs['data'].reshape(1, 3, image_resize, image_resize)  # batch size 1, 3 channels, 300x300 images
    return transformer
# Run detection on a single image
def im_detect(transformer, labelmap, image_name, images_path, db):
    i = 0
    while os.path.exists(images_path + image_name) is False:  # wait for the image to appear; give up after about 10 seconds
        if i > 100:
            sys.exit()
        time.sleep(0.1)
        i += 1
image = caffe.io.load_image(images_path + image_name)
image_byte = os.path.getsize(images_path + image_name) / 1024.0
# Run the net and examine the top_k results
timer = Timer()
timer.tic()
transformed_image = transformer.preprocess('data', image)
    net.blobs['data'].data[...] = transformed_image  # copy the preprocessed image into the net's input blob
    # Run the forward pass
    detections = net.forward()['detection_out']
    # Parse the detection output
det_label = detections[0, 0, :, 1]
det_conf = detections[0, 0, :, 2]
det_xmin = detections[0, 0, :, 3]
det_ymin = detections[0, 0, :, 4]
det_xmax = detections[0, 0, :, 5]
det_ymax = detections[0, 0, :, 6]
    # Keep only detections with confidence >= 0.6
top_indices = [i for i, conf in enumerate(det_conf) if conf >= 0.6]
top_conf = det_conf[top_indices]
top_label_indices = det_label[top_indices].tolist()
top_labels = get_labelname(labelmap, top_label_indices)
top_xmin = det_xmin[top_indices]
top_ymin = det_ymin[top_indices]
top_xmax = det_xmax[top_indices]
top_ymax = det_ymax[top_indices]
timer.toc()
print 'Detection took {:.3f}s for {}'.format(timer.total_time, image_name)
image_name = image_name[:-4]
    if db.search(image_name, 'ssd_detect_res') != 0:
        db.delete_data(image_name, 'ssd_detect_res')  # remove this image's previous detection records from the database
    if db.search(image_name, 'image_byte') != 0:
        db.delete_data(image_name, 'image_byte')  # remove this image's previous size record from the database
    db.insert_data(image_name, image_byte)
    if top_conf.shape[0] == 0:  # no objects detected in the image
if db.insert_data(image_name, 0, 1) == 0:
print 'Insert data without object failed!'
else:
        # Get the class label and bounding box of each detected object
for i in xrange(top_conf.shape[0]):
xmin = int(round(top_xmin[i] * image.shape[1]))
ymin = int(round(top_ymin[i] * image.shape[0]))
xmax = int(round(top_xmax[i] * image.shape[1]))
ymax = int(round(top_ymax[i] * image.shape[0]))
score = top_conf[i]
label_name = top_labels[i]
if i != top_conf.shape[0] - 1:
if db.insert_data(image_name, str(label_name), xmin, ymin, xmax, ymax, score) == 0:
print 'Insert data with object failed!'
else:
if db.insert_data(image_name, str(label_name), xmin, ymin, xmax, ymax, score, 1) == 0:
print 'Insert data with object failed!'
print 'class: ' + str(label_name) + ' ' + ' location: ' + str(xmin) + ' ' + str(ymin) + \
' ' + str(xmax) + ' ' + str(ymax) + ' possibility: ' + str(score)
return timer.total_time
if __name__ == '__main__':
# Make sure that caffe is on the python path:
caffe_root = '/home/beijing/opt/caffe/'
os.chdir(caffe_root)
sys.path.insert(0, 'python')
    caffe.set_device(1)  # if there are multiple GPUs, use the GPU with device id 1
    caffe.set_mode_gpu()  # run Caffe in GPU mode when loading the network
labelmap_path = 'data/KITTI/labelmap_kitti.prototxt'
labelmap = get_labelmap(labelmap_path)
# * Load the net in the test phase for inference, and configure input preprocessing.
model_def = 'models/VGGNet/KITTI3/SSD_300x300/deploy.prototxt'
model_weights = 'models/VGGNet/KITTI3/SSD_300x300/VGG_KITTI_SSD_300x300_iter_80000.caffemodel'
net = caffe.Net(model_def, # defines the structure of the model
model_weights, # contains the trained weights
caffe.TEST) # use test mode (e.g., don't perform dropout)
transformer = input_process(net)
images_path = '/mnt/disk_a/beijing/DataSet/augsburg/'
im_names = []
for index in range(1000):
s = "%06d" % index
im_names.append(str(s) + '.png')
totaltime = 0
db = DBHelper()
db.get_conn()
db.create_database()
db.create_table()
for image_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
totaltime += im_detect(transformer, labelmap, image_name, images_path, db)
db.close_conn()
print 'totaltime = ' + str(totaltime) + ' for ' + str(len(im_names)) + ' images'
print 'averagetime = ' + str(totaltime / len(im_names))
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
Script to delete files that are also present on Wikimedia Commons.
Do not run this script on Wikimedia Commons itself. It works based on
a given array of templates defined below.
Files are downloaded and compared. If the files match, it can be deleted on
the source wiki. If multiple versions of the file exist, the script will not
delete. If the SHA1 comparison is not equal, the script will not delete.
A sysop account on the local wiki is required if you want all features of
this script to work properly.
This script understands various command-line arguments:
-always run automatically, do not ask any questions. All files
that qualify for deletion are deleted. Reduced screen
output.
-replace replace links if the files are equal and the file names
differ
-replacealways replace links if the files are equal and the file names
differ without asking for confirmation
-replaceloose Do loose replacements. This will replace all occurrences
of the name of the image (and not just explicit image
syntax). This should work to catch all instances of the
file, including where it is used as a template parameter
or in galleries. However, it can also make more
mistakes.
-replaceonly Use this if you do not have a local sysop account, but do
wish to replace links from the NowCommons template.
-hash           Use the hash to identify images that are the same. This does
                not always work, so the bot opens two browser tabs to let the
                user check whether the images are equal.
-- Example --
python pwb.py nowcommons -replaceonly -replaceloose -replacealways \
-replace -hash
-- Known issues --
Please fix these if you are capable and motivated:
- if a file marked nowcommons is not present on Wikimedia Commons, the bot
will exit.
"""
#
# (C) Wikipedian, 2006-2007
# (C) Siebrand Mazeland, 2007-2008
# (C) xqt, 2010-2014
# (C) Pywikibot team, 2006-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: 166b7765189cec83e67c95450417db9aa553ab0f $'
#
import re
import sys
import webbrowser
import pywikibot
from pywikibot import i18n, Bot
from pywikibot import pagegenerators as pg
from pywikibot.tools.formatter import color_format
from scripts.image import ImageRobot as ImageBot
nowCommons = {
'_default': [
u'NowCommons'
],
'ar': [
u'الآن كومنز',
u'الآن كومونز',
],
'de': [
u'NowCommons',
u'NC',
u'NCT',
u'Nowcommons',
u'NowCommons/Mängel',
u'NowCommons-Überprüft',
],
'en': [
u'NowCommons',
u'Ncd',
],
'eo': [
u'Nun en komunejo',
u'NowCommons',
],
'fa': [
u'موجود در انبار',
u'NowCommons',
],
'fr': [
u'Image sur Commons',
u'DoublonCommons',
u'Déjà sur Commons',
u'Maintenant sur commons',
u'Désormais sur Commons',
u'NC',
u'NowCommons',
u'Nowcommons',
u'Sharedupload',
u'Sur Commons',
u'Sur Commons2',
],
'he': [
u'גם בוויקישיתוף'
],
'hu': [
u'Azonnali-commons',
u'NowCommons',
u'Nowcommons',
u'NC'
],
'ia': [
u'OraInCommons'
],
'it': [
u'NowCommons',
],
'ja': [
u'NowCommons',
],
'ko': [
u'NowCommons',
u'공용중복',
u'공용 중복',
u'Nowcommons',
],
'nds-nl': [
u'NoenCommons',
u'NowCommons',
],
'nl': [
u'NuCommons',
u'Nucommons',
u'NowCommons',
u'Nowcommons',
u'NCT',
u'Nct',
],
'ro': [
u'NowCommons'
],
'ru': [
u'NowCommons',
u'NCT',
u'Nowcommons',
u'Now Commons',
u'Db-commons',
u'Перенесено на Викисклад',
u'На Викискладе',
],
'zh': [
u'NowCommons',
u'Nowcommons',
u'NCT',
],
}
namespaceInTemplate = [
'en',
'ia',
'it',
'ja',
'ko',
'lt',
'ro',
'zh',
]
# Stemma and stub images should not be deleted (and there are a lot of them) on it.wikipedia;
# if your project has images like that, add the words commonly used in their names here to skip them
word_to_skip = {
'en': [],
'it': ['stemma', 'stub', 'hill40 '],
}
class NowCommonsDeleteBot(Bot):
"""Bot to delete migrated files."""
def __init__(self, **kwargs):
"""Constructor."""
self.availableOptions.update({
'replace': False,
'replacealways': False,
'replaceloose': False,
'replaceonly': False,
'use_hash': False,
})
super(NowCommonsDeleteBot, self).__init__(**kwargs)
self.site = pywikibot.Site()
if repr(self.site) == 'commons:commons':
sys.exit('Do not run this bot on Commons!')
def ncTemplates(self):
"""Return nowcommons templates."""
if self.site.lang in nowCommons:
return nowCommons[self.site.lang]
else:
return nowCommons['_default']
@property
def nc_templates(self):
"""A set of now commons template Page instances."""
if not hasattr(self, '_nc_templates'):
self._nc_templates = set(pywikibot.Page(self.site, title, ns=10)
for title in self.ncTemplates())
return self._nc_templates
def useHashGenerator(self):
"""Use hash generator."""
# https://toolserver.org/~multichill/nowcommons.php?language=it&page=2&filter=
lang = self.site.lang
num_page = 0
word_to_skip_translated = i18n.translate(self.site, word_to_skip)
images_processed = list()
while 1:
url = ('https://toolserver.org/~multichill/nowcommons.php?'
'language=%s&page=%s&filter=') % (lang, num_page)
HTML_text = self.site.getUrl(url, no_hostname=True)
reg = r'<[Aa] href="(?P<urllocal>.*?)">(?P<imagelocal>.*?)</[Aa]> +?</td><td>\n\s*?'
reg += r'<[Aa] href="(?P<urlcommons>http[s]?://commons.wikimedia.org/.*?)" \
>Image:(?P<imagecommons>.*?)</[Aa]> +?</td><td>'
regex = re.compile(reg, re.UNICODE)
found_something = False
change_page = True
for x in regex.finditer(HTML_text):
found_something = True
image_local = x.group('imagelocal')
image_commons = x.group('imagecommons')
if image_local in images_processed:
continue
change_page = False
images_processed.append(image_local)
# Skip images that have something in the title (useful for it.wiki)
image_to_skip = False
for word in word_to_skip_translated:
if word.lower() in image_local.lower():
image_to_skip = True
if image_to_skip:
continue
url_local = x.group('urllocal')
url_commons = x.group('urlcommons')
pywikibot.output(color_format(
'\n\n>>> {lightpurple}{0}{default} <<<',
image_local))
pywikibot.output(u'Local: %s\nCommons: %s\n'
% (url_local, url_commons))
webbrowser.open(url_local, 0, 1)
webbrowser.open(url_commons, 0, 1)
if image_local.split('Image:')[1] == image_commons:
choice = pywikibot.input_yn(
u'The local and the commons images have the same name, '
'continue?', default=False, automatic_quit=False)
else:
choice = pywikibot.input_yn(
u'Are the two images equal?',
default=False, automatic_quit=False)
if choice:
yield [image_local, image_commons]
else:
continue
            # The page is dynamically updated, so we may not need to change it
if change_page:
num_page += 1
            # If no image was found, there are none left, so break.
if not found_something:
break
def getPageGenerator(self):
"""Generator method."""
if self.getOption('use_hash'):
gen = self.useHashGenerator()
else:
gens = [t.getReferences(follow_redirects=True, namespaces=[6],
onlyTemplateInclusion=True)
for t in self.nc_templates]
gen = pg.CombinedPageGenerator(gens)
gen = pg.DuplicateFilterPageGenerator(gen)
gen = pg.PreloadingGenerator(gen)
return gen
def findFilenameOnCommons(self, localImagePage):
"""Find filename on Commons."""
filenameOnCommons = None
for templateName, params in localImagePage.templatesWithParams():
if templateName in self.nc_templates:
if params == []:
filenameOnCommons = localImagePage.title(withNamespace=False)
elif self.site.lang in namespaceInTemplate:
skip = False
filenameOnCommons = None
for par in params:
val = par.split('=')
if len(val) == 1 and not skip:
filenameOnCommons = par[par.index(':') + 1:]
break
if val[0].strip() == '1':
filenameOnCommons = val[1].strip()[val[1].strip().index(':') + 1:]
break
skip = True
if not filenameOnCommons:
filenameOnCommons = localImagePage.title(withNamespace=False)
else:
val = params[0].split('=')
if len(val) == 1:
filenameOnCommons = params[0].strip()
else:
filenameOnCommons = val[1].strip()
return filenameOnCommons
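    # Illustrative (hypothetical) wikitext forms that findFilenameOnCommons handles; the
    # actual template names depend on the wiki's nowCommons list above:
    #   {{NowCommons}}                      -> falls back to the local file's own title
    #   {{NowCommons|Image:Example.jpg}}    -> unnamed parameter, namespace prefix stripped
    #   {{NowCommons|1=Image:Example.jpg}}  -> parameter named '1', namespace prefix stripped
    #   {{NowCommons|Example.jpg}}          -> plain filename on wikis without the namespace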
def run(self):
"""Run the bot."""
commons = pywikibot.Site('commons', 'commons')
comment = i18n.twtranslate(self.site, 'imagetransfer-nowcommons_notice')
for page in self.getPageGenerator():
if self.getOption('use_hash'):
# Page -> Has the namespace | commons image -> Not
images_list = page # 0 -> local image, 1 -> commons image
page = pywikibot.Page(self.site, images_list[0])
else:
                # If use_hash is true, this has already been printed above; no need to repeat it
self.current_page = page
try:
localImagePage = pywikibot.FilePage(self.site, page.title())
if localImagePage.fileIsShared():
pywikibot.output(u'File is already on Commons.')
continue
sha1 = localImagePage.latest_file_info.sha1
if self.getOption('use_hash'):
filenameOnCommons = images_list[1]
else:
filenameOnCommons = self.findFilenameOnCommons(
localImagePage)
if not filenameOnCommons and not self.getOption('use_hash'):
pywikibot.output(u'NowCommons template not found.')
continue
commonsImagePage = pywikibot.FilePage(commons, 'Image:%s'
% filenameOnCommons)
if (localImagePage.title(withNamespace=False) ==
commonsImagePage.title(withNamespace=False) and
self.getOption('use_hash')):
pywikibot.output(
u'The local and the commons images have the same name')
if (localImagePage.title(withNamespace=False) !=
commonsImagePage.title(withNamespace=False)):
usingPages = list(localImagePage.usingPages())
if usingPages and usingPages != [localImagePage]:
pywikibot.output(color_format(
'"{lightred}{0}{default}" is still used in {1} pages.',
localImagePage.title(withNamespace=False),
len(usingPages)))
if self.getOption('replace') is True:
pywikibot.output(color_format(
'Replacing "{lightred}{0}{default}" by '
'"{lightgreen}{1}{default}\".',
localImagePage.title(withNamespace=False),
commonsImagePage.title(withNamespace=False)))
bot = ImageBot(
pg.FileLinksGenerator(localImagePage),
localImagePage.title(withNamespace=False),
commonsImagePage.title(withNamespace=False),
'', self.getOption('replacealways'),
self.getOption('replaceloose'))
bot.run()
# If the image is used with the urlname the
# previous function won't work
is_used = bool(list(pywikibot.FilePage(
self.site, page.title()).usingPages(total=1)))
if is_used and self.getOption('replaceloose'):
bot = ImageBot(
pg.FileLinksGenerator(
localImagePage),
localImagePage.title(
withNamespace=False, asUrl=True),
commonsImagePage.title(
withNamespace=False),
'', self.getOption('replacealways'),
self.getOption('replaceloose'))
bot.run()
# refresh because we want the updated list
usingPages = len(list(pywikibot.FilePage(
self.site, page.title()).usingPages()))
if usingPages > 0 and self.getOption('use_hash'):
# just an enter
pywikibot.input(
                                    u'There are still %s pages using this image; '
                                    u'please confirm their manual removal.'
% usingPages)
else:
pywikibot.output(u'Please change them manually.')
continue
else:
pywikibot.output(color_format(
'No page is using "{lightgreen}{0}{default}" '
'anymore.',
localImagePage.title(withNamespace=False)))
commonsText = commonsImagePage.get()
if self.getOption('replaceonly') is False:
if sha1 == commonsImagePage.latest_file_info.sha1:
pywikibot.output(
u'The image is identical to the one on Commons.')
if (len(localImagePage.getFileVersionHistory()) > 1 and
not self.getOption('use_hash')):
                            pywikibot.output(
                                u"This image has a version history. Please "
                                u"delete it manually after making sure that the "
                                u"old versions are not worth keeping.")
continue
if self.getOption('always') is False:
format_str = color_format(
'\n\n>>>> Description on {lightpurple}%s'
'{default} <<<<\n')
pywikibot.output(format_str % page.title())
pywikibot.output(localImagePage.get())
pywikibot.output(format_str %
commonsImagePage.title())
pywikibot.output(commonsText)
if pywikibot.input_yn(
u'Does the description on Commons contain '
'all required source and license\n'
'information?',
default=False, automatic_quit=False):
localImagePage.delete(
'%s [[:commons:Image:%s]]'
% (comment, filenameOnCommons), prompt=False)
else:
localImagePage.delete(
comment + ' [[:commons:Image:%s]]'
% filenameOnCommons, prompt=False)
else:
pywikibot.output(
u'The image is not identical to the one on Commons.')
except (pywikibot.NoPage, pywikibot.IsRedirectPage) as e:
pywikibot.output(u'%s' % e[0])
continue
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
options = {}
for arg in pywikibot.handle_args(args):
if arg == '-replacealways':
options['replace'] = True
options['replacealways'] = True
elif arg == '-hash':
options['use_hash'] = True
elif arg == '-autonomous':
pywikibot.warning(u"The '-autonomous' argument is DEPRECATED,"
u" use '-always' instead.")
options['always'] = True
elif arg.startswith('-'):
if arg[1:] in ('always', 'replace', 'replaceloose', 'replaceonly'):
options[arg[1:]] = True
bot = NowCommonsDeleteBot(**options)
bot.run()
if __name__ == "__main__":
main()
|
python
|
#@+leo-ver=5-thin
#@+node:ville.20110403115003.10348: * @file valuespace.py
#@+<< docstring >>
#@+node:ville.20110403115003.10349: ** << docstring >>
'''Supports Leo scripting using per-Leo-outline namespaces.
Commands
========
.. note::
    The first four commands are a lightweight option for Python calculations
    within Leo bodies. The remainder are a more complex system for tree-wide
    computations.
This plugin supports the following commands:
vs-eval
-------
Execute the selected text, if any. Select next line of text.
Tries hard to capture the result of the last expression in the
selected text::
import datetime
today = datetime.date.today()
will capture the value of ``today`` even though the last line is a
statement, not an expression.
Stores results in ``c.vs['_last']`` for insertion
into body by ``vs-last`` or ``vs-last-pretty``.
Removes common indentation (``textwrap.dedent()``) before executing,
allowing execution of indented code.
``g``, ``c``, and ``p`` are available to executing code, assignments
are made in the ``c.vs`` namespace and persist for the life of ``c``.
vs-eval-replace
---------------
Execute the selected text, if any. Replace the selected text with the
result.
vs-eval-block
-------------
In the body, "# >>>" marks the end of a code block, and "# <<<" marks
the end of an output block. E.g.::
a = 2
# >>>
4
# <<<
b = 2.0*a
# >>>
4.0
# <<<
``vs-eval-block`` evaluates the current code block, either the code block
the cursor's in, or the code block preceding the output block the cursor's
in. Subsequent output blocks are marked "# >>> *" to show they may need
re-evaluation.
Note: you don't really need to type the "# >>>" and "# <<<" markers
because ``vs-eval-block`` will add them as needed. So just type the
first code block and run ``vs-eval-block``.
vs-last
-------
Insert the last result from ``vs-eval``. Inserted as a string,
so ``"1\n2\n3\n4"`` will cover four lines and insert no quotes,
for ``repr()`` style insertion use ``vs-last-pretty``.
vs-last-pretty
--------------
Insert the last result from ``vs-eval``. Formatted by
``pprint.pformat()``, so ``"1\n2\n3\n4"`` will appear as
'``"1\n2\n3\n4"``'; see also ``vs-last``.
vs-create-tree
--------------
Creates a tree whose root node is named 'valuespace' containing one child node
for every entry in the namespace. The headline of each child is *@@r <key>*,
where *<key>* is one of the keys of the namespace. The body text of the child
node is the value for *<key>*.
vs-dump
-------
Prints key/value pairs of the namespace.
vs-reset
--------
Clears the namespace.
vs-update
---------
Scans the entire Leo outline twice, processing *@=*, *@a* and *@r* nodes.
Pass 1
++++++
Pass 1 evaluates all *@=* and *@a* nodes in the outline as follows:
*@=* (assignment) nodes should have headlines of the form::
@= <var>
Pass 1 evaluates the body text and assigns the result to *<var>*.
*@a* (anchor) nodes should have headlines of one of two forms::
@a
@a <var>
The first form evaluates the script in the **parent** node of the *@a* node.
Such **bare** @a nodes serve as markers that the parent contains code to be
executed.
The second form evaluates the body of the **parent** of the *@a* node and
assigns the result to *<var>*.
**Important**: Both forms of *@a* nodes support the following **@x convention**
when evaluating the parent's body text. Before evaluating the body text, pass1
scans the body text looking for *@x* lines. Such lines have two forms:
1. *@x <python statement>*
Pass 1 executes *<python statement>*.
2. The second form spans multiple lines of the body text::
@x {
python statements
@x }
Pass 1 executes all the python statements between the *@x {* and the *@x }*
3. Assign block of text to variable::
@x =<var> {
Some
Text
@x }
Pass 1 assigns the block of text to <var>. The type of value is SList,
a special subclass of standard 'list' that makes operating with string
lists convenient. Notably, you can do <var>.n to get the content as plain
string.
A special case of this is the "list append" notation::
@x =<var>+ {
Some
Text
@x }
This assumes that <var> is a list, and appends the content as SList to this
list. You will typically do '@x var = []' earlier in the document to make this
construct work.
<var> in all constructs above can be arbitrary expression that can be on left hand
side of assignment. E.g. you can use foo.bar, foo['bar'], foo().bar etc.
Pass 2
++++++
Pass 2 "renders" all *@r* nodes in the outline into body text. *@r* nodes should
have the form::
@r <expression>
Pass 2 evaluates *<expression>* and places the result in the body pane.
**TODO**: discuss SList expressions.
Evaluating expressions
======================
All expression are evaluated in a context that predefines Leo's *c*, *g* and *p*
vars. In addition, *g.vs* is a dictionary whose keys are *c.hash()* and whose
values are the namespaces for each commander. This allows communication between
different namespaces, while keeping namespaces generally separate.
'''
# SList docs: http://ipython.scipy.org/moin/Cookbook/StringListProcessing
#@-<< docstring >>
# By Ville M. Vainio and Terry N. Brown.
#@+<< imports >>
#@+node:ville.20110403115003.10351: ** << imports >>
import leo.core.leoGlobals as g
import leo.core.leoPlugins as leoPlugins
from leo.external.stringlist import SList
# Uses leoPlugins.TryNext.
import pprint
import os
import re
# import sys
# import types
import textwrap
import json
from io import BytesIO
try:
import yaml
except ImportError:
yaml = None
#@-<< imports >>
controllers = {}
# Keys are c.hash(), values are ValueSpaceControllers.
# pylint: disable=eval-used
# Eval is essential to this plugin.
#@+others
#@+node:ekr.20110408065137.14221: ** Module level
#@+node:ville.20110403115003.10353: *3* colorize_headlines_visitor
def colorize_headlines_visitor(c,p, item):
""" Changes @thin, @auto, @shadow to bold """
if p.h.startswith("!= "):
f = item.font(0)
f.setBold(True)
item.setFont(0,f)
raise leoPlugins.TryNext
#@+node:ville.20110403115003.10352: *3* init
def init ():
'''Return True if the plugin has loaded successfully.'''
# vs_reset(None)
global controllers
# g.vs = {} # A dictionary of dictionaries, one for each commander.
    # create global valuespace controller for ipython
g.visit_tree_item.add(colorize_headlines_visitor)
g.registerHandler('after-create-leo-frame',onCreate)
g.plugin_signon(__name__)
return True
#@+node:ekr.20110408065137.14222: *3* onCreate
def onCreate (tag,key):
global controllers
c = key.get('c')
if c:
h = c.hash()
vc = controllers.get(h)
if not vc:
controllers [h] = vc = ValueSpaceController(c)
#@+node:tbrown.20170516194332.1: *3* get_blocks
def get_blocks(c):
"""get_blocks - iterate code blocks
:return: (current, source, output)
:rtype: (bool, str, str)
"""
pos = c.frame.body.wrapper.getInsertPoint()
chrs = 0
lines = c.p.b.split('\n')
block = {'source': [], 'output': []}
reading = 'source'
seeking_current = True
# if the last non-blank line isn't the end of a possibly empty
# output block, make it one
if [i for i in lines if i.strip()][-1] != "# <<<":
lines.append("# <<<")
while lines:
line = lines.pop(0)
chrs += len(line)+1
if line.startswith("# >>>"):
reading = 'output'
continue
if line.startswith("# <<<"):
current = seeking_current and (chrs >= pos+1)
if current:
seeking_current = False
yield current, '\n'.join(block['source']), '\n'.join(block['output'])
block = {'source': [], 'output': []}
reading = 'source'
continue
block[reading].append(line)
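# Illustrative sketch (not executed): given a body such as
#
#     a = 2
#     # >>>
#     4
#     # <<<
#     b = 2.0*a
#
# get_blocks(c) yields one (current, source, output) tuple per source/output pair,
# roughly (False, 'a = 2', '4') and (True, 'b = 2.0*a', '') when the cursor sits in
# the second block; `current` is True for whichever block contains the insert point.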
#@+node:ville.20110403115003.10355: ** Commands
#@+node:ville.20130127115643.3695: *3* get_vs
def get_vs(c):
'''deal with singleton "ipython" controller'''
if g.app.ipk:
vsc = controllers.get('ipython')
if not vsc:
controllers['ipython'] = vsc = ValueSpaceController(c = None,
ns = g.app.ipk.namespace)
vsc.set_c(c)
return vsc
return controllers[c.hash()]
#@+node:ville.20110407210441.5691: *3* vs-create-tree
@g.command('vs-create-tree')
def vs_create_tree(event):
"""Create tree from all variables."""
get_vs(event['c']).create_tree()
#@+node:ekr.20110408065137.14227: *3* vs-dump
@g.command('vs-dump')
def vs_dump(event):
"""Dump the valuespace for this commander."""
get_vs(event['c']).dump()
#@+node:ekr.20110408065137.14220: *3* vs-reset
@g.command('vs-reset')
def vs_reset(event):
# g.vs = types.ModuleType('vs')
# sys.modules['vs'] = g.vs
get_vs(event['c']).reset()
#@+node:ville.20110403115003.10356: *3* vs-update
@g.command('vs-update')
def vs_update(event):
get_vs(event['c']).update()
#@+node:tbrown.20130227164110.21222: *3* vs-eval
@g.command("vs-eval")
def vs_eval(event):
"""
Execute the selected text, if any. Select next line of text.
    Tries hard to capture the result of the last expression in the
selected text::
import datetime
today = datetime.date.today()
will capture the value of ``today`` even though the last line is a
statement, not an expression.
Stores results in ``c.vs['_last']`` for insertion
into body by ``vs-last`` or ``vs-last-pretty``.
Removes common indentation (``textwrap.dedent()``) before executing,
allowing execution of indented code.
``g``, ``c``, and ``p`` are available to executing code, assignments
are made in the ``c.vs`` namespace and persist for the life of ``c``.
"""
c = event['c']
w = c.frame.body.wrapper
txt = w.getSelectedText()
# select next line ready for next select/send cycle
# copied from .../plugins/leoscreen.py
b = w.getAllText()
i = w.getInsertPoint()
try:
j = b[i:].index('\n')+i+1
w.setSelectionRange(i,j)
except ValueError: # no more \n in text
w.setSelectionRange(i,i)
eval_text(c, txt)
def eval_text(c, txt):
if not txt:
return
vsc = get_vs(c)
cvs = vsc.d
txt = textwrap.dedent(txt)
blocks = re.split('\n(?=[^\\s])', txt)
leo_globals = {'c':c, 'p':c.p, 'g':g}
ans = None
dbg = False
redirects = c.config.getBool('valuespace_vs_eval_redirect')
if redirects:
old_stderr = g.stdErrIsRedirected()
old_stdout = g.stdOutIsRedirected()
if not old_stderr:
g.redirectStderr()
if not old_stdout:
g.redirectStdout()
try:
# execute all but the last 'block'
if dbg: print('all but last')
# exec '\n'.join(blocks[:-1]) in leo_globals, c.vs
exec('\n'.join(blocks[:-1]), leo_globals, cvs) # Compatible with Python 3.x.
all_done = False
except SyntaxError:
# splitting of the last block caused syntax error
try:
# is the whole thing a single expression?
if dbg: print('one expression')
ans = eval(txt, leo_globals, cvs)
except SyntaxError:
if dbg: print('statement block')
# exec txt in leo_globals, c.vs
exec(txt, leo_globals, cvs) # Compatible with Python 3.x.
all_done = True # either way, the last block is used now
if not all_done: # last block still needs using
try:
if dbg: print('final expression')
ans = eval(blocks[-1], leo_globals, cvs)
except SyntaxError:
ans = None
if dbg: print('final statement')
# exec blocks[-1] in leo_globals, c.vs
exec(blocks[-1], leo_globals, cvs) # Compatible with Python 3.x.
if redirects:
if not old_stderr:
g.restoreStderr()
if not old_stdout:
g.restoreStdout()
if ans is None: # see if last block was a simple "var =" assignment
key = blocks[-1].split('=', 1)[0].strip()
if key in cvs:
ans = cvs[key]
if ans is None: # see if whole text was a simple /multi-line/ "var =" assignment
key = blocks[0].split('=', 1)[0].strip()
if key in cvs:
ans = cvs[key]
cvs['_last'] = ans
if ans is not None:
# annoying to echo 'None' to the log during line by line execution
txt = str(ans)
lines = txt.split('\n')
if len(lines) > 10:
txt = '\n'.join(lines[:5]+['<snip>']+lines[-5:])
if len(txt) > 500:
txt = txt[:500] + ' <truncated>'
g.es(txt)
return ans
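# A small sketch of how eval_text splits its input (assuming ordinary top-level
# indentation): re.split('\n(?=[^\\s])', txt) breaks the text at newlines followed by a
# non-whitespace character, so
#
#     import datetime
#     today = datetime.date.today()
#
# becomes two blocks. All but the last block are exec'd; the last block is eval'd when
# possible (falling back to exec plus a lookup of the assigned name), and the result is
# stored in the namespace as '_last'.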
#@+node:tbrown.20170516202419.1: *3* vs-eval-block
@g.command("vs-eval-block")
def vs_eval_block(event):
c = event['c']
pos = 0
lines = []
current_seen = False
for current, source, output in get_blocks(c):
lines.append(source)
lines.append("# >>>" + (" *" if current_seen else ""))
if current:
old_log = c.frame.log.logCtrl.getAllText()
eval_text(c, source)
new_log = c.frame.log.logCtrl.getAllText()[len(old_log):]
lines.append(new_log.strip())
# lines.append(str(get_vs(c).d.get('_last')))
pos = len('\n'.join(lines))+7
current_seen = True
else:
lines.append(output)
lines.append("# <<<")
c.p.b = '\n'.join(lines) + '\n'
c.frame.body.wrapper.setInsertPoint(pos)
c.redraw()
c.bodyWantsFocusNow()
#@+node:tbnorth.20171222141907.1: *3* vs-eval-replace
@g.command("vs-eval-replace")
def vs_eval_replace(event):
"""Execute the selected text, if any. Replace it with the result."""
c = event['c']
w = c.frame.body.wrapper
txt = w.getSelectedText()
eval_text(c, txt)
result = pprint.pformat(get_vs(c).d.get('_last'))
i, j = w.getSelectionRange()
new_text = c.p.b[:i]+result+c.p.b[j:]
bunch = c.undoer.beforeChangeNodeContents(c.p)
w.setAllText(new_text)
c.p.b = new_text
w.setInsertPoint(i+len(result))
c.undoer.afterChangeNodeContents(c.p, 'Insert result', bunch)
c.setChanged()
#@+node:tbrown.20130227164110.21223: *3* vs-last
@g.command("vs-last")
def vs_last(event, text=None):
"""
Insert the last result from ``vs-eval``.
Inserted as a string, so ``"1\n2\n3\n4"`` will cover four lines and
insert no quotes, for ``repr()`` style insertion use ``vs-last-pretty``.
"""
c = event['c']
if text is None:
text = str(get_vs(c).d.get('_last'))
editor = c.frame.body.wrapper
insert_point = editor.getInsertPoint()
editor.insert(insert_point, text+'\n')
editor.setInsertPoint(insert_point+len(text)+1)
c.setChanged(True)
#@+node:tbrown.20130227164110.21224: *3* vs-last-pretty
@g.command("vs-last-pretty")
def vs_last_pretty(event):
"""
Insert the last result from ``vs-eval``.
Formatted by ``pprint.pformat()``, so ``"1\n2\n3\n4"`` will appear as
    '``"1\n2\n3\n4"``'; see also ``vs-last``.
"""
c = event['c']
vs_last(event, text=pprint.pformat(get_vs(c).d.get('_last')))
#@+node:ekr.20110408065137.14219: ** class ValueSpaceController
class ValueSpaceController(object):
'''A class supporting per-commander evaluation spaces
containing @a, @r and @= nodes.
'''
#@+others
#@+node:ekr.20110408065137.14223: *3* ctor
def __init__ (self,c = None, ns = None ):
# g.trace('(ValueSpaceController)',c)
self.c = c
if ns is None:
self.d = {}
else:
self.d = ns
self.reset()
self.trace = False
self.verbose = False
if c:
# important this come after self.reset()
c.keyHandler.autoCompleter.namespaces.append(self.d)
# changed g.vs.__dict__ to self.d
# Not strictly necessary, but allows cross-commander communication.
#g.vs [c.hash()] = self.d
#@+node:ekr.20110408065137.14224: *3* create_tree
def create_tree (self):
'''The vs-create-tree command.'''
c = self.c ; p = c.p ; tag = 'valuespace'
# Create a 'valuespace' node if p's headline is not 'valuespace'.
if p.h == tag:
r = p
else:
r = p.insertAsLastChild()
r.h = tag
# Create a child of r for all items of self.d
for k,v in self.d.items():
if not k.startswith('__'):
child = r.insertAsLastChild()
child.h = '@@r ' + k
self.render_value(child,v) # Create child.b from child.h
c.bodyWantsFocus()
c.redraw()
#@+node:ekr.20110408065137.14228: *3* dump
def dump (self):
c,d = self.c,self.d
exclude = (
'__builtins__',
# 'c','g','p',
)
print('Valuespace for %s...' % c.shortFileName())
keys = list(d.keys())
keys = [z for z in keys if z not in exclude]
keys.sort()
max_s = 5
for key in keys:
max_s = max(max_s,len(key))
for key in keys:
val = d.get(key)
pad = max(0,max_s-len(key))*' '
print('%s%s = %s' % (pad,key,val))
c.bodyWantsFocus()
#@+node:ekr.20110408065137.14225: *3* reset
def reset (self):
'''The vs-reset command.'''
# do not allow resetting the dict if using ipython
if not g.app.ipk:
self.d = {}
self.c.vs = self.d
self.init_ns(self.d)
#@+node:ville.20110409221110.5755: *3* init_ns
def init_ns(self,ns):
""" Add 'builtin' methods to namespace """
def slist(body):
""" Return body as SList (string list) """
return SList(body.split("\n"))
ns['slist'] = slist
# xxx todo perhaps add more?
#@+node:ville.20130127122722.3696: *3* set_c
def set_c(self,c):
""" reconfigure vsc for new c
Needed by ipython integration
"""
self.c = c
#@+node:ekr.20110408065137.14226: *3* update & helpers
def update (self):
'''The vs-update command.'''
# names are reversed, xxx TODO fix later
self.render_phase() # Pass 1
self.update_vs() # Pass 2
self.c.bodyWantsFocus()
#@+node:ekr.20110407174428.5781: *4* render_phase (pass 1) & helpers
def render_phase(self):
'''Update p's tree (or the entire tree) as follows:
- Evaluate all @= nodes and assign them to variables
- Evaluate the body of the *parent* nodes for all @a nodes.
- Read in @vsi nodes and assign to variables
'''
c = self.c
self.d['c'] = c # g.vs.c = c
self.d['g'] = g # g.vs.g = g
for p in c.all_unique_positions():
h = p.h.strip()
if h.startswith('@= '):
if self.trace and self.verbose: g.trace('pass1',p.h)
self.d['p'] = p.copy() # g.vs.p = p.copy()
var = h[3:].strip()
self.let_body(var,self.untangle(p))
elif h.startswith("@vsi "):
fname = h[5:]
bname, ext = os.path.splitext(fname)
g.es("@vsi " + bname +" " + ext)
if ext.lower() == '.json':
pth = c.getNodePath(p)
fn = os.path.join(pth, fname)
g.es("vsi read from " + fn)
if os.path.isfile(fn):
cont = open(fn).read()
val = json.loads(cont)
self.let(bname, val)
self.render_value(p, cont)
elif h == '@a' or h.startswith('@a '):
if self.trace and self.verbose: g.trace('pass1',p.h)
tail = h[2:].strip()
parent = p.parent()
if tail:
self.let_body(tail,self.untangle(parent))
try:
self.parse_body(parent)
except Exception:
g.es_exception()
g.es("Error parsing " + parent.h)
# g.trace(self.d)
#@+node:ekr.20110407174428.5777: *5* let & let_body
def let(self,var,val):
'''Enter var into self.d with the given value.
Both var and val must be strings.'''
if self.trace:
print("Let [%s] = [%s]" % (var,val))
self.d ['__vstemp'] = val
if var.endswith('+'):
rvar = var.rstrip('+')
# .. obj = eval(rvar,self.d)
exec("%s.append(__vstemp)" % rvar,self.d)
else:
exec(var + " = __vstemp",self.d)
del self.d ['__vstemp']
def let_cl(self, var, body):
""" handle @cl node """
# g.trace()
lend = body.find('\n')
firstline = body[0:lend]
rest = firstline[4:].strip()
print("rest",rest)
try:
translator = eval(rest, self.d)
except Exception:
g.es_exception()
            g.es("Can't instantiate @cl xlator: " + rest)
translated = translator(body[lend+1:])
self.let(var, translated)
def let_body(self,var,val):
if var.endswith(".yaml"):
if yaml:
#print "set to yaml", `val`
sio = BytesIO(val)
try:
d = yaml.load(sio)
except Exception:
g.es_exception()
g.es("yaml error for: " + var)
return
parts = os.path.splitext(var)
self.let(parts[0], d)
else:
g.es("did not import yaml")
return
if val.startswith('@cl '):
self.let_cl(var, val)
return
self.let(var,val)
#@+node:ekr.20110407174428.5780: *5* parse_body & helpers
def parse_body(self,p):
body = self.untangle(p) # body is the script in p's body.
# print("Body")
# print(body)
if self.trace and self.verbose: g.trace('pass1',p.h,'\n',body)
self.d ['p'] = p.copy()
backop = []
segs = re.finditer('^(@x (.*))$',body,re.MULTILINE)
for mo in segs:
op = mo.group(2).strip()
# print("Oper",op)
if op.startswith('='):
# print("Assign", op)
backop = ('=', op.rstrip('{').lstrip('='), mo.end(1))
elif op == '{':
backop = ('runblock', mo.end(1))
elif op == '}':
bo = backop[0]
# print("backop",bo)
if bo == '=':
self.let_body(backop[1].strip(), body[backop[2] : mo.start(1)])
elif bo == 'runblock':
self.runblock(body[backop[1] : mo.start(1)])
else:
self.runblock(op)
#@+node:ekr.20110407174428.5779: *6* runblock
def runblock(self,block):
if self.trace and self.verbose:
g.trace('pass1',block)
exec(block,self.d)
#@+node:ekr.20110407174428.5778: *6* untangle (getScript)
def untangle(self,p):
return g.getScript(self.c,p,
useSelectedText=False,
useSentinels=False)
#@+node:ekr.20110407174428.5782: *4* update_vs (pass 2) & helper
def update_vs(self):
'''
        Evaluate @r <expr> nodes, putting the result in their body text.
Output @vso nodes, based on file extension
'''
c = self.c
for p in c.all_unique_positions():
h = p.h.strip()
if h.startswith('@r '):
if self.trace and self.verbose: g.trace('pass2:',p.h)
expr = h[3:].strip()
try:
result = eval(expr,self.d)
except Exception:
g.es_exception()
g.es("Failed to render " + h)
continue
if self.trace: print("Eval:",expr,"result:",repr(result))
self.render_value(p,result)
if h.startswith("@vso "):
expr = h[5:].strip()
bname, ext = os.path.splitext(expr)
try:
result = eval(bname,self.d)
except Exception:
g.es_exception()
g.es("@vso failed: " + h)
continue
if ext.lower() == '.json':
cnt = json.dumps(result, indent = 2)
pth = os.path.join(c.getNodePath(p), expr)
self.render_value(p, cnt)
g.es("Writing @vso: " + pth)
open(pth, "w").write(cnt)
else:
g.es_error("Unknown vso extension (should be .json, ...): " + ext)
#@+node:ekr.20110407174428.5784: *5* render_value
def render_value(self,p,value):
'''Put the rendered value in p's body pane.'''
if isinstance(value, SList):
p.b = value.n
elif g.isString(value): # Works with Python 3.x.
p.b = value
else:
p.b = pprint.pformat(value)
#@+node:ekr.20110407174428.5783: *3* test
def test(self):
self.update()
# test()
#@-others
#@-others
#@-leo
|
python
|
#Let's do some experiments to understand why to use numpy and what we can do with it.
#import numpy with alias np
import numpy as np
import sys
# one dimensional array
a=np.array([1,2,3])
print(a)
# two dimensional array
a= np.array([(1,2,3),(4,5,6)])
print(a)
# Why is numpy better than a plain list?
# 1) Less memory
# 2) Faster
# 3) Convenient
r = range(1000)
print(sys.getsizeof(0)*len(r))
d = np.arange(1000)
print(d.size *d.itemsize)
# So each Python int costs roughly 24-28 bytes (sys.getsizeof(0)*1000 in total), whereas each
# numpy element costs only itemsize bytes (4 or 8 per element depending on the dtype).
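# A small follow-up sketch for the "faster" claim above (timings are indicative and
# machine-dependent): summing a million values with built-in Python versus numpy.
import time
n = 1000000
py_list = list(range(n))
np_arr = np.arange(n)
t0 = time.time()
py_sum = sum(py_list)
t1 = time.time()
np_sum = np_arr.sum()
t2 = time.time()
print("python sum:", py_sum, "took", t1 - t0, "seconds")
print("numpy sum:", np_sum, "took", t2 - t1, "seconds")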
|
python
|
if '__file__' in globals():
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
from dezero import Variable
import dezero.functions as F
# im2col
x1 = np.random.rand(1, 3, 7, 7)
col1 = F.im2col(x1, kernel_size=5, stride=1, pad=0, to_matrix=True)
print(col1.shape) # (9, 75)
x2 = np.random.rand(10, 3, 7, 7)  # a batch of 10 samples
kernel_size = (5, 5)
stride = (1, 1)
pad = (0, 0)
col2 = F.im2col(x2, kernel_size, stride, pad, to_matrix=True)
print(col2.shape) # (90, 75)
# conv2d
N, C, H, W = 1, 5, 15, 15
OC, (KH, KW) = 8, (3, 3)
x = Variable(np.random.randn(N, C, H, W))
W = np.random.randn(OC, C, KH, KW)
y = F.conv2d_simple(x, W, b=None, stride=1, pad=1)
y.backward()
print(y.shape) # (1, 8, 15, 15)
print(x.grad.shape) # (1, 5, 15, 15)
|
python
|
import sys
import os
from os.path import join as opj
workspace = os.environ["WORKSPACE"]
sys.path.append(
opj(workspace, 'code/GeDML/src')
)
import argparse
import logging
logging.getLogger().setLevel(logging.INFO)
import torch.distributed as dist
from gedml.launcher.runners.distributed_runner import DistributedRunner
def subprocess_start():
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=int, default=0)
parser.add_argument("--world_size", type=int, default=1)
parser.add_argument("--rank", type=int, default=0)
parser.add_argument("--num_workers", type=int, default=8)
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--dataset", type=str, default='ImageNet')
opt = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(opt.gpu)
opt.gpu = 0
opt.link_path = os.path.join(workspace, "code/Experiments/GeDML/demo/moco/link.yaml")
runner = DistributedRunner(
opt
)
runner.run()
if __name__ == '__main__':
subprocess_start()
|
python
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class Company(models.Model):
_inherit = 'res.company'
hr_presence_control_email_amount = fields.Integer(string="# emails to send")
hr_presence_control_ip_list = fields.Char(string="Valid IP addresses")
|
python
|
#!/usr/bin/env python
"""
_ConfigureState_
Populate the states tables with all known states, and set the max retries for
each state. Default to one retry.
Create the CouchDB and associated views if needed.
"""
from WMCore.Database.CMSCouch import CouchServer
from WMCore.DataStructs.WMObject import WMObject
class ConfigureState(WMObject):
def configure(self):
server = CouchServer(self.config.JobStateMachine.couchurl)
dbname = 'JSM/JobHistory'
if dbname not in server.listDatabases():
server.createDatabase(dbname)
|
python
|
import numpy as np
from numba import float32, jit
from numba.np.ufunc import Vectorize
from numba.core.errors import TypingError
from ..support import TestCase
import unittest
dtype = np.float32
a = np.arange(80, dtype=dtype).reshape(8, 10)
b = a.copy()
c = a.copy(order='F')
d = np.arange(16 * 20, dtype=dtype).reshape(16, 20)[::2, ::2]
def add(a, b):
return a + b
def add_multiple_args(a, b, c, d):
return a + b + c + d
def gufunc_add(a, b):
result = 0.0
for i in range(a.shape[0]):
result += a[i] * b[i]
return result
def ufunc_reduce(ufunc, arg):
for i in range(arg.ndim):
arg = ufunc.reduce(arg)
return arg
vectorizers = [
Vectorize,
# ParallelVectorize,
# StreamVectorize,
# CudaVectorize,
# GUFuncVectorize,
]
class TestUFuncs(TestCase):
def _test_ufunc_attributes(self, cls, a, b, *args):
"Test ufunc attributes"
vectorizer = cls(add, *args)
vectorizer.add(float32(float32, float32))
ufunc = vectorizer.build_ufunc()
info = (cls, a.ndim)
self.assertPreciseEqual(ufunc(a, b), a + b, msg=info)
self.assertPreciseEqual(ufunc_reduce(ufunc, a), np.sum(a), msg=info)
self.assertPreciseEqual(ufunc.accumulate(a), np.add.accumulate(a),
msg=info)
self.assertPreciseEqual(ufunc.outer(a, b), np.add.outer(a, b), msg=info)
def _test_broadcasting(self, cls, a, b, c, d):
"Test multiple args"
vectorizer = cls(add_multiple_args)
vectorizer.add(float32(float32, float32, float32, float32))
ufunc = vectorizer.build_ufunc()
info = (cls, a.shape)
self.assertPreciseEqual(ufunc(a, b, c, d), a + b + c + d, msg=info)
def test_ufunc_attributes(self):
for v in vectorizers: # 1D
self._test_ufunc_attributes(v, a[0], b[0])
for v in vectorizers: # 2D
self._test_ufunc_attributes(v, a, b)
for v in vectorizers: # 3D
self._test_ufunc_attributes(v, a[:, np.newaxis, :],
b[np.newaxis, :, :])
def test_broadcasting(self):
for v in vectorizers: # 1D
self._test_broadcasting(v, a[0], b[0], c[0], d[0])
for v in vectorizers: # 2D
self._test_broadcasting(v, a, b, c, d)
for v in vectorizers: # 3D
self._test_broadcasting(v, a[:, np.newaxis, :], b[np.newaxis, :, :],
c[:, np.newaxis, :], d[np.newaxis, :, :])
def test_implicit_broadcasting(self):
for v in vectorizers:
vectorizer = v(add)
vectorizer.add(float32(float32, float32))
ufunc = vectorizer.build_ufunc()
broadcasting_b = b[np.newaxis, :, np.newaxis, np.newaxis, :]
self.assertPreciseEqual(ufunc(a, broadcasting_b),
a + broadcasting_b)
def test_ufunc_exception_on_write_to_readonly(self):
z = np.ones(10)
z.flags.writeable = False # flip write bit
tests = []
expect = "ufunc 'sin' called with an explicit output that is read-only"
tests.append((jit(nopython=True), TypingError, expect))
tests.append((jit(nopython=False), ValueError,
"output array is read-only"))
for dec, exc, msg in tests:
def test(x):
a = np.ones(x.shape, x.dtype) # do not copy RO attribute from x
np.sin(a, x)
with self.assertRaises(exc) as raises:
dec(test)(z)
self.assertIn(msg, str(raises.exception))
if __name__ == '__main__':
unittest.main()
|
python
|
import json
import numpy as np
def pack(request):
data = request.get_json()
furniture = data['furniture']
orientations = []
##################################################################
####### Preliminary Functions
##################################################################
def order(x,vals):
'''
idea is to apply this to the pieces, with different
vectors for vals depending on the ordering rule
(probably start with non-increasing volume)
'''
x = [i for _,i in sorted(zip(vals,x), reverse = True)]
return x
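    # Example (hypothetical values): ordering pieces by non-increasing volume.
    #   pieces = [[1, 2, 3], [2, 2, 2], [1, 1, 1]]
    #   volumes = [6, 8, 1]
    #   order(pieces, volumes) -> [[2, 2, 2], [1, 2, 3], [1, 1, 1]]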
    ''' Permutations of indices for dimensions '''
def re_order(dim, OR):
'''
dim stores original dimensions, OR is a permutation
'''
D = dim
new_dim = []
for i in range(3):
new_dim.append(D[OR[i]])
return new_dim
def Feas(Dims, EP, Bin_Size, OR, Curr_items, Curr_EP):
'''
Returns True if the orientation OR of a piece of dimension
Dims = HxWxD is feasible in a bin with leftmost corner at EP
Bin_Size = 1x3 dimensions of bin
Dims = 1x3
EP = 1x3 -- coordinates of the chosen spot
OR = 1x3 a permutation of [0,1,2]
For all items in Curr_items placed at Curr_Ep
have to make sure that EP[0] + d[OR[0]] doesn't
poke through... item[j][0] -- item[j][0] + Curr_Ep[j][0]
'''
BS = Bin_Size
D = re_order(Dims,OR)
CI = Curr_items
CE = Curr_EP
check = True
for i in range(3):
# Bin limits
if D[i] + EP[i] > BS[i]:
check = False
for j in range(len(CI)):
# checking intersections with other items
####################################################
#### DOUBLE CHECK THIS FOR CORRECTNESS!!!!
####################################################
for k in range(3):
a = (k + 1)%3
b = (k + 2)%3
if overlap(D,EP,CI[j],CE[j],k,a,b):
check = False
return check
def overlap(d1,c1, d2,c2, k,x, y):
'''
returns True if two 3-d boxes with dimensions d1 d2
and lower left corners c1, c2 overlap on the xy plane AND k dim...
'''
ov = True
if c1[x] >= c2[x] + d2[x]:
ov = False
if c2[x] >= c1[x] + d1[x]:
ov = False
if c1[y] >= c2[y] + d2[y]:
ov = False
        if c2[y] >= c1[y] + d1[y]:
ov = False
if c1[k] >= c2[k] + d2[k]:
ov = False
if c2[k] >= c1[k] + d1[k]:
ov = False
return ov
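    # Example (hypothetical boxes): two unit cubes, one at the origin and one at
    # (0, 0.5, 0.5), intersect on every axis pair, so
    #   overlap([1, 1, 1], [0, 0, 0], [1, 1, 1], [0, 0.5, 0.5], 0, 1, 2) -> True
    # whereas moving the second cube to (0, 2, 0) separates them along y:
    #   overlap([1, 1, 1], [0, 0, 0], [1, 1, 1], [0, 2, 0], 0, 1, 2) -> False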
'''
Compute Merit function for given placement of a piece
'''
def Merit_Res(Dims, OR, EP, Rs, Bin_Size):
'''
not gonna bother checking feasibility...
assume that this calc comes AFTER feasibility check...
--Maybe weight the dimensions differently to
make the different orientations different?
'''
D = Dims
BS = Bin_Size
'''
this does NOT take account of the orientation
so the orientation is basically just for feasibility...
'''
# The "extra" EP[0] + Dims[0] is supposed to penalize "high" positions...
return sum(Rs) - sum(Dims) + EP[0] + Dims[0]
#### Work with people to determine best/better merit functions.
#### CODE UP THE BOUNDING BOX ONES TOO!! THESE SEEM LIKELY
#### CANDIDATES FOR US...
def Merit_WD(Dims, OR, EP, curr_items, curr_eps):
'''
Selects position that minimizes the bounding
box in the WxD dimension
curr_items = items in crate
curr_eps = position of items
EP = candidate position
OR = candidate orientation
'''
Dim = re_order(Dims,OR)
CI = curr_items
CE = curr_eps
'''
start out with the box bounds as the new guy
'''
W = EP[1] + Dim[1]
D = EP[2] + Dim[2]
for i in range(len(CI)):
if CE[i][1] + CI[i][1] > W:
W = CE[i][1] + CI[i][1]
if CE[i][2] + CI[i][2] > D:
D = CE[i][2] + CI[i][2]
#Penalizes Height
val = W*D + (EP[0] + Dim[0]) * W
return(val)
'''
Update Extreme point list
'''
def proj(d1,e1,d2,e2, ep_dir, proj_dir):
'''
d1, e1 -- dim of new piece, placed at point e1
d2, e2 -- cycle these through the other pieces
ep_dir is the coordinate "pushed out" by the piece dimension in
the candidate extreme point
proj_dir is the one to shrink... (number 0,1,2 corresponding to x, y, z)
These are NEVER the same...
'''
e = ep_dir
pd = proj_dir
# remaining dimension???
od = 3-e - pd
eps = 0.0
check = True
if d2[pd] + e2[pd] > e1[pd] - eps:
#i.e. piece is further from axis in projection direction
check = False
if e2[e] > e1[e] + d1[e] - eps:
#i.e. piece too far
check = False
if e2[e] + d2[e] < e1[e] + d1[e] + eps:
# i.e. piece not far enough
check = False
if e2[od] > e1[od] - eps:
#i.e. piece too far
check = False
if e2[od] + d2[od] < e1[od] + eps:
# i.e. piece not far enough
check = False
return check
def Update_EP(Dims, EP, Curr_EPs, Curr_Items):
'''
Dims = 1x3 HxWxD of current piece placed
(in orientation OR* decided by Feas and Merit...)
EP = 1x3 coordinates of lower left corner of current piece
Curr_EPs = list of current extreme points where Curr_Items
are located
Curr_Items = list of dimensions of current items
idea is you take current EP and push it out in the
three dimensions of the current piece, then project
each of these towards the two other axes...
e.g. [ep[0],ep[1] + Dims[1], ep[2]] projected in
x and z directions...
- Six possible new ones (possibly duplicated...)
- each of the three
New_Eps[0], [1] are x_y and x_z projections of (ep[0]+dim[0],ep[1],ep[2])
by shrinking the y and z coordinates, respectively...
'''
D = Dims
CI = Curr_Items
CE = Curr_EPs
New_Eps = [[EP[0]+D[0],EP[1],EP[2]],[EP[0]+D[0],EP[1],EP[2]],
[EP[0],EP[1]+D[1],EP[2]],[EP[0],EP[1]+D[1],EP[2]],
[EP[0],EP[1],EP[2]+D[2]],[EP[0],EP[1],EP[2]+D[2]]]
Max_bounds = -1*np.ones(6)
for i in range(len(CI)):
# x_y -- New_Eps[0] shrinking y coordinate
if proj(D, EP, CI[i], CE[i],0,1) and CE[i][1] + CI[i][1] > Max_bounds[0]:
New_Eps[0] = [EP[0] + D[0], CE[i][1] + CI[i][1],EP[2]]
Max_bounds[0] = CE[i][1] + CI[i][1]
#x_z -- New_Eps[1] shrinking z coordinate
if proj(D, EP, CI[i], CE[i],0,2) and CE[i][2] + CI[i][2] > Max_bounds[1]:
New_Eps[1] = [EP[0] + D[0], EP[1], CE[i][2] + CI[i][2]]
Max_bounds[1] = CE[i][2] + CI[i][2]
# y_x -- New_Eps[2] shrinking x coordinate
if proj(D, EP, CI[i], CE[i],1,0) and CE[i][0] + CI[i][0] > Max_bounds[2]:
New_Eps[2] = [CE[i][0] + CI[i][0], EP[1] + D[1],EP[2]]
Max_bounds[2] = CE[i][0] + CI[i][0]
#y_z -- New_Eps[3] shrinking z coordinate
if proj(D, EP, CI[i], CE[i],1,2) and CE[i][2] + CI[i][2] > Max_bounds[3]:
New_Eps[3] = [EP[0], EP[1]+D[1], CE[i][2] + CI[i][2]]
Max_bounds[3] = CE[i][2] + CI[i][2]
# z_x -- New_Eps[4] shrinking x coordinate
if proj(D, EP, CI[i], CE[i],2,0) and CE[i][0] + CI[i][0] > Max_bounds[4]:
New_Eps[4] = [CE[i][0] + CI[i][0], EP[1],EP[2] + D[2]]
Max_bounds[4] = CE[i][0] + CI[i][0]
# z_y -- New_Eps[5] shrinking y coordinate
if proj(D, EP, CI[i], CE[i],2,1) and CE[i][1] + CI[i][1] > Max_bounds[5]:
New_Eps[5] = [EP[0], CE[i][1] + CI[i][1],EP[2] + D[2]]
Max_bounds[5] = CE[i][1] + CI[i][1]
# remove duplicates
New_Eps = np.unique(New_Eps, axis = 0)
return New_Eps
def Init_RS(NE, Bin_Dims):
'''
Input is a list of new EPs
Initializes the residual space in each axis
This may be updated by the Update_RS function'''
BD = Bin_Dims
RS = []
for i in range(len(NE)):
RS_i = [BD[0] - NE[i][0], BD[1] - NE[i][1],BD[2] - NE[i][2]]
RS.append(RS_i)
return RS
def Update_RS(Dims, EP, All_EPs, RS_list):
'''
This updates the EXISTING RS's to account for
the new item in the Bin.
DOES NOT update the initialized RS to account for
the other items already in the bin -- would have to
include the current items to do that...
Dims = **re-ordered** dimensions of the newly added piece
EP = extreme point PLACEMENT location of the new piece
-- this guy is no longer in the list...
-- the initial res of the
All_Eps = list of all other extreme points
RS_list = current residuals list (each entry a 3-tuple)
'''
EPL = All_EPs
D = Dims
RL = RS_list
for i in range(len(EPL)):
if EPL[i][0] >= EP[0] and EPL[i][0] < EP[0] + D[0]:
if EPL[i][1] <= EP[1] and EPL[i][2] >= EP[2] and EPL[i][2] < EP[2] + D[2]:
RL[i][1] = min([RL[i][1], EP[1] - EPL[i][1]])
if EPL[i][2] <= EP[2] and EPL[i][1] >= EP[1] and EPL[i][1] < EP[1] + D[1]:
RL[i][2] = min([RL[i][2], EP[2] - EPL[i][2]])
if EPL[i][1] >= EP[1] and EPL[i][1] < EP[1] + D[1]:
if EPL[i][0] <= EP[0] and EPL[i][2] >= EP[2] and EPL[i][2] < EP[2] + D[2]:
RL[i][0] = min([RL[i][0], EP[0] - EPL[i][0]])
return RL
##################################################################
####### INPUT STAGE
##################################################################
# Maximum box dimensions
# need to make sure that dimensions are big enough to handle each piece...
H_box = 40
W_box = 60
D_box = 48
#e_ST is the "allowed overhang" of the nesting in this case...is this a thing??
e_ST = 2
# dims are H x W x D
# pieces are (dimensions, label, nesting_dimensions -- default will be [0,0,0])
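# Illustrative shape of the incoming payload (dimensions and nesting values are
# made up for the example; only the structure matters):
# {"furniture": [[[30, 50, 40], "Simone Table", [4, 46, 36]],
#                [[20, 30, 18], "Harper Shelf", [0, 0, 0]]]}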
Pieces = furniture
for i in range(len(Pieces)):
Pieces[i].append(i)
# i.e. the simone table can't have long-way as height...
Or_Ex = {'Simone Table': [2,3]}
# could also fix orientations using something like...
# Fix_Or = {'label': fixed_orientation , etc.}
# probably also add something to specify that certain pieces
# have to go on the bottom, can't be stacked, etc...
#stack_ex = {'Simone Table': 1, 'Harper Shelf':0}
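# The loops below use Ors (candidate orientations) and rotXY/rotYZ/rotXZ (rotation
# flags written into the output dicts) but this snippet never defines them. The
# definitions here are assumptions added so the code can run: Ors is the six axis
# permutations of HxWxD (matching "OR = 1x3 a permutation of [0,1,2]" in the
# docstrings), and the rot* lists are placeholder flags to be replaced with whatever
# rotation convention the caller expects.
Ors = [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0]]
rotXY = [0, 0, 0, 0, 0, 0]  # placeholder (assumption)
rotYZ = [0, 0, 0, 0, 0, 0]  # placeholder (assumption)
rotXZ = [0, 0, 0, 0, 0, 0]  # placeholder (assumption)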
##################################################################
####### NESTING PACKING STAGE
##################################################################
Nest_possible = []
pieces_to_pack = []
for i in range(len(Pieces)):
pieces_to_pack.append(Pieces[i])
for i in range(len(Pieces)):
if Pieces[i][2][0] != 0:
Nest_possible.append(Pieces[i])
##### For now just ordered by volume
Nest_space_ordering = [Nest_possible[i][2][0] * Nest_possible[i][2][1] * Nest_possible[i][2][2]
for i in range(len(Nest_possible))]
Pack_piece_ordering = [pieces_to_pack[i][0][0] * pieces_to_pack[i][0][1] *pieces_to_pack[i][0][2]
for i in range(len(pieces_to_pack))]
Nest_possible = order(Nest_possible, Nest_space_ordering)
pieces_to_pack = order(pieces_to_pack, Pack_piece_ordering)
for j in range(len(Nest_possible)):
'''
try packing, and remove pieces from "pieces to pack"
if they are packed...
'''
Nestings = []
Bin_size = Nest_possible[j][2]
#initialize extreme point list
EPL = np.array([[0,0,0]])
Curr_items = []
Curr_EP = []
RS_list = [[Bin_size[0],Bin_size[1],Bin_size[2]]]
ptp_j = []
for i in range(len(pieces_to_pack)):
ptp_j.append(pieces_to_pack[i])
ptp_j.remove(Nest_possible[j])
for p in range(len(ptp_j)):
'''
try packing - for each successful pack add the phrase
" label packed in label at EP in orientation ___" to some list to be
printed at the end...
'''
Dims = ptp_j[p][0]
best_merit = 2 * H_box * W_box * D_box
e_cand = None
o_cand = None
for e in range(len(EPL)):
for o in range(len(Ors)):
''' Skip if an orientation exception '''
if ptp_j[p][1] in Or_Ex and o in Or_Ex[ptp_j[p][1]]:
continue
if Feas(Dims, EPL[e], Bin_size, Ors[o], Curr_items, Curr_EP) and Merit_Res(Dims, Ors[o], EPL[e], RS_list[e], Bin_size) < best_merit:
best_merit = Merit_Res(Dims, Ors[o], EPL[e], RS_list[e], Bin_size)
e_cand = e
o_cand = o
if e_cand is None:
continue
else:
Dims = re_order(Dims, Ors[o_cand])
NE = Update_EP(Dims, EPL[e_cand], Curr_EP, Curr_items)
### Again had the original dimensions in here...
Curr_items.append(Dims)
Curr_EP.append(EPL[e_cand])
L = len(Curr_EP)
RS_list.remove(RS_list[e_cand])
EPL = np.delete(EPL,e_cand,axis = 0)
for i in range(len(NE)):
EPL = np.append(EPL,[NE[i]], axis = 0)
# Sort the EPs by lowest z, y, x respectively...
# might want to change this, depending on how things go...
for i in range(3):
### Probably Change this to be like the sorting further down...
EPL = EPL[EPL[:,2-i].argsort(kind='mergesort')]
N_RS = Init_RS(NE, Bin_size)
for i in range(len(N_RS)):
RS_list.append(N_RS[i])
RS_list = Update_RS(Dims, Curr_EP[L-1], EPL, RS_list)
Result = f'{ptp_j[p][1]}, orientation HxWxD = {Dims}, bottom left at {Curr_EP[L-1]} in {Nest_possible[j][1]}.'
Nestings.append(Result)
pieces_to_pack.remove(ptp_j[p])
for i in range(len(Nestings)):
print(Nestings[i])
##################################################################
####### Full Packing Stage
##################################################################
#### pieces_to_pack is THE SAME as from the nesting stage...
#### with all the nested pieces removed (whole nested ensemble
#### treated as one...)
#### Instantiate first Crate with first EP at [0,0,0]...
# can be different for each one... in principle...
Bin_size = [H_box,W_box,D_box]
# List of open EP's in open Crates
Cr = [[[0,0,0]]]
## when create a new crate, give it one of the size bounds
## from Crate_Dims and initialize the Crate_RS_Lists with these
## Stores Residuals for each EP in each Crate (ORDERING HAS TO BE THE SAME)
Cr_RS = [[Bin_size]]
# Stores a list of the dimensions of items currently in each crate
Cr_Item=[[]]
# Stores a list of the EPs where the current items
# were placed -- need this to compute intersections
Cr_EPs =[[]]
ptp = pieces_to_pack
## List of the locations and orientations of packed pieces
Packings = []
for p in range(len(ptp)):
'''
try the piece in EACH existing crate, pick best spot
according to the merit function.
If NO possible packing THEN Crates.append([[0,0,0]]) and
pack it in this one...
For bounding box merit function, maybe also start a new
crate if the BEST Merit value is too bad...
'''
# update this with the crate it's packed in...
packed_in = None
Dims = ptp[p][0]
Best_Merit = 2 * H_box * W_box * D_box
e_cand = None
o_cand = None
for c in range(len(Cr)):
EPL = Cr[c]
Curr_Items = Cr_Item[c]
Curr_EP = Cr_EPs[c]
RS_List = Cr_RS[c]
Ordered_RS = []
Ordered_EPL = []
for e in range(len(EPL)):
if EPL[e][0] > 0:
# no stacking
continue
for o in range(len(Ors)):
''' Skip if an orientation exception '''
if ptp[p][1] in Or_Ex and o in Or_Ex[ptp[p][1]]:
continue
#if Feas(Dims, EPL[e], Bin_size, Ors[o], Curr_Items, Curr_EP) and Merit_Res(Dims, Ors[o], EPL[e], RS_List[e], Bin_size) < Best_Merit:
if Feas(Dims, EPL[e], Bin_size, Ors[o], Curr_Items, Curr_EP) and Merit_WD(Dims, Ors[o], EPL[e], Curr_Items, Curr_EP) < Best_Merit:
#Best_Merit = Merit_Res(Dims, Ors[o], EPL[e], RS_List[e], Bin_size)
Best_Merit = Merit_WD(Dims, Ors[o], EPL[e], Curr_Items, Curr_EP)
e_cand = e
o_cand = o
packed_in = c
if packed_in is not None:
k = packed_in
EPL = Cr[k]
Curr_Items = Cr_Item[k]
#Curr_EP = Cr_EPs[k]
RS_List = Cr_RS[k]
Dims = re_order(Dims, Ors[o_cand])
NE = Update_EP(Dims, EPL[e_cand], Curr_EP, Curr_Items)
## before had this appending the ORIGINAL orientation
Cr_Item[k].append(Dims)
Cr_EPs[k].append(EPL[e_cand])
L = len(Cr_EPs[k])
del EPL[e_cand]
for i in range(len(NE)):
EPL.append(NE[i])
# Sort the EPs by lowest z, y, x respectively...
# might want to change this, depending on how things go...
for i in range(3):
# the [2-i] means it sorts the 0 index last -- i.e. really ordered
# by smallest height... wherever height is in the list...
order_i = [np.argsort(EPL,0)[r][2-i] for r in range(len(EPL))]
#### Seems to be ok to do this in place like this...
EPL = [EPL[order_i[j]] for j in range(len(order_i))]
#print('EPL:',EPL)
#print('RSList:', RS_List)
'''
WILL NEED TO CHANGE THIS so that it returns the format that Kyle wants
need to make a dictionary mapping the orientation chosen in the loop
to the relevant orientation in the "XY 90 degree" language...
'''
Result = [ptp[p][3],{'name': ptp[p][1], 'rotationXY': rotXY[o_cand], 'rotationYZ': rotYZ[o_cand], 'rotationXZ': rotXZ[o_cand],'bottomLeftX':Cr_EPs[k][L-1][1] + Bin_size[1]*packed_in, 'bottomLeftY': Cr_EPs[k][L-1][2], 'bottomLeftZ': Cr_EPs[k][L-1][0], 'crate': packed_in}]
#orientation HxWxD = {Dims}, bottom left at {Cr_EPs[k][L-1]} in Crate {packed_in}.
Packings.append(Result)
Cr[k] = EPL
#Cr_Item[k] = Curr_Items
#Cr_EPs[k] = Curr_EP
Cr_RS[k] = RS_List
if packed_in is None:
Cr.append([[0,0,0]])
Cr_RS.append([Bin_size])
Cr_Item.append([])
Cr_EPs.append([])
c = len(Cr)-1
packed_in = c
EPL = Cr[c]
Curr_Items = Cr_Item[c]
Curr_EP = Cr_EPs[c]
RS_List = Cr_RS[c]
e_cand = 0
o_cand = None
for o in range(len(Ors)):
''' Skip if an orientation exception '''
if ptp[p][1] in Or_Ex and o in Or_Ex[ptp[p][1]]:
continue
#if Feas(Dims, EPL[e_cand], Bin_size, Ors[o], Curr_Items, Curr_EP) and Merit_Res(Dims, Ors[o], EPL[e_cand], RS_List[e_cand], Bin_size) < Best_Merit:
if Feas(Dims, EPL[e_cand], Bin_size, Ors[o], Curr_Items, Curr_EP) and Merit_WD(Dims, Ors[o], EPL[e_cand], Curr_Items, Curr_EP) < Best_Merit:
#Best_Merit = Merit_Res(Dims, Ors[o], EPL[e_cand], RS_List[e_cand], Bin_size)
Best_Merit = Merit_WD(Dims, Ors[o], EPL[e_cand], Curr_Items, Curr_EP)
o_cand = o
Dims = re_order(Dims, Ors[o_cand])
NE = Update_EP(Dims, EPL[e_cand], Curr_EP, Curr_Items)
## same thing, was adding the ORIGINAL orientation before...
Curr_Items.append(Dims)
Curr_EP.append(EPL[e_cand])
L = len(Curr_EP)
del EPL[e_cand]
for i in range(len(NE)):
EPL.append(NE[i])
# Sort the EPs by lowest height, width, and depth respectively...
# might want to change this, depending on how things go...
for i in range(3):
order_i = [np.argsort(EPL,0)[r][2-i] for r in range(len(EPL))]
RS_List = [RS_List[order_i[j]] for j in range(len(order_i))]
EPL = [EPL[order_i[j]] for j in range(len(order_i))]
Result = [ptp[p][3],{'name': ptp[p][1], 'rotationXY': rotXY[o_cand], 'rotationYZ': rotYZ[o_cand], 'rotationXZ': rotXZ[o_cand],'bottomLeftX': Curr_EP[L-1][1]+Bin_size[1]*packed_in, 'bottomLeftY': Curr_EP[L-1][2], 'bottomLeftZ': Curr_EP[L-1][0], 'crate': packed_in}]
Packings.append(Result)
Cr[c] = EPL
Cr_Item[c] = Curr_Items
Cr_EPs[c] = Curr_EP
################################################################################
######## Generate dimensions of crates
################################################################################
'''
X - width
Y - Depth
Z - Height
(Z,X,Y)
'''
Crate_dims = []
for i in range(len(Cr_Item)):
H_dim = max([Cr_Item[i][j][0] + Cr_EPs[i][j][0] for j in range(len(Cr_Item[i]))])
W_dim = max([Cr_Item[i][j][1] + Cr_EPs[i][j][1] for j in range(len(Cr_Item[i]))])
D_dim = max([Cr_Item[i][j][2] + Cr_EPs[i][j][2] for j in range(len(Cr_Item[i]))])
Crate_dims.append([H_dim, W_dim, D_dim])
for i in range(len(Pieces)):
for j in range(len(Packings)):
if Packings[j][0] == i:
orientations.append(Packings[j][1])
print('orientations', orientations)
print('orientations = packings', Packings)
return {'pieces': orientations}
|
python
|
#!/usr/bin/env python3
# Dependencies: python3-pandas python3-plotly
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.colors
df = pd.read_csv(".perf-out/all.csv")
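# The benchmark CSV is expected to provide at least these columns (inferred from the
# plotting code below): server, size, reqps, lat90, lat99, lat99.9 -- latencies in
# microseconds, converted to milliseconds before plotting.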
fig = make_subplots(
rows=2, cols=2,
horizontal_spacing = 0.1,
vertical_spacing = 0.1,
subplot_titles=(
"Requests per second",
"Latency: 90%ile", "Latency: 99%ile", "Latency: 99.9%ile"),
)
fig.update_yaxes(row=1, col=1, rangemode="tozero")
fig.update_yaxes(row=1, col=2, title_text="milliseconds",
rangemode="tozero")
fig.update_yaxes(row=2, col=1, title_text="milliseconds",
rangemode="tozero")
fig.update_yaxes(row=2, col=2, title_text="milliseconds",
rangemode="tozero")
fig.update_layout(legend_orientation="h", hovermode="x")
colors = plotly.colors.DEFAULT_PLOTLY_COLORS
for i, s in enumerate(set(df.server.values)):
dfs = df[df.server == s]
color = colors[i]
fig.add_trace(
go.Scatter(
x=dfs["size"],
y=dfs.reqps,
mode='lines+markers',
line=dict(color=color),
showlegend=True,
name=s),
row=1, col=1)
for (row, col), k in [
((1, 2), "lat90"),
((2, 1), "lat99"),
((2, 2), "lat99.9")]:
fig.add_trace(
go.Scatter(
x=dfs["size"],
y=dfs[k]/1000, # convert us -> ms
mode='lines+markers',
line=dict(color=color),
showlegend=False,
name=s),
row=row, col=col)
fig.write_html('.perf-out/results.html', auto_open=False)
|
python
|
import json, re, pymongo, os
from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem
from unidecode import unidecode
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from datetime import date, datetime
from scrapy import settings
DATE_FORMAT = "%d/%m/%Y"
class NoticiasPipeline:
def process_item(self, item, spider):
return item
class DropFaultDataPipeline:
def process_item(self, item, spider):
if item['data']:
return item
else:
raise DropItem("Missing data in %s" % item)
class DropFaultCorpoPipeline:
def process_item(self, item, spider):
if item['corpo']:
return item
else:
raise DropItem("Missing corpo in %s" % item)
class DropNotCovid19:
def process_item(self, item, spider):
if any(re.search(term, item['corpo']) for term in ('covid', 'vacina', 'doses', 'cloroquina', 'cpi')):
return item
else:
raise DropItem("Item is not a Covid-19 related news article")
class LowerPipeline:
def process_item(self, item, spider):
item['corpo'] = item['corpo'].lower()
return item
class TagsSpecialsCorpoPipeline:
def process_item(self,item,spider):
str1 = item['corpo']
str1 = unidecode(str1)
str1 = re.sub('["\'\-,;%\[\]\{\}.*:@#?!&$\(\)/|]', ' ', str1)
item['corpo'] = str1
return item
class RemoveStopwordsPipeline:
def process_item(self,item,spider):
text = item['corpo']
stop_words = set(stopwords.words('portuguese'))
word_tokens = word_tokenize(text)
filtered_text = [w for w in word_tokens if w not in stop_words]
item['corpo'] = filtered_text
return item
class ProcessedCorpoPipeline:
def process_item(self,item,spider):
# DataUtility is assumed to be a project-level helper exposing pre_processing()
processed_corpo = DataUtility.pre_processing(item['corpo'])
item['corpo'] = processed_corpo
return item
class MongoDBPipeline:
collection_name = 'notices_collection'
#collection_name = 'teste'
def __init__(self, mongo_uri, mongo_db):
self.mongo_uri = mongo_uri
self.mongo_db = mongo_db
@classmethod
def from_crawler(cls, crawler):
return cls(
mongo_uri=crawler.settings.get('MONGO_URI'),
mongo_db=crawler.settings.get('MONGO_DATABASE', 'crawler')
)
def open_spider(self,spider):
self.client = pymongo.MongoClient(self.mongo_uri)
self.db = self.client[self.mongo_db]
def close_spider(self,spider):
self.client.close()
def process_item(self,item,spider):
link = item['link']
title = item['title']
data = item['data']
corpo = item['corpo']
data = datetime.strptime(data, DATE_FORMAT).date()
self.db[self.collection_name].insert_one({
'link': link,
'title': title,
'data': datetime(data.year, data.month, data.day),
'corpo': corpo
})
return item
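# Illustrative settings snippet for enabling these pipelines (the module path and
# priority numbers are hypothetical; adjust them to the project's layout):
#
# ITEM_PIPELINES = {
#     'noticias.pipelines.DropFaultDataPipeline': 100,
#     'noticias.pipelines.DropFaultCorpoPipeline': 110,
#     'noticias.pipelines.DropNotCovid19': 120,
#     'noticias.pipelines.LowerPipeline': 200,
#     'noticias.pipelines.TagsSpecialsCorpoPipeline': 210,
#     'noticias.pipelines.RemoveStopwordsPipeline': 220,
#     'noticias.pipelines.MongoDBPipeline': 300,
# }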
|
python
|
from math import *
from scipy.integrate import quad
from scipy.integrate import dblquad
from scipy import integrate
from scipy import special
import numpy as np
from numpy import median
from numpy import linspace
from copy import deepcopy
from scipy.stats import fisk
"Defining the parameters for the tests"
alpha = 0.05
delta = 0.002
k = int(log(2 * int(log(1 / delta)) * (1 / delta)))
beta = 0.001
eta = 0.001
max_iterations = 15
max_gradient_descents = 15
sample_size = 2000
optimal_w = [2, 3]
initial_w = [4, 5]
initial_v = 5
mu = [0, 0]
covariance = [[10, 0], [0, 10]]
X = np.random.multivariate_normal(mu, covariance, sample_size).T
X1 = X[:, 0:int(sample_size / 2)]
X2 = X[:, int(sample_size / 2):sample_size]
max_trials = 1000
min_sample = 15
max_sample = 125
samples = 20
prediction = linspace(min_sample, max_sample, samples).astype(int)
|
python
|
from rest_framework import permissions
class AnonymousPermission(permissions.BasePermission):
"""
Allows access only to anonymous (unauthenticated) users.
"""
message = "You are already registered. Please logout and try again."
def has_permission(self, request, view):
return not request.user.is_authenticated
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Object level permissions that only allow owner of object to edit it.
"""
message = "You cannot edit this object, since you are not the owner of this object."
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.owner == request.user
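# Illustrative usage in views (view and serializer names below are hypothetical):
#
# class RegisterView(generics.CreateAPIView):
#     serializer_class = RegisterSerializer
#     permission_classes = [AnonymousPermission]
#
# class ArticleDetailView(generics.RetrieveUpdateDestroyAPIView):
#     permission_classes = [permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]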
|
python
|
# -*- coding: UTF-8 -*-
from operator import itemgetter
from copy import deepcopy
from pydruid.utils import aggregators
from pydruid.utils import filters
class TestAggregators:
def test_aggregators(self):
aggs = [('longsum', 'longSum'), ('doublesum', 'doubleSum'),
('min', 'min'), ('max', 'max'), ('count', 'count'),
('hyperunique', 'hyperUnique')]
aggs_funcs = [(getattr(aggregators, agg_name), agg_type)
for agg_name, agg_type in aggs]
for f, agg_type in aggs_funcs:
assert f('metric') == {'type': agg_type, 'fieldName': 'metric'}
def test_filtered_aggregator(self):
filter_ = filters.Filter(dimension='dim', value='val')
aggs = [aggregators.count('metric1'),
aggregators.longsum('metric2'),
aggregators.doublesum('metric3'),
aggregators.min('metric4'),
aggregators.max('metric5'),
aggregators.hyperunique('metric6'),
aggregators.cardinality('dim1'),
aggregators.cardinality(['dim1', 'dim2'], by_row=True)]
for agg in aggs:
expected = {
'type': 'filtered',
'filter': {
'type': 'selector',
'dimension': 'dim',
'value': 'val'
},
'aggregator': agg
}
actual = aggregators.filtered(filter_, agg)
assert actual == expected
def test_build_aggregators(self):
agg_input = {
'agg1': aggregators.count('metric1'),
'agg2': aggregators.longsum('metric2'),
'agg3': aggregators.doublesum('metric3'),
'agg4': aggregators.min('metric4'),
'agg5': aggregators.max('metric5'),
'agg6': aggregators.hyperunique('metric6'),
'agg7': aggregators.cardinality('dim1'),
'agg8': aggregators.cardinality(['dim1', 'dim2'], by_row=True)
}
built_agg = aggregators.build_aggregators(agg_input)
expected = [
{'name': 'agg1', 'type': 'count', 'fieldName': 'metric1'},
{'name': 'agg2', 'type': 'longSum', 'fieldName': 'metric2'},
{'name': 'agg3', 'type': 'doubleSum', 'fieldName': 'metric3'},
{'name': 'agg4', 'type': 'min', 'fieldName': 'metric4'},
{'name': 'agg5', 'type': 'max', 'fieldName': 'metric5'},
{'name': 'agg6', 'type': 'hyperUnique', 'fieldName': 'metric6'},
{'name': 'agg7', 'type': 'cardinality', 'fieldNames': ['dim1'], 'byRow': False},
{'name': 'agg8', 'type': 'cardinality', 'fieldNames': ['dim1', 'dim2'], 'byRow': True},
]
assert (sorted(built_agg, key=itemgetter('name')) ==
sorted(expected, key=itemgetter('name')))
def test_build_filtered_aggregator(self):
filter_ = filters.Filter(dimension='dim', value='val')
agg_input = {
'agg1': aggregators.filtered(filter_,
aggregators.count('metric1')),
'agg2': aggregators.filtered(filter_,
aggregators.longsum('metric2')),
'agg3': aggregators.filtered(filter_,
aggregators.doublesum('metric3')),
'agg4': aggregators.filtered(filter_,
aggregators.min('metric4')),
'agg5': aggregators.filtered(filter_,
aggregators.max('metric5')),
'agg6': aggregators.filtered(filter_,
aggregators.hyperunique('metric6')),
'agg7': aggregators.filtered(filter_,
aggregators.cardinality('dim1')),
'agg8': aggregators.filtered(filter_,
aggregators.cardinality(['dim1', 'dim2'], by_row=True)),
}
base = {
'type': 'filtered',
'filter': {
'type': 'selector',
'dimension': 'dim',
'value': 'val'
}
}
aggs = [
{'name': 'agg1', 'type': 'count', 'fieldName': 'metric1'},
{'name': 'agg2', 'type': 'longSum', 'fieldName': 'metric2'},
{'name': 'agg3', 'type': 'doubleSum', 'fieldName': 'metric3'},
{'name': 'agg4', 'type': 'min', 'fieldName': 'metric4'},
{'name': 'agg5', 'type': 'max', 'fieldName': 'metric5'},
{'name': 'agg6', 'type': 'hyperUnique', 'fieldName': 'metric6'},
{'name': 'agg7', 'type': 'cardinality', 'fieldNames': ['dim1'], 'byRow': False},
{'name': 'agg8', 'type': 'cardinality', 'fieldNames': ['dim1', 'dim2'], 'byRow': True},
]
expected = []
for agg in aggs:
exp = deepcopy(base)
exp.update({'aggregator': agg})
expected.append(exp)
built_agg = aggregators.build_aggregators(agg_input)
actual = sorted(built_agg, key=lambda k: itemgetter('name')(
itemgetter('aggregator')(k)))
expected = sorted(expected, key=lambda k: itemgetter('name')(
itemgetter('aggregator')(k)))
assert expected == actual
|
python
|
import os
from django.templatetags.static import static
from django.utils.html import format_html
from django.utils.module_loading import import_string
from wagtail.core import hooks
from wagtail_icons.settings import BASE_PATH, SETS
@hooks.register("insert_global_admin_css")
def global_admin_css():
stylesheets = []
for iconset_path in SETS:
iconset_class = import_string(iconset_path)
iconset_instance = iconset_class()
stylesheets += iconset_instance.get_css_files()
html = "".join(
'<link rel="stylesheet" href="%s">'
% static(os.path.join(BASE_PATH, stylesheet))
for stylesheet in stylesheets
)
return format_html(html)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Author: Ting'''
import logging
import traceback
from argparse import ArgumentParser
from datetime import date
from pprint import pprint
from subprocess import call
import re
from collections import defaultdict
from os.path import join, abspath, dirname, isfile
import csv
import xlrd
# import matplotlib.pyplot as plt
import psycopg2
log = logging.getLogger(name=__file__)
class DataIO:
today_str = str(date.today()).replace("-", "")
file_dir = dirname(abspath(__file__))
main_dir = file_dir.split('git')[0]
def __init__(self, config=None, *args, **kwargs):
super().__init__()
self.parse_args = self._parse_args()
if config is None:
self.file_path = input('Please enter the location of your file: ')
return
self.config = config
try:
self.file_path_list = config.get('file_path')
except Exception as e:
log.error('Error! No path found.')
return
for file_path in self.file_path_list:
try:
parse_file(file_path)
except Exception as e:
log.error('Error! Unknown file type, please select only Excel, CSV, or Postgres Login.')
def _parse_args(self):
parser = ArgumentParser()
parser.add_argument('--filename', required=True, help="Filename.")
parser.add_argument('--file-type', required=True, help="File Type.")
# parser.add_argument('--debug', required=False, action='store_true', help="Log in debug mode.")
# parser.add_argument('--querysrch', required=False, action='store_true', nargs='+', help="Initiate search of the site entered.")
# if both querysrch and tags are there, use parser.parse_args('args'.split())
args, trash = parser.parse_known_args()
return args
def parse_file(file_path):
if '.' not in file_path:
# no extension: treat a Postgres connection string ('dbname=...') as a DB source
return True if 'dbname' in file_path else None  # read_db(file_path)
file_type = file_path.split('.')[-1]
if file_type == 'csv':
return True  # read_csv(file_path)
elif 'xls' in file_type:
return True  # read_xlsx(file_path)
elif 'txt' in file_type:
return True  # read_text_file(file_path)
else:
return None
def read_csv(filename, sheet_names='Turnout Rates'):
file_path = join(main_dir, filename)
with open(file_path, newline='') as csv_file:
csv_data = list(csv.reader(csv_file, delimiter=','))
return csv_data
def save_xlsx_to_csv(filename, xlsx_sh, save_to_csv=True):
filename = filename.split('.')[0]
sh = xlsx_sh
if save_to_csv is True:
csv_file = open(filename + '.csv', 'w', newline='')
wr = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
for rownum in range(sh.nrows):
wr.writerow(sh.row_values(rownum))
csv_file.close()
def read_xlsx(filename, sheet_names='Turnout Rates'):
# xlsx is expected to have xls in the string
file_path = join(main_dir, filename)
wb = xlrd.open_workbook(file_path)
sh = wb.sheet_by_name(sheet_names)
save_xlsx_to_csv(filename, sh, save_to_csv=False)
# Organize data
data_class = {}
data_subclass = {}
data_set = {}
for rownum in range(sh.nrows):
if rownum > 1:
colnum = 0
data = {}
for ds in sh.row_values(rownum):
data[data_subclass[colnum]] = ds
colnum += 1
data_set[data['State']] = data
elif rownum == 1:
data_subclass = sh.row_values(1)
data_subclass[0] = 'State'
data_subclass = tuple(data_subclass)
elif rownum == 0:
for dc in sh.row_values(0):
data_class[dc] = {}
return data_set
def log_data_to_psql(args_dict, db_name='test_db'):
db_name = 'dbname=' + db_name
'''This will be a place holder to customized log_data_psql fcn'''
with psycopg2.connect(db_name) as conn, conn.cursor() as cur:
args_str = ','.join(cur.mogrify("(%s,%s,%s,%s,%s,%s,%s,%s,%s)", x).decode() for x in args_dict)
cur.execute("INSERT INTO table VALUES " + args_str)
def read_db(query, db_name='test_db'):
db_name = 'dbname=' + db_name
if 'SELECT' not in query.upper():
return
with psycopg2.connect(db_name).cursor() as cur:
cur.execute(query)
return cur.fetchall()
def read_text_file(filename):
key_phrase = ''
expression = ''
if isfile(filename) is True:
with open(filename, 'r') as file_content:
file_content_readlines = file_content.readlines()
# Read the last line
for line in reversed(file_content_readlines):
if key_phrase in line:
# Do something fancy AF
break
else:
# Do something else fancy AF
break
# Base on that fancy thing, choose what to do
if True:
log.info('Reading text file')
for row in file_content_readlines:
# Search for expression
if expression in row:
matching_expression = re.search(r'(?<=@)\w+', row).group(0)
if matching_expression in expression_dict:
# Do something fancy
keys = [k for k in table_dict.keys()]
content = [tuple(str(v) for k, v in table_dict.items())]
if __name__ == '__main__':
# parsed_args = _parse_args()
filename = '2016 November General Election.xlsx'
data_set = read_xlsx(filename)
print(data_set)
# if parsed_args.filename:
# if parsed_args.file_type == 'txt':
# read_text_file(parsed_args.filename)
# if parsed_args.file_type == 'tbl':
# read_table(parsed_args.filename)
# if parsed_args.file_type == 'db':
# read_db(parsed_args.filename)
### Reference:
# http://stackoverflow.com/questions/20105118/convert-xlsx-to-csv-correctly-using-python
# http://stackoverflow.com/questions/1038160/data-structure-for-maintaining-tabular-data-in-memory
|