content | type |
---|---|
stringlengths 0 to 894k | stringclasses 2 values |
"""
@brief: Fill the queue with new tasks read from a tasks file.
"""
import pika
import sys
from .ip_provider import get_valid_ip
def create_new_tasks(fn,broker):
tasks=[]
with open(fn,"r") as f:
for line in f:
tasks.append(line)
connection = pika.BlockingConnection(pika.ConnectionParameters(host=get_valid_ip(broker)))
channel = connection.channel()
channel.queue_declare(queue='task_queue', durable=True)
for task in tasks:
task = task.strip("\n")
channel.basic_publish(
exchange='',
routing_key='task_queue',
body=task,
properties=pika.BasicProperties(
                delivery_mode=2,  # make message persistent
))
print(" [x] Sent %r" % task)
connection.close()
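# A minimal usage sketch (hypothetical module path, task file and broker name); each
# line of the tasks file becomes one persistent message on the durable 'task_queue':
#   from workers.fill_queue import create_new_tasks
#   create_new_tasks("tasks.txt", "rabbitmq-broker")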
|
python
|
'''
@file: MPNCOV.py
@author: Jiangtao Xie
@author: Peihua Li
Copyright (C) 2018 Peihua Li and Jiangtao Xie
All rights reserved.
'''
import torch
import numpy as np
from torch.autograd import Function
class Covpool(Function):
@staticmethod
def forward(ctx, input):
x = input
batchSize = x.data.shape[0]
dim = x.data.shape[1]
h = x.data.shape[2]
w = x.data.shape[3]
M = h*w
x = x.reshape(batchSize,dim,M)
I_hat = (-1./M/M)*torch.ones(M,M,device = x.device) + (1./M)*torch.eye(M,M,device = x.device)
I_hat = I_hat.view(1,M,M).repeat(batchSize,1,1).type(x.dtype)
y = x.bmm(I_hat).bmm(x.transpose(1,2))
ctx.save_for_backward(input,I_hat)
return y
@staticmethod
def backward(ctx, grad_output):
input,I_hat = ctx.saved_tensors
x = input
batchSize = x.data.shape[0]
dim = x.data.shape[1]
h = x.data.shape[2]
w = x.data.shape[3]
M = h*w
x = x.reshape(batchSize,dim,M)
grad_input = grad_output + grad_output.transpose(1,2)
grad_input = grad_input.bmm(x).bmm(I_hat)
grad_input = grad_input.reshape(batchSize,dim,h,w)
return grad_input
class Sqrtm(Function):
@staticmethod
def forward(ctx, input, iterN):
x = input
batchSize = x.data.shape[0]
dim = x.data.shape[1]
dtype = x.dtype
I3 = 3.0*torch.eye(dim,dim,device = x.device).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)
normA = (1.0/3.0)*x.mul(I3).sum(dim=1).sum(dim=1)
A = x.div(normA.view(batchSize,1,1).expand_as(x))
Y = torch.zeros(batchSize, iterN, dim, dim, requires_grad = False, device = x.device)
Z = torch.eye(dim,dim,device = x.device).view(1,dim,dim).repeat(batchSize,iterN,1,1)
if iterN < 2:
ZY = 0.5*(I3 - A)
Y[:,0,:,:] = A.bmm(ZY)
else:
ZY = 0.5*(I3 - A)
Y[:,0,:,:] = A.bmm(ZY)
Z[:,0,:,:] = ZY
for i in range(1, iterN-1):
ZY = 0.5*(I3 - Z[:,i-1,:,:].bmm(Y[:,i-1,:,:]))
Y[:,i,:,:] = Y[:,i-1,:,:].bmm(ZY)
Z[:,i,:,:] = ZY.bmm(Z[:,i-1,:,:])
ZY = 0.5*Y[:,iterN-2,:,:].bmm(I3 - Z[:,iterN-2,:,:].bmm(Y[:,iterN-2,:,:]))
y = ZY*torch.sqrt(normA).view(batchSize, 1, 1).expand_as(x)
ctx.save_for_backward(input, A, ZY, normA, Y, Z)
ctx.iterN = iterN
return y
@staticmethod
def backward(ctx, grad_output):
input, A, ZY, normA, Y, Z = ctx.saved_tensors
iterN = ctx.iterN
x = input
batchSize = x.data.shape[0]
dim = x.data.shape[1]
dtype = x.dtype
der_postCom = grad_output*torch.sqrt(normA).view(batchSize, 1, 1).expand_as(x)
der_postComAux = (grad_output*ZY).sum(dim=1).sum(dim=1).div(2*torch.sqrt(normA))
I3 = 3.0*torch.eye(dim,dim,device = x.device).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)
if iterN < 2:
            der_NSiter = 0.5*(der_postCom.bmm(I3 - A) - A.bmm(der_postCom))
else:
dldY = 0.5*(der_postCom.bmm(I3 - Y[:,iterN-2,:,:].bmm(Z[:,iterN-2,:,:])) -
Z[:,iterN-2,:,:].bmm(Y[:,iterN-2,:,:]).bmm(der_postCom))
dldZ = -0.5*Y[:,iterN-2,:,:].bmm(der_postCom).bmm(Y[:,iterN-2,:,:])
for i in range(iterN-3, -1, -1):
YZ = I3 - Y[:,i,:,:].bmm(Z[:,i,:,:])
ZY = Z[:,i,:,:].bmm(Y[:,i,:,:])
dldY_ = 0.5*(dldY.bmm(YZ) -
Z[:,i,:,:].bmm(dldZ).bmm(Z[:,i,:,:]) -
ZY.bmm(dldY))
dldZ_ = 0.5*(YZ.bmm(dldZ) -
Y[:,i,:,:].bmm(dldY).bmm(Y[:,i,:,:]) -
dldZ.bmm(ZY))
dldY = dldY_
dldZ = dldZ_
der_NSiter = 0.5*(dldY.bmm(I3 - A) - dldZ - A.bmm(dldY))
grad_input = der_NSiter.div(normA.view(batchSize,1,1).expand_as(x))
grad_aux = der_NSiter.mul(x).sum(dim=1).sum(dim=1)
for i in range(batchSize):
grad_input[i,:,:] += (der_postComAux[i] \
- grad_aux[i] / (normA[i] * normA[i])) \
*torch.ones(dim,device = x.device).diag()
return grad_input, None
class Triuvec(Function):
@staticmethod
def forward(ctx, input):
x = input
batchSize = x.data.shape[0]
dim = x.data.shape[1]
dtype = x.dtype
x = x.reshape(batchSize, dim*dim)
I = torch.ones(dim,dim).triu().t().reshape(dim*dim)
        index = I.nonzero().squeeze(1)
        y = torch.zeros(batchSize,dim*(dim+1)//2,device = x.device)
for i in range(batchSize):
            y[i, :] = x[i, index]
ctx.save_for_backward(input,index)
return y
@staticmethod
def backward(ctx, grad_output):
input,index = ctx.saved_tensors
x = input
batchSize = x.data.shape[0]
dim = x.data.shape[1]
dtype = x.dtype
grad_input = torch.zeros(batchSize,dim,dim,device = x.device,requires_grad=False)
grad_input = grad_input.reshape(batchSize,dim*dim)
for i in range(batchSize):
            grad_input[i,index] = grad_output[i,:]
grad_input = grad_input.reshape(batchSize,dim,dim)
return grad_input
def CovpoolLayer(var):
return Covpool.apply(var)
def SqrtmLayer(var, iterN):
return Sqrtm.apply(var, iterN)
def TriuvecLayer(var):
return Triuvec.apply(var)
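# A minimal usage sketch (random input; the batch size, channel count and spatial size
# are placeholders) of the pipeline the layers above implement: covariance pooling,
# Newton-Schulz matrix square root, then upper-triangular vectorization.
def _demo_mpncov(iterN=5):
    x = torch.randn(2, 64, 8, 8)           # e.g. a convolutional feature map
    cov = CovpoolLayer(x)                  # (2, 64, 64) covariance matrices
    cov_sqrt = SqrtmLayer(cov, iterN)      # approximate matrix square root
    feat = TriuvecLayer(cov_sqrt)          # (2, 64*65//2) vectorized features
    return feat.shape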
|
python
|
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Object client"""
import logging
from openstackclient.common import utils
LOG = logging.getLogger(__name__)
API_NAME = 'object-store'
API_VERSIONS = {
'1': 'openstackclient.object.client.ObjectClientv1',
}
def make_client(instance):
"""Returns an object service client."""
object_client = utils.get_client_class(
API_NAME,
instance._api_version[API_NAME],
API_VERSIONS)
if instance._url:
endpoint = instance._url
else:
endpoint = instance.get_endpoint_for_service_type(API_NAME)
LOG.debug('instantiating object client')
client = object_client(
endpoint=endpoint,
token=instance._token,
)
return client
class ObjectClientv1(object):
def __init__(
self,
endpoint_type='publicURL',
endpoint=None,
token=None,
):
self.endpoint_type = endpoint_type
self.endpoint = endpoint
self.token = token
|
python
|
import pytest
from constants.pipelines import OperationStatuses, PipelineStatuses
from factories.factory_pipelines import OperationRunFactory
from pipelines.celery_task import ClassBasedTask, OperationTask
from polyaxon.celery_api import app as celery_app
from tests.utils import BaseTest
@pytest.mark.pipelines_mark
class TestOperationTask(BaseTest):
def setUp(self):
self.operation_run = OperationRunFactory()
self.pipeline_run = self.operation_run.pipeline_run
# Manually set status to scheduled
self.operation_run.on_scheduled()
return super().setUp()
def test_task_without_operation_run_raises(self):
@celery_app.task(base=OperationTask, shared=False)
def dummy_task():
return
with self.assertRaises(TypeError):
dummy_task.apply_async()
def test_task_with_operation_run_succeeds(self):
@celery_app.task(base=OperationTask, shared=False)
def dummy_task(operation_run_id):
return
kwargs = {'operation_run_id': self.operation_run.id}
dummy_task.apply_async(kwargs=kwargs)
self.operation_run.refresh_from_db()
assert self.operation_run.succeeded is True
assert set(self.operation_run.statuses.values_list('status', flat=True)) == {
OperationStatuses.CREATED,
OperationStatuses.SCHEDULED,
OperationStatuses.RUNNING,
OperationStatuses.SUCCEEDED,
}
self.pipeline_run.refresh_from_db()
assert self.operation_run.pipeline_run.last_status == PipelineStatuses.FINISHED
assert set(self.operation_run.pipeline_run.statuses.values_list('status', flat=True)) == {
PipelineStatuses.CREATED,
PipelineStatuses.SCHEDULED,
PipelineStatuses.RUNNING,
PipelineStatuses.FINISHED,
}
def test_task_with_error_fails(self):
@celery_app.task(base=OperationTask, shared=False)
def raising_task(operation_run_id):
raise KeyError
kwargs = {'operation_run_id': self.operation_run.id}
raising_task.apply_async(kwargs=kwargs)
self.operation_run.refresh_from_db()
assert self.operation_run.failed is True
assert set(self.operation_run.statuses.values_list('status', flat=True)) == {
OperationStatuses.CREATED,
OperationStatuses.SCHEDULED,
OperationStatuses.RUNNING,
OperationStatuses.FAILED,
}
self.pipeline_run.refresh_from_db()
assert self.operation_run.pipeline_run.last_status == PipelineStatuses.FINISHED
assert set(self.operation_run.pipeline_run.statuses.values_list('status', flat=True)) == {
PipelineStatuses.CREATED,
PipelineStatuses.SCHEDULED,
PipelineStatuses.RUNNING,
PipelineStatuses.FINISHED,
}
def test_task_retries_for_specified_exception(self):
class RetryTask(ClassBasedTask):
retry_for = (KeyError, )
@staticmethod
def _run(task_bind, *args, **kwargs):
raise KeyError
@celery_app.task(base=OperationTask, bind=True, shared=False)
def retry_task(task_bind, operation_run_id):
assert task_bind.max_retries == 2
assert task_bind.countdown == 0
RetryTask.run(task_bind=task_bind, operation_run_id=operation_run_id)
        # Add max retries and retry delay to the operation
self.operation_run.operation.max_retries = 2
self.operation_run.operation.retry_delay = 0
self.operation_run.operation.save()
kwargs = {'operation_run_id': self.operation_run.id}
retry_task.apply_async(kwargs=kwargs)
self.operation_run.refresh_from_db()
assert self.operation_run.last_status == OperationStatuses.RETRYING
assert set(self.operation_run.statuses.values_list('status', flat=True)) == {
OperationStatuses.CREATED,
OperationStatuses.SCHEDULED,
OperationStatuses.RUNNING,
OperationStatuses.RETRYING,
}
self.pipeline_run.refresh_from_db()
assert self.operation_run.pipeline_run.last_status == PipelineStatuses.RUNNING
assert set(self.operation_run.pipeline_run.statuses.values_list('status', flat=True)) == {
PipelineStatuses.CREATED,
PipelineStatuses.SCHEDULED,
PipelineStatuses.RUNNING,
}
|
python
|
import os
import pygame
from game_defines import DIRECTIONS
ASSET_BASE = os.path.join(os.path.dirname(__file__), "assets")
class Actor(object):
@staticmethod
def asset(name):
return os.path.join(ASSET_BASE, name)
def __init__(self, name, image_path, actor_type, startx, starty):
self.image = pygame.image.load(Actor.asset(image_path))
self.name = name
self.x = startx
self.y = starty
self.actor_type = actor_type
self.map_object = None
def process(self, sensor_input):
raise AssertionError("Process Needs to be overriden")
def get_image(self):
return self.image
def get_type(self):
return self.actor_type
def get_x(self):
return self.x
def get_y(self):
return self.y
def set_map(self, map_object):
self.map_object = map_object
def move(self, move_to):
x_offset = 0
y_offset = 0
if move_to == DIRECTIONS.UP:
x_offset = 0
y_offset = -1
elif move_to == DIRECTIONS.UPRIGHT:
x_offset = 1
y_offset = -1
elif move_to == DIRECTIONS.UPLEFT:
x_offset = -1
y_offset = -1
elif move_to == DIRECTIONS.RIGHT:
x_offset = 1
y_offset = 0
elif move_to == DIRECTIONS.DOWN:
x_offset = 0
y_offset = 1
elif move_to == DIRECTIONS.DOWNRIGHT:
x_offset = 1
y_offset = 1
elif move_to == DIRECTIONS.DOWNLEFT:
x_offset = -1
y_offset = 1
elif move_to == DIRECTIONS.LEFT:
x_offset = -1
y_offset = 0
if self.map_object.is_blocked(self.x + x_offset, self.y + y_offset):
return False
self.x += x_offset
self.y += y_offset
return True
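# A minimal sketch of a concrete Actor (hypothetical name and behavior); process()
# is the hook the base class expects subclasses to override on every game tick.
class WanderingActor(Actor):
    def process(self, sensor_input):
        # Try to step right; move() returns False (and the actor stays put) if the
        # destination tile is blocked on the map.
        self.move(DIRECTIONS.RIGHT)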
|
python
|
"""
File: 240.py
Title: Search a 2D Matrix II
Difficulty: Medium
URL: https://leetcode.com/problems/search-a-2d-matrix-ii/
"""
import unittest
from typing import List
class Solution:
def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
m = len(matrix)
n = len(matrix[0])
i = 0
j = m - 1
while (i < n) and (j >= 0):
if matrix[j][i] == target:
return True
if matrix[j][i] > target:
j -= 1
else:
i += 1
return False
class SolutionTestCase(unittest.TestCase):
def test_example1(self):
# Input
matrix = [[1, 4, 7, 11, 15],
[2, 5, 8, 12, 19],
[3, 6, 9, 16, 22],
[10, 13, 14, 17, 24],
[18, 21, 23, 26, 30]]
target = 5
# Output
output = True
solution = Solution()
self.assertEqual(solution.searchMatrix(matrix, target), output)
def test_example2(self):
# Input
matrix = [[1, 4, 7, 11, 15],
[2, 5, 8, 12, 19],
[3, 6, 9, 16, 22],
[10, 13, 14, 17, 24],
[18, 21, 23, 26, 30]]
target = 20
# Output
output = False
solution = Solution()
self.assertEqual(solution.searchMatrix(matrix, target), output)
if __name__ == "__main__":
unittest.main()
|
python
|
# -*- coding: utf-8 -*-
"""
safedun-server
Created on Sun Oct 13 00:00:00 2019
Author: Adil Rahman
GitHub: https://github.com/adildsw/safedun-server
"""
import argparse
import socket
from backend import safedun
from flask import Flask, render_template, request, send_file
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/execute', methods=['POST'])
def execute():
mode = request.form['mode']
key = request.form['key']
cycle = int(request.form['cycle'])
file = request.files['file']
scrambler = safedun()
output_file = scrambler.generate(mode, cycle, key, file)
return send_file(output_file, as_attachment=True, attachment_filename="output.png")
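# A minimal client-side sketch (hypothetical host, field values and file names) for
# calling the /execute endpoint above with the `requests` library.
def _example_execute_request(server_url="http://127.0.0.1:5000", image_path="input.png"):
    import requests  # assumed to be installed; the server itself does not need it
    with open(image_path, "rb") as fh:
        response = requests.post(
            server_url + "/execute",
            data={"mode": "scramble", "key": "secret", "cycle": "3"},  # placeholder values
            files={"file": fh},
        )
    with open("output.png", "wb") as out:
        out.write(response.content)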
if __name__ == "__main__":
host_ip = socket.gethostbyname(socket.gethostname())
parser = argparse.ArgumentParser(description="safedun Server Option Description")
parser.add_argument("-H", "--host", help="specify IP address to host server", required=False, default=host_ip)
parser.add_argument("-p", "--port", help="specify Port number to host server", required=False, default="5000")
parser.add_argument("-d", "--debug", help="specify whether the server will run on debug mode", required=False, default=False)
parser.add_argument("-l", "--local", help="host server in localhost", required=False, default=False)
argument = parser.parse_args()
if not argument.local == False:
argument.host = '127.0.0.1'
app.run(host=argument.host, port=argument.port, debug=argument.debug)
|
python
|
"""The Policy can use these classes to communicate with Vizier."""
import abc
import collections
import dataclasses
import datetime
from typing import Dict, Iterable, List, Optional
from vizier import pyvizier as vz
@dataclasses.dataclass(frozen=True)
class MetadataDelta:
"""Carries cumulative delta for a batch metadata update.
Attributes:
on_study: Updates to be made on study-level metadata.
on_trials: Maps trial id to updates.
"""
on_study: vz.Metadata = dataclasses.field(default_factory=vz.Metadata)
on_trials: Dict[int, vz.Metadata] = dataclasses.field(
default_factory=lambda: collections.defaultdict(vz.Metadata))
class _MetadataUpdateContext:
"""Metadata update context.
Usage:
# All metadata updates in the context are queued, not immediately applied.
# Upon exit, supporter handles all metadata updates in a batch.
with pythia2._MetadataUpdateContext(policy_supporter) as mu:
# Study-level metadata.
mu.assign('namespace', 'key', 'value')
# Trial-level metadata.
mu.assign('namespace', 'key', 'value', trial_id=1)
# Same as above but with a side effect. After this line the following
# line is True:
# trial.metadata.ns('namespace')['key'] == 'value'
mu.assign('namespace', 'key', 'value', trial)
"""
def __init__(self, supporter: 'PolicySupporter'):
self._supporter = supporter
self._delta = MetadataDelta()
# pylint: disable=invalid-name
def assign(self,
namespace: str,
key: str,
value: vz.MetadataValue,
trial: Optional[vz.Trial] = None,
*,
trial_id: Optional[int] = None):
"""Assigns metadata.
Args:
namespace: Namespace of the metadata. See vz.Metadata doc for more
details.
      key: Metadata key.
      value: Metadata value.
trial: If specified, `trial_id` must be None. It behaves the same as when
`trial_id=trial.id`, except that `trial` is immediately modified.
trial_id: If specified, `trial` must be None. If both `trial` and
`trial_id` are None, then the key-value pair will be assigned to the
study.
Raises:
      ValueError: If both `trial` and `trial_id` are specified.
"""
if trial is None and trial_id is None:
self._delta.on_study.ns(namespace)[key] = value
elif trial is not None and trial_id is not None:
raise ValueError(
'At most one of `trial` and `trial_id` can be specified.')
elif trial is not None:
self._delta.on_trials[trial.id].ns(namespace)[key] = value
trial.metadata.ns(namespace)[key] = value
elif trial_id is not None:
self._delta.on_trials[trial_id].ns(namespace)[key] = value
def __enter__(self):
return self
def __exit__(self, *args):
"""upon exit, sends a batch update request."""
self._supporter.SendMetadata(self._delta)
class PolicySupporter(abc.ABC):
"""Used by Policy instances to communicate with Vizier."""
# TODO: Change to GetStudyDescriptor.
@abc.abstractmethod
def GetStudyConfig(self, study_guid: Optional[str] = None) -> vz.StudyConfig:
"""Requests a StudyConfig from Vizier.
This sends a PythiaToVizier.trial_selector packet and waits for the
response(s). You can call this multiple times, and it is thread-friendly,
so you can even overlap calls.
Args:
study_guid: The GUID of the study whose StudyConfig you want. Note that
access control applies. By default, use the current study's GUID.
Returns:
The requested StudyConfig proto.
Raises:
CancelComputeError: (Do not catch.)
PythiaProtocolError: (Do not catch.)
VizierDatabaseError: If the database operation raises an error, e.g. if
$study_guid refers to a nonexistent or inaccessible study.
"""
@abc.abstractmethod
def GetTrials(
self,
*,
study_guid: Optional[str] = None,
trial_ids: Optional[Iterable[int]] = None,
min_trial_id: Optional[int] = None,
max_trial_id: Optional[int] = None,
status_matches: Optional[vz.TrialStatus] = None,
include_intermediate_measurements: bool = True) -> List[vz.Trial]:
"""Requests Trials from Vizier.
Args:
study_guid: The GUID of the study to get Trials from. Default is None,
which means the current Study.
trial_ids: a list of Trial id numbers to acquire.
min_trial_id: Trials in [min_trial_id, max_trial_id] are selected, if at
least one of the two is not None.
max_trial_id: Trials in [min_trial_id, max_trial_id] are selected, if at
least one of the two is not None.
status_matches: If not None, filters for Trials where
Trial.status==status_matches. The default passes all types of Trial.
include_intermediate_measurements: If True (default), the returned Trials
must have all measurements. Note that the final Measurement is always
included for COMPLETED Trials. If False, PolicySupporter _may_ leave
`measurements` field empty in the returned Trials in order to optimize
speed, but it is not required to do so.
Returns:
Trials obtained from Vizier.
Raises:
CancelComputeError: (Do not catch.)
PythiaProtocolError: (Do not catch.)
VizierDatabaseError: If the database operation raises an error, e.g. if
$study_guid refers to a nonexistent or inaccessible study.
NOTE: if $trial_ids is set, $min_trial_id, $max_trial_id, and
$status_matches will be ignored.
"""
def CheckCancelled(self, note: Optional[str] = None) -> None:
"""Throws a CancelComputeError on timeout or if Vizier cancels.
This should be called occasionally by any long-running computation.
Raises an exception if the interaction has been cancelled by the Vizier
side of the protocol; the exception shuts down the Pythia server.
Args:
note: for debugging.
Raises:
CancelComputeError: (Do not catch.)
"""
pass
def TimeRemaining(self) -> datetime.timedelta:
"""The time remaining to compute a result.
Returns:
The remaining time before the RPC is considered to have timed out; it
returns datetime.timedelta.max if no deadline was specified in the RPC.
This is an alternative to calling CheckCancelled(); both have the goal of
terminating runaway computations. If your computation times out,
you should raise TemporaryPythiaError (if you want a retry) or
InactivateStudyError (if not).
"""
return datetime.timedelta(hours=1.0)
def MetadataUpdate(self) -> _MetadataUpdateContext:
"""Queues metadata updates, then passes them to UpdateMetadata().
Usage:
ps = PolicySupporter()
with ps.MetadataUpdate() as mu:
# Study-level metadata.
mu.assign('namespace', 'key', 'value')
# Trial-level metadata.
mu.assign('namespace', 'key', 'value', trial_id=1)
Returns:
A _MetadataUpdateContext instance to use as a context.
Raises:
CancelComputeError: (Do not catch.)
PythiaProtocolError: (Do not catch.)
VizierDatabaseError: If the database operation raises an error.
"""
return _MetadataUpdateContext(self)
@abc.abstractmethod
def SendMetadata(self, delta: MetadataDelta) -> None:
"""Updates the Study's metadata in Vizier's database.
The MetadataUpdate() method is preferred for normal use.
Args:
delta: Metadata to be uploaded to the Vizier database.
Raises:
CancelComputeError: (Do not catch.)
PythiaProtocolError: (Do not catch.)
VizierDatabaseError: If the database operation raises an error.
"""
|
python
|
from django.conf.urls import url
from . import views
from django.contrib.auth import views as auth_views
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^sponsor/$', views.sponsor, name='sponsor'),
url(r'^hospitality/$', views.hospitality, name='hospitality'),
url(r'^transport/$', views.transport, name='transport'),
url(r'^accomodation/$', views.accomodation, name='accomodation'),
url(r'^events/technical/$',views.technical, name='technical'),
url(r'^events/sports/$',views.sports, name='sports'),
url(r'^events/cultural/$',views.cultural, name='cultural'),
url(r'^events/register/$',views.event_register, name='event_register'),
url(r'^accomodation/register/$', views.accom_register, name='accom_register'),
url(r'^events/register2/$',views.event_register2, name='event_register2'),
url(r'^accomodation/register2/$',views.accom_register2, name='accom_register2'),
url(r'^events/(?P<category>\w+)/(?P<subcategory>\w+)/$', views.specificEventView, name='specificView'),
url(r'^events/(?P<category>\w+)/$', views.specificEventView, name='specificView'),
url(r'^pronights/', views.pronights, name='pronights'),
url(r'^forgotPassMail/', views.forgotmail, name='forgotmail'),
url(r'^forgotPassword/(?P<hashkey>\w+)', views.forgot, name='forgot'),
url(r'^me/', views.profile, name='profile'),
url(r'^login/', views.login1, name='login'),
url(r'^register/', views.register, name='register'),
url(r'^logout/$', auth_views.logout, {'next_page': '/'}),
]
|
python
|
import numpy as np
import math
import pandas as pd
#######################################################################
"""
These functions are applied to hierarchically classify the images.
level_n(x): determines the prediction at level n. If there is no prediction to be made at level n, the function returns nan.
level_n_p(x): determines the probability associated with the prediction at level n. This probability is needed to classify the sequences using the top-k method.
"""
def level_1(x):
if x['blank'] > x['not_blank']:
return 'blank'
else:
return 'not_blank'
def level_1_p(x):
if x['blank'] > x['not_blank']:
return x['blank']
else:
return x['not_blank']
def level_2(x):
if x['level_1'] == 'not_blank':
if x['animal'] > x['no_animal']:
return 'animal'
else:
return 'no_animal'
else:
return math.nan
def level_2_p(x):
if x['animal'] > x['no_animal']:
return x['animal']
else:
return x['no_animal']
def level_3(x):
if x['level_2'] == 'animal':
if x['bird'] > x['mammal']:
return 'bird'
else:
return 'mammal'
elif x['level_2'] == 'no_animal':
if x['human'] > x['pickup']:
return 'human'
else:
return 'pickup'
else:
return math.nan
def level_3_p(x):
if x['level_2'] == 'animal':
if x['bird'] > x['mammal']:
return x['bird']
else:
return x['mammal']
elif x['level_2'] == 'no_animal':
if x['human'] > x['pickup']:
return x['human']
else:
return x['pickup']
else:
return math.nan
def level_4(x):
if x['level_3'] == 'mammal':
if x['small_mammal'] > x['large_mammal']:
return 'small_mammal'
else:
return 'large_mammal'
else:
return math.nan
def level_4_p(x):
if x['level_3'] == 'mammal':
if x['small_mammal'] > x['large_mammal']:
return x['small_mammal']
else:
return x['large_mammal']
else:
return math.nan
def level_5(x,pred):
if x['level_4'] == 'small_mammal':
p = int(pred.iloc[x.name,[9,5,6,14]].idxmax())
return p
elif x['level_4'] == 'large_mammal':
p = int(pred.iloc[x.name,[0,7,11,1,4,12,13,15,16]].idxmax())
return p
else:
return math.nan
def level_5_p(x,pred):
if x['level_4'] == 'small_mammal':
p = np.asarray(pred.iloc[x.name,[9,5,6,14]]).max(axis=0)
return p
elif x['level_4'] == 'large_mammal':
p = np.asarray(pred.iloc[x.name,[0,7,11,1,4,12,13,15,16]]).max(axis=0)
return p
else:
return math.nan
########################################################################
def top_predictions(data, data_hierarchy):
"""
This function determines the prediction of the sequences based on the top-k predictions at every level.
"""
sequences = data_hierarchy['sequence'].drop_duplicates()
data_seq_top = pd.DataFrame(columns=['sequence','level_1','level_1_p','level_2','level_2_p','level_3','level_3_p','level_4','level_4_p','level_5','level_5_p'])
for s, seq in enumerate(sequences):
data_sequence = data_hierarchy[data_hierarchy['sequence'] == seq]
pred_sequence = data.loc[:,'0':'16'].loc[data['sequence'] == seq]
#Level 1
p_l1 = max([data_sequence['blank'].mean(), data_sequence['not_blank'].mean()])
l1 = ['blank', 'not_blank'][np.argmax([data_sequence['blank'].mean(), data_sequence['not_blank'].mean()])]
#level 2
if l1 == 'not_blank':
p_l2 = max([data_sequence['animal'].mean(), data_sequence['no_animal'].mean()])
l2 = ['animal', 'no_animal'][np.argmax([data_sequence['animal'].mean(), data_sequence['no_animal'].mean()])]
else:
p_l2 = math.nan
l2 = math.nan
#Level 3
if l2 == 'animal':
p_l3 = max([data_sequence['bird'].mean(), data_sequence['mammal'].mean()])
l3 = ['bird', 'mammal'][np.argmax([data_sequence['bird'].mean(), data_sequence['mammal'].mean()])]
elif l2 == 'no_animal':
p_l3 = max([data_sequence['human'].mean(), data_sequence['pickup'].mean()])
l3 = ['human', 'pickup'][np.argmax([data_sequence['human'].mean(), data_sequence['pickup'].mean()])]
else:
p_l3 = math.nan
l3 = math.nan
#Level 4
if l3 == 'mammal':
p_l4 = max([data_sequence['small_mammal'].mean(), data_sequence['large_mammal'].mean()])
l4 = ['small_mammal', 'large_mammal'][np.argmax([data_sequence['small_mammal'].mean(), data_sequence['large_mammal'].mean()])]
else:
p_l4 = math.nan
l4 = math.nan
#Level 5
if l4 == 'small_mammal':
p_l5 = max(pred_sequence.iloc[:,[9,5,6,14]].mean())
            l5 = int(pred_sequence.iloc[:,[9,5,6,14]].mean().idxmax())
elif l4 == 'large_mammal':
large = pred_sequence.iloc[:,[0,7,11,1,4,12,13,15,16]]
top5_p = []
top5_pred = []
#Top-5 for every image
for i, row in large.iterrows():
top5_p += np.sort(row.values.tolist())[-5:].tolist()
top5_pred += np.array([0,7,11,1,4,12,13,15,16])[np.argsort(row.values.tolist())[-5:].tolist()].tolist()
df_top5 = pd.DataFrame({'top5_p': top5_p, 'top5_pred':top5_pred})
top5_seq = df_top5.groupby('top5_pred').sum().divide(len(data_sequence)).sort_values('top5_p',ascending=False)[:5]
p_l5 = top5_seq.max()[0]
l5 = int(top5_seq.idxmax()[0])
else:
p_l5 = math.nan
l5 = math.nan
data_seq_top.loc[s] = [seq, l1, p_l1, l2, p_l2, l3, p_l3, l4, p_l4, l5, p_l5]
return data_seq_top
#########################################################################################
def hierarchical_predictions(data):
"""
This function determines the hierarchical prediction for the individual images, based on the output of the neural network.
These predictions can then be used to classify a sequence.
"""
predictions = data.loc[:,'0':'16']
index_small = [9,5,6,14]
index_large = [0,7,11,1,4,12,13,15,16]
hierarchy = pd.DataFrame()
hierarchy['blank'] = predictions.iloc[:,3]
hierarchy['small_mammal'] = predictions.iloc[:,index_small].sum(axis=1)
hierarchy['large_mammal'] = predictions.iloc[:,index_large].sum(axis=1)
hierarchy['mammal'] = hierarchy['small_mammal'] + hierarchy['large_mammal']
hierarchy['bird'] = predictions.iloc[:,2]
hierarchy['animal'] = hierarchy['bird'] + hierarchy['mammal']
hierarchy['human'] = predictions.iloc[:,8]
hierarchy['pickup'] = predictions.iloc[:,10]
hierarchy['no_animal'] = hierarchy['human'] + hierarchy['pickup']
hierarchy['not_blank'] = hierarchy['no_animal'] + hierarchy['animal']
hierarchy['level_1'] = hierarchy.apply(level_1, axis=1)
hierarchy['level_1_p'] = hierarchy.apply(level_1_p, axis=1)
hierarchy['level_2'] = hierarchy.apply(level_2, axis=1)
hierarchy['level_2_p'] = hierarchy.apply(level_2_p, axis=1)
hierarchy['level_3'] = hierarchy.apply(level_3, axis=1)
hierarchy['level_3_p'] = hierarchy.apply(level_3_p, axis=1)
hierarchy['level_4'] = hierarchy.apply(level_4, axis=1)
hierarchy['level_4_p'] = hierarchy.apply(level_4_p, axis=1)
mammals = pd.DataFrame()
mammals['small_pred_max'] = np.asarray(predictions.iloc[:,index_small]).argmax(axis=1)
mammals['large_pred_max'] = np.asarray(predictions.iloc[:,index_large]).argmax(axis=1)
mammals['small_max_p'] = np.asarray(predictions.iloc[:,index_small]).max(axis=1)
mammals['large_max_p'] = np.asarray(predictions.iloc[:,index_large]).max(axis=1)
hierarchy['level_5'] = hierarchy.apply(level_5, pred = predictions, axis=1)
hierarchy['level_5_p'] = hierarchy.apply(level_5_p, pred = predictions, axis=1)
return hierarchy
############################################################################################
def bottom_hierarchical_prediction(x):
"""
This function determines the final prediction for a sequence, based on the hierarchical prediction at every level.
"""
if pd.isnull(x['level_5']) == False:
label = x['level_5']
elif pd.isnull(x['level_3']) == False:
label = x['level_3']
if label == 'bird':
label = 2
elif label == 'human':
label = 8
else:
label = 10
else:
label = 3 #blank
labels = ['Ass','Beech Marten','Bird','Blank','Cat','Squirrel','Hare','Horse','Human','Mouse','PickupSetup','Fox','Dog','Mouflon','Hedgehog','Roe Deer','Wild Boar']
label = labels[label]
return label
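# A minimal, self-contained sketch (synthetic softmax outputs, not project data) of how
# the helpers above chain together; columns '0'..'16' plus 'sequence' mirror the layout
# the functions expect.
def _demo_hierarchical_pipeline():
    rng = np.random.default_rng(0)
    probs = rng.random((6, 17))
    probs = probs / probs.sum(axis=1, keepdims=True)           # fake per-image softmax
    data = pd.DataFrame(probs, columns=[str(i) for i in range(17)])
    data['sequence'] = ['seq_a'] * 3 + ['seq_b'] * 3
    hierarchy = hierarchical_predictions(data)                 # per-image hierarchy
    hierarchy['sequence'] = data['sequence']
    seq_top = top_predictions(data, hierarchy)                 # per-sequence top-k
    seq_top['label'] = seq_top.apply(bottom_hierarchical_prediction, axis=1)
    return seq_top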
|
python
|
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.utils import timezone
from django.views import generic
from .models import Choice, Question
# Create your views here.
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset_rev(self):
return Question.objects.order_by('-pub_date').reverse()[1:]
def get_queryset(self):
"""
Return the last five published questions (not including those set to be
published in the future).
"""
return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
def get_queryset(self):
"""
Excludes any questions that aren't published yet.
"""
return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def summary(request):
summary = {}
for choice in Choice.objects.all():
q_text = choice.question.question_text
c_text = choice.choice_text
votes = choice.votes
        if q_text not in summary:
summary[q_text] = {c_text: votes}
else:
summary[q_text][c_text] = votes
return render(request, 'polls/summary.html', {
'summary': summary
})
def vote(request, pk):
question = get_object_or_404(Question, pk=pk)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
return render(request, 'polls/detail.html', {
'question': question,
'error_message' :"You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results', args=(question.id, )))
|
python
|
#!/usr/bin/env python
import sys
from os import listdir
def proc_files(files):
fs = []
for f in files:
try:
fs.append(open(f, "r"))
except Exception as exct:
print("Failed to read file {:s}".format(f))
sys.exit(1)
done = False
result = []
while not done:
index = "NONE"
val_sum = 0
for f in fs:
try:
line = f.__next__()
except Exception:
done = True
break
sline = line.split()
assert(len(sline) == 2)
index, value = sline
value = float(value)
val_sum += value
# print(sline)
else:
val_avg = val_sum/len(fs)
result.append("{:s}\t{:f}".format(index, val_avg))
result.append("")
return result
def proc_dirs(dirs):
first_dir = dirs[0]
for f in listdir(first_dir):
avg_out = proc_files(["{:s}/{:s}".format(d, f) for d in dirs])
with open(f, "w") as outfile:
outfile.write("\n".join(avg_out))
def main():
proc_dirs(sys.argv[1:])
# proc_files(sys.argv[1:])
if __name__ == "__main__":
main()
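# Example invocation (hypothetical script and directory names): every directory is
# expected to hold identically named files of "<index>\t<value>" lines, and for each
# shared file name an averaged copy is written to the current working directory:
#   python average_runs.py run1/ run2/ run3/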
|
python
|
"""Define language independent properties at the module level"""
from adam.language.lexicon import LexiconEntry, LexiconProperty
from adam.language.dependency import MorphosyntacticProperty
# Define universal morphosyntactic properties
FIRST_PERSON = MorphosyntacticProperty("1p")
SECOND_PERSON = MorphosyntacticProperty("2p")
THIRD_PERSON = MorphosyntacticProperty("3p")
NOMINATIVE = MorphosyntacticProperty("nom")
ACCUSATIVE = MorphosyntacticProperty("acc")
# Define universal lexicon properties
MASS_NOUN = LexiconProperty("mass-noun")
ALLOWS_DITRANSITIVE = LexiconProperty("allows-ditransitive")
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 16:31:59 2021
@author: msantamaria
"""
# Artificial Neural Networks
# Part 1 - Data preprocessing
# Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Import the dataset
dataset = pd.read_csv("Churn_Modelling.csv")
X = dataset.iloc[:,3:13].values
y = dataset.iloc[:,13].values
# Encode categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelEncoder_X_1 = LabelEncoder()
X[:,1] = labelEncoder_X_1.fit_transform(X[:,1])
labelEncoder_X_2 = LabelEncoder()
X[:,2] = labelEncoder_X_2.fit_transform(X[:,2])
from sklearn.compose import ColumnTransformer
ct = ColumnTransformer(
    [('one_hot_encoder', OneHotEncoder(categories='auto'), [1])],  # Column index to one-hot encode (here [1], the Geography column)
remainder='passthrough' # Leave the rest of the columns untouched
)
X = np.array(ct.fit_transform(X), dtype=float)
X = X[:,1:]
# Split the dataset into a training set and a test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.20,random_state = 0)
# Feature scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# Part 2 - Build the ANN
# Import Keras and additional libraries
import keras
from keras.models import Sequential
from keras.layers import Dense
# Initialize the ANN
classifier = Sequential()
# Add the input layer and the first hidden layer
classifier.add(Dense(units=6,
kernel_initializer="uniform",
activation="relu",
input_dim=11))
# Add the second hidden layer
classifier.add(Dense(units=6,
kernel_initializer="uniform",
activation="relu"))
# Add the output layer
classifier.add(Dense(units=1,
kernel_initializer="uniform",
activation="sigmoid"))
# Compile the ANN
classifier.compile(optimizer="adam",
loss="binary_crossentropy",
metrics=["accuracy"])
# Fit the ANN to the training set
classifier.fit(X_train, y_train, batch_size=10, epochs=100)
# How to build the same neural network for Regression?
# Initialising the ANN
# regressor = Sequential()
# # Adding the input layer and the first hidden layer
# regressor.add(Dense(units = 6,
# kernel_initializer = 'uniform',
# activation = 'relu',
# input_dim = 11))
# # Adding the second hidden layer
# regressor.add(Dense(units = 6,
# kernel_initializer = 'uniform',
# activation = 'relu'))
# # Adding the output layer
# regressor.add(Dense(units = 1,
# kernel_initializer = 'uniform'))
# # Compiling the ANN
# regressor.compile(optimizer = 'adam',
# loss = 'mean_squared_error')
# # Fitting the ANN to the Training set
# regressor.fit(X_train,
# y_train,
# batch_size = 10,
# epochs = 100)
# Part 3 - Evaluate the model and compute final predictions
# Predict the results on the test set
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
# Build a confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test,y_pred)
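# A quick follow-up sketch (not in the original script): overall accuracy derived from
# the confusion matrix above.
accuracy = (cm[0, 0] + cm[1, 1]) / cm.sum()
print("Test accuracy: {:.3f}".format(accuracy))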
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-01-29 08:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('codenerix_products', '0008_auto_20180126_1711'),
('codenerix_invoicing', '0003_auto_20180129_0941'),
]
operations = [
migrations.CreateModel(
name='SalesLines',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('removed', models.BooleanField(default=False, editable=False, verbose_name='Removed')),
                ('price_recommended', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Recommended price base')),
('quantity', models.FloatField(verbose_name='Quantity')),
('subtotal', models.DecimalField(decimal_places=2, default=0, editable=False, max_digits=10, verbose_name='Subtotal')),
('discounts', models.DecimalField(decimal_places=2, default=0, editable=False, max_digits=10, verbose_name='Discounts')),
('taxes', models.DecimalField(decimal_places=2, default=0, editable=False, max_digits=10, verbose_name='Taxes')),
('equivalence_surcharges', models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=10, null=True, verbose_name='Equivalence surcharge')),
('total', models.DecimalField(decimal_places=2, default=0, editable=False, max_digits=10, verbose_name='Total')),
('code', models.CharField(blank=True, default=None, max_length=250, null=True, verbose_name='Code')),
('description_basket', models.CharField(blank=True, max_length=256, null=True, verbose_name='Description')),
('price_base_basket', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Price base')),
('discount_basket', models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='Discount (%)')),
('tax_basket', models.FloatField(blank=True, default=0, null=True, verbose_name='Tax (%)')),
('equivalence_surcharge_basket', models.FloatField(blank=True, default=0, null=True, verbose_name='Equivalence surcharge (%)')),
('tax_label_basket', models.CharField(blank=True, max_length=250, null=True, verbose_name='Tax Name')),
('notes_basket', models.CharField(blank=True, max_length=256, null=True, verbose_name='Notes')),
('description_order', models.CharField(blank=True, max_length=256, null=True, verbose_name='Description')),
('price_base_order', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Price base')),
('discount_order', models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='Discount (%)')),
('tax_order', models.FloatField(blank=True, default=0, null=True, verbose_name='Tax (%)')),
('equivalence_surcharge_order', models.FloatField(blank=True, default=0, null=True, verbose_name='Equivalence surcharge (%)')),
('tax_label_order', models.CharField(blank=True, max_length=250, null=True, verbose_name='Tax Name')),
('notes_order', models.CharField(blank=True, max_length=256, null=True, verbose_name='Notes')),
('notes_albaran', models.CharField(blank=True, max_length=256, null=True, verbose_name='Notes')),
('description_ticket', models.CharField(blank=True, max_length=256, null=True, verbose_name='Description')),
('price_base_ticket', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Price base')),
('discount_ticket', models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='Discount (%)')),
('tax_ticket', models.FloatField(blank=True, default=0, null=True, verbose_name='Tax (%)')),
('equivalence_surcharge_ticket', models.FloatField(blank=True, default=0, null=True, verbose_name='Equivalence surcharge (%)')),
('tax_label_ticket', models.CharField(blank=True, max_length=250, null=True, verbose_name='Tax Name')),
('notes_ticket', models.CharField(blank=True, max_length=256, null=True, verbose_name='Notes')),
('notes_ticket_rectification', models.CharField(blank=True, max_length=256, null=True, verbose_name='Notes')),
('description_invoice', models.CharField(blank=True, max_length=256, null=True, verbose_name='Description')),
('price_base_invoice', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Price base')),
('discount_invoice', models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='Discount (%)')),
('tax_invoice', models.FloatField(blank=True, default=0, null=True, verbose_name='Tax (%)')),
('equivalence_surcharge_invoice', models.FloatField(blank=True, default=0, null=True, verbose_name='Equivalence surcharge (%)')),
('tax_label_invoice', models.CharField(blank=True, max_length=250, null=True, verbose_name='Tax Name')),
('notes_invoice', models.CharField(blank=True, max_length=256, null=True, verbose_name='Notes')),
('notes_invoice_rectification', models.CharField(blank=True, max_length=256, null=True, verbose_name='Notes')),
('albaran', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_invoicing.SalesAlbaran', verbose_name='Albaran')),
('basket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_invoicing.SalesBasket', verbose_name='Basket')),
('invoice', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_invoicing.SalesInvoice', verbose_name='Invoice')),
('invoice_rectification', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_invoicing.SalesInvoiceRectification', verbose_name='Invoice rectification')),
('order', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_invoicing.SalesOrder', verbose_name='Sales order')),
('product_final', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_products.ProductFinal', verbose_name='Product')),
('product_unique', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_products.ProductUnique', verbose_name='Product Unique')),
('ticket', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_invoicing.SalesTicket', verbose_name='Ticket')),
('ticket_rectification', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_invoicing.SalesTicketRectification', verbose_name='Ticket rectification')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
]
|
python
|
# %%
from sklearn.metrics import r2_score
import pandas as pd
import numpy as np
import matplotlib.pyplot as pl
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# %%
# Any other file format that contains tabular data could be used here
advertising = pd.read_csv("advertising.csv")
# The target column must be last for the cells below to work correctly. If your target column is not last, adjust the two lines of code below accordingly.
TV = 'TV'
Radio = "Radio"
Newspaper = 'Newspaper'
Sales = 'Sales'
X = advertising.iloc[:, :1]
y = advertising.iloc[:, -1]
# %%
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=100
)
# %%
X_train
# %%
# `normalize=` was removed from LinearRegression in recent scikit-learn; scale the
# features separately (e.g. with StandardScaler) if needed.
model = LinearRegression(fit_intercept=True, n_jobs=-1).fit(X_train, y_train)
# %%
y_predicted = model.predict(X_test)
# %%
r2_score(y_test, y_predicted)  # r2_score expects (y_true, y_pred)
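# %%
# A quick follow-up cell (sketch): inspect the fitted coefficients and visualize the
# fit on the held-out data; column names follow the advertising.csv layout used above.
print("intercept:", model.intercept_, "slope:", model.coef_)
pl.scatter(X_test, y_test, label="actual")
pl.plot(X_test, y_predicted, color="red", label="predicted")
pl.xlabel(TV)
pl.ylabel(Sales)
pl.legend()
pl.show()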
# %%
|
python
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
import os
import re
from lxml import etree
from django.db.models import Count, Prefetch
from selections.models import PreProcessFragment
from stats.utils import get_label_properties_from_cache, prepare_label_cache
from .models import Corpus, Tense, Alignment, Source, Annotation, Fragment, Sentence, Word
def get_next_alignment(user, language_from, language_to, corpus=None):
"""
Retrieves a random Alignment from the database.
:param user: The current User
:param language_from: The source language
:param language_to: The target language
:param corpus: (if supplied) The Corpus where to draw an Alignment from
(otherwise: select from the available Corpora for a user)
:return: A random Alignment object
"""
target_words = Sentence.objects. \
prefetch_related(Prefetch('word_set', queryset=Word.objects.filter(is_target=True)))
alignments = Alignment.objects \
.filter(original_fragment__language=language_from) \
.filter(translated_fragment__language=language_to) \
.filter(annotation=None) \
.select_related('original_fragment__document') \
.prefetch_related(Prefetch('original_fragment__sentence_set', queryset=target_words,
to_attr='targets_prefetched'),)
corpora = [corpus] if corpus else get_available_corpora(user)
alignments = alignments.filter(original_fragment__document__corpus__in=corpora)
for corpus in corpora:
if corpus.current_subcorpus:
alignments = alignments.filter(original_fragment__in=corpus.current_subcorpus.get_fragments())
if not alignments:
return None
elif corpora[0].random_next_item:
return alignments.order_by('?').first()
else:
# Sort by Document title and the xml_id of the first target Word
return sorted(alignments, key=lambda a: (a.original_fragment.document.title,
a.original_fragment.sort_key_target()))[0]
def get_available_corpora(user):
"""
Returns the available Corpora for a User.
A superuser can see data from all corpora, other users are limited to corpora where they are an annotator.
:param user: The current User
:return: The available Corpora for this User
"""
if user.is_superuser:
return Corpus.objects.all()
elif user.is_authenticated:
return user.corpus_set.all()
else:
return Corpus.objects.filter(is_public=True)
def get_most_frequent_tenses(language):
"""
Returns the most frequently annotated tenses for a language.
:param language: The given Language
:return: A list of tenses
"""
most_frequent_by_language = Annotation.objects \
.filter(alignment__translated_fragment__language=language) \
.values('tense') \
.annotate(Count('tense')) \
.order_by('-tense__count')
return Tense.objects.filter(pk__in=[t.get('tense') for t in most_frequent_by_language])
def get_tenses(language):
"""
Returns tenses for a language.
:param language: The given Language
:return: A list of tenses
"""
return [t.title for t in Tense.objects.filter(language=language).order_by('title')]
def update_dialogue(in_dialogue, fragment=None, sentence=None, word_range=None):
"""
Updates the dialogue marking for Words and Fragments.
:param in_dialogue: whether the Words should be in_dialogue
:param fragment: a Fragment for which to change the dialogue marking
:param sentence: a Sentence for which to change the dialogue marking
:param word_range: a Word range for which to change the dialogue marking
"""
words = Word.objects.none()
if not any([fragment, sentence, word_range]):
raise ValueError('No words selected')
if fragment:
words |= Word.objects.filter(sentence__fragment=fragment)
if sentence:
words |= Word.objects.filter(sentence=sentence)
if word_range:
words |= Word.objects.filter(pk__in=word_range)
fragments = set()
for word in words:
word.is_in_dialogue = in_dialogue
word.is_in_dialogue_prob = 1.0 if in_dialogue else 0.0
word.save()
fragments.add(word.sentence.fragment)
for fragment in fragments:
fragment.save()
XML_ID_REGEX = re.compile(r'w?(\d[\.\d]*)')
def is_before(xml_id1, xml_id2):
result = False
match1 = re.match(XML_ID_REGEX, xml_id1)
match2 = re.match(XML_ID_REGEX, xml_id2)
if match1 and match2:
parts1 = [int(i) for i in match1.group(1).split('.')]
parts2 = [int(i) for i in match2.group(1).split('.')]
for p1, p2 in zip(parts1, parts2):
if p1 < p2:
result = True
break
return result
def sort_key(xml_id, xml_tag):
result = [xml_id]
if xml_id.isdigit():
result = int(xml_id)
else:
if xml_id[0] == xml_tag and xml_id[1:].split('.'):
result = list(map(int, xml_id[1:].split('.')))
return result
def natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
"""
Allows natural sorting, e.g. 2.xml is before 16.xml
"""
return [int(text) if text.isdigit() else text.lower() for text in _nsre.split(s)]
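# For example (illustration only, not part of the original module):
#   sorted(['16.xml', '2.xml', '10.xml'], key=natural_sort_key)
#   returns ['2.xml', '10.xml', '16.xml']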
def get_xml_sentences(fragment, limit):
"""
Retrieves sentences in the XML in the vicinity of the given xml_id
"""
try:
source = Source.objects.get(document=fragment.document, language=fragment.language)
except Source.DoesNotExist:
source = None
results = []
if source and source.xml_file and os.path.exists(source.xml_file.path):
xml_id = fragment.xml_ids() # TODO: this works, as source Fragments have only one Sentence
# TODO: limit to Fragments that are the source of an Alignment
related_fragments = Fragment.objects.filter(
document=fragment.document,
language=fragment.language,
preprocessfragment=None,
)
related_fragments = related_fragments.exclude(original=None)
# Loop over p/head/s elements
prev_el = []
found = False
added = 0
for _, el in etree.iterparse(source.xml_file.path, tag=['p', 'head', 's']):
if el.get('id') == xml_id:
found = True
if found:
if added <= limit:
position = 'current' if added == 0 else 'after'
results.append(add_element(el, fragment, related_fragments, position))
if el.tag == 's':
added += 1
else:
break
else:
prev_el.append(el)
# Inserts previous elements before the results
added = 0
for el in list(reversed(prev_el)):
results.insert(0, add_element(el, fragment, related_fragments, 'before'))
if el.tag == 's':
added += 1
if added == limit:
break
return results
def add_element(el, current_fragment, related_fragments, position):
sentence = None
sentence_content_xml = None
if el.tag == 's':
# For s elements, look up the Sentence in the same Corpus as the current Fragment
sentences = Sentence.objects.filter(
xml_id=el.get('id'),
fragment__in=related_fragments
).select_related('fragment').prefetch_related('word_set')
if sentences:
xml_id = None
fragment_pks = []
words = OrderedDict()
for s in sentences:
xml_id = s.xml_id
fragment_pks.append(s.fragment.pk)
is_current = current_fragment == s.fragment
for w in s.word_set.all():
if w.xml_id in words:
words[w.xml_id]['is_target'] |= w.is_target and is_current
words[w.xml_id]['is_other_target'] |= w.is_target and not is_current
else:
word = {
'word': w.word,
'xml_id': w.xml_id,
'pos': w.pos,
'lemma': w.lemma,
'is_in_dialogue': w.is_in_dialogue,
'is_target': w.is_target and is_current,
'is_other_target': w.is_target and not is_current,
}
words[w.xml_id] = word
fragment_pks.sort(reverse=True)
sentence = {
'xml_id': xml_id,
'fragment_pks': fragment_pks,
'words': list(words.values()),
}
# If the Sentence is not there, create a mock Sentence from the XML
else:
words = []
for w in el.xpath('.//w'):
word = {
'word': w.text,
'xml_id': w.get('id'),
'pos': w.get('tree') or w.get('pos') or w.get('hun') or '?',
'lemma': w.get('lem'),
'is_in_dialogue': float(w.get('dialog', 0)) > 0,
}
words.append(word)
sentence_content_xml = {
'xml_id': el.get('id'),
'words': words
}
return {'tag': el.tag,
'id': el.get('id'),
'position': position,
'content': sentence,
'content_xml': sentence_content_xml,
}
def bind_annotations_to_xml(source):
# Retrieve the Annotations
annotations = Annotation.objects. \
filter(alignment__translated_fragment__language=source.language,
alignment__translated_fragment__document=source.document). \
select_related('alignment__original_fragment', 'tense'). \
prefetch_related('labels', 'words')
# Only include correct Annotations
annotations = annotations.filter(is_no_target=False, is_translation=True)
tree = etree.parse(source.xml_file)
label_cache = prepare_label_cache(source.document.corpus)
labels = set()
failed_lookups = []
words_by_xml_id = dict()
all_w_elements = tree.xpath('//w')
if annotations:
# Attach Annotations to the XML tree
for annotation in annotations:
label, color, _ = get_label_properties_from_cache(
annotation.get_labels(as_pk=True, include_labels=True), label_cache, len(labels))
labels.add(label)
words = annotation.words.all()
for w in words:
words_by_xml_id[w.xml_id] = dict(annotation=annotation, label=label, color=color, found=False)
for xml_w in all_w_elements:
word = words_by_xml_id.get(xml_w.get('id'))
if word:
annotation = word['annotation']
label = word['label']
color = word['color']
xml_w.set('annotation-pk', str(annotation.pk))
xml_w.set('fragment-pk', str(annotation.alignment.original_fragment.pk))
xml_w.set('label', label)
xml_w.set('color', color)
del words_by_xml_id[xml_w.get('id')]
else:
# Assume we are dealing with a source language here
# Retrieve the fragments
target_words = Sentence.objects. \
prefetch_related(Prefetch('word_set', queryset=Word.objects.filter(is_target=True)))
pp_fragments = PreProcessFragment.objects.filter(language=source.language, document=source.document)
fragments = Fragment.objects.filter(language=source.language, document=source.document). \
exclude(pk__in=pp_fragments). \
select_related('tense'). \
prefetch_related('labels',
Prefetch('sentence_set', queryset=target_words, to_attr='targets_prefetched'))
# Attach Fragments to the XML tree
for fragment in fragments:
label, color, _ = get_label_properties_from_cache(
fragment.get_labels(as_pk=True, include_labels=True), label_cache, len(labels))
labels.add(label)
sentences = fragment.targets_prefetched
for s in sentences:
for w in s.word_set.all():
words_by_xml_id[w.xml_id] = dict(fragment=fragment, label=label, color=color, found=False)
for xml_w in all_w_elements:
word = words_by_xml_id.get(xml_w.get('id'))
if word:
fragment = word['fragment']
label = word['label']
color = word['color']
xml_w.set('fragment-pk', str(fragment.pk))
xml_w.set('label', label)
xml_w.set('color', color)
del words_by_xml_id[xml_w.get('id')]
for word in words_by_xml_id.values():
# all words that were assigned to the xml tree were removed from words_by_xml_id
failed_lookups.append(word.get('fragment', word.get('annotation')))
return tree, failed_lookups
def labels_to_choices(queryset):
return [(label['pk'], '{}:{}'.format(label['key__title'], label['title']))
for label in queryset.values('pk', 'key__title', 'title')]
|
python
|
from math import exp
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from kornia.color import rgb_to_yuv  # needed by color_loss below
from torch.nn.modules.loss import _Loss
import numpy as np
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss/gauss.sum()
def create_window(window_size, channel=1):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
return window
def ssim(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
# Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
if val_range is None:
if torch.max(img1) > 128:
max_val = 255
else:
max_val = 1
if torch.min(img1) < -0.5:
min_val = -1
else:
min_val = 0
L = max_val - min_val
else:
L = val_range
padd = 0
(_, channel, height, width) = img1.size()
if window is None:
real_size = min(window_size, height, width)
window = create_window(real_size, channel=channel).to(img1.device)
mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
mu2 = F.conv2d(img2, window, padding=padd, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1 * img1, window, padding=padd, groups=channel) - mu1_sq
sigma2_sq = F.conv2d(img2 * img2, window, padding=padd, groups=channel) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, padding=padd, groups=channel) - mu1_mu2
C1 = (0.01 * L) ** 2
C2 = (0.03 * L) ** 2
v1 = 2.0 * sigma12 + C2
v2 = sigma1_sq + sigma2_sq + C2
cs = torch.mean(v1 / v2) # contrast sensitivity
ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
if size_average:
ret = ssim_map.mean()
else:
ret = ssim_map.mean(1).mean(1).mean(1)
if full:
return ret, cs
return ret
def msssim(img1, img2, window_size=11, size_average=True, val_range=None, normalize=False):
device = img1.device
weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(device)
levels = weights.size()[0]
mssim = []
mcs = []
for _ in range(levels):
sim, cs = ssim(img1, img2, window_size=window_size, size_average=size_average, full=True, val_range=val_range)
mssim.append(sim)
mcs.append(cs)
img1 = F.avg_pool2d(img1, (2, 2))
img2 = F.avg_pool2d(img2, (2, 2))
mssim = torch.stack(mssim)
mcs = torch.stack(mcs)
    # Normalize to avoid NaNs when training unstable models (not part of the
    # original MS-SSIM definition)
if normalize:
mssim = (mssim + 1) / 2
mcs = (mcs + 1) / 2
pow1 = mcs ** weights
pow2 = mssim ** weights
# From Matlab implementation https://ece.uwaterloo.ca/~z70wang/research/iwssim/
    output = torch.prod(pow1[:-1]) * pow2[-1]
return output
# Classes to re-use window
class SSIM(torch.nn.Module):
def __init__(self, window_size=11, size_average=True, val_range=None):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.val_range = val_range
# Assume 1 channel for SSIM
self.channel = 1
self.window = create_window(window_size)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.dtype == img1.dtype:
window = self.window
else:
window = create_window(self.window_size, channel).to(img1.device).type(img1.dtype)
self.window = window
self.channel = channel
return ssim(img1, img2, window=window, window_size=self.window_size, size_average=self.size_average)
class MSSSIM(torch.nn.Module):
def __init__(self, window_size=11, size_average=True, channel=3):
super(MSSSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = channel
def forward(self, img1, img2):
# TODO: store window between calls if possible
return msssim(img1, img2, window_size=self.window_size, size_average=self.size_average)
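# Illustrative sketch of using the wrapper modules as training losses; the
# 2x3x128x128 inputs are an arbitrary assumption, not a requirement:
def _demo_ssim_losses():
    pred = torch.rand(2, 3, 128, 128, requires_grad=True)
    target = (pred.detach() + 0.1 * torch.randn(2, 3, 128, 128)).clamp(0, 1)
    ssim_loss = 1 - SSIM(window_size=11)(pred, target)
    msssim_loss = 1 - MSSSIM(window_size=11, channel=3)(pred, target)
    return ssim_loss, msssim_loss  # both differentiable w.r.t. pred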
class MeanShift(nn.Conv2d):
def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):
super(MeanShift, self).__init__(3, 3, kernel_size=1)
std = torch.Tensor(rgb_std)
self.weight.data = torch.eye(3).view(3, 3, 1, 1)
self.weight.data.div_(std.view(3, 1, 1, 1))
self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)
self.bias.data.div_(std)
        for p in self.parameters():
            p.requires_grad = False
class VGG(torch.nn.Module):
def __init__(self, conv_index, rgb_range=1):
super(VGG, self).__init__()
vgg_features = models.vgg19(pretrained=True).features
modules = [m for m in vgg_features]
if conv_index == '22':
self.vgg = nn.Sequential(*modules[:8])
elif conv_index == '54':
self.vgg = nn.Sequential(*modules[:35])
vgg_mean = (0.485, 0.456, 0.406)
vgg_std = (0.229 * rgb_range, 0.224 * rgb_range, 0.225 * rgb_range)
self.sub_mean = MeanShift(rgb_range, vgg_mean, vgg_std)
        for p in self.vgg.parameters():
            p.requires_grad = False
def forward(self, sr, hr):
def _forward(x):
x = self.sub_mean(x)
x = self.vgg(x)
return x
vgg_sr = _forward(sr)
with torch.no_grad():
vgg_hr = _forward(hr.detach())
loss = F.l1_loss(vgg_sr, vgg_hr)
return loss
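# Illustrative usage sketch; instantiating VGG downloads torchvision's pretrained
# VGG19 weights on first use, so this is only an outline with placeholder tensors:
def _demo_vgg_perceptual_loss():
    perceptual = VGG(conv_index='22', rgb_range=1)
    sr = torch.rand(1, 3, 96, 96)  # super-resolved estimate (placeholder)
    hr = torch.rand(1, 3, 96, 96)  # high-resolution reference (placeholder)
    return perceptual(sr, hr)      # L1 distance between conv2_2 feature maps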
def color_loss(out, target):
out_yuv = rgb_to_yuv(out)
out_u = out_yuv[:, 1, :, :]
out_v = out_yuv[:, 2, :, :]
target_yuv = rgb_to_yuv(target)
target_u = target_yuv[:, 1, :, :]
target_v = target_yuv[:, 2, :, :]
return torch.div(torch.mean((out_u - target_u).pow(1)).abs() + torch.mean((out_v - target_v).pow(1)).abs(), 2)
class BurstLoss(_Loss):
def __init__(self, size_average=None, reduce=None, reduction='mean'):
super(BurstLoss, self).__init__(size_average, reduce, reduction)
self.reduction = reduction
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
prewitt_filter = 1 / 6 * np.array([[1, 0, -1],
[1, 0, -1],
[1, 0, -1]])
self.prewitt_filter_horizontal = torch.nn.Conv2d(in_channels=1, out_channels=1,
kernel_size=prewitt_filter.shape,
padding=prewitt_filter.shape[0] // 2).to(device)
self.prewitt_filter_horizontal.weight.data.copy_(torch.from_numpy(prewitt_filter).to(device))
self.prewitt_filter_horizontal.bias.data.copy_(torch.from_numpy(np.array([0.0])).to(device))
self.prewitt_filter_vertical = torch.nn.Conv2d(in_channels=1, out_channels=1,
kernel_size=prewitt_filter.shape,
padding=prewitt_filter.shape[0] // 2).to(device)
self.prewitt_filter_vertical.weight.data.copy_(torch.from_numpy(prewitt_filter.T).to(device))
self.prewitt_filter_vertical.bias.data.copy_(torch.from_numpy(np.array([0.0])).to(device))
def get_gradients(self, img):
img_r = img[:, 0:1, :, :]
img_g = img[:, 1:2, :, :]
img_b = img[:, 2:3, :, :]
grad_x_r = self.prewitt_filter_horizontal(img_r)
grad_y_r = self.prewitt_filter_vertical(img_r)
grad_x_g = self.prewitt_filter_horizontal(img_g)
grad_y_g = self.prewitt_filter_vertical(img_g)
grad_x_b = self.prewitt_filter_horizontal(img_b)
grad_y_b = self.prewitt_filter_vertical(img_b)
grad_x = torch.stack([grad_x_r[:, 0, :, :], grad_x_g[:, 0, :, :], grad_x_b[:, 0, :, :]], dim=1)
grad_y = torch.stack([grad_y_r[:, 0, :, :], grad_y_g[:, 0, :, :], grad_y_b[:, 0, :, :]], dim=1)
grad = torch.stack([grad_x, grad_y], dim=1)
return grad
def forward(self, input, target):
input_grad = self.get_gradients(input)
target_grad = self.get_gradients(target)
return F.l1_loss(input_grad, target_grad, reduction=self.reduction)
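# Illustrative usage sketch of BurstLoss; it keeps its Prewitt filters on CUDA
# when available, so the inputs are placed on the same device here:
def _demo_burst_loss():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    criterion = BurstLoss()
    pred = torch.rand(4, 3, 64, 64, device=device)
    target = torch.rand(4, 3, 64, 64, device=device)
    return criterion(pred, target)  # L1 distance between image gradients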
|
python
|
from rvsml.align import dtw2, OPW_w
from rvsml.EvaluateRVSML import EvaluateRVSML_dtw
from rvsml.NNClassifier import NNClassifier_dtw
from rvsml.RVSML_OT_Learning import RVSML_OT_Learning_dtw
|
python
|
import sys
class Foobar:
def __init__(self, foobar="foobar"):
self.foobar = foobar
def __repr__(self):
        return self.foobar
|
python
|
import os
import json
import requests
import telegram
def custom_alert_slack(message):
text = ""
text = "%s" % message
requests.post(os.getenv('SLACK_WEBHOOK'), data=json.dumps({"text": text}), headers={'Content-type': 'application/json'})
def publish_on_telegram_channel(chat_id, message, token=None, image=None):
if not token:
token = os.getenv('TelegramBotsToken')
bot = telegram.Bot(token=token)
if image is None:
bot.send_message(chat_id=chat_id, text=message, parse_mode='HTML', disable_web_page_preview="true")
else:
bot.send_photo(chat_id=chat_id, photo=open(image, 'rb'), caption=message, parse_mode='HTML', disable_web_page_preview="true")
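# Illustrative usage sketch; the chat id and messages are placeholders, and the
# SLACK_WEBHOOK / TelegramBotsToken environment variables are assumed to be set:
def _demo_alerts():
    custom_alert_slack("deploy finished")
    publish_on_telegram_channel("@my_channel", "<b>deploy finished</b>")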
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author Eric Bullen <[email protected]>
@application jtune.py
@version 4.0.1
@abstract This tool will give detailed information about the running
JVM in real-time. It produces useful information that can
further assist the user in debugging and optimization.
@license Copyright 2015 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied.
"""
import atexit
import datetime
import getpass
import locale
import logging
import math
import os
import re
import resource
import shlex
import socket
import subprocess as sp
import sys
import textwrap
import time
from decimal import Decimal
from itertools import zip_longest, count
import argparse
import multiprocessing as mp
try:
locale.setlocale(locale.LC_ALL, 'en_US')
except locale.Error:
# Try UTF8 variant before failing
locale.setlocale(locale.LC_ALL, 'en_US.utf8')
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s: "%(name)s" (line: %(lineno)d) - %(levelname)s: %(message)s'))
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(handler)
# For me to use in PyCharm to read flight recorder files
DEBUG = False
class Display(object):
def __init__(self, textwrap_offset=80):
self.display_output = []
self.textwrap_offset = textwrap_offset
def render(self, message=None, keep_newline=True, save_output=True):
"""Basically wraps the print function so that it will also save the output to an array for pasting
Keyword arguments:
message -- the message to print
        keep_newline -- if True, print with a trailing newline; otherwise suppress it (i.e. print(message, end=" "))
save_output -- if this is false, do not save the output to an array for pasting
"""
if save_output:
self.add(message)
if message.endswith("\n"):
message = message[:-1]
if keep_newline:
print(message)
else:
print(message, end=" ")
def add(self, message):
"""Append message to output items."""
self.display_output.append(message)
class GCRecord(object):
"""Object definition for a single gc record."""
_version = "1.0"
def __init__(self, raw_gc_record=None):
if raw_gc_record:
self.raw_gc_record = raw_gc_record
else:
self.raw_gc_record = list()
self.is_cms_gc = False
self.is_stw_gc = False
self.cms_sweep_time = None
self.valid_record = False
self.record_timestamp = None
self.jvm_running_time = None
self.gc_type = None
self.desired_survivor_size = None
self.curr_threshold = None
self.max_threshold = None
self.ages = list()
self.young_size_before_gc = None
self.young_size_after_gc = None
self.young_size_total = None
self.young_gc_time = 0
self.total_heap_before_gc = None
self.total_heap_after_gc = None
self.total_heap = None
self.total_gc_time = 0
self.og_used = None
self.stw_time = 0
self._parse_record()
def __repr__(self):
"""This prints out the gc record so that it looks as though it came straight from
the logs."""
output = list()
output.append("{0} Runtime: {1} GC Type: {2}".format(self.record_timestamp, self.jvm_running_time, self.gc_type))
output.append("Desired Survivor Size: {0}, Curr Threshold: {1} (Max: {2})".format(self.desired_survivor_size, self.curr_threshold, self.max_threshold))
for age in self.ages:
if age[1] > -1 or age[2] > -1:
output.append("- Age {0}: {1:>10} bytes, {2:>10} total".format(age[0], age[1], age[2]))
output.append("YG Before GC: {0}K, YG After GC: {1}K (Total: {2}K), {3} secs".format(self.young_size_before_gc, self.young_size_after_gc, self.young_size_total, self.young_gc_time))
output.append("Total Heap Before GC: {0}K, Total Heap After GC: {1}K (Total: {2}K), {3} secs".format(self.total_heap_before_gc, self.total_heap_after_gc, self.total_heap, self.total_gc_time))
return "\n".join(output)
def _parse_record(self):
"""This loops through record_array to set the class variables that make up the record."""
self.record_timestamp, record_array = self.raw_gc_record
#############################################################
# Capture STW (Full GC, remarks, etc.). Yeah, I could combine
# these three, but this is good enough for now.
if any("CMS Initial Mark" in line for line in record_array):
match = re.search(r", ([\d\.]+) secs\] ", record_array[-1])
if match:
self.gc_type = "CMS-STW"
self.is_stw_gc = True
self.valid_record = True
self.stw_time += float(match.group(1))
if any("CMS Final Remark" in line for line in record_array):
match = re.search(r", ([\d\.]+) secs\] ", record_array[-1])
if match:
self.gc_type = "CMS-STW"
self.is_stw_gc = True
self.valid_record = True
self.stw_time += float(match.group(1))
if any("Full GC" in line for line in record_array):
match = re.search(r", ([\d\.]+) secs\] ", record_array[-1])
if match:
self.gc_type = "FULL"
self.is_stw_gc = True
self.valid_record = True
self.stw_time += float(match.group(1))
if not self.is_stw_gc:
for line in record_array:
if "CMS-concurrent-sweep: " in line:
match = re.match(r"^\d+-\d+-\d+T\d+:\d+:[\d\.]+[+-]\d+: ([\d\.]+): \[CMS-concurrent-sweep: [\d\.]+/([\d\.]+) secs", line)
if match:
self.is_cms_gc = True
self.valid_record = True
self.gc_type = "CMS"
self.jvm_running_time = float(match.group(1))
self.cms_sweep_time = float(match.group(2))
break
if not (self.jvm_running_time or self.gc_type):
match = re.match(r"^\d+-\d+-\d+T\d+:\d+:[\d\.]+[+-]\d+: ([\d\.]+): .*\[(\S+)", line)
if match:
self.jvm_running_time = float(match.group(1))
self.gc_type = match.group(2)
if not (self.desired_survivor_size or self.curr_threshold or self.max_threshold):
match = re.match(r"^Desired survivor size (\d+) bytes, new threshold (\d+) \(max (\d+)\)", line)
if match:
self.valid_record = True
self.desired_survivor_size = int(match.group(1))
self.curr_threshold = int(match.group(2))
self.max_threshold = int(match.group(3))
# Here I set the survivor size beforehand, for any that
# may be missing as I want all the ages even if they aren't
# being used for comparison between GCs
for age in range(1, self.max_threshold + 1):
self.ages.append((age, -1, -1))
continue
################################################
# Skipping records when the JVM has been running
# for less than 300 seconds
if self.jvm_running_time < 300:
self.valid_record = False
break
#############################
# Capture survivor ages, etc.
match = re.match(r"^- age\s+(\d+):\s+(\d+) bytes,\s+(\d+) total", line)
if match:
                    ############################################################
                    # Record the byte counts for this survivor age. Ages that
                    # were fully reaped keep the (-1, -1) placeholders set
                    # above, which the analytics later uses to determine
                    # survivor death rates/ratios
age = int(match.group(1))
curr_size = int(match.group(2))
max_size = int(match.group(3))
self.ages[age - 1] = (age, curr_size, max_size)
continue
###############################
# Capture gc reallocation stats
match = re.match(r"^: (\d+)\w->(\d+)\w\((\d+)\w\), ([\d\.]+) secs\] (\d+)\w->(\d+)\w\((\d+)\w\), ([\d\.]+) secs\]", line)
if match:
self.young_size_before_gc = int(match.group(1)) * 1024
self.young_size_after_gc = int(match.group(2)) * 1024
self.young_size_total = int(match.group(3)) * 1024
self.young_gc_time = Decimal(match.group(4))
self.total_heap_before_gc = int(match.group(5)) * 1024
self.total_heap_after_gc = int(match.group(6)) * 1024
self.total_heap = int(match.group(7)) * 1024
self.total_gc_time = Decimal(match.group(8))
self.og_used = self.total_heap_after_gc - self.young_size_after_gc
def liverun(cmd=None):
"""Run cmd, and return an iterator of said cmd.
Keyword arguments:
cmd -- the command to run
"""
global subproc
env = dict(os.environ)
# Combining stdout and stderr. I can't find a way to keep both separate
# while getting the data 'live'. itertools.izip_longest seemed like it'd
# almost do it, but it caches the results before sending it out...
subproc = sp.Popen(shlex.split(cmd), stdout=sp.PIPE, stderr=sp.STDOUT, env=env)
return iter(subproc.stdout.readline, b'')
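# Illustrative sketch: liverun() yields raw bytes lines from the command's
# combined stdout/stderr, so callers decode them as needed.
def _demo_liverun():
    lines = [line.decode().strip() for line in liverun("echo hello")]
    assert lines == ["hello"]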
def reduce_seconds(secs=None):
"""Return a compressed representation of time in seconds
Keyword arguments:
secs -- a float/int representing the seconds to be 'compressed'
"""
# The nested if statements keep it from being too long,
# by lopping off the non significant values
retval = ""
secs = int(float(secs))
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
days, hours = divmod(hours, 24)
secs = int("{0:0.0f}".format(secs))
if days:
retval += "{0}d".format(days)
if hours:
retval += "{0}h".format(hours)
if days > 0:
return retval
if mins:
retval += "{0}m".format(mins)
if hours or days:
return retval
if secs:
retval += "{0:}s".format(secs)
return retval
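# Illustrative worked examples of reduce_seconds(); note how the less
# significant units are dropped once hours or days are present:
def _demo_reduce_seconds():
    assert reduce_seconds(59) == "59s"
    assert reduce_seconds(3661) == "1h1m"
    assert reduce_seconds(90061) == "1d1h"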
def sec_diff(first_time=None, second_time=None):
"""Return the number of seconds between two datetime objects
Keyword arguments:
first_time -- The (typically) older time of the two
second_time -- The (typically) newer time of the two
"""
time_delta = second_time - first_time
return time_delta.days * 86400 + time_delta.seconds + Decimal(str(time_delta.microseconds / float(1000000)))
def _min(values=None):
"""A wrapper around the min() function so that it does not error on an empty list"""
try:
return min(values)
except ValueError:
return 0
def _max(values=None):
"""A wrapper around the max() function so that it does not error on an empty list"""
try:
return max(values)
except ValueError:
return 0
def median(values=None):
"""Return the median of 'values'
Keyword arguments:
values -- the list of numbers
"""
sorts = sorted(values)
length = len(sorts)
result = None
if not values:
result = 0
        # raise ValueError("I can't find the median of an empty list.")
elif not length % 2:
result = (sorts[(length // 2)] + sorts[(length // 2) - 1]) / 2.0
else:
result = sorts[length // 2]
return result
def mean(values=None, _length=None):
"""Return the mean of 'values'
Keyword arguments:
values -- the list of numbers
_length -- mostly not usable for end-users, needed by the stdev function
"""
result = None
if not _length:
_length = len(values)
if _length > 0:
result = Decimal(str(sum(values))) / _length
else:
result = 0
return result
def stdev(values=None):
"""Return the standard deviation of values
Keyword arguments:
values -- The poorly named argument that contains the list of numbers
"""
values_mean = mean(values)
variance = [math.pow(Decimal(str(x)) - values_mean, 2) for x in values]
return math.sqrt(mean(variance, len(variance) - 1))
def percentile(values=None, pct=None):
"""Return the percentile of a given values
Keyword arguments:
values -- The list of numbers to be analyzed
pct -- The percentile (can be a float) to be used (100 == 100%, not 1 = 100%, etc.)
"""
watermark_index = int(round((float(pct) / 100) * len(values) + .5))
watermark = sorted(values)[watermark_index - 1]
return [element for element in values if element <= watermark]
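# Illustrative worked examples of the statistics helpers above; the values are
# arbitrary:
def _demo_stats_helpers():
    values = [1, 2, 3, 4, 100]
    assert _min([]) == 0 and _max([]) == 0
    assert median(values) == 3
    assert mean(values) == 22
    assert percentile(values, 80) == [1, 2, 3, 4]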
def ord_num(number=None):
return str(number) + ("th" if 4 <= number % 100 <= 20 else {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th"))
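# Illustrative worked examples of ord_num()'s English ordinal suffixes:
def _demo_ord_num():
    assert ord_num(1) == "1st"
    assert ord_num(2) == "2nd"
    assert ord_num(11) == "11th"
    assert ord_num(23) == "23rd"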
def reduce_k(size=None, precision=2, short_form=True, _place_holder=0):
"""Return a compressed representation of a given number of bytes
Keyword arguments:
size -- the size in bytes
precision -- what precision should be used (places to the right of the decimal)
short_form -- (true/false). Use 'K' instead of 'KiB', etc.
"""
if not isinstance(size, Decimal):
size = Decimal(str(size))
# You know.. just in case we ever get to a yottabyte
if short_form:
iec_scale = ['K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
else:
iec_scale = ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
if abs(size) >= 1024:
_place_holder += 1
return reduce_k(size / Decimal("1024.0"), precision=precision, short_form=short_form, _place_holder=_place_holder)
else:
value = Decimal("{0:.{1}f}".format(size, precision))
if Decimal(str(int(value))) == value:
value = int(value)
if short_form:
return "{0}{1}".format(value, iec_scale[_place_holder])
else:
return "{0} {1}".format(value, iec_scale[_place_holder])
def _run_analysis(gc_data=None, jmap_data=None, jstat_data=None, proc_details=None, optimized_for_ygcs_rate=None):
"""The meat-and-potatoes of this tool. This takes in numerous data structures,
and prints out a report of the analysis of them."""
# Formulas to get the JVM configuration just from JMap:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# eden_size == (newsize * survivor_ratio)/(2 + survivor_ratio)
# survivor_size == eden_size * (1/survivor_ratio)
# og_size == max_heap_size - eden_size - survivor_size
# og_used == heap_used - eden_used
if not gc_data:
logger.error("I can't do any analysis for this sample period because there wasn't enough data in the GC log. Exiting.")
sys.exit(1)
############################################################
# Get some summary data that doesn't require GC log analysis
# Loop through the GC data array to find all CMS events, and capture
# how long they took.
cms_times = [record.cms_sweep_time for record in gc_data if record.is_cms_gc]
display.render("\n")
display.render("Meta:\n")
display.render("~~~~~\n")
sample_time_secs = sec_diff(gc_data[0].record_timestamp, gc_data[-1].record_timestamp)
if sample_time_secs < 60:
display.render("GC Sample Time: {0} seconds\n".format(sample_time_secs))
else:
display.render("GC Sample Time: {0} ({1} seconds)\n".format(reduce_seconds(sample_time_secs), sample_time_secs))
display.render("GC Sample Time from {0} to {1}\n".format(gc_data[0].record_timestamp, gc_data[-1].record_timestamp))
if proc_details:
cpu_count = mp.cpu_count()
cpu_uptime = cpu_count * proc_details['sys_uptime_seconds']
proc_utime_pct = proc_details['proc_utime_seconds'] / cpu_uptime
proc_stime_pct = proc_details['proc_stime_seconds'] / cpu_uptime
display.render("System Uptime: {0}\n".format(reduce_seconds(proc_details['sys_uptime_seconds'])))
display.render("Proc Uptime: {0}\n".format(reduce_seconds(proc_details['proc_uptime_seconds'])))
display.render("Proc Usertime: {0} ({1:0.2%})\n".format(reduce_seconds(proc_details['proc_utime_seconds']), proc_utime_pct))
display.render("Proc Systime: {0} ({1:0.2%})\n".format(reduce_seconds(proc_details['proc_stime_seconds']), proc_stime_pct))
display.render("Proc RSS: {0}\n".format(reduce_k(proc_details['proc_rss_bytes'] / 1024)))
display.render("Proc VSize: {0}\n".format(reduce_k(proc_details['proc_vsize_bytes'] / 1024)))
display.render("Proc # Threads: {0}\n".format(proc_details['num_threads']))
display.render("\n")
# Exit out as I don't have enough gc_data to do any analysis on
if len(gc_data) < 2:
display.render("\n")
display.render("* NOTE: There wasn't enough data to do any analysis. Please let the tool\n")
display.render(" gather at least 2 complete gc.log records (found {0}).\n".format(len(gc_data)))
return False
survivor_info = dict()
young_gc_count_delta = len([record.is_stw_gc for record in gc_data if not record.is_stw_gc])
full_gc_count_delta = len([record.is_stw_gc for record in gc_data if record.is_stw_gc])
sample_gc_time = sum(record.total_gc_time for record in gc_data)
sample_gc_load = (sample_gc_time / Decimal(str(sample_time_secs))) * 100
#######################################################
# Get young gen allocation rates over the sample period
yg_rates = list()
for first_gc, second_gc in zip(gc_data, gc_data[1:]):
if any([second_gc.is_stw_gc, first_gc.is_stw_gc, first_gc.is_cms_gc, second_gc.is_cms_gc]):
continue
# Iterate over the gc logs 2 at a time
# [1, 2, 3, 4] ->
# [(1, 2), (2, 3), (3, 4)]
#
time_delta = sec_diff(first_gc.record_timestamp, second_gc.record_timestamp)
try:
yg_size_delta = second_gc.young_size_before_gc - first_gc.young_size_after_gc
yg_growth_delta = second_gc.young_size_after_gc - first_gc.young_size_after_gc
except TypeError:
display.render("\n".join(textwrap.wrap("Warning: Something is really wrong with this JVM; I couldn't get correct GC data for it.", display.textwrap_offset)))
display.render("")
yg_size_delta = 0
yg_growth_delta = 0
# These are in KiB/s
yg_alloc_rate = yg_size_delta / time_delta
yg_growth_rate = yg_growth_delta / time_delta
yg_rates.append((yg_alloc_rate, yg_growth_rate))
#####################################################
# Get old gen promotion rates over the sample period
og_rates = list()
for first_gc, second_gc in zip(gc_data, gc_data[1:]):
if any([second_gc.is_stw_gc, first_gc.is_stw_gc, first_gc.is_cms_gc, second_gc.is_cms_gc]):
continue
time_delta = sec_diff(first_gc.record_timestamp, second_gc.record_timestamp)
# These are in KiB/s
og_allocation_delta = (second_gc.og_used - first_gc.og_used) / Decimal("1024")
og_allocation_rate = og_allocation_delta / time_delta
############################################################################
# I only want when the old gen is growing. If it's decreasing, it's probably
# b/c there was a FGC, and space is being reclaimed.
if og_allocation_delta > 0:
# This is in KiB/s
og_rates.append(og_allocation_rate)
############################
# Calc survivor death ratios
gc_survivor_death_rates = list()
for first_gc, second_gc in zip(gc_data, gc_data[1:]):
if any([second_gc.is_stw_gc, first_gc.is_stw_gc, first_gc.is_cms_gc, second_gc.is_cms_gc]):
continue
survivor_death_rates = list()
for first_age, second_age in zip(first_gc.ages, second_gc.ages[1:]):
# The second age CAN be bigger than the first age. I verified
# this in the gc.logs (still not sure how/why)
# ID 0 is the age number
# ID 1 is bytes in that age
# ID 2 is the total bytes for that age
if second_age[1] == -1:
# I don't think I want to capture any changes if
# the survivor space didn't exist (-1 as a default value- see above)
continue
# survivor_death_rates.append(Decimal(0))
else:
survivor_death_rates.append(1 - (Decimal(second_age[1]) / first_age[1]))
gc_survivor_death_rates.append(survivor_death_rates)
#################################################################################
    # Since I have 2 in-scope valid GCs, I'm going to calculate some needed JVM
    # sizes; the sizes will be fixed if I have a fixed heap size (which we do in prod)
jvm_mem_cfg = dict()
try:
jvm_mem_cfg["og_size"] = (first_gc.total_heap - first_gc.young_size_total) * 1024
except TypeError:
display.render("\n".join(textwrap.wrap("Error: I could not find a non CMS/FGC GC record for analysis. Exiting.", display.textwrap_offset)))
display.render("")
sys.exit(1)
jvm_mem_cfg["survivor_size"] = (first_gc.desired_survivor_size * 2)
jvm_mem_cfg["eden_size"] = (first_gc.young_size_total * 1024) - jvm_mem_cfg["survivor_size"]
jvm_mem_cfg["total_heap"] = (first_gc.total_heap * 1024) + jvm_mem_cfg["survivor_size"]
jvm_mem_cfg["new_size"] = (jvm_mem_cfg["eden_size"] + (jvm_mem_cfg["survivor_size"] * 2))
#########################################################
# Now that I have a crap-ton of curated data, report out.
    # This grabs the first part of each tuple, which is the total allocation
    # for that gc (not growth!)
yg_alloc_rates = [entry[0] for entry in yg_rates]
min_yg_rate, mean_yg_rate, max_yg_rate = _min(yg_alloc_rates), mean(yg_alloc_rates), _max(yg_alloc_rates)
display.render("YG Allocation Rates*:\n")
display.render("~~~~~~~~~~~~~~~~~~~~~\n")
display.render("per sec (min/mean/max): {0:>13} {1:>13} {2:>13}\n".format(reduce_k(min_yg_rate) + "/s", reduce_k(mean_yg_rate) + "/s", reduce_k(max_yg_rate) + "/s"))
display.render("per hr (min/mean/max): {0:>13} {1:>13} {2:>13}\n".format(reduce_k(min_yg_rate * 3600) + "/h", reduce_k(mean_yg_rate * 3600) + "/h", reduce_k(max_yg_rate * 3600) + "/h"))
display.render("\n")
    # Summarize the old gen promotion rates (KiB/s) collected above
    # (not the young gen growth rates)
min_og_rate, mean_og_rate, max_og_rate = _min(og_rates), mean(og_rates), _max(og_rates)
display.render("OG Promotion Rates:\n")
display.render("~~~~~~~~~~~~~~~~~~~\n")
display.render("per sec (min/mean/max): {0:>13} {1:>13} {2:>13}\n".format(reduce_k(min_og_rate) + "/s", reduce_k(mean_og_rate) + "/s", reduce_k(max_og_rate) + "/s"))
display.render("per hr (min/mean/max): {0:>13} {1:>13} {2:>13}\n".format(reduce_k(min_og_rate * 3600) + "/h", reduce_k(mean_og_rate * 3600) + "/h", reduce_k(max_og_rate * 3600) + "/h"))
display.render("\n")
################################################
# Survivor Lengths- wanted to make a nested list
# comprehension, but I suppose that's a bit ugly
# to debug/read
display.render("Survivor Death Rates:\n")
display.render("~~~~~~~~~~~~~~~~~~~~~\n")
survivor_lengths = list()
for sub_arr in gc_survivor_death_rates:
survivor_lengths.append(len([elem for elem in sub_arr if elem > 0]))
display.render("Lengths (min/mean/max): {0}/{1:0.1f}/{2}\n".format(_min(survivor_lengths), mean(survivor_lengths), _max(survivor_lengths)))
display.render("Death Rate Breakdown:\n")
cuml_pct = 1
death_ages = list()
for survivor_num, pct_list in enumerate(zip_longest(*gc_survivor_death_rates, fillvalue=0), 1):
min_pct = min(pct_list)
mean_pct = mean(pct_list)
max_pct = max(pct_list)
cuml_pct *= 1 - mean_pct
death_ages.append(mean_pct)
survivor_info[survivor_num] = min_pct, mean_pct, max_pct
display.render(" Age {0}: {1:>5} / {2:>5} / {3:>5} / {4:>5} (min/mean/max/cuml alive %)\n".format(survivor_num, "{0:0.1%}".format(min_pct), "{0:0.1%}".format(mean_pct), "{0:0.1%}".format(max_pct),
"{0:0.1%}".format(cuml_pct)))
##################################
# GC Times
young_gc_times = [record.young_gc_time * 1000 for record in gc_data if not record.is_stw_gc]
full_gc_times = [record.stw_time * 1000 for record in gc_data if record.is_stw_gc]
if sample_time_secs:
if young_gc_count_delta:
ygc_rate = (young_gc_count_delta / sample_time_secs) * 60
else:
ygc_rate = 0
if full_gc_count_delta:
fgc_rate = (full_gc_count_delta / sample_time_secs) * 60
else:
fgc_rate = 0
display.render("\n")
display.render("GC Information:\n")
display.render("~~~~~~~~~~~~~~~\n")
display.render("YGC/FGC Count: {0}/{1} (Rate: {2:0.2f}/min, {3:0.2f}/min)\n".format(young_gc_count_delta, full_gc_count_delta, ygc_rate, fgc_rate))
display.render("\n")
display.render("Sample Period GC Load: {0:0.2f}%\n".format(sample_gc_load))
display.render("")
display.render("CMS Sweep Times: {0:0.3f}s / {1:0.3f}s / {2:0.3f}s / {3:0.2f} (min/mean/max/stdev)\n".format(_min(cms_times), mean(cms_times), _max(cms_times), stdev(cms_times)))
display.render("YGC Times: {0:0.0f}ms / {1:0.0f}ms / {2:0.0f}ms / {3:0.2f} (min/mean/max/stdev)\n".format(_min(young_gc_times), mean(young_gc_times), _max(young_gc_times), stdev(young_gc_times)))
display.render("FGC Times: {0:0.0f}ms / {1:0.0f}ms / {2:0.0f}ms / {3:0.2f} (min/mean/max/stdev)\n".format(_min(full_gc_times), mean(full_gc_times), _max(full_gc_times), stdev(full_gc_times)))
agg_ygc_time = sum(young_gc_times)
agg_fgc_time = sum(full_gc_times)
display.render("Agg. YGC Time: {0:0.0f}ms\n".format(agg_ygc_time))
display.render("Agg. FGC Time: {0:0.0f}ms\n".format(agg_fgc_time))
display.render("\n")
if og_rates:
display.render(
"Est. Time Between FGCs (min/mean/max): {0:>10} {1:>10} {2:>10}\n".format(reduce_seconds(jvm_mem_cfg["og_size"] / min_og_rate), reduce_seconds(jvm_mem_cfg["og_size"] / mean_og_rate),
reduce_seconds(jvm_mem_cfg["og_size"] / max_og_rate)))
else:
display.render("Est. Time Between FGCs (min/mean/max): {0:>10} {1:>10} {2:>10}\n".format("n/a", "n/a", "n/a"))
display.render("Est. OG Size for 1 FGC/hr (min/mean/max): {0:>10} {1:>10} {2:>10}\n".format(reduce_k(min_og_rate * 3600), reduce_k(mean_og_rate * 3600), reduce_k(max_og_rate * 3600)))
display.render("\n")
display.render("Overall JVM Efficiency Score*: {0:0.3f}%\n".format(100 - sample_gc_load))
display.render("\n")
###################################
# JMap Data
display.render("Current JVM Mem Configuration:\n")
display.render("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
if jmap_data:
for k, v in jmap_data.items():
if "Size" in k:
v = reduce_k(v / 1024)
display.render("{0:>17}: {1}\n".format(k, v))
else:
for k, v in jvm_mem_cfg.items():
display.render("{0:>17}: {1}\n".format(k, reduce_k(v / 1024)))
display.render("\n")
######################
# Show recommendations
_show_recommendations(death_ages, young_gc_times, full_gc_times, fgc_rate, ygc_rate, yg_alloc_rates, og_rates, jvm_mem_cfg, jmap_data, jstat_data, gc_data, cms_times, survivor_info,
optimized_for_ygcs_rate, proc_details)
display.render("~~~\n")
display.render("* The allocation rate is the increase in usage before a GC done. Growth rate\n")
display.render(" is the increase in usage after a GC is done.\n")
display.render("\n")
display.render("* The JVM efficiency score is a convenient way to quantify how efficient the\n")
display.render(" JVM is. The most efficient JVM is 100% (pretty much impossible to obtain).\n")
if full_gc_count_delta == 0:
display.render("\n")
display.render("* There were no full GCs during this sample period. This reporting will\n")
display.render(" be less useful/accurate as a result.\n")
display.render("\n")
display.render("* A copy of the critical data used to generate this report is stored\n")
display.render(" in /tmp/jtune_data-{0}.bin.bz2. Please copy this to your homedir if you\n".format(user))
display.render(" want to save/analyze this further.\n")
def _get_survivor_info(death_ages=None, survivor_info=None, gc_data=None, survivor_problem_pct=None, curr_ng_size=None, adj_ng_size=None):
"""This looks at the survivor info data structure, and will return the max
tenuring size, and max tenuring age that it feels is needed."""
# This is roughly how much larger the survivor space should be to counteract the increase
# in the frequency of ygcs caused from the smaller NG size as it pushes data into the
# survivor space more often. I don't need to change the MaxTenuringThreshold as that is
# mostly constant depending on how data ages.
#
# I'm adjusting the size of the survivor space based on the eden change. It MAY be better
# adjusting this based on time of how frequent the ygcs are happening.
ng_size_delta = curr_ng_size - adj_ng_size
    # Going to use this to change the MaxTenuringThreshold parameter. The reason is that
    # ygcs will happen less/more often if I change the ng size, and I'll need to counter
    # that by increasing/decreasing the tenuring threshold to keep things in balance.
ng_size_delta_pct = adj_ng_size / curr_ng_size
    # Changing the 'survivor_problem_pct' which is the watermark
    # for objects still alive. If it's over that amount, then the
    # tenuring threshold needs to be increased; if it's less, then
    # the age is good. HOWEVER, I use the death rate, so an 85% death
    # rate is a 15% survivor rate.
survivor_watermark = 100 - survivor_problem_pct
# Get the max survivor age allowed per the jvm configuration
max_survivor_age = gc_data[0].max_threshold
# The survivor_info structure is the decrease in size for that
# age going into the next, so if the max here is 6, the actual max
    # survivor age used is 7.
longest_used_ratio = len(survivor_info) + 1
# Survivor percentage of surviving objects
age_objects_still_alive = list()
current_percentage = 100
for key in sorted(survivor_info):
# [1] is the average, [2] is the max
mean_death_rate_pct = survivor_info[key][1]
current_percentage *= 1 - mean_death_rate_pct
age_objects_still_alive.append(current_percentage)
error_msg = None
if max_survivor_age < 15:
if longest_used_ratio == max_survivor_age:
if age_objects_still_alive[-1] > ((100 - survivor_watermark) / 100.0):
error_msg = "The survivor ratio of {0} is too small as {1:0.1f}% of the objects are still alive. Try increasing the MaxTenuringThreshold (Max: 15) parameter, and running this analysis again.".format(
longest_used_ratio, age_objects_still_alive[-1])
elif not survivor_info:
error_msg = "For the examined sample period, I could not retrieve any meaningful survivor statistics from the gc.log. This JVM is either sick, or the sample period was too short."
elif not survivor_info:
error_msg = "For the examined sample period, I could not retrieve any meaningful survivor statistics from the gc.log. This JVM is either sick, or the sample period was too short."
elif not survivor_info:
error_msg = "For the examined sample period, I could not retrieve any meaningful survivor statistics from the gc.log. This JVM is either sick, or the sample period was too short."
if error_msg:
raise ValueError(error_msg)
###########################################################
# Don't confuse the 'min()' with the 'max' variable. I want
# the first age where it's less than survivor_problem_pct
try:
max_tenuring_age = min([k for k, v in enumerate(age_objects_still_alive, 1) if v <= survivor_problem_pct])
except ValueError:
max_tenuring_age = 0
if not max_tenuring_age:
# Not sure if I like this algorithm, but it seems close enough
below_threshold_ct = len([death_pct for death_pct in death_ages if death_pct <= Decimal(".04")])
below_threshold_pct = below_threshold_ct / float(len(death_ages))
# If more than 33% of the ages are at or below 4%, make a note of it.
if below_threshold_pct > .33:
# It's speculative that I should add to the heap any objects that aren't reaped
            # after cutting off the MaxTenuringThreshold, but since it's not getting reaped anyway,
# it may not change anything, so not adjusting for the time being.
# We're using all the available ages, but objects are still alive...
if max_survivor_age == len(death_ages):
display.render("\n".join(textwrap.wrap(
"* Warning: It looks like your tenuring threshold is too high - {0:0.0%} of your ages are reaping at or below 4% of the objects. We could make it easier for the JVM if we reduce your MaxTenuringThreshold by {1} to {2} instead of {3}.".format(
below_threshold_pct, below_threshold_ct, len(death_ages) - below_threshold_ct, max_survivor_age))))
else:
display.render("\n".join(textwrap.wrap(
"* Warning: It looks like your tenuring threshold is too high - {0:0.0%} of your ages are reaping at or below 4% of the objects. We could make it easier for the JVM if we reduce your MaxTenuringThreshold by {1} to {2} instead of {3}. BE CAREFUL - your max *used* age in the gc.logs of {4} is less than the configured max age of {3} - make sure that you used a large enough sample size, and let the JVM go through 3 FGCs (option: '-s 3') and is being checked during peak traffic.".format(
below_threshold_pct, below_threshold_ct, len(death_ages) - below_threshold_ct, max_survivor_age, len(death_ages)))))
max_tenuring_age = len(death_ages) - below_threshold_ct
else:
display.render("\n".join(textwrap.wrap(
"* Warning: Your survivor age is too short, your last age of {0} has {1:0.2f}% of its objects still alive. Because of this, I'm unable to reliably determine how your objects are aging. Unset or increase the MaxTenuringThreshold (max: 15) to mitigate this problem.".format(
len(age_objects_still_alive), age_objects_still_alive[-1]))))
tenure_sizes = list()
for gc_record in gc_data:
try:
tenure_sizes.append(gc_record.ages[max_tenuring_age - 1][2])
except IndexError:
# I saw a gc record that doesn't have that age
# level, so skip it.
pass
# It's recommended to have the tenuring size 2x the max tenure size, I then
# add in the change in newgen (ng_size_delta) to offset the decrease/increase
# in newgen as calculated in this parent's function. The 'ng_size_delta / 2' is
# such that I increase the whole max_tenuring_size by ng_size_delta, but since
# there are two survivor spaces, I need to split the ng_size_delta by 2 for each
# survivor space
max_tenuring_size = (max(tenure_sizes) * 2) + (ng_size_delta / 2)
survivor_ratio = adj_ng_size / max_tenuring_size
# Checking if survivor space is LARGER than the newgen size
if survivor_ratio < 1:
display.render("\n".join(textwrap.wrap(
"* Warning: The calculated recommended survivor ratio of {0:0.2f} is less than 1. This is not possible, so I increased the size of newgen by {1}, and set the survivor ratio to 1. Try the tuning suggestions, and watch closely.\n".format(
survivor_ratio, reduce_k((max_tenuring_size - adj_ng_size) / 1024)), display.textwrap_offset)) + "\n\n")
# This is close, but still wrong. If I run into this condition, then I need to
# also fix the newgen size b/c the tenured size is based off of the newgen
# size before I knew there was an issue. I think this is probably close enough
# for now.
survivor_ratio = 1
adj_ng_size = max_tenuring_size
else:
adj_ng_size += max_tenuring_size
# Now, change the max tenuring age/threshold
max_tenuring_age *= (1 / ng_size_delta_pct)
return adj_ng_size, survivor_ratio, max_tenuring_size, max_tenuring_age
def _show_recommendations(death_ages=None, young_gc_times=None, full_gc_times=None, fgc_rate=None, ygc_rate=None, yg_alloc_rates=None, og_rates=None, jvm_mem_cfg=None, jmap_data=None, jstat_data=None,
gc_data=None, cms_times=None, survivor_info=None, optimized_for_ygcs_rate=None, proc_details=None):
"""This is where any jvm tuning recommendations happens."""
###########################################################################
# The basis of these recommendations are as follows:
#
# 1) More frequent YGCs which take less time is almost always better
# than less frequent YGCs, but taking longer; consistently slow is
# better than periodically slower
# 2) YGC times should have a low standard deviation(<= 5)
# 3) YGC times should be low (<= 50ms, ideally)
display.render("Recommendation Summary:\n")
display.render("~~~~~~~~~~~~~~~~~~~~~~~\n")
# This is how many ygcs/sec should be happening, if the mean ygc
# times are higher than desired
ygc_time_goal_ms = 50
ygc_stdev_goal = 5
# YGC mean ms percentile - lop off the worst offenders
# I am changing it instead of a mean of the 99p, doing a
# max of the 75p; may be better
ygc_pctile = 75
# This is just for analysis purposes; need a decent sample set count
ygc_count_goal = 10
fgc_count_goal = 3
# Marker for indicating if current config is good for
# the Java G1 garbage collector
ready_for_g1 = False
survivor_problem_pct = 10
ygc_stdev = stdev(percentile(young_gc_times, ygc_pctile))
ygc_mean_ms = float(max(percentile(young_gc_times, ygc_pctile)))
if jmap_data:
curr_ng_size = jmap_data['NewSize']
curr_og_size = jmap_data['OldSize']
# Not using b/c this data is not in the GC logs (and
# really doesn't need to be tuned...
# if "PermSize" in jmap_data:
# curr_pg_ms_size = jmap_data['PermSize']
# else:
# curr_pg_ms_size = jmap_data['MetaspaceSize']
max_heap_size = jmap_data['MaxHeapSize']
else:
curr_ng_size = jvm_mem_cfg["new_size"]
curr_og_size = jvm_mem_cfg["og_size"]
max_heap_size = jvm_mem_cfg["total_heap"]
adj_ng_size = curr_ng_size
#########################################################################################################
# This is an estimate. Because we use CMS for FGCs, it's an iterative process, and while the CMS reset is
# happening, more objects are being tenured into OG. The best we can do (I think) is to find the minimum
# size of OU, and go from there. This is why it's super important to have more than 2 FGCs to look at.
#
# This is tricky. I need to find the first record where the previous og size is bigger than
# the current. This identifies when the first CMS runs, and from there, I can find the minimum
normal_gc_data = [x for x in gc_data if x.og_used > 0]
try:
record_num = [record_num for record_num, first_gc, second_gc in zip(count(), normal_gc_data, normal_gc_data[1:]) if first_gc.og_used > second_gc.og_used][0]
except IndexError:
live_data_size_bytes = None
else:
live_data_size_bytes = _min(record.og_used for record in normal_gc_data[record_num:])
if proc_details and proc_details['proc_uptime_seconds'] < 300:
display.render("\n".join(textwrap.wrap(
"Warning: The process I'm doing the analysis on has been up for {0}, and may not be in a steady-state. It's best to let it be up for more than 5 minutes to get more realistic results.\n".format(
reduce_seconds(proc_details['proc_uptime_seconds'])))) + "\n\n")
#################################################
# Find the recommended NewGen size
if len(young_gc_times) < ygc_count_goal:
display.render("\n".join(
textwrap.wrap("Warning: There were only {0} YGC entries to do the analysis on. It's better to have > {1} to get more realistic results.\n".format(len(young_gc_times), ygc_count_goal),
display.textwrap_offset)) + "\n\n")
if ygc_stdev > ygc_stdev_goal * 4:
comment = "VERY inconsistent"
elif ygc_stdev > ygc_stdev_goal * 2:
comment = "pretty inconsistent"
elif ygc_stdev > ygc_stdev_goal:
comment = "somewhat consistent"
ready_for_g1 = True
else:
comment = "very consistent"
ready_for_g1 = True
messages = list()
    # This logic block goes through different optimization scenarios that it
    # uses to find an optimal setting.
# messages.append("- The mean YGC rate is {0:0.2f}/min, and the max {1} percentile YGC time is {2:0.0f}ms (stdev of {3:0.2f} which is {4}). It's best to have the mean YGC time be at or below {5}ms, and the YGC stdev at or below {6} if possible.".format(ygc_rate, ord_num(ygc_pctile), ygc_mean_ms, ygc_stdev, comment, ygc_time_goal_ms, ygc_stdev_goal))
# TODO: Too much repetition in this code block
if (optimized_for_ygcs_rate > ygc_rate) and (ygc_stdev > ygc_stdev_goal or ygc_mean_ms > ygc_time_goal_ms):
adj_ng_size = curr_ng_size * (ygc_rate / optimized_for_ygcs_rate)
######################################################################
# Figure out Tenuring Threshold & size for the survivor spaces, basing
# it on the last age where below 10% still live
try:
new_adj_ng_size, survivor_ratio, max_tenuring_size, max_tenuring_age = _get_survivor_info(death_ages, survivor_info, gc_data, survivor_problem_pct, curr_ng_size, adj_ng_size)
# Go ahead and set it regardless
adj_ng_size = new_adj_ng_size
except ValueError as msg:
display.render("\n" + "\n".join(textwrap.wrap("* Error: {0}".format(msg), display.textwrap_offset)) + "\n\n")
display.render("")
return False
messages.append(
"- With a mean YGC time goal of {0:0.0f}ms, the suggested (optimized for a YGC rate of {1:0.2f}/min) size of NewGen (including adjusting for calculated max tenuring size) considering the above criteria should be {2:0.0f} MiB (currently: {3:0.0f} MiB).".format(
ygc_time_goal_ms, optimized_for_ygcs_rate, float(adj_ng_size) / 1024.0 / 1024.0, float(curr_ng_size) / 1024.0 / 1024.0))
if new_adj_ng_size < curr_ng_size:
messages.append(
"- Because we're decreasing the size of NewGen, it can have an impact on system load due to increased memory management requirements. There's not an easy way to predict the impact to the application, so watch this after it's tuned.")
elif ygc_mean_ms > ygc_time_goal_ms:
adj_ng_size = curr_ng_size * (ygc_time_goal_ms / ygc_mean_ms)
######################################################################
# Figure out Tenuring Threshold & size for the survivor spaces, basing
# it on the last age where below 10% still live
try:
new_adj_ng_size, survivor_ratio, max_tenuring_size, max_tenuring_age = _get_survivor_info(death_ages, survivor_info, gc_data, survivor_problem_pct, curr_ng_size, adj_ng_size)
# Go ahead and set it regardless
adj_ng_size = new_adj_ng_size
except ValueError as msg:
display.render("\n" + "\n".join(textwrap.wrap("* Error: {0}".format(msg), display.textwrap_offset)) + "\n\n")
display.render("")
return False
messages.append(
"- With a mean YGC time goal of {0:0.0f}ms, the suggested (optimized for YGC time) size of NewGen (including adjusting for calculated max tenuring size) considering the above criteria should be {1:0.0f} MiB (currently: {2:0.0f} MiB).".format(
ygc_time_goal_ms, float(adj_ng_size) / 1024.0 / 1024.0, float(curr_ng_size) / 1024.0 / 1024.0))
if new_adj_ng_size < curr_ng_size:
messages.append(
"- Because we're decreasing the size of NewGen, it can have an impact on system load due to increased memory management requirements. There's not an easy way to predict the impact to the application, so watch this after it's tuned.")
else:
adj_ng_size = curr_ng_size
######################################################################
# Figure out Tenuring Threshold & size for the survivor spaces, basing
# it on the last age where below 10% still alive
try:
new_adj_ng_size, survivor_ratio, max_tenuring_size, max_tenuring_age = _get_survivor_info(death_ages, survivor_info, gc_data, survivor_problem_pct, curr_ng_size, adj_ng_size)
# Go ahead and set it regardless
adj_ng_size = new_adj_ng_size
except ValueError as msg:
display.render("\n" + "\n".join(textwrap.wrap("* Error: {0}".format(msg), display.textwrap_offset)) + "\n\n")
display.render("")
return False
messages.append("- The mean YGC rate is {0:0.2f}/min, and the mean YGC time is {1:0.0f}ms (stdev of {2:0.2f} which is {3}).".format(ygc_rate, ygc_mean_ms, ygc_stdev, comment))
for message in messages:
display.render("\n".join(textwrap.wrap(message)) + "\n")
#################################################
# Find the recommended PermGen size
#
# Removing this block b/c permgen/metaspace usage isn't in the gc.logs
#
############################################
# Find out what the survivor ratio should be
display.render("\n".join(
textwrap.wrap("- Looking at the worst (max) survivor percentages for all the ages, it looks like a TenuringThreshold of {0:0.0f} is ideal.".format(max_tenuring_age), display.textwrap_offset)) + "\n")
display.render("\n".join(textwrap.wrap(
"- The survivor size should be 2x the max size for tenuring threshold of {0:0.0f} given above. Given this, the survivor size of {1:0.0f}M is ideal.".format(max_tenuring_age,
max_tenuring_size / 1024 / 1024), display.textwrap_offset)) + "\n")
display.render("\n".join(textwrap.wrap("- To ensure enough survivor space is allocated, a survivor ratio of {0:0.0f} should be used.".format(survivor_ratio), display.textwrap_offset)) + "\n")
#################################################
# Find the recommended max heap size
if len(full_gc_times) < fgc_count_goal:
display.render("\n" + "\n".join(textwrap.wrap(
"* Error: You really need to have at least {0} (preferably more) FGCs happen (I found {1}) before doing any OG size recommendation analysis. Stopping any further analysis.\n".format(
fgc_count_goal, len(full_gc_times)), display.textwrap_offset)) + "\n\n")
display.render("\n")
return False
recommended_max_heap_size = 3.5 * float(live_data_size_bytes) + float(max_tenuring_size + adj_ng_size)
if max_heap_size != recommended_max_heap_size:
display.render("\n".join(textwrap.wrap(
"- It's recommended to have the max heap size 3-4x the size of the live data size (OldGen + PermGen), and adjusted to include the recommended survivor and newgen size. New recommended size is {0:0.0f}MiB (currently: {1:0.0f}MiB).".format(
float(recommended_max_heap_size) / 1024.0 / 1024.0, float(max_heap_size) / 1024.0 / 1024.0), display.textwrap_offset)) + "\n")
#################################################
# Figure out the occupancy fraction
max_cms_time = float(_max(cms_times))
# Not doing the MAX, but a max of a percentile of the og rates- I think that's better
# maybe doing a mean of a percentile?
pct_number = 99
# KiB -> B
max_og_rate = float(_max(percentile(og_rates, pct_number))) * 1024
oldgen_offset = curr_og_size - (float(_max(yg_alloc_rates) / 1024) * max_cms_time) - (max_cms_time * max_og_rate)
occ_fraction = math.floor((float(oldgen_offset) / curr_og_size) * 100)
display.render("\n".join(textwrap.wrap(
"- With a max {0} percentile OG promotion rate of {1}/s, and the max CMS sweep time of {2}s, you should not have a occupancy fraction any higher than {3:0.0f}.".format(ord_num(pct_number),
reduce_k(Decimal(str(
max_og_rate / 1024.0))),
max_cms_time,
occ_fraction),
display.textwrap_offset)) + "\n")
# Java 7 G1 Stuff
display.render("\n")
display.render("Java G1 Settings:\n")
display.render("~~~~~~~~~~~~~~~~~~~\n")
if ready_for_g1:
display.render("\n".join(textwrap.wrap(
"- With a max ygc stdev of {0:0.2f}, and a {1} percentile ygc mean ms of {2:0.0f}ms, your config is good enough to move to the G1 garbage collector.".format(ygc_stdev, ord_num(pct_number),
ygc_mean_ms),
display.textwrap_offset)) + "\n")
display.render("\n".join(textwrap.wrap("- Since G1 uses one space for everything, the consolidated heap size should be {0:0.0f}MiB.".format(float(recommended_max_heap_size) / 1024.0 / 1024.0),
display.textwrap_offset)) + "\n")
else:
display.render("\n".join(textwrap.wrap(
"- With a max ygc stdev of {0:0.2f}, and a {1} percentile ygc mean ms of {2:0.0f}ms, your config is probably not ready to move to the G1 garbage collector. Try tuning the JVM, and see if that improves things first.".format(
ygc_stdev, ord_num(pct_number), ygc_mean_ms), display.textwrap_offset)) + "\n")
display.render("\n")
display.render("The JVM arguments from the above recommendations:\n")
display.render("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
display.render("\n".join(textwrap.wrap("-Xmx{0:0.0f}m -Xms{0:0.0f}m -Xmn{1:0.0f}m -XX:SurvivorRatio={2:0.0f} -XX:MaxTenuringThreshold={3:0.0f} -XX:CMSInitiatingOccupancyFraction={4:0.0f}".format(recommended_max_heap_size / 1024.0 / 1024.0, float(adj_ng_size) / 1024.0 / 1024.0, survivor_ratio, max_tenuring_age, occ_fraction), display.textwrap_offset)) + "\n")
if ready_for_g1:
display.render("\n")
display.render("The JVM arguments for G1:\n")
display.render("~~~~~~~~~~~~~~~~~~~~~~~~~\n")
display.render("\n".join(textwrap.wrap("-XX:+UseG1GC -XX:MaxGCPauseMillis={0:0.0f} -Xms{1:0.0f}m -Xmx{1:0.0f}m ".format(ygc_mean_ms, recommended_max_heap_size / 1024.0 / 1024.0), display.textwrap_offset)) + "\n")
def get_proc_info(pid=None):
"""Return a data structure with details of the given process id
Keyword arguments:
pid -- the process id of the process to be checked
"""
details = dict()
try:
cpu_ticks_per_sec = int(os.sysconf(os.sysconf_names['SC_CLK_TCK']))
bytes_per_page = resource.getpagesize()
details['gc_file_rotation'] = False
for line in liverun("readlink /proc/{0}/cwd".format(pid)):
line = line.decode()
details['proc_cwd'] = line.strip()
with open("/proc/{0}/cmdline".format(pid), "r") as _file:
for blob in _file:
for line in blob.split("\0"):
if "-Xloggc" in line:
gc_path = line.split(":", 1)[1]
if gc_path.startswith("/"):
details['gc_log_path'] = gc_path
else:
details['gc_log_path'] = details['proc_cwd'] + "/" + gc_path
elif "/bin/java" in line:
details['java_path'] = os.path.dirname(line)
elif "-XX:+UseGCLogFileRotation" in line:
details['gc_file_rotation'] = True
elif "-Xms" in line:
details['min_heap_size'] = line.split("ms")[1]
elif "-Xmx" in line:
details['max_heap_size'] = line.split("mx")[1]
elif "-XX:+PrintGCDateStamps" in line:
details['print_gc_date_stamps'] = True
elif "-XX:+PrintGCDetails" in line:
details['print_gc_details'] = True
elif "-XX:+PrintTenuringDistribution" in line:
details['print_tenuring_distribution'] = True
elif "-XX:SurvivorRatio=" in line:
details['survivor_ratio'] = line.split("SurvivorRatio=")[1]
elif "-XX:+UseConcMarkSweepGC" in line:
details['use_cms'] = True
elif "-XX:+UseParNewGC" in line:
details['use_parnew'] = True
if 'java_path' not in details:
            details['java_path'] = b''.join(liverun("which java")).decode().strip().replace("/java", "")
with open("/proc/uptime", "r") as _file:
for line in _file:
details['sys_uptime_seconds'] = Decimal(line.split()[0])
break
with open("/proc/{0}/stat".format(pid), "r") as _file:
for line in _file:
field = line.split()
utime_ticks = int(field[13])
stime_ticks = int(field[14])
num_threads = int(field[19])
uptime_ticks = int(field[21])
vsize_bytes = int(field[22])
rss_bytes = int(field[23]) * bytes_per_page
details['proc_uptime_seconds'] = (details['sys_uptime_seconds']) - Decimal(str(uptime_ticks / float(cpu_ticks_per_sec)))
details['proc_utime_seconds'] = utime_ticks / Decimal(cpu_ticks_per_sec)
details['proc_stime_seconds'] = stime_ticks / Decimal(cpu_ticks_per_sec)
details['proc_rss_bytes'] = rss_bytes
details['proc_vsize_bytes'] = vsize_bytes
details['num_threads'] = num_threads
break
for line in liverun("{0}/java -version".format(details['java_path'])):
line = line.decode()
if "java version" in line:
line = line.strip().replace("\"", "")
fields = line.split()
details['java_build_version'] = fields[-1]
match = re.match(r"^(\d+)\.(\d+)\.(\d+)", details['java_build_version'])
details['java_ver_int'] = match.group(2)
break
except IOError:
# The data structure will be empty, and I'll catch it when
# I get a key error on accessing it
pass
return details
def process_gclog(log_file=None, log_file_pos=0):
"""Pretty basic function that iterates through a gc log, and returns a data structure
of the log data.
Keyword arguments:
log_file -- the gc log file to be read
log_file_pos -- the offset of the log file from whence to start (as bytes)
"""
gc_log_queue = list()
try:
line_num = 0
print()
print("* Reading gc.log file...", end=" ")
current_size = os.stat(log_file).st_size
if current_size < log_file_pos:
print("log file was truncated/rotated; reading from the start", end=" ")
log_file_pos = 0
start_time = datetime.datetime.now()
with open(log_file, "r") as _file:
_file.seek(log_file_pos)
for line in _file:
gc_log_queue.append(line)
line_num += 1
elapsed_time = sec_diff(start_time, datetime.datetime.now())
print("done. Scanned {0} lines in {1:0.4f} seconds.".format(line_num, elapsed_time))
except IOError:
# I don't want/need to check the exception. If it fails, it fails.
pass
else:
gc_log_queue.append("END_OF_FILE")
return gc_log_queue
def _run_jmap(pid=None, procdetails=None):
"""Rung jmap for the given process id, and java path, returning
a data structure with the information"""
jmap_data = dict()
java_path = procdetails['java_path']
try:
for line in liverun("{0}/jmap -J-Xmx128M -heap {1}".format(java_path, pid)):
line = line.decode()
field = line.split()
if "MinHeapFreeRatio" in line:
jmap_data['MinHeapFreeRatio'] = int(field[2])
elif "MaxHeapFreeRatio" in line:
jmap_data['MaxHeapFreeRatio'] = int(field[2])
elif "MaxHeapSize" in line:
jmap_data['MaxHeapSize'] = int(field[2])
elif "NewSize" in line:
jmap_data['NewSize'] = int(field[2])
elif "MaxNewSize" in line:
jmap_data['MaxNewSize'] = int(field[2])
elif "OldSize" in line:
# JMap seems to be scaled wrong. Comparing it to jstat, it shows that
# it's off by about 1000 (1024). There's a bug in Java6 where this is in KB
# not bytes like the others. Appears to be fixed in Java8 (maybe Java7, too)
java_int = int(procdetails['java_ver_int'])
if java_int < 8:
jmap_data['OldSize'] = int(field[2]) * 1024
else:
jmap_data['OldSize'] = int(field[2])
elif "NewRatio" in line:
jmap_data['NewRatio'] = int(field[2])
elif "SurvivorRatio" in line:
jmap_data['SurvivorRatio'] = int(field[2])
elif "PermSize" in line:
jmap_data['PermSize'] = int(field[2])
elif "MaxPermSize" in line:
jmap_data['MaxPermSize'] = int(field[2])
elif "MaxMetaspaceSize" in line:
if "MB" in line:
jmap_data['MaxMetaspaceSize'] = int(field[2]) * 1024 * 1024
else:
jmap_data['MaxMetaspaceSize'] = int(field[2])
elif "MetaspaceSize" in line:
jmap_data['MetaspaceSize'] = int(field[2])
except (IOError, KeyboardInterrupt):
pass
return jmap_data
def run_jstat(pid=None, java_path=None, no_jstat_output=None, fgc_stop_count=None, max_count=None, ygc_stop_count=None):
"""Rung jstat, and outputs the data in a nice column and aligned layout.
Keyword arguments:
pid -- the process pid to run jstat against
java_path -- the path to use to run jstat
no_jstat_output -- true/false that tells this function to not output any data
fgc_stop_count -- the integer value that tells this function to stop at this number of full (cms) gcs
max_count -- the max number of lines the function should display
ygc_stop_count -- the integer value that tells this function to stop at this number of young gcs
"""
global subproc
jstat_data = dict()
jstat_data['TIME_STAMP'] = list()
# This is how the columns will be displayed in order.
ordered_fields = ["EC", "EP", "EU", "S0C/S1C", "S0C", "S1C", "S0U", "S1U", "OC", "OP", "OU", "MC", "MU", "PC", "PU", "YGC", "YGCD", "FGC", "FGCD"]
displayed_output = False
combined_survivors = False
field_map = dict()
line_num = 0
field_widths = dict()
first_fgc_ct = None
prev_fgc_ct = None
last_fgc_ct = None
# Start at 0 so the stop-count checks below are safe before two samples exist
total_fgcs = 0
total_ygcs = 0
short_fields = True
# Being able to override python3's print function would work much better
# here; instead, all output is routed through the display object.
display.render("#" * 5 + "\n")
display.render("# Start Time: {0} GMT\n".format(datetime.datetime.now()))
display.render("# Host: {0}\n".format(socket.getfqdn()))
display.render("#" * 5 + "\n")
if max_count > 0:
cmd = "{0}/jstat -J-Xmx128M -gc {1} 1000 {2}".format(java_path, pid, max_count)
else:
cmd = "{0}/jstat -J-Xmx128M -gc {1} 1000".format(java_path, pid)
try:
for line in liverun(cmd):
line = line.decode()
timestamp = datetime.datetime.now()
line = line.strip()
#######################################################################
# Print the header, and first two lines should be printed. After that,
# the logic block at the end (to see if there's been a fgc or not)
# takes over, and prints the line conditionally with decoration
field_num = 0
for field in line.split():
if line_num == 0:
jstat_data[field] = list()
field_map[field_num] = field
else:
field_name = field_map[field_num]
if field_name in ['YGCT', 'FGCT', 'GCT']:
jstat_data[field_name].append(Decimal(field))
else:
# Minding sigfigs- no decimal needed for large numbers; that's
# just silly
jstat_data[field_name].append(Decimal("{0:0.0f}".format(Decimal(field))))
field_num += 1
if jstat_data['OC'] and jstat_data['OU']:
# Better to handle the percentage-awareness here instead
# of making a unique conditional later on
if "OP" not in jstat_data:
jstat_data['OP'] = list()
jstat_data['OP'].append("{0:0.1%}".format(jstat_data['OU'][-1] / jstat_data['OC'][-1]))
if jstat_data['EC'] and jstat_data['EU']:
# Better to handle the percentage-awareness here instead
# of making a unique conditional later on
if "EP" not in jstat_data:
jstat_data['EP'] = list()
jstat_data['EP'].append("{0:0.1%}".format(jstat_data['EU'][-1] / jstat_data['EC'][-1]))
if jstat_data['GCT']:
if "YGCD" not in jstat_data:
jstat_data['YGCD'] = list()
if "FGCD" not in jstat_data:
jstat_data['FGCD'] = list()
# Young gc count delta
try:
if jstat_data['YGC'][-1] > jstat_data['YGC'][-2]:
delta = "+" + str(jstat_data['YGC'][-1] - jstat_data['YGC'][-2])
else:
delta = "-"
except IndexError:
delta = "-"
jstat_data['YGCD'].append(delta)
# full gc count delta
try:
if jstat_data['FGC'][-1] > jstat_data['FGC'][-2]:
delta = "+" + str(jstat_data['FGC'][-1] - jstat_data['FGC'][-2])
else:
delta = "-"
except IndexError:
delta = "-"
jstat_data['FGCD'].append(delta)
##################################
# I need at least two lines to get
# historical data
if line_num >= 2:
# Keep a timestamp for each record (to get sub-second granularity)
first_fgc_ct = jstat_data['FGC'][0]
first_ygc_ct = jstat_data['YGC'][0]
prev_fgc_ct = jstat_data['FGC'][-2]
last_fgc_ct = jstat_data['FGC'][-1]
last_ygc_ct = jstat_data['YGC'][-1]
total_fgcs = last_fgc_ct - first_fgc_ct
total_ygcs = last_ygc_ct - first_ygc_ct
#############################################
# line 1 is actual data, 0 is just the header
if line_num > 0:
jstat_data['TIME_STAMP'].append(timestamp)
####################################################
# See if I can combine the S0C/S1C fields (probably)
if jstat_data['S0C'][-1] == jstat_data['S1C'][-1]:
if "S0C/S1C" not in jstat_data:
jstat_data['S0C/S1C'] = list()
jstat_data['S0C/S1C'].append(jstat_data['S0C'][-1])
combined_survivors = True
else:
# This is redundant as I catch it earlier. Leaving it here for now.
logger.error("Looks like you're not running with the CMS garbage collector. You can enable this option by setting your JVM arguments to use '-XX:+UseConcMarkSweepGC'.")
sys.exit(1)
if not field_widths:
field_widths = _get_widths(jstat_data, short_fields)
if not displayed_output:
displayed_output = True
#############################################
# Don't display any output, just continue to
# the next iteration. Ick, double-negative..
if no_jstat_output:
continue
# Print the column header
display.render(" ", keep_newline=False)
for field in ordered_fields:
if combined_survivors and field != "S0C" and field != "S1C":
if field in field_widths:
width = field_widths[field]
display.render("{0:>{1}}".format(field, width + 1), keep_newline=False)
display.render("\n")
# Print a nice line spacer all even-like
display.render(" ", keep_newline=False)
for field in ordered_fields:
if combined_survivors and field != "S0C" and field != "S1C":
if field in field_widths:
width = field_widths[field]
display.render("{0:>{1}}".format("~" * width, width + 1), keep_newline=False)
display.render("\n")
# Print the first row of data that was cached so it can
# be used to determine field widths
display.render(" ", keep_newline=False)
for field in ordered_fields:
if field in field_widths:
width = field_widths[field]
# Get the last value
if combined_survivors and field != "S0C" and field != "S1C":
value = jstat_data[field][0]
if short_fields and field not in ['EP', 'OP', 'YGC', 'YGCT', 'FGC', 'FGCT', 'GCT', 'FGCD', 'YGCD']:
value = reduce_k(value, precision=1)
display.render("{0:>{1}}".format(value, width + 1), keep_newline=False)
display.render("\n")
else:
#################################
# Don't display any output; just
# fall through to the stop-count
# checks below.
if no_jstat_output:
    pass
else:
if last_fgc_ct > prev_fgc_ct:
display.render("* ", keep_newline=False)
else:
display.render(" ", keep_newline=False)
# Now print the actual numbers
for field in ordered_fields:
if field in field_widths:
width = field_widths[field]
# Get the last value
if combined_survivors and field != "S0C" and field != "S1C":
value = jstat_data[field][-1]
if short_fields and field not in ['EP', 'OP', 'YGC', 'YGCT', 'FGC', 'FGCT', 'GCT', 'FGCD', 'YGCD']:
value = reduce_k(value, precision=1)
display.render("{0:>{1}}".format(value, width + 1), keep_newline=False)
display.render("\n")
if 0 < fgc_stop_count <= total_fgcs:
break
if 0 < ygc_stop_count <= total_ygcs:
break
line_num += 1
except (IOError, KeyboardInterrupt):
# This triggers if I exit the 'liverun'
pass
finally:
if subproc and subproc.poll() is None:
# The process hasn't terminated
subproc.terminate()
return jstat_data
def _get_widths(jstat_data=None, short_fields=False):
"""Function that returns the recommended field widths of the jstat output"""
widths = dict()
for field in jstat_data:
max_width = max(len(str(value)) for value in jstat_data[field])
field_width = len(field)
if field_width > max_width:
widths[field] = field_width
else:
widths[field] = max_width
##################################################################
# Special handling for survivor spaces (S0C, S1C, S0U, S1U) should
# all be the same width, and b/c S{01}U alternate, it's better to
# set the width from S{01}C
if short_fields:
# The '6' accounts for shortened values like '3.23K/M/G', etc.
survivor_max = 6
newgen_max = 6
oldgen_max = 6
else:
survivor_max = max(widths['S0C'], widths['S1C'], widths['S0U'], widths['S1U'])
newgen_max = max(widths['EC'], widths['EU'])
oldgen_max = max(widths['OC'], widths['OU'])
widths['OC'] = oldgen_max
widths['OU'] = oldgen_max
widths['EC'] = newgen_max
widths['EU'] = newgen_max
widths['S0C'] = survivor_max
widths['S1C'] = survivor_max
widths['S0U'] = survivor_max
widths['S1U'] = survivor_max
widths['EP'] = 6
widths['OP'] = 6
return widths
def _at_exit(raw_gc_log=None, jmap_data=None, jstat_data=None, proc_details=None, optimized_for_ygcs_rate=None):
"""The exit function that is called when the user presses ctrl-c, or when it exits after X number
of jstat iterations. It calls various functions to display useful information to the end-user."""
gc_data = list()
in_stanza = False
date_time = None
entry = list()
# I don't know if I like this, but I wouldn't get to
# this point unless I asked for GC data from stdin...
if not raw_gc_log:
raw_gc_log = sys.stdin
for line in raw_gc_log:
#############################################################################
# Since I'm using the timestamp as the record stanza delimiter, I may as well
# convert it to a datetime object here instead of doing it later.
match = re.match(r"^(\d+)-(\d+)-(\d+)T(\d+):(\d+):([\d\.]+)[+-]\d+: ([\d\.]+):", line)
if match:
in_stanza = True
# If I'm at the start of a new block, save the previous block
if date_time and entry:
gc_record = GCRecord((date_time, entry))
if gc_data:
prev_gc_record = gc_data[-1]
if gc_record.jvm_running_time and prev_gc_record.jvm_running_time > gc_record.jvm_running_time:
logger.warning("The JVM restarted at {0}. Re-initing the internal datastructures.".format(gc_record.record_timestamp))
gc_data = list()
if gc_record.valid_record:
gc_data.append(gc_record)
entry = list()
year = int(match.group(1))
month = int(match.group(2))
day = int(match.group(3))
hour = int(match.group(4))
minute = int(match.group(5))
second = Decimal(match.group(6))
# up_time = Decimal(match.group(7))
date_time = datetime.datetime.strptime("{0}-{1}-{2} {3}:{4}:{5}".format(year, month, day, hour, minute, second), "%Y-%m-%d %H:%M:%S.%f")
if in_stanza:
entry.append(line)
_run_analysis(gc_data, jmap_data, jstat_data, proc_details, optimized_for_ygcs_rate)
def get_rotated_log_file(gc_log_file):
"""Function will scan existing log files to determine latest rotated log, if none found will return
non rotated file name.
"""
log_number = 0
while os.path.isfile("{0}.{1}".format(gc_log_file, log_number)):
log_number += 1
if log_number:
gc_log_file = "{0}.{1}".format(gc_log_file, (log_number - 1))
else:
logger.debug("\n".join(
textwrap.wrap(
"Was not able to find a rotated GC log for this process, defaulting to gc log from process.",
display.textwrap_offset)))
return gc_log_file
def get_gc_log_file(procdetails):
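"""Return the GC log path for the given process details, preferring the latest rotated log when log rotation is enabled."""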
gc_log_file = procdetails['gc_log_path']
if not gc_log_file:
logger.error("\n".join(
textwrap.wrap(
"I was not able to find a GC log for this process. Is the instance up?",
display.textwrap_offset)))
sys.exit(1)
if procdetails['gc_file_rotation']:
return get_rotated_log_file(gc_log_file)
else:
return gc_log_file
def get_jmap_data(pid=None, procdetails=None):
"""Function that runs jmap, only needed b/c jmap may not start, and this retries on failure."""
jmap_data = None
for seconds in [x * 2 for x in range(1, 8)]:
jmap_data = _run_jmap(pid, procdetails)
if "NewSize" in jmap_data:
break
else:
logger.warning("Couldn't connect to jvm via jmap to get valid data. Sleeping {0:0.0f} seconds, and trying again.".format(seconds))
time.sleep(seconds)
return jmap_data
################################################################
# Main
user = os.environ.get("SUDO_USER", None)
if not user:
user = getpass.getuser()
subproc = None
display = Display()
def main():
parser = argparse.ArgumentParser(description="Analytics tool for tuning and analyzing GC behavior.")
parser.add_argument('-o', '--optimize', help='Optimize for latency or throughput (range 0-11, 0 = ygc @ 180/min, 11 = ygc @ 1/min). Floats allowed.', type=Decimal, required=False, default=9)
parser.add_argument('-s', '--fgc-stop-count', help='How many full gcs should happen before I stop (very important for analytics)', type=int, default=0)
parser.add_argument('-y', '--ygc-stop-count', help='How many young gcs should happen before I stop', type=int, default=0)
parser.add_argument('-c', '--stop-count', help='How many iterations of jstat to run before stopping', type=int, default=0)
parser.add_argument('-n', '--no-jstat-output', help='Do not show jstat output - only print summary', action="store_true")
if DEBUG:
group = parser.add_mutually_exclusive_group(required=False)
else:
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-p', '--pid', help='Which java PID should I attach to', type=int)
group.add_argument('--gc-stdin', help='Read GC log data from stdin', action="store_true")
cmd_args = parser.parse_args()
raw_gc_log_data = list()
jmap_data = list()
jstat_data = list()
proc_details = list()
if not (cmd_args.pid or cmd_args.gc_stdin):
logger.error("Please specify -p (pid) or --gc-stdin")
sys.exit(1)
# A ygc of 180/min (3/sec)
ygc_upper_rate_per_min = 180
# Validate the optimize range
if 0 <= cmd_args.optimize <= 11:
# You won't have to change this function if you want
# to change the ygc upper/lower bounds later on
#
# Convert from rate/min to rate/sec
optimized_for_ygcs_rate = ((-Decimal(ygc_upper_rate_per_min - 1) / 11) * Decimal(str(cmd_args.optimize)) + ygc_upper_rate_per_min)
else:
logger.error("The optimize range must be between 0 and 11.")
sys.exit(1)
######################################################################
# This should be done w/ argparse, but I haven't dedicated enough time
# to figure it out
if cmd_args.no_jstat_output and not (cmd_args.ygc_stop_count or cmd_args.stop_count or cmd_args.fgc_stop_count):
logger.error("You must specify -s, -y, or -c arguments for this option to work.")
sys.exit(1)
if not cmd_args.gc_stdin:
try:
config_error = False
proc_details = get_proc_info(cmd_args.pid)
java_path = proc_details['java_path']
if proc_details.get("min_heap_size", 0) != proc_details.get("max_heap_size", 1):
config_error = True
logger.error(
"It looks like either you didn't specify your min and max heap size (-Xms & -Xmx respectively), or they are set to two different sizes. They need to be set to the same for jtune.py to work properly.")
if not proc_details.get("print_gc_date_stamps", False):
config_error = True
logger.error("You need to include the '-XX:PrintGCDateStamps' option to the JVM for JTune to work correctly.")
if not proc_details.get("print_gc_details", False):
config_error = True
logger.error("You need to include the '-XX:PrintGCDetails' option to the JVM for JTune to work correctly.")
if not proc_details.get("print_tenuring_distribution", False):
config_error = True
logger.error("You need to include the '-XX:+PrintTenuringDistribution' option to the JVM for JTune to work correctly.")
if not proc_details.get("survivor_ratio", False):
logger.warning("You probably want to include the '-XX:SurvivorRatio=<num>' option to the JVM for JTune to work correctly.")
if not proc_details.get("use_cms", False):
config_error = True
logger.error("You need to include the '-XX:+UseConcMarkSweepGC' option to the JVM for JTune to work correctly.")
if not proc_details.get("use_parnew", False):
config_error = True
logger.error("You need to include the '-XX:+UseParNewGC' option to the JVM for JTune to work correctly.")
if config_error:
logger.error("Exiting.")
sys.exit(1)
except (TypeError, KeyError):
logger.error("I was not able to get the process data for pid {0}".format(cmd_args.pid))
sys.exit(1)
###########################################
# Start the gc log watching in a subprocess
gc_log_file = get_gc_log_file(proc_details)
if not gc_log_file:
logger.error("\n".join(textwrap.wrap("I was not able to find a GC log for this process. Is the instance up?", display.textwrap_offset)))
sys.exit(1)
####################################################
# Get the file offset before starting jstat, so
# I can use it after jstat runs to read the log file
gc_log_file_pos = os.stat(gc_log_file).st_size
jmap_data = get_jmap_data(cmd_args.pid, proc_details)
if cmd_args.no_jstat_output:
jstat_data = dict()
else:
jstat_data = run_jstat(cmd_args.pid, java_path, cmd_args.no_jstat_output, cmd_args.fgc_stop_count, cmd_args.stop_count, cmd_args.ygc_stop_count)
# This basically hits after the user ctrl-c's
raw_gc_log_data = process_gclog(gc_log_file, gc_log_file_pos)
atexit.register(_at_exit, raw_gc_log_data, jmap_data, jstat_data, proc_details, optimized_for_ygcs_rate)
if __name__ == '__main__':
main()
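# Example invocations (illustrative; option names come from the argparse setup above):
#   sudo python jtune.py -p 12345 -y 10       # stop after 10 young GCs
#   sudo python jtune.py -p 12345 -c 60 -n    # 60 jstat samples, summary output only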
|
python
|
import os
base_dir = os.getcwd()
project_name = '{{cookiecutter.project_name}}'
project_path = f'{project_name}'
# https://github.com/cookiecutter/cookiecutter/issues/955
for root, dirs, files in os.walk(base_dir):
for filename in files:
# read file content
with open(os.path.join(root, filename)) as f:
content = f.read()
# replace tag by install path
content = content.replace('replace_me.base_dir', base_dir)
# replace file content
with open(os.path.join(root, filename), 'w') as f:
f.write(content)
print(f'\033[0;32mSuccessfully generated {project_path}\033[0m')
print('\033[0;33mTo make the utility functions available in your shell, source utils.sh. e.g.\033[0m')
print(f'\techo "source {project_path}/utils.sh" >> ~/.bashrc')
print()
print('\033[0;33mTo setup git hooks run the following:\033[0m')
print(f'\tln -s {project_path}/githooks/commit-msg {project_name}/.git/hooks/commit-msg')
|
python
|
import unittest
from parameterized import parameterized as p
from solns.combinationSum3.combinationSum3 import *
class UnitTest_CombinationSum3(unittest.TestCase):
@p.expand([
[]
])
def test_naive(self):
pass
|
python
|
import pymongo
class Database(object): # Database class inherits all attributes from object class
URI = "mongodb://127.0.0.1:27017" # class attributes which defines a value for every class instance
DATABASE = None
@staticmethod
def initialize(): # method that creates path to desired mongodb database
client = pymongo.MongoClient(Database.URI)
Database.DATABASE = client['tooldraft']
@staticmethod
def insert(collection, data):
Database.DATABASE[collection].insert(data)
@staticmethod
def find(collection, query):
return Database.DATABASE[collection].find(query)
@staticmethod
def find_one(collection, query):
return Database.DATABASE[collection].find_one(query)
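# Example usage (illustrative; collection name and document are made up):
#   Database.initialize()
#   Database.insert('tools', {'name': 'hammer'})
#   doc = Database.find_one('tools', {'name': 'hammer'})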
|
python
|
from talon import Context, actions
ctx = Context()
ctx.matches = r"""
app: mintty
"""
ctx.tags = ['terminal', 'user.file_manager', 'user.generic_terminal', 'user.git', 'user.kubectl']
@ctx.action_class('user')
class UserActions:
def file_manager_open_parent():
actions.insert('cd ..')
actions.key('enter')
@ctx.action_class('edit')
class EditActions:
def paste(): actions.key('shift-insert')
def copy(): actions.key('ctrl-insert')
def delete_line(): actions.key('ctrl-u')
|
python
|
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from django.conf.urls import url
from . import views
# The API URLs are now determined automatically by the router
urlpatterns = [
url(r"^$", views.index, name="index_page"),
url(r"^projects/create$", views.project_create_view, name="create_project"),
url(r"^projects/(?P<pk>\d+)/tasks$", views.project_page_view, name="project_page"),
url(r"^projects/(?P<pk>\d+)/tasks/(?P<task_pk>\d+)/annotation$", views.annotate_task_view, name="annotation_page"),
url(r"^projects/(?P<pk>\d+)/tasks/(?P<task_pk>\d+)/delete$", views.task_delete_view, name="delete_task"),
url(r"^projects/(?P<pk>\d+)/tasks/(?P<task_pk>\d+)/list_annotations$", views.list_annotations_for_task_view, name="list_task_annotations"),
url(r"^projects/(?P<pk>\d+)/tasks/(?P<task_pk>\d+)/list_annotations/(?P<annotation_pk>\d+)/review$", views.review_annotation_view, name="review_page"),
url(r"^projects/(?P<pk>\d+)/tasks/(?P<task_pk>\d+)/annotation/delete$", views.annotation_delete_view, name="delete_annotation"),
url(r"^projects/(?P<pk>\d+)/edit$", views.project_edit_view, name="edit_project"),
url(r"^projects/(?P<pk>\d+)/delete$", views.project_delete_view, name="delete_project"),
# API VIEWS
path('api/v1/projects/', views.ProjectList.as_view(), name="project-list"),
path('api/v1/projects/<int:pk>/', views.ProjectDetail.as_view(), name="specific_project"),
path('api/v1/projects/<int:pk>/tasks', views.ProjectTasks.as_view(), name="project-list-tasks"),
path('api/v1/root', views.api_root, name="api_root"),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
python
|
from flask import Flask, render_template
from flask_cors import CORS
from prometheus_client import Summary, MetricsHandler, Counter
from agaveflask.utils import AgaveApi, handle_error
from controllers import MetricsResource, CronResource
from errors import errors
app = Flask(__name__)
CORS(app)
api = AgaveApi(app, errors=errors)
REQUEST_TIME = Summary('request_processing_seconds', 'DESC: Time spent processing request')
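# REQUEST_TIME is not wired up below; it can be used to time handlers,
# e.g. by decorating a resource method with @REQUEST_TIME.time()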
# todo - probably should add a basic auth check
# for now, we comment this out because we do not authenticate the calls from prometheus;
# Authn/z
# @app.before_request
# def auth():
# authn_and_authz()
api.handle_error = handle_error
api.handle_exception = handle_error
api.handle_user_exception = handle_error
# Resources
api.add_resource(MetricsResource, '/metrics')
api.add_resource(CronResource, '/cron')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
|
python
|
#!/usr/local/bin/python
import SimpleHTTPServer, SocketServer, logging, subprocess, sys, glob, re, mimetypes
import argparse as argparse
# Stop traceback on ctrl-c
sys.tracebacklimit = 0
parser = argparse.ArgumentParser()
parser.add_argument("-p", nargs='?', default=8000)
parser.add_argument("-d", nargs='?', default=None)
args = parser.parse_args()
PORT = int(args.p)
serve_listing = glob.glob("serve/*")
serve_files = []
for f in serve_listing:
serve_files.append(f.replace("serve/", ""))
ANSI_COLOR_RED = "\x1b[31m"
ANSI_COLOR_GREEN = "\x1b[32m"
ANSI_COLOR_YELLOW = "\x1b[33m"
ANSI_COLOR_BLUE = "\x1b[34m"
ANSI_COLOR_MAGENTA = "\x1b[35m"
ANSI_COLOR_CYAN = "\x1b[36m"
ANSI_COLOR_RESET = "\x1b[0m"
class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def log_message(self, format, *args):
pass
def do_HEAD(self):
self.server_version = "nginx"
self.sys_version = ""
self.send_response(204)
self.send_header("Access-Control-Allow-Origin", "*")
def do_GET(self):
# Suppress information leakage & Deal with CORS
self.server_version = "nginx"
self.sys_version = ""
rows, columns = subprocess.check_output(['stty', 'size']).split()
print "="*int(columns)
print "> %sRequested GET path: %s%s" % (ANSI_COLOR_MAGENTA, self.path, ANSI_COLOR_RESET)
for h in self.headers:
print "> %s%s%s: %s" % (ANSI_COLOR_GREEN, h, ANSI_COLOR_RESET, self.headers[h])
path = self.path[1:]
path = re.sub("\?(.|\n)*", "", path)
if path in serve_files:
fp = "serve/%s" % path
d = open(fp).read()
t = mimetypes.guess_type(fp)[0] if mimetypes.guess_type(fp)[0] is not None else "text/plain"
self.send_response(200)
self.send_header("Access-Control-Allow-Origin", "*")
self.send_header("Content-type", t)
self.send_header("Content-length", len(d))
self.end_headers()
self.wfile.write(d)
return
if args.d is not None:
fp = "serve/%s" % args.d
d = open(fp).read()
t = mimetypes.guess_type(fp)[0] if mimetypes.guess_type(fp)[0] is not None else "text/plain"
self.send_response(200)
self.send_header("Access-Control-Allow-Origin", "*")
self.send_header("Content-type", t)
self.send_header("Content-length", len(d))
self.end_headers()
self.wfile.write(d)
return
self.send_response(404)
self.send_header("Access-Control-Allow-Origin", "*")
def do_POST(self):
# Suppress information leakage & Deal with CORS
self.server_version = "nginx"
self.sys_version = ""
rows, columns = subprocess.check_output(['stty', 'size']).split()
print "="*int(columns)
print "> %sRequested POST path: %s%s" % (ANSI_COLOR_MAGENTA, self.path, ANSI_COLOR_RESET)
for h in self.headers:
print "> %s%s%s: %s" % (ANSI_COLOR_BLUE, h, ANSI_COLOR_RESET, self.headers[h])
data = self.rfile.read(int(self.headers['Content-Length']))
print data
self.send_response(200)
Handler = GetHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
httpd.serve_forever()
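# Example invocation (script name illustrative; flags match the argparse setup above):
#   python2 serve.py -p 8080 -d default.html
# serves files from ./serve/ on port 8080, falling back to serve/default.html for unknown paths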
|
python
|
import os
import time
import numpy as np
import paddle.fluid as fluid
import config as cfg
from nets.attention_model import attention_train_net
from nets.crnn_ctc_model import ctc_train_net
from utils import data_reader
from utils.utility import get_ctc_feeder_data, get_attention_feeder_data
def main():
"""OCR training"""
if cfg.use_model == "crnn_ctc":
train_net = ctc_train_net
get_feeder_data = get_ctc_feeder_data
else:
train_net = attention_train_net
get_feeder_data = get_attention_feeder_data
# define network
sum_cost, error_evaluator, inference_program, model_average = train_net(cfg, cfg.data_shape, cfg.num_classes)
# data reader
train_reader = data_reader.train(batch_size=cfg.batch_size,
prefix_path=cfg.train_prefix,
cycle=cfg.total_step > 0,
model=cfg.use_model)
test_reader = data_reader.test(prefix_path=cfg.test_prefix, model=cfg.use_model)
# prepare environment
place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# Load the initialization model if one is specified
if cfg.init_model:
fluid.load(program=fluid.default_main_program(),
model_path=cfg.init_model,
executor=exe,
var_list=fluid.io.get_program_parameter(fluid.default_main_program()))
print("Init model from: %s." % cfg.init_model)
train_exe = exe
error_evaluator.reset(exe)
if cfg.parallel:
train_exe = fluid.ParallelExecutor(use_cuda=cfg.use_gpu, loss_name=sum_cost.name)
fetch_vars = [sum_cost] + error_evaluator.metrics
def train_one_batch(data):
var_names = [var.name for var in fetch_vars]
if cfg.parallel:
results = train_exe.run(var_names,
feed=get_feeder_data(data, place))
results = [np.array(r).sum() for r in results]
else:
results = exe.run(program=fluid.default_main_program(),
feed=get_feeder_data(data, place),
fetch_list=fetch_vars)
results = [r[0] for r in results]
return results
def test():
error_evaluator.reset(exe)
for data in test_reader():
exe.run(inference_program, feed=get_feeder_data(data, place))
_, test_seq_error = error_evaluator.eval(exe)
return test_seq_error[0]
def save_model():
if not os.path.exists(cfg.model_path):
os.makedirs(cfg.model_path)
fluid.save(program=fluid.default_main_program(),
model_path=os.path.join(cfg.model_path, "model"))
print("Saved model to: %s" % cfg.model_path)
iter_num = 0
stop = False
while not stop:
total_loss = 0.0
total_seq_error = 0.0
# train a pass
for data in train_reader():
if cfg.total_step < iter_num:
stop = True
break
result = train_one_batch(data)
total_loss += result[0]
total_seq_error += result[2]
iter_num += 1
# training log
if iter_num % cfg.log_period == 0:
print("[%s] - Iter[%d]; Avg loss: %.3f; Avg seq err: %.3f"
% (time.asctime(time.localtime(time.time())), iter_num,
total_loss / (cfg.log_period * cfg.batch_size),
total_seq_error / (cfg.log_period * cfg.batch_size)))
total_loss = 0.0
total_seq_error = 0.0
# evaluate
if iter_num % cfg.eval_period == 0:
if model_average:
with model_average.apply(exe):
test_seq_error = test()
else:
test_seq_error = test()
print("\n[%s] - Iter[%d]; Test seq error: %.3f\n" %
(time.asctime(time.localtime(time.time())), iter_num, test_seq_error))
# save model
if iter_num % cfg.save_model_period == 0:
if model_average:
with model_average.apply(exe):
save_model()
else:
save_model()
if __name__ == "__main__":
main()
|
python
|
#Importing Libraries
import os
import cv2
import time
import struct
import socket
import pyaudio
import freenect
import wikipedia
import playsound
import numpy as np
from gtts import gTTS
from scripts.rhino.rhino import *
from scripts.porcupine.porcupine import *
#Function to get images
def get_image(type, client): #'type' to tell about RGB image/depth image
path = ""
file = open(path,'w')
file.write(IPaddr)
file.close()
#Sending the type of Image
client.send(type.encode())
# It will wait until it gets the file name which is passed from the send function
file_name = client.recv(1024).decode()
print(file_name)
# This will open a new file in your python Dir with same file name
file = open(file_name,'wb')
# It will receive the starting 10 bytes
data = client.recv(10)
while data:
#print(data)
file.write(data)
data = client.recv(1024)
print("Data Recieved Succesfully")
client.close()
#returning RGB or depth image
image = cv2.imread(file_name)
return image
#Function to check if the object and frame centers coincide
def co_incident():
pass
#Function to go to an object
def goTo(slots, net, LABELS, ln, client):
#Getting the value of the key in the slots dictionary
obj = str(slots['ob1'])
#Initializing the variables
x = y = z = None
#Getting the coordinated of the object
(x,y,z) = getCoordinates(obj, net, LABELS, ln, client)
#Checking if the object was found or not
if x is None or y is None or z is None:
#Speaking that object was not found
print("None here")
playsound.playsound('not_found.mp3')
else:
#Ensuring the centers co-incident
co_incident()
print(x,y,z)
#Move towards the object
while z>=0.0:
#Move forward and check the distance again
send(client, "forward")
time.sleep(1)
(x,y,z) = getCoordinates(obj, net, LABELS, ln, client)
#Function to get the coordinates of the given object
def getCoordinates(obj, net, LABELS, ln, client):
while True:
#Get Images from Rpi
frame = get_image("image", client)
#Get Depth Image from Rpi
depth = get_image("depth", client)
#Getting the shape of the frame
(H, W) = frame.shape[:2]
#Creating blob from image
blob = cv2.dnn.blobFromImage(frame, 1/255.0, (224, 224), swapRB = True, crop = False)
net.setInput(blob)
layerOutputs = net.forward(ln)
#Initializing lists for displaying the output
boxes = []
confidences = []
classIds = []
#Looping over each layer's output
for output in layerOutputs:
#Looping over each detection
for detect in output:
#Extracting ClassID and confidence
score = detect[5:]
classID = np.argmax(score)
confidence = score[classID]
#Filtering weak detection
if confidence > 0.5:
#Getting bounding rectangle
box = detect[:4] * np.array([W, H, W, H])
(centerX, centerY, Width, Height) = box.astype("int")
#Getting Top and Left Points
x = int(centerX - (Width/2))
y = int(centerY - (Height/2))
#Adding to lists
boxes.append([x, y, int(Width), int(Height)])
classIds.append(classID)
confidences.append(float(confidence))
#Non-Maxima Suppression
idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)
#Checking Minimum Detection
if len(idxs) > 0:
#Looping over indexs
for i in idxs.flatten():
x = boxes[i][0]
y = boxes[i][1]
w1 = boxes[i][2]
h1 = boxes[i][3]
if LABELS[classIds[i]] == obj:
#Calculating the coordinates
print("Here")
cx = int(x + (w1/2))
cy = int(y + (h1/2))
cz = 0.1236 * np.tan(depth[cy][cx] / 2842.5 + 1.1863)
return (cx,cy,cz)
#Function to speak/interact
def speak(slots):
#Getting the value of the key in the slots dictionary
keyword = str(slots['p1'])
#If the keyword in known
if keyword == "yourself":
#Declaring the text
splitted = ["Hey, my name is groooot. I am a cute, cute robooooo. I am designed by Gaurav, Harish and Swati, and I work for them. Nice meeting you. I am here to help you, just spell groooooot."]
#If keyword is not known
else:
#Searching
search_result = wikipedia.summary(keyword)
#Spliting
splitted = search_result.split("\n")
#Text-to-speech model
speech = gTTS(text = splitted[0], lang = 'en-in' , slow = False)
#Saving Audio File
speech.save("speak.mp3")
#Running Audio file
playsound.playsound('speak.mp3')
def send(client, dir):
#Sending data to server
client.send(dir)
#Waiting for feedback
while client.recv(1024).decode() != 'done':
pass
client.close()
#Main Function
def main():
#Initializing Variables
awake = False
intent_extraction_is_finalized = False
#Loading Picovoice Models
rhino_wakeword = Porcupine(library_path = "/home/garima/Gaurav/Blog_2/Integrated/res/libpv_porcupine.so",
model_file_path = "/home/garima/Gaurav/Blog_2/Integrated/res/porcupine_params.pv",
keyword_file_paths = ["/home/garima/Gaurav/Blog_2/Integrated/res/hey_groot.ppn"],
sensitivities = [0.5])
rhino_commands = Rhino(library_path = "/home/garima/Gaurav/Blog_2/Integrated/res/libpv_rhino.so",
model_path = "/home/garima/Gaurav/Blog_2/Integrated/res/rhino_params.pv",
context_path = "/home/garima/Gaurav/Blog_2/Integrated/res/robo.rhn")
# setup audio
pa = pyaudio.PyAudio()
audio_stream = pa.open(rate = rhino_commands.sample_rate,
channels = 1,
format = pyaudio.paInt16,
input = True, frames_per_buffer=rhino_commands.frame_length)
#Loading label, weight and configuration model paths for YOLO
labelPath = os.path.sep.join(["yolo-coco","coco.names"])
weightPath = os.path.sep.join(["yolo-coco", "yolov3.weights"])
configPath = os.path.sep.join(["yolo-coco", "yolov3.cfg"])
#Loading Labels
LABELS = open(labelPath).read().strip().split("\n")
#Loading YOLO
net = cv2.dnn.readNetFromDarknet(configPath, weightPath)
#Determining YOLO output layer
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
#Setting up Rpi GPIO pins numbering
#GPIO.setmode(GPIO.BOARD)
#Declaring Pin modes
#GPIO.setup(3, GPIO.OUT)
#GPIO.setup(5, GPIO.OUT)
#GPIO.setup(11, GPIO.OUT)
#GPIO.setup(13, GPIO.OUT)
#Making Commonly used Audio Files
#Text to Speech
wake = gTTS(text = "At your service friend!", lang = "en-in", slow = False)
error = gTTS(text = "I'm tired! I will take a nap.", lang = "en-in", slow = False)
not_found = gTTS(text = "Object not found!", lang = "en-in", slow = False)
not_understood = gTTS(text = "I understand your order friend", lang = "en-in", slow = False)
#Saving Audio File
wake.save("wake.mp3")
error.save("error.mp3")
not_found.save("not_found.mp3")
not_understood.save("unclear.mp3")
#Sockets Initializing
network = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# Initialising the port
port = 12345
network.bind(('',port))
hostname = socket.gethostname()
IPaddr = socket.gethostbyname(hostname)
network.listen(5)
# Getting client host name and IP address details
client, addr = network.accept()
print("Start")
# detect commands in continuous loop
while True:
#Reading Input
pcm = audio_stream.read(rhino_commands.frame_length)
pcm = struct.unpack_from("h" * rhino_commands.frame_length, pcm)
try:
#If wake word is not spoken
if not awake:
#Processing the voice input
result = rhino_wakeword.process(pcm)
#If wake word is the input, result is true
if result:
#Wake Word detected
awake = True
time.sleep(0.1)
print("awake")
#playsound.playsound('wake.mp3')
#os.system('mpg321 wake.mp3')
#time.sleep(5)
print("Speak More")
elif not intent_extraction_is_finalized:
#Getting Intent Extraction
intent_extraction_is_finalized = rhino_commands.process(pcm)
else:
#If the command is detected
if rhino_commands.is_understood():
#Getting Intent and Slots
intent, slots = rhino_commands.get_intent()
print(intent)
playsound.playsound('wake.mp3')
#os.system('mpg321 wake.mp3')
#Checking Intent and doing Neccessary Action
#If going to an object is an intent
if intent == "goTo":
#Shift the control to goTo function
goTo(slots, net, LABELS, ln, client)
#If speaking is the intent
elif intent == "speak":
#Shift the control to speak function
speak(slots)
#If coming back in the intent
#elif intent == "comeBack":
#Shift the control to comeBack function
#comeBack(slots)
#If Stop is the intent
elif intent == "stop":
#Shift the control to stop function
stop()
#No match
else:
#Command not found
time.sleep(0.1)
print("1")
playsound.playsound('unclear.mp3')
#Command not understood
else:
#print("Command not understood")
time.sleep(0.1)
playsound.playsound('unclear.mp3')
#Resetting Rhino to detect new command
rhino_commands.reset()
awake = False
intent_extraction_is_finalized = False
except Exception as e:
print(e)
time.sleep(0.1)
playsound.playsound('error.mp3')
exit()
#os.system('python3 try_1.py')
#Calling main function
if __name__ == "__main__":
main()
|
python
|
from .publish_measurement_handler import PublishMeasurementTransactionHandler
from .issue_ggo_transaction_handler import IssueGGOTransactionHandler
from .transfer_ggo_handler import TransferGGOTransactionHandler
from .split_ggo_handler import SplitGGOTransactionHandler
from .retire_ggo_handler import RetireGGOTransactionHandler
from .settlement_handler import SettlementHandler
|
python
|
__author__ = 'guorongxu'
import sys
import re
import math
import logging
def parse_correlation(correlation_file):
correlation_list = {}
with open(correlation_file) as fp:
lines = fp.readlines()
for line in lines:
fields = re.split(r'\t+', line)
correlation_list.update({fields[0] + "_" + fields[1]:fields})
return correlation_list
def parse_cluster(cluster_file, correlation_list):
filewriter = open(cluster_file + ".rep", "a")
with open(cluster_file) as fp:
lines = fp.readlines()
for line in lines:
fields = re.split(r'\t+', line)
if fields[0] == "cor":
filewriter.write(line)
if (fields[3] + "_" + fields[4]) in correlation_list:
edge = correlation_list.get(fields[3] + "_" + fields[4])
filewriter.write(edge[2] + "\t" + fields[1] + "\t" + edge[3] + "\t" + fields[3] + "\t" + fields[4])
if (fields[4] + "_" + fields[3]) in correlation_list:
edge = correlation_list.get(fields[4] + "_" + fields[3])
filewriter.write(edge[2] + "\t" + fields[1] + "\t" + edge[3] + "\t" + fields[3] + "\t" + fields[4])
filewriter.close()
## Main entry
if __name__ == "__main__":
correlation_file = sys.argv[1]
cluster_file = sys.argv[2]
print(correlation_file)
logging.info("correlation file: " + correlation_file)
logging.info("cluster file: " + cluster_file)
correlation_list = parse_correlation(correlation_file)
parse_cluster(cluster_file, correlation_list)
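# Example invocation (script and file names illustrative):
#   python this_script.py correlations.tsv clusters.tsv
# argv[1] is the tab-separated correlation file, argv[2] the cluster file to annotate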
|
python
|
"""Create grid-based spatial indexes.
Basic Usage
===========
Calculate the grid index or indices for a geometry provided in
well-known binary format at a given resolution:
Example:
>>> from shapely.geometry import Point
>>> pnt = Point(555000, 185000)
>>> bng_pnt = calculate_bng_index(
wkb = pnt.wkb,
resolution = 100,
)
Changing Resolution
===================
Indices can be calculated for cell sizes of 1m, 10m, 100m, 1000m, 10000m and 100000m:
Example:
>>> from shapely.geometry import LineString
>>> line = LineString([(450750, 175000), (535000, 195250)])
>>> bng_line = calculate_bng_index(
wkb = line.wkb,
resolution = 1000,
)
Index Creation Options
======================
The ``how`` argument can be used to change the kind of indices created.
Points and Multi-Points
-----------------------
The default and only option for ``how`` is 'intersects'. This returns the
British National Grid index that the point falls within. If the point lies
on an edge or corner of the grid cell then 2 or 4 grid cells indices are
returned as appropriate.
LineStrings and MultiLineStrings
--------------------------------
The default options for ``how`` is 'intersects'. This returns all indices for
the British National Grid cells that the line geometry intersects. An alternative
option is 'bounding box', which returns all indices that intersect with the
bounding box of the line geometry:
Example:
>>> bng_line = calculate_bng_index(
wkb = line.wkb,
resolution = 100,
how = 'bounding box'
)
Although bounding boxes are fast to compute, in most cases 'intersects' will be
preferable as bounding box indexing, particularly at higher resolutions, will lead
to considerable redundancy.
Polygons and MultiPolygons
--------------------------
The default option for ``how`` is 'intersects', but alternative options of
'bounding box' and 'contains' are also available. The 'bounding box' returns
the British National Grid indices which intersect the Polygon bounding box.
The 'contains' option returns one or more tuples containing the indices that
intersect the Polygon and a boolean, where ``true`` indicates that the grid
cell is contained within the Polygon and ``false`` that the grid cell intersects
the Polygon, but doesn't lie within it (e.g. the cell crosses the Polygon boundary).
Example:
>>> from shapely.geometry import Polygon
>>> poly = Polygon([(535000, 175000),
(555250, 185000),
(556000, 162500),
(527500, 160333),
(535000, 175000)])
>>> bng_poly = calculate_bng_index(
wkb = poly.wkb,
resolution = 100,
how = 'contains'
)
Intended Usage
==============
The top-level ``calculate_bng_index()`` function is intended to be applied
over a column of geometries. The approach will support mixes of geometry types
in a single column. Although it is primarily intended for use in Spark, we
first present an example using ``geopandas`` which may be more familiar:
Example:
>>> import geopandas
>>> gdf = geopandas.read_file('some file of interest')
>>> bng = gdf.apply(lambda row: calculate_bng_index(row.geometry.wkb, 100),
axis = 1)
When using the function in spark, the same approach applies, however you first
need to create a user-defined function (udf).
>>> from pyspark.sql.functions import udf
>>> from pyspark.sql.types import StringType, ArrayType
>>> from typing import Sequence
>>> @udf(returnType=ArrayType(StringType()))
>>> def apply_index(wkb: bytearray) -> Sequence[str]:
return calculate_bng_index(wkb, resolution=100, how='intersects')
This user defined function can then be applied to a spark dataframe, assuming it stores
the geometry in well-known binary format:
Example:
>>> sdf = spark.read.parquet('some parquet file of interest')
>>> sdf = sdf.withColumn('bng', apply_index('geometry'))
The intent of the indexing is that it can then be used to benefit geospatial
filtering and joining operations.
Get British National Grid Cell Geometries
=========================================
A top-level helper function is provided for simple translation of British National
Grid references into well-known text that can be plotted. The resolution is inferred
from each reference:
Example:
>>> import geopandas
>>> from shapely.wkt import loads
>>> box = wkt_from_bng("TQ3415")
>>> gdf = geopandas.GeoDataFrame(geometry = [box])
>>> gdf.plot()
The ``wkt_from_bng()`` function is also designed to be applied to collections of
references:
Example:
>>> import geopandas
>>> from shapely.wkt import loads
>>> boxes = list(map(wkt_from_bng, ["TQ3415", "SP4087", "SS9015"]))
>>> gdf = geopandas.GeoDataFrame(geometry = boxes)
>>> gdf.plot()
"""
from bng_indexer._indexing import calculate_bng_index, wkt_from_bng
|
python
|
from enum import IntEnum
class Finger(IntEnum):
Thumb = 0
Index = 1
Middle = 2
Ring = 3
Little = 4
@staticmethod
def get_array_of_points(finger):
finger_array = None
if finger == Finger.Thumb:
finger_array = [(0, 4), (4, 3), (3, 2), (2, 1)]
elif finger == Finger.Index:
finger_array = [(0, 8), (8, 7), (7, 6), (6, 5)]
elif finger == Finger.Middle:
finger_array = [(0, 12), (12, 11), (11, 10), (10, 9)]
elif finger == Finger.Ring:
finger_array = [(0, 16), (16, 15), (15, 14), (14, 13)]
else:
finger_array = [(0, 20), (20, 19), (19, 18), (18, 17)]
return finger_array
@staticmethod
def get_finger_name(finger):
finger_name = ''
if finger == Finger.Thumb:
finger_name = 'Thumb'
elif finger == Finger.Index:
finger_name = 'Index'
elif finger == Finger.Middle:
finger_name = 'Middle'
elif finger == Finger.Ring:
finger_name = 'Ring'
elif finger == Finger.Little:
finger_name = 'Little'
return finger_name
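# Example usage (illustrative; the pairs are landmark-index edges as defined
# in get_array_of_points above):
#   for start, end in Finger.get_array_of_points(Finger.Index):
#       pass  # e.g. draw a segment between landmark `start` and landmark `end`
#   print(Finger.get_finger_name(Finger.Index))  # -> 'Index'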
|
python
|
import torch
import os
import sys
import re
import logging
from os.path import isfile
import copy
import threading
import time
import enum
from torch.multiprocessing import Pool, Process, set_start_method, Manager, Value, Lock
try:
set_start_method('spawn')
except RuntimeError:
pass
class CFMode(enum.Enum):
MANUAL = 0
AUTO = 1
"""
Auto aggressive checkpoint manager for PyTorch
Usage : In the training script:
Initialize with the model and optimizer in local_rank 0
chk = CFCheckpoint(model=model, optim=optimizer, dl=dl)
chk_manager = CFManager(chk_dir='./chk/', chk=chk, mode=CFMode.AUTO)
To initiate a checkpoint at the given frequency (done internally if AUTO,
if MANUAL, user code must trigger):
Snapshot the state in-memory
--------------------------
chk_manager.snapshot()
Persist the in-memory snapshot in background
--------------------------
chk_manager.persist()
On recovery, to resume from a checkpoint:
Restores the latest checkpoint in the dir being managed
----------------------------------------
chk = CFCheckpoint(model=model, optim=optimizer, dl=dl)
chk_manager = CFManager(chk_dir='./chk/', chk=chk, mode=CFMode.AUTO)
chk_manager.restore()
"""
class CFManager:
"""
`chk_dir` : Directory where the checkpoints are managed. Can be
local storage path, or any remote storage that exposes POSIX.
All checkpoint versions are maintained in this directory, and
on start-up, the latest checkpoint version in this dir
is restored.
`chk` : An instance of the CFCheckpoint class that tracks
one or more tractable object for snapshotting.
`overwrite` : If true, maintains only the latest
version of the checkpoint at any instant, with the
exception of checkpoints made at epoch boundaries
(controlled by `keep_epoch_chk`). Storage space required
is low.
If false, keeps all versions of checkpoints written so far,
managed by the checkpointing frequency. Uses a lot of
storage space -TODO: control this by tuning `keep_latest_n`
At any point, there can be a max of two active checkpoints
if `overwrite` is True - one completed, and one ongoing.
`keep_epoch_chk` : If true, keeps a version of the checkpoint
taken at epoch boundaries that can be used later to restore
the model to the best val accuracy for instance. This
is the default behaviour. Although beware this assumes you have
enough storage space to maintain `n` versions of the checkpoint,
where `n` is the number of epochs you train for.
If false, checkpoints are overwritten - at any instant, only the
latest checkpoint is maintained.
Default is to overwrite iter chk and keep epoch chk.
`chk_prefix` : Prefix for the checkpoint file
"""
def __init__(
self,
chk_dir,
chk,
keep_epoch_chk = True,
overwrite = True,
mode = CFMode.AUTO,
chk_prefix = 'model_v_'):
self.logger = logging.getLogger(__name__)
self.chk_dir = chk_dir
self.chk = chk
self.keep_epoch_chk = keep_epoch_chk
self.overwrite = overwrite
self.chk_prefix = chk_prefix
self.mode = mode
self.chk_epoch_subdir = 'epoch'
self.mp_manager = Manager()
self.snapshot_copy = None
self.cpu_side = False
# Active snapshot, if true, don't snapshot again
self.active_snapshot = Value('i', 0)
self.lock = Lock()
self.in_progress_snapshot = Value('i', 0)
# Handle to the process performing checkpoint
# Can be only one at any instant. A new checkpoint
# cannot start unless the previous one completes
self.chk_process = None
# `overwrite` supersedes if False
if self.overwrite is False and self.keep_epoch_chk is False:
self.keep_epoch_chk = True
# Global ID of checkpoints being written
# Used to format the checkpoint path
# Instantiate from chk when restoring
self.chk_global_id = -1
# Sorted List of available checkpoints (fnames)
self.available_chk_iters = self.mp_manager.list()
self.available_chk_epochs = self.mp_manager.list()
self.initialize_chk_dir()
self.logger.info("Available checkpoints : ")
for item in self.available_chk_iters:
self.logger.info(item)
"""
The API to be used by the training code to initiate
checkpoint.
`additional_snapshot` : The iter, epoch, arch, and DL state must
be passed as a map if required.
File is saved with the `global_id` suffix
Only checkpoints at epoch boundaries are suffixed with epoch# instead of ID
`is_epoch` : True if this is epoch boundary
"""
def save(
self, \
additional_snapshot=None, \
is_epoch=False, \
epoch=0, \
synchronous=False, \
profile_full=False,
profile_snap=False,
use_thread=False,
persist=False):
s = time.time()
self.logger.info("[{}] ENTER SAVE FN".format(time.time()))
self.chk_global_id += 1
chk_fname = self.chk_prefix + str(self.chk_global_id)
filepath = self._get_full_path(chk_fname)
if is_epoch:
chk_fname_link = self.chk_prefix + str(self.chk_global_id) + '_' +str(epoch)
filepath_link = self._get_full_path(chk_fname, epoch=True)
self.logger.info("Writing chk {} at {}".format(self.chk_global_id, filepath))
if synchronous:
chk_fname_sync = chk_fname + '_sync'
filepath_sync = self._get_full_path(chk_fname_sync)
if not is_epoch:
self.chk._serialize_and_persist_direct(
self.chk.latest_snapshot,
filepath_sync,
additional_state=additional_snapshot,
persist=persist,
iter_chk = self.available_chk_iters,
overwrite = self.overwrite)
else:
chk_fname_link_sync = chk_fname_link + '_sync'
filepath_link_sync = self._get_full_path(chk_fname_link_sync, epoch=True)
self.chk._serialize_and_persist_direct(
self.chk.latest_snapshot,
filepath_sync,
additional_state=additional_snapshot,
persist=persist,
iter_chk = self.available_chk_iters,
overwrite = self.overwrite,
linkpath=filepath_link_sync,
epoch_chk = self.available_chk_epochs)
return
# Check if there's an ongoing checkpoint operation
if self.chk_process is not None:
# There is an checkpoint underway. Wait
if self.chk_process.is_alive():
self.chk_process.join()
# Once complete, initiate the next checkpoint synchronously
self.logger.info("[{}] START SNAPSHOT".format(time.time()))
success = self.chk._snapshot(self.active_snapshot.value, additional_state=additional_snapshot)
if success:
with self.lock:
self.active_snapshot.value = 1
dur_snap = time.time() -s
if profile_snap:
return dur_snap, 0
if use_thread:
fn = getattr(threading, 'Thread')
else:
fn = globals()["Process"]
print("Function is {}".format(fn))
# Start persist asynchronously
if not is_epoch:
keywords = { \
'iter_chk':self.available_chk_iters, \
'overwrite':self.overwrite}
self.chk_process = \
fn(target=self.chk._serialize_and_persist, \
args=[filepath, self.chk.latest_snapshot, self.active_snapshot, self.lock], kwargs=keywords)
else:
keywords = { \
'iter_chk':self.available_chk_iters, \
'overwrite':self.overwrite, \
'epoch_chk':self.available_chk_epochs,\
'linkpath': filepath_link}
self.chk_process = \
fn(target=self.chk._serialize_and_persist,\
args=[filepath, self.chk.latest_snapshot, self.active_snapshot, self.lock], kwargs=keywords)
self.logger.info("[{}] CALL PROCESS NOW".format(time.time()))
self.chk_process.start()
self.logger.info("[{}] RETURN FROM START".format(time.time()))
if profile_full:
    self.chk_process.join()
dur = time.time() - s
#time.sleep(1)
return dur_snap, dur
def save_cpu(
self, \
additional_snapshot=None, \
is_epoch=False, \
epoch=0, \
synchronous=False, \
persist=False,
profile_snap=False,
profile_full=False,
use_thread=True):
self.logger.info("[{}] ENTER SAVE FN".format(time.time()))
s = time.time()
self.chk_global_id += 1
chk_fname = self.chk_prefix + str(self.chk_global_id)
filepath = self._get_full_path(chk_fname)
if is_epoch:
chk_fname_link = self.chk_prefix + str(self.chk_global_id) + '_' +str(epoch)
filepath_link = self._get_full_path(chk_fname, epoch=True)
self.logger.info("Writing chk {} at {}".format(self.chk_global_id, filepath))
# Check if there's an ongoing checkpoint operation
if self.chk_process is not None:
# There is an checkpoint underway. Wait
if self.chk_process.is_alive():
self.chk_process.join()
self.logger.info("Starting next snapshot {:.2f}s".format(time.time()-s))
# Once complete, initiate the next checkpoint synchronously
self.logger.info("[{}] SAVE FN SNAP NOW".format(time.time()))
snap_ptr = {}
for name, ref in self.chk.tracking_map.items():
snap_ptr[name] = ref.state_dict()
# check current snapshot status
if self.active_snapshot.value == 1:
self.logger.info("ERROR! Active snapshot")
return
with self.lock:
self.in_progress_snapshot.value = 1
self.logger.info("[{}] START SAVE CALL".format(time.time()))
if synchronous:
self.chk._snapshot_and_persist_async(filepath, self.active_snapshot, self.in_progress_snapshot, self.lock, snap_ptr, iter_chk=self.available_chk_iters, overwrite=self.overwrite)
self.logger.info("Returned from save in {:.2f}s".format(time.time()-s))
self.logger.info("[{}] END SAVE".format(time.time()))
return
if use_thread:
fn = getattr(threading, 'Thread')
else:
fn = globals()["Process"]
print("Function is {}".format(fn))
if not is_epoch:
keywords = { \
'iter_chk':self.available_chk_iters, \
'overwrite':self.overwrite, \
'profile': profile_snap }
self.chk_process = \
fn(target=self.chk._snapshot_and_persist_async, \
args=[filepath, self.active_snapshot, self.in_progress_snapshot, self.lock, snap_ptr], kwargs=keywords)
else:
keywords = { \
'iter_chk':self.available_chk_iters, \
'overwrite':self.overwrite, \
'epoch_chk':self.available_chk_epochs,\
'linkpath': filepath_link, \
'profile': profile_snap }
self.chk_process = \
fn(target=self.chk._snapshot_and_persist_async,\
args=[filepath, self.active_snapshot, self.in_progress_snapshot, self.lock, snap_ptr], kwargs=keywords)
self.chk_process.start()
if profile_snap or profile_full:
self.chk_process.join()
dur = time.time() -s
#time.sleep(1)
self.logger.info("Returned from save in {:.2f}s".format(time.time()-s))
self.logger.info("[{}] END SAVE".format(time.time()))
return 0, dur
"""
Restores the latest checkpoint among all available, or the latest
epoch boundary checkpoint corresponding to `epoch`
Returns : Map of state of items that were not resumed yet
This should be same as the map passed in to the save()
call by the DL/script.
These restorations are assumed to happen in the script/DL
If nothing remains to be restore, returns None
"""
def restore(self, latest=True, epoch=0, gpu=0):
fname = self.get_latest_checkpoint(latest=latest, epoch=epoch)
if fname is None:
return None
filepath = self._get_full_path(fname, epoch=not latest)
self.logger.info("Latest checkpoint is {}".format(filepath))
extra_state = self.chk._restore(filepath=filepath, gpu=gpu)
return extra_state
def initialize_chk_dir(self):
if os.path.exists(self.chk_dir):
# Get list of all files
chk_files = [os.path.splitext(f)[0] for f in os.listdir(self.chk_dir) if isfile(os.path.join(self.chk_dir, f))]
self.logger.info(chk_files)
chk_files.sort(key=natural_keys)
for files in chk_files:
self.available_chk_iters.append(files)
del chk_files
epoch_chk_dir = os.path.join(self.chk_dir, self.chk_epoch_subdir)
if os.path.exists(epoch_chk_dir):
epoch_chk_files = [os.path.splitext(f)[0] for f in os.listdir(epoch_chk_dir) if isfile(os.path.join(epoch_chk_dir, f))]
epoch_chk_files.sort(key=natural_keys)
for files in epoch_chk_files:
self.available_chk_epochs.append(files)
del epoch_chk_files
else:
os.makedirs(epoch_chk_dir)
else:
os.makedirs(self.chk_dir)
def get_latest_checkpoint(self, latest=True, epoch=0):
"""
Returns the full path of the latest checkpoint
`latest` : If true, return most recent checkpoint
If false, return chk corresponding to `epoch`
if available
"""
fname = None
if latest and len(self.available_chk_iters) > 0:
fname = self.available_chk_iters[-1]
elif len(self.available_chk_epochs) > 0:
fname = self.available_chk_epochs[-1]
return fname
def _get_full_path(self, fname, epoch=False):
if not epoch:
return os.path.join(self.chk_dir, fname + '.chk')
else:
return os.path.join(self.chk_dir, self.chk_epoch_subdir, fname + '.chk')
def weight_update(self):
if 'optimizer' in self.chk.tracking_map:
optimizer = self.chk.tracking_map['optimizer']
s = time.time()
while self.in_progress_snapshot.value == 1:
continue
# self.logger.info("Progresssss")
optimizer.step()
#torch.cuda.synchronize()
dur = time.time() - s
#self.logger.info("Stall to weight update = {}s".format(dur))
else:
self.logger.info("NO Optimizer found")
# Returns size of tensors of all tractable items in MB
@property
def get_chk_size(self):
snap_ptr = {}
size = 0
for name, ref in self.chk.tracking_map.items():
snap_ptr[name] = ref.state_dict()
size += _get_all_size(snap_ptr[name])
return size/1024/1024
def _get_all_size(ele, sz = 0):
if torch.is_tensor(ele):
sz += ele.nelement()*ele.element_size()
elif isinstance(ele, dict):
for k,v in ele.items():
sz = _get_all_size(v, sz)
elif isinstance(ele, list):
for v in ele:
sz = _get_all_size(v, sz)
else:
sz += sys.getsizeof(ele)
return sz
def _save(filepath, snap):
torch.save(snap,filepath)
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [ atoi(c) for c in re.split(r'(\d+)', text) ]
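# Example (natural sort keeps checkpoint names in numeric order):
#   sorted(['model_v_10.chk', 'model_v_2.chk'], key=natural_keys)
#   -> ['model_v_2.chk', 'model_v_10.chk']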
|
python
|
""" Expose PV data """
#
# import logging
# from datetime import datetime, timedelta
# from typing import List
#
# from fastapi import APIRouter, Depends
# from nowcasting_datamodel.models import PVYield
# from nowcasting_datamodel.read.read_pv import get_latest_pv_yield, get_pv_systems
# from sqlalchemy.orm.session import Session
#
# from database import get_session_pv
#
# logger = logging.getLogger(__name__)
#
#
# router = APIRouter()
#
# @router.get("/pv_latest", response_model=List[PVYield])
# def get_latest_pv_data(session: Session = Depends(get_session_pv)) -> List[PVYield]:
# """Get Latest PV data from specific pv sites
#
# Only provide PV data received within the last 1 hour
# """
#
# # get latest pv data
# pv_systems_sql = get_pv_systems(session=session)
# pv_yields_sql = get_latest_pv_yield(session=session, pv_systems=pv_systems_sql)
#
# # remove any data older than 1 hour
# now_minus_1_hours = datetime.utcnow() - timedelta(hours=1)
# pv_yields_sql = [
# pv_yield_sql
# for pv_yield_sql in pv_yields_sql
# if pv_yield_sql.datetime_utc >= now_minus_1_hours
# ]
#
# # convert to pydantic
# pv_yields = [PVYield.from_orm(pv_yield_sql) for pv_yield_sql in pv_yields_sql]
#
# return pv_yields
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class HostManagementConfig(AppConfig):
name = 'host_management'
|
python
|
import numpy as np
from scipy.io import readsav
class fts:
ll = None
ii = None
cc = None
nu = None
datafile = './fts_disk_center.idlsave'
def __init__(self):
# watt / (cm2 ster AA) as emitted at solar surface
t = readsav(self.datafile)
# convert to J s-1 m-2 m-1 sr-1
clight = 2.99792458e8  # speed of light [m/s]
aa_to_m = 1e-10
cm_to_m = 1e-2
self.ll = t['ftswav'] * aa_to_m
self.nu = clight / self.ll
self.ii = t['ftsint'] * cm_to_m**(-2) * aa_to_m**(-1)  # from W / (cm2 ster AA) to W / (m2 ster m)
self.cc = t['ftscnt'] * cm_to_m**(-2) * aa_to_m**(-1)
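# Unit-conversion check for the scaling above (a brief sketch, not part of the
# original code): 1 W / (cm2 ster AA) = 1 W / (1e-4 m2 * ster * 1e-10 m)
# = 1e14 W / (m2 ster m), which matches
# cm_to_m**(-2) * aa_to_m**(-1) = (1e-2)**-2 * (1e-10)**-1 = 1e14.
#
# Example usage (assumes the IDL save file referenced by `datafile` exists):
#   atlas = fts()
#   print(atlas.ll[0], atlas.nu[0], atlas.ii[0])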
|
python
|
# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module holding PLRsearch class."""
import logging
import math
import multiprocessing
import time
import dill
# TODO: Inform pylint about scipy (of correct version) being available.
from scipy.special import erfcx, erfc
# TODO: Teach FD.io CSIT to use multiple dirs in PYTHONPATH,
# then switch to absolute imports within PLRsearch package.
# Current usage of relative imports is just a short term workaround.
import Integrator # pylint: disable=relative-import
from log_plus import log_plus, log_minus # pylint: disable=relative-import
import stat_trackers # pylint: disable=relative-import
class PLRsearch(object):
"""A class to encapsulate data relevant for the search method.
The context is performance testing of packet processing systems.
The system, when being offered a steady stream of packets,
can process some of them successfully, other are considered "lost".
See docstring of the search method for algorithm description.
Two constants are stored as class fields for speed.
Methods other than search (and __init__)
are just internal code structure.
TODO: Those method names should start with underscore then.
"""
xerfcx_limit = math.pow(math.acos(0), -0.5)
log_xerfcx_10 = math.log(xerfcx_limit - math.exp(10) * erfcx(math.exp(10)))
def __init__(
self, measurer, trial_duration_per_trial, packet_loss_ratio_target,
trial_number_offset=0, timeout=1800.0, trace_enabled=False):
"""Store rate measurer and additional parameters.
TODO: Copy AbstractMeasurer from MLRsearch.
:param measurer: The measurer to call when searching.
:param trial_duration_per_trial: Each trial has larger duration
than the previous trial. This is the increment, in seconds.
:param packet_loss_ratio_target: The algorithm tries to estimate
the offered load leading to this ratio on average.
Trial ratio is number of packets lost divided by packets offered.
:param trial_number_offset: The "first" trial number will be 1+this.
Use this to ensure first iterations have enough time to compute
reasonable estimates for later trials to use.
:param timeout: The search ends if it lasts more than this many seconds.
:type measurer: MLRsearch.AbstractMeasurer
:type trial_duration_per_trial: float
:type packet_loss_ratio_target: float
:type trial_number_offset: int
:type timeout: float
"""
self.measurer = measurer
self.trial_duration_per_trial = float(trial_duration_per_trial)
self.packet_loss_ratio_target = float(packet_loss_ratio_target)
self.trial_number_offset = int(trial_number_offset)
self.timeout = float(timeout)
self.trace_enabled = bool(trace_enabled)
def search(self, min_rate, max_rate):
"""Perform the search, return average and stdev for throughput estimate.
Considering measurer and packet_loss_ratio_target (see __init__),
find such an offered load (called critical load) that is expected
to hit the target loss ratio in the limit of very long trial duration.
As the system is probabilistic (and test duration is finite),
the critical load is only estimated.
Return the average and standard deviation of the estimate.
In principle, this algorithm performs trial measurements,
each with varied offered load (which is constant during the trial).
During each measurement, Bayesian inference is performed
on all the measurement results so far.
When timeout is up, the last estimate is returned,
else another trial is performed.
It is assumed that the system under test, even though not deterministic,
still follows the law of large numbers. In other words,
any growing set of measurements at a particular offered load
will converge towards unique (for the given load) packet loss ratio.
This means there is a deterministic (but unknown) function
mapping the offered load to average loss ratio.
This function is called loss ratio function.
This also assumes the average loss ratio
does not depend on trial duration.
The actual probability distribution of loss counts, achieving
the average ratio on trials of various duration
can be complicated (and can depend on offered load), but simply assuming
Poisson distribution will make the algorithm converge.
Binomial distribution would be more precise,
but Poisson is more practical, as it effectively gives
less information content to high ratio results.
Even when applying other assumptions on the loss ratio function
(increasing function, limit zero ratio when load goes to zero,
global upper limit on rate of packets processed), there are still
too many different shapes of possible loss functions,
which makes full Bayesian reasoning intractable.
This implementation radically simplifies things by examining
only two shapes, each with finitely many (in this case just two)
parameters. In other words, two fitting functions
(each with two parameters and one argument).
When restricting model space to one of the two fitting functions,
the Bayesian inference becomes tractable (even though it needs
numerical integration from Integrator class).
The first measurement is done at the middle between
min_rate and max_rate, to help with convergence
if max_rate measurements give loss below target.
TODO: Fix overflow error and use min_rate instead of the middle.
The second measurement is done at max_rate, next few measurements
have offered load of previous load minus excess loss rate.
This simple rule is found to be good when offered loads
so far are way above the critical rate. After few measurements,
inference from fitting functions converges faster than this initial
"optimistic" procedure.
Offered loads close to (limiting) critical rate are the most useful,
as linear approximation of the fitting function
becomes good enough there (thus reducing the impact
of the overall shape of fitting function).
After several trials, usually one of the fitting functions
has better predictions than the other one, but the algorithm
does not track that. Simply, it uses the estimate average,
alternating between the functions.
Multiple workarounds are applied to try and avoid measurements
both in zero loss region and in big loss region,
as their results tend to make the critical load estimate worse.
The returned average and stdev is a combination of the two fitting
estimates.
:param min_rate: Avoid measuring at offered loads below this,
in packets per second.
:param max_rate: Avoid measuring at offered loads above this,
in packets per second.
:type min_rate: float
:type max_rate: float
:returns: Average and stdev of critical load estimate.
:rtype: 2-tuple of floats
"""
stop_time = time.time() + self.timeout
min_rate = float(min_rate)
max_rate = float(max_rate)
logging.info("Started search with min_rate %(min)r, max_rate %(max)r",
{"min": min_rate, "max": max_rate})
trial_result_list = list()
trial_number = self.trial_number_offset
focus_trackers = (None, None)
transmit_rate = (min_rate + max_rate) / 2.0
lossy_loads = [max_rate]
zeros = [0, 0] # Consecutive zero loss, separately for stretch and erf.
while 1:
trial_number += 1
logging.info("Trial %(number)r", {"number": trial_number})
results = self.measure_and_compute(
self.trial_duration_per_trial * trial_number, transmit_rate,
trial_result_list, min_rate, max_rate, focus_trackers)
measurement, average, stdev, avg1, avg2, focus_trackers = results
index = trial_number % 2
zeros[index] += 1
# TODO: Ratio of fill rate to drain rate seems to have
# exponential impact. Make it configurable, or is 4:3 good enough?
if measurement.loss_fraction >= self.packet_loss_ratio_target:
for _ in range(4 * zeros[index]):
lossy_loads.append(measurement.target_tr)
if measurement.loss_count > 0:
zeros[index] = 0
lossy_loads.sort()
if stop_time <= time.time():
return average, stdev
trial_result_list.append(measurement)
if (trial_number - self.trial_number_offset) <= 1:
next_load = max_rate
elif (trial_number - self.trial_number_offset) <= 3:
next_load = (measurement.receive_rate / (
1.0 - self.packet_loss_ratio_target))
else:
index = (trial_number + 1) % 2
next_load = (avg1, avg2)[index]
if zeros[index] > 0:
if lossy_loads[0] > next_load:
diminisher = math.pow(2.0, 1 - zeros[index])
next_load = lossy_loads[0] + diminisher * next_load
next_load /= (1.0 + diminisher)
# On zero measurement, we need to drain obsoleted low losses
# even if we did not use them to increase next_load,
# in order to get to usable losses with higher load.
if len(lossy_loads) > 3:
lossy_loads = lossy_loads[3:]
logging.debug("Zeros %(z)r orig %(o)r next %(n)r loads %(s)r",
{"z": zeros, "o": (avg1, avg2)[index],
"n": next_load, "s": lossy_loads})
transmit_rate = min(max_rate, max(min_rate, next_load))
@staticmethod
def lfit_stretch(trace, load, mrr, spread):
"""Stretch-based fitting function.
Return the logarithm of average packet loss per second
when the load (argument) is offered to a system with given
mrr and spread (parameters).
Stretch function is 1/(1+Exp[-x]). The average itself is definite
integral from zero to load, of shifted and x-scaled stretch function.
As the integrator is sensitive to discontinuities,
and it calls this function at large areas of parameter space,
the implementation has to avoid rounding errors, overflows,
and correctly approximate underflows.
TODO: Explain how the high-level description
has been converted into an implementation full of ifs.
:param trace: A multiprocessing-friendly logging function (closure).
:param load: Offered load (positive), in packets per second.
:param mrr: Parameter of this fitting function, equal to limiting
(positive) average number of packets received (as opposed to lost)
when offered load is many spreads more than mrr.
:param spread: The x-scaling parameter (positive). No nice semantics,
roughly corresponds to size of "tail" for loads below mrr.
:type trace: function (str, object) -> NoneType
:type load: float
:type mrr: float
:type spread: float
:returns: Logarithm of average number of packets lost per second.
:rtype: float
"""
# TODO: What is the fastest way to use such values?
log_2 = math.log(2)
log_3 = math.log(3)
log_spread = math.log(spread)
# TODO: chi is from https://en.wikipedia.org/wiki/Nondimensionalization
chi = (load - mrr) / spread
chi0 = -mrr / spread
trace("stretch: load", load)
trace("mrr", mrr)
trace("spread", spread)
trace("chi", chi)
trace("chi0", chi0)
if chi > 0:
log_lps = math.log(
load - mrr + (log_plus(0, -chi) - log_plus(0, chi0)) * spread)
trace("big loss direct log_lps", log_lps)
else:
two_positive = log_plus(chi, 2 * chi0 - log_2)
two_negative = log_plus(chi0, 2 * chi - log_2)
if two_positive <= two_negative:
log_lps = log_minus(chi, chi0) + log_spread
trace("small loss crude log_lps", log_lps)
return log_lps
two = log_minus(two_positive, two_negative)
three_positive = log_plus(two_positive, 3 * chi - log_3)
three_negative = log_plus(two_negative, 3 * chi0 - log_3)
three = log_minus(three_positive, three_negative)
if two == three:
log_lps = two + log_spread
trace("small loss approx log_lps", log_lps)
else:
log_lps = math.log(log_plus(0, chi) - log_plus(0, chi0))
log_lps += log_spread
trace("small loss direct log_lps", log_lps)
return log_lps
@staticmethod
def lfit_erf(trace, load, mrr, spread):
"""Erf-based fitting function.
Return the logarithm of average packet loss per second
when the load (argument) is offered to a system with given
mrr and spread (parameters).
The erf function is the primitive function (antiderivative) of the normal distribution density.
The average itself is definite integral from zero to load,
of shifted and x-scaled erf function.
As the integrator is sensitive to discontinuities,
and it calls this function at large areas of parameter space,
the implementation has to avoid rounding errors, overflows,
and correctly approximate underflows.
TODO: Explain how the high-level description
has been converted into an implementation full of ifs.
:param trace: A multiprocessing-friendly logging function (closure).
:param load: Offered load (positive), in packets per second.
:param mrr: Parameter of this fitting function, equal to limiting
(positive) average number of packets received (as opposed to lost)
when offered load is many spreads more than mrr.
:param spread: The x-scaling parameter (positive). No nice semantics,
roughly corresponds to size of "tail" for loads below mrr.
:type trace: function (str, object) -> NoneType
:type load: float
:type mrr: float
:type spread: float
:returns: Logarithm of average number of packets lost per second.
:rtype: float
"""
# Beware, this chi has the sign opposite to the stretch function chi.
# TODO: The stretch sign is just to have less minuses. Worth changing?
chi = (mrr - load) / spread
chi0 = mrr / spread
trace("Erf: load", load)
trace("mrr", mrr)
trace("spread", spread)
trace("chi", chi)
trace("chi0", chi0)
if chi >= -1.0:
trace("positive, b roughly bigger than m", None)
if chi > math.exp(10):
first = PLRsearch.log_xerfcx_10 + 2 * (math.log(chi) - 10)
trace("approximated first", first)
else:
first = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi))
trace("exact first", first)
first -= chi * chi
second = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi0))
second -= chi0 * chi0
intermediate = log_minus(first, second)
trace("first", first)
else:
trace("negative, b roughly smaller than m", None)
exp_first = PLRsearch.xerfcx_limit + chi * erfcx(-chi)
exp_first *= math.exp(-chi * chi)
exp_first -= 2 * chi
# TODO: Why has the following line chi there (as opposed to chi0)?
# In general the functions would be more readable if they explicitly
# return math.log(func(chi) - func(chi0))
# for some function "func", at least for some branches.
second = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi0))
second -= chi0 * chi0
intermediate = math.log(exp_first - math.exp(second))
trace("exp_first", exp_first)
trace("second", second)
trace("intermediate", intermediate)
result = intermediate + math.log(spread) - math.log(erfc(-chi0))
trace("result", result)
return result
@staticmethod
def find_critical_rate(
trace, lfit_func, min_rate, max_rate, loss_ratio_target,
mrr, spread):
"""Given ratio target and parameters, return the achieving offered load.
This is basically an inverse function to lfit_func
when parameters are fixed.
Instead of an efficient implementation
of the inverse function, this implementation uses
brute-force binary search. It bisects the (min_rate, max_rate) interval
until the critical load is found (or the interval becomes degenerate).
This implementation assures min and max rate limits are honored.
TODO: Use some method with faster convergence?
:param trace: A multiprocessing-friendly logging function (closure).
:param lfit_func: Fitting function, typically lfit_stretch or lfit_erf.
:param min_rate: Lower bound for binary search [pps].
:param max_rate: Upper bound for binary search [pps].
:param loss_ratio_target: Fitting function should return loss rate
giving this ratio at the returned load and parameters [1].
:param mrr: The mrr parameter for the fitting function [pps].
:param spread: The spread parameter for the fitting function [pps].
:type trace: function (str, object) -> None
:type lfit_func: Function from 3 floats to float.
:type min_rate: float
:type max_rate: float
:type loss_ratio_target: float
:type mrr: float
:type spread: float
:returns: Load [pps] which achieves the target with given parameters.
:rtype: float
"""
trace("Finding critical rate for loss_ratio_target", loss_ratio_target)
rate_lo = min_rate
rate_hi = max_rate
loss_ratio = -1
while loss_ratio != loss_ratio_target:
rate = (rate_hi + rate_lo) / 2.0
if rate == rate_hi or rate == rate_lo:
break
loss_rate = math.exp(lfit_func(trace, rate, mrr, spread))
loss_ratio = loss_rate / rate
if loss_ratio > loss_ratio_target:
trace("halving down", rate)
rate_hi = rate
elif loss_ratio < loss_ratio_target:
trace("halving up", rate)
rate_lo = rate
trace("found", rate)
return rate
@staticmethod
def log_weight(trace, lfit_func, trial_result_list, mrr, spread):
"""Return log of weight of trial results by the function and parameters.
Integrator assumes uniform distribution, but over different parameters.
Weight and likelihood are used interchangeably here anyway.
Each trial has an offered load, a duration and a loss count.
Fitting function is used to compute the average loss per second.
Poisson distribution (with average loss per trial) is used
to get likelihood of one trial result; the overall likelihood
is a product of all trial likelihoods.
As likelihoods can be extremely small, logarithms are tracked instead.
TODO: Copy ReceiveRateMeasurement from MLRsearch.
:param trace: A multiprocessing-friendly logging function (closure).
:param lfit_func: Fitting function, typically lfit_stretch or lfit_erf.
:param trial_result_list: List of trial measurement results.
:param mrr: The mrr parameter for the fitting function.
:param spread: The spread parameter for the fitting function.
:type trace: function (str, object) -> None
:type lfit_func: Function from 3 floats to float.
:type trial_result_list: list of MLRsearch.ReceiveRateMeasurement
:type mrr: float
:type spread: float
:returns: Logarithm of result weight for given function and parameters.
:rtype: float
"""
log_likelihood = 0.0
trace("log_weight for mrr", mrr)
trace("spread", spread)
for result in trial_result_list:
trace("for tr", result.target_tr)
trace("lc", result.loss_count)
trace("d", result.duration)
log_avg_loss_per_second = lfit_func(
trace, result.target_tr, mrr, spread)
log_avg_loss_per_trial = (
log_avg_loss_per_second + math.log(result.duration))
# Poisson probability computation works nice for logarithms.
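# log Pr(loss_count | avg) = loss_count * log(avg) - avg - log(loss_count!),
# with log(loss_count!) computed below via lgamma(1 + loss_count).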
log_trial_likelihood = (
result.loss_count * log_avg_loss_per_trial
- math.exp(log_avg_loss_per_trial))
log_trial_likelihood -= math.lgamma(1 + result.loss_count)
log_likelihood += log_trial_likelihood
trace("avg_loss_per_trial", math.exp(log_avg_loss_per_trial))
trace("log_trial_likelihood", log_trial_likelihood)
return log_likelihood
# TODO: Refactor (somehow) so pylint stops complaining about
# too many local variables.
def measure_and_compute(
self, trial_duration, transmit_rate, trial_result_list,
min_rate, max_rate, focus_trackers=(None, None), max_samples=None):
"""Perform both measurement and computation at once.
High level steps: Prepare and launch computation worker processes,
perform the measurement, stop computation and combine results.
Integrator needs a specific function to process (-1, 1) parameters.
As our fitting functions use dimensional parameters,
a transformation is performed, resulting in a specific prior
distribution over the dimensional parameters.
Maximal rate (line rate) is needed for that transformation.
Two fitting functions are used; computation is started
on a temporary worker process per fitting function. After the measurement,
average and stdev of the critical rate (not log) of each worker
are combined and returned. Raw averages are also returned,
offered load for next iteration is chosen based on them.
The idea is that one fitting function might be fitting much better,
measurements at its avg are best for relevant results (for both),
but we do not know which fitting function it is.
Focus trackers are updated in-place. If a focus tracker is None,
a new instance is created.
TODO: Define class for result object, so that fields are documented.
TODO: Re-use processes, instead creating on each computation?
TODO: As only one result is needed fresh, figure out a way
to keep the other worker running. This will allow shorter
duration per trial. Special handling at first and last measurement
will be needed (to properly initialize and to properly combine results).
:param trial_duration: Length of the measurement in seconds.
:param transmit_rate: Offered load in packets per second.
:param trial_result_list: Results of previous measurements.
:param min_rate: Practical minimum of possible offered load.
:param max_rate: Practical maximum of possible offered load.
:param focus_trackers: Pair of trackers initialized
to speed up the numeric computation.
:param max_samples: Limit for integrator samples, for debugging.
:type trial_duration: float
:type transmit_rate: float
:type trial_result_list: list of MLRsearch.ReceiveRateMeasurement
:type min_rate: float
:type max_rate: float
:type focus_trackers: 2-tuple of None or stat_trackers.VectorStatTracker
:type max_samples: None or int
:returns: Measurement and computation results.
:rtype: 6-tuple: ReceiveRateMeasurement, 4 floats, 2-tuple of trackers.
"""
logging.debug(
"measure_and_compute started with self %(self)r, trial_duration "
+ "%(dur)r, transmit_rate %(tr)r, trial_result_list %(trl)r, "
+ "max_rate %(mr)r, focus_trackers %(track)r, max_samples %(ms)r",
{"self": self, "dur": trial_duration, "tr": transmit_rate,
"trl": trial_result_list, "mr": max_rate, "track": focus_trackers,
"ms": max_samples})
# Preparation phase.
dimension = 2
stretch_focus_tracker, erf_focus_tracker = focus_trackers
if stretch_focus_tracker is None:
stretch_focus_tracker = stat_trackers.VectorStatTracker(dimension)
stretch_focus_tracker.unit_reset()
if erf_focus_tracker is None:
erf_focus_tracker = stat_trackers.VectorStatTracker(dimension)
erf_focus_tracker.unit_reset()
old_trackers = stretch_focus_tracker.copy(), erf_focus_tracker.copy()
def start_computing(fitting_function, focus_tracker):
"""Just a block of code to be used for each fitting function.
Define function for integrator, create process and pipe ends,
start computation, return the boss pipe end.
:param fitting_function: lfit_erf or lfit_stretch.
:param focus_tracker: Tracker defining the initial focus for the integrator.
:type fitting_function: Function from 3 floats to float.
:type focus_tracker: stat_trackers.VectorStatTracker
:returns: Boss end of communication pipe.
:rtype: multiprocessing.Connection
"""
def value_logweight_func(trace, x_mrr, x_spread):
"""Return log of critical rate and log of likelihood.
This is a closure. The ancestor function got
trial_result_list as a parameter, and we are accessing it.
As integrator has strict conditions on function signature,
trial_result_list cannot be an explicit argument
of the current function.
This is also why we have to define this closure
at each invocation of the ancestor function anew.
The dimensional spread parameter is the (dimensional) mrr
raised to the power of x_spread scaled to interval (0, 1).
The dimensional mrr parameter distribution has shape of
1/(1+x^2), but x==1 corresponds to max_rate
and 1.0 pps is added to avoid numerical problems in fitting
functions.
TODO: x^-2 (for x>1.0) might be simpler/nicer prior.
:param trace: Multiprocessing-safe logging function (closure).
:param x_mrr: The first dimensionless param
from (-1, 1) interval.
:param x_spread: The second dimensionless param
from (-1, 1) interval.
:type trace: function (str, object) -> None
:type x_mrr: float
:type x_spread: float
:returns: Log of critical rate [pps] and log of likelihood.
:rtype: 2-tuple of float
"""
mrr = max_rate * (1.0 / (x_mrr + 1.0) - 0.5) + 1.0
spread = math.exp((x_spread + 1.0) / 2.0 * math.log(mrr))
logweight = self.log_weight(
trace, fitting_function, trial_result_list, mrr, spread)
value = math.log(self.find_critical_rate(
trace, fitting_function, min_rate, max_rate,
self.packet_loss_ratio_target, mrr, spread))
return value, logweight
dilled_function = dill.dumps(value_logweight_func)
boss_pipe_end, worker_pipe_end = multiprocessing.Pipe()
boss_pipe_end.send(
(dimension, dilled_function, focus_tracker, max_samples))
worker = multiprocessing.Process(
target=Integrator.try_estimate_nd, args=(
worker_pipe_end, 10.0, self.trace_enabled))
worker.daemon = True
worker.start()
return boss_pipe_end
erf_pipe = start_computing(
self.lfit_erf, erf_focus_tracker)
stretch_pipe = start_computing(
self.lfit_stretch, stretch_focus_tracker)
# Measurement phase.
measurement = self.measurer.measure(trial_duration, transmit_rate)
# Processing phase.
def stop_computing(name, pipe):
"""Just a block of code to be used for each worker.
Send stop object, poll for result, then either
unpack response, log messages and return, or raise traceback.
TODO: Define class/structure for the return value?
:param name: Human friendly worker identifier for logging purposes.
:param pipe: Boss end of connection towards worker to stop.
:type name: str
:type pipe: multiprocessing.Connection
:returns: Computed value tracker, actual focus tracker,
and number of samples used for this iteration.
:rtype: 3-tuple of tracker, tracker and int
"""
pipe.send(None)
if not pipe.poll(10.0):
raise RuntimeError(
"Worker {name} did not finish!".format(name=name))
result_or_traceback = pipe.recv()
try:
value_tracker, focus_tracker, debug_list, trace_list, sampls = (
result_or_traceback)
except ValueError:
raise RuntimeError(
"Worker {name} failed with the following traceback:\n{tr}"
.format(name=name, tr=result_or_traceback))
logging.info("Logs from worker %(name)r:", {"name": name})
for message in debug_list:
logging.info(message)
for message in trace_list:
logging.debug(message)
logging.debug("trackers: value %(val)r focus %(foc)r", {
"val": value_tracker, "foc": focus_tracker})
return value_tracker, focus_tracker, sampls
stretch_value_tracker, stretch_focus_tracker, stretch_samples = (
stop_computing("stretch", stretch_pipe))
erf_value_tracker, erf_focus_tracker, erf_samples = (
stop_computing("erf", erf_pipe))
stretch_avg = stretch_value_tracker.average
erf_avg = erf_value_tracker.average
# TODO: Take into account secondary stats.
stretch_stdev = math.exp(stretch_value_tracker.log_variance / 2)
erf_stdev = math.exp(erf_value_tracker.log_variance / 2)
avg = math.exp((stretch_avg + erf_avg) / 2.0)
var = (stretch_stdev * stretch_stdev + erf_stdev * erf_stdev) / 2.0
var += (stretch_avg - erf_avg) * (stretch_avg - erf_avg) / 4.0
stdev = avg * math.sqrt(var)
focus_trackers = (stretch_focus_tracker, erf_focus_tracker)
logging.info(
"measure_and_compute finished with trial result %(res)r "
"avg %(avg)r stdev %(stdev)r stretch %(a1)r erf %(a2)r "
"new trackers %(nt)r old trackers %(ot)r stretch samples %(ss)r "
"erf samples %(es)r",
{"res": measurement, "avg": avg, "stdev": stdev,
"a1": math.exp(stretch_avg), "a2": math.exp(erf_avg),
"nt": focus_trackers, "ot": old_trackers, "ss": stretch_samples,
"es": erf_samples})
return (
measurement, avg, stdev, math.exp(stretch_avg),
math.exp(erf_avg), focus_trackers)
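# Example usage (a minimal sketch, not from the original codebase; `MyMeasurer`
# is a hypothetical object implementing MLRsearch.AbstractMeasurer, i.e. a
# measure(duration, transmit_rate) method returning a ReceiveRateMeasurement):
#
#   measurer = MyMeasurer()
#   search = PLRsearch(
#       measurer=measurer, trial_duration_per_trial=5.0,
#       packet_loss_ratio_target=1e-7, timeout=1800.0)
#   average, stdev = search.search(min_rate=1e5, max_rate=1e7)
#   logging.info("critical load estimate: %s +/- %s pps", average, stdev)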
|
python
|
import discord
from itertools import cycle
from discord.ext import commands, tasks
status = cycle(['Add ur text here','ur text here','ur text here','ur text here']) # you can add as much as you want EX: 'Stiizzy cat is hot','Name'
bot = commands.Bot(command_prefix="!") # prefix will not be used for changing status
@bot.event
async def on_ready():
print("Changing Status started")
change_status.start()
@tasks.loop(seconds=5) # change to how many secs you want - 5 is best
async def change_status():
await bot.change_presence(activity=discord.Game(next(status)))
bot.run("Your Token here", bot=False)
#made by stiizzy cat
# not my fault if you get disabled
|
python
|
import logging
from datetime import datetime
from pprint import pprint
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import text
from opennem.db import db_connect
from opennem.db.load_fixtures import update_existing_geos
from opennem.db.models.opennem import Facility, Station
from opennem.geo.google_geo import place_search
logger = logging.getLogger(__name__)
def build_address_string(station_record):
l = [
station_record.network_name,
station_record.locality,
station_record.state,
"Australia",
]
address_string = ", ".join(str(i) for i in l if i)
return address_string
def opennem_geocode(limit=None):
engine = db_connect()
session = sessionmaker(bind=engine)
s = session()
records = (
s.query(Station)
.filter(Station.geom == None)
.filter(Station.geocode_skip == False)
)
count = 0
skipped = 0
records_added = 0
for r in records:
geo_address_string = build_address_string(r)
logger.info("Geocoding record: {}".format(geo_address_string))
google_result = place_search(geo_address_string)
pprint(google_result)
if (
google_result
and type(google_result) is list
and len(google_result) > 0
):
result = google_result[0]
r.place_id = result["place_id"]
lat = result["geometry"]["location"]["lat"]
lng = result["geometry"]["location"]["lng"]
r.geom = "SRID=4326;POINT({} {})".format(lng, lat)
r.geocode_processed_at = datetime.now()
r.geocode_by = "google"
r.geocode_approved = False
try:
s.add(r)
s.commit()
records_added += 1
except IntegrityError as e:
logger.error(e)
skipped += 1
pass
except Exception as e:
skipped += 1
logger.error("Error: {}".format(e))
else:
skipped += 1
count += 1
if limit and count >= limit:
break
print(
"Geocode of opennem records done. Added {} records. Couldn't match {}".format(
records_added, skipped
)
)
if __name__ == "__main__":
update_existing_geos()
opennem_geocode()
|
python
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
# from http://stackoverflow.com/questions/15896217/django-loading-a-page-that-
# has-external-authentication-changes-the-session-key
class PersistentSessionMiddleware(object):
""" Injects the username into REMOTE_USER so that users continue to be
logged in on views that don't require authentication.
"""
def __init__(self):
pass
def process_request(self, request):
header = "REMOTE_USER"
if request.user.is_authenticated() and header not in request.META:
request.META[header] = request.user.username
return None
|
python
|
# -*- coding: UTF-8 -*-
from mpi4py import MPI
from sympy import pi, cos, sin
from sympy.abc import x, y
from sympy.utilities.lambdify import implemented_function
import pytest
from sympde.calculus import grad, dot
from sympde.calculus import laplace
from sympde.topology import ScalarFunctionSpace
from sympde.topology import element_of
from sympde.topology import NormalVector
from sympde.topology import Square
from sympde.topology import Union
from sympde.expr import BilinearForm, LinearForm, integral
from sympde.expr import Norm
from sympde.expr import find, EssentialBC
from psydac.fem.basic import FemField
from psydac.api.discretization import discretize
#==============================================================================
def get_boundaries(*args):
if not args:
return ()
else:
assert all(1 <= a <= 4 for a in args)
assert len(set(args)) == len(args)
boundaries = {1: {'axis': 0, 'ext': -1},
2: {'axis': 0, 'ext': 1},
3: {'axis': 1, 'ext': -1},
4: {'axis': 1, 'ext': 1}}
return tuple(boundaries[i] for i in args)
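# For example, get_boundaries(1, 4) returns
# ({'axis': 0, 'ext': -1}, {'axis': 1, 'ext': 1}),
# i.e. the minimum-x and maximum-y edges of the square domain.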
#==============================================================================
def run_poisson_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary,
ncells, degree, comm=None):
assert isinstance(dir_zero_boundary , (list, tuple))
assert isinstance(dir_nonzero_boundary, (list, tuple))
#+++++++++++++++++++++++++++++++
# 1. Abstract model
#+++++++++++++++++++++++++++++++
domain = Square()
B_dirichlet_0 = Union(*[domain.get_boundary(**kw) for kw in dir_zero_boundary])
B_dirichlet_i = Union(*[domain.get_boundary(**kw) for kw in dir_nonzero_boundary])
B_dirichlet = Union(B_dirichlet_0, B_dirichlet_i)
B_neumann = domain.boundary.complement(B_dirichlet)
V = ScalarFunctionSpace('V', domain)
u = element_of(V, name='u')
v = element_of(V, name='v')
nn = NormalVector('nn')
# Bilinear form a: V x V --> R
a = BilinearForm((u, v), integral(domain, dot(grad(u), grad(v))))
# Linear form l: V --> R
l0 = LinearForm(v, integral(domain, f * v))
if B_neumann:
l1 = LinearForm(v, integral(B_neumann, v * dot(grad(solution), nn)))
l = LinearForm(v, l0(v) + l1(v))
else:
l = l0
# Dirichlet boundary conditions
bc = []
if B_dirichlet_0: bc += [EssentialBC(u, 0, B_dirichlet_0)]
if B_dirichlet_i: bc += [EssentialBC(u, solution, B_dirichlet_i)]
# Variational model
equation = find(u, forall=v, lhs=a(u, v), rhs=l(v), bc=bc)
# Error norms
error = u - solution
l2norm = Norm(error, domain, kind='l2')
h1norm = Norm(error, domain, kind='h1')
#+++++++++++++++++++++++++++++++
# 2. Discretization
#+++++++++++++++++++++++++++++++
# Create computational domain from topological domain
domain_h = discretize(domain, ncells=ncells, comm=comm)
# Discrete spaces
Vh = discretize(V, domain_h, degree=degree)
# Discretize equation using Dirichlet bc
equation_h = discretize(equation, domain_h, [Vh, Vh])
# Discretize error norms
l2norm_h = discretize(l2norm, domain_h, Vh)
h1norm_h = discretize(h1norm, domain_h, Vh)
#+++++++++++++++++++++++++++++++
# 3. Solution
#+++++++++++++++++++++++++++++++
# Solve linear system
x = equation_h.solve()
uh = FemField( Vh, x )
# Compute error norms
l2_error = l2norm_h.assemble(u=uh)
h1_error = h1norm_h.assemble(u=uh)
return l2_error, h1_error
#==============================================================================
def run_laplace_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary,
ncells, degree, comm=None):
assert isinstance(dir_zero_boundary , (list, tuple))
assert isinstance(dir_nonzero_boundary, (list, tuple))
#+++++++++++++++++++++++++++++++
# 1. Abstract model
#+++++++++++++++++++++++++++++++
domain = Square()
B_dirichlet_0 = Union(*[domain.get_boundary(**kw) for kw in dir_zero_boundary])
B_dirichlet_i = Union(*[domain.get_boundary(**kw) for kw in dir_nonzero_boundary])
B_dirichlet = Union(B_dirichlet_0, B_dirichlet_i)
B_neumann = domain.boundary.complement(B_dirichlet)
V = ScalarFunctionSpace('V', domain)
u = element_of(V, name='u')
v = element_of(V, name='v')
nn = NormalVector('nn')
# Bilinear form a: V x V --> R
a = BilinearForm((u, v), integral(domain, dot(grad(u), grad(v)) + u * v))
# Linear form l: V --> R
l0 = LinearForm(v, integral(domain, f * v))
if B_neumann:
l1 = LinearForm(v, integral(B_neumann, v * dot(grad(solution), nn)))
l = LinearForm(v, l0(v) + l1(v))
else:
l = l0
# Dirichlet boundary conditions
bc = []
if B_dirichlet_0: bc += [EssentialBC(u, 0, B_dirichlet_0)]
if B_dirichlet_i: bc += [EssentialBC(u, solution, B_dirichlet_i)]
# Variational model
equation = find(u, forall=v, lhs=a(u, v), rhs=l(v), bc=bc)
# Error norms
error = u - solution
l2norm = Norm(error, domain, kind='l2')
h1norm = Norm(error, domain, kind='h1')
#+++++++++++++++++++++++++++++++
# 2. Discretization
#+++++++++++++++++++++++++++++++
# Create computational domain from topological domain
domain_h = discretize(domain, ncells=ncells, comm=comm)
# Discrete spaces
Vh = discretize(V, domain_h, degree=degree)
# Discretize equation using Dirichlet bc
equation_h = discretize(equation, domain_h, [Vh, Vh])
# Discretize error norms
l2norm_h = discretize(l2norm, domain_h, Vh)
h1norm_h = discretize(h1norm, domain_h, Vh)
#+++++++++++++++++++++++++++++++
# 3. Solution
#+++++++++++++++++++++++++++++++
# Solve linear system
x = equation_h.solve()
uh = FemField( Vh, x )
# Compute error norms
l2_error = l2norm_h.assemble(u=uh)
h1_error = h1norm_h.assemble(u=uh)
return l2_error, h1_error
#==============================================================================
def run_biharmonic_2d_dir(solution, f, dir_zero_boundary, ncells, degree, comm=None):
assert isinstance(dir_zero_boundary, (list, tuple))
#+++++++++++++++++++++++++++++++
# 1. Abstract model
#+++++++++++++++++++++++++++++++
domain = Square()
B_dirichlet_0 = Union(*[domain.get_boundary(**kw) for kw in dir_zero_boundary])
B_dirichlet_i = domain.boundary.complement(B_dirichlet_0)
V = ScalarFunctionSpace('V', domain)
u = element_of(V, name='u')
v = element_of(V, name='v')
nn = NormalVector('nn')
# Bilinear form a: V x V --> R
a = BilinearForm((u, v), integral(domain, laplace(u) * laplace(v)))
# Linear form l: V --> R
l = LinearForm(v, integral(domain, f * v))
# Essential boundary conditions
dn = lambda a: dot(grad(a), nn)
bc = []
if B_dirichlet_0:
bc += [EssentialBC( u , 0, B_dirichlet_0)]
bc += [EssentialBC(dn(u), 0, B_dirichlet_0)]
if B_dirichlet_i:
bc += [EssentialBC( u , solution , B_dirichlet_i)]
bc += [EssentialBC(dn(u), dn(solution), B_dirichlet_i)]
# Variational model
equation = find(u, forall=v, lhs=a(u, v), rhs=l(v), bc=bc)
# Error norms
error = u - solution
l2norm = Norm(error, domain, kind='l2')
h1norm = Norm(error, domain, kind='h1')
h2norm = Norm(error, domain, kind='h2')
#+++++++++++++++++++++++++++++++
# 2. Discretization
#+++++++++++++++++++++++++++++++
# Create computational domain from topological domain
domain_h = discretize(domain, ncells=ncells, comm=comm)
# Discrete spaces
Vh = discretize(V, domain_h, degree=degree)
# Discretize equation using Dirichlet bc
equation_h = discretize(equation, domain_h, [Vh, Vh])
# Discretize error norms
l2norm_h = discretize(l2norm, domain_h, Vh)
h1norm_h = discretize(h1norm, domain_h, Vh)
h2norm_h = discretize(h2norm, domain_h, Vh)
#+++++++++++++++++++++++++++++++
# 3. Solution
#+++++++++++++++++++++++++++++++
# Solve linear system
x = equation_h.solve()
uh = FemField( Vh, x )
# Compute error norms
l2_error = l2norm_h.assemble(u=uh)
h1_error = h1norm_h.assemble(u=uh)
h2_error = h2norm_h.assemble(u=uh)
return l2_error, h1_error, h2_error
###############################################################################
# SERIAL TESTS
###############################################################################
#==============================================================================
# 2D Poisson's equation
#==============================================================================
def test_poisson_2d_dir0_1234():
solution = sin(pi*x)*sin(pi*y)
f = 2*pi**2*sin(pi*x)*sin(pi*y)
dir_zero_boundary = get_boundaries(1, 2, 3, 4)
dir_nonzero_boundary = get_boundaries()
l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary,
dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2])
expected_l2_error = 0.00021808678604760232
expected_h1_error = 0.013023570720360362
assert( abs(l2_error - expected_l2_error) < 1.e-7)
assert( abs(h1_error - expected_h1_error) < 1.e-7)
#------------------------------------------------------------------------------
def test_poisson_2d_dir0_234_neu0_1():
solution = cos(0.5*pi*x)*sin(pi*y)
f = (5./4.)*pi**2*solution
dir_zero_boundary = get_boundaries(2, 3, 4)
dir_nonzero_boundary = get_boundaries()
l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary,
dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2])
expected_l2_error = 0.00015546057796452772
expected_h1_error = 0.00926930278452745
assert( abs(l2_error - expected_l2_error) < 1.e-7)
assert( abs(h1_error - expected_h1_error) < 1.e-7)
#------------------------------------------------------------------------------
def test_poisson_2d_dir0_134_neu0_2():
solution = sin(0.5*pi*x)*sin(pi*y)
f = (5./4.)*pi**2*solution
dir_zero_boundary = get_boundaries(1, 3, 4)
dir_nonzero_boundary = get_boundaries()
l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary,
dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2])
expected_l2_error = 0.0001554605779481901
expected_h1_error = 0.009269302784527256
assert( abs(l2_error - expected_l2_error) < 1.e-7)
assert( abs(h1_error - expected_h1_error) < 1.e-7)
#------------------------------------------------------------------------------
def test_poisson_2d_dir0_124_neu0_3():
solution = sin(pi*x)*cos(0.5*pi*y)
f = (5./4.)*pi**2*solution
dir_zero_boundary = get_boundaries(1, 2, 4)
dir_nonzero_boundary = get_boundaries()
l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary,
dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2])
expected_l2_error = 0.0001554605779681901
expected_h1_error = 0.009269302784528678
assert( abs(l2_error - expected_l2_error) < 1.e-7)
assert( abs(h1_error - expected_h1_error) < 1.e-7)
#------------------------------------------------------------------------------
def test_poisson_2d_dir0_123_neu0_4():
solution = sin(pi*x)*sin(0.5*pi*y)
f = (5./4.)*pi**2*solution
dir_zero_boundary = get_boundaries(1, 2, 3)
dir_nonzero_boundary = get_boundaries()
l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary,
dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2])
expected_l2_error = 0.00015546057796339546
expected_h1_error = 0.009269302784526841
assert( abs(l2_error - expected_l2_error) < 1.e-7)
assert( abs(h1_error - expected_h1_error) < 1.e-7)
#------------------------------------------------------------------------------
def test_poisson_2d_dir0_24_neu0_13():
solution = cos(0.5*pi*x)*cos(0.5*pi*y)
f = (1./2.)*pi**2*solution
dir_zero_boundary = get_boundaries(2, 4)
dir_nonzero_boundary = get_boundaries()
l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary,
dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2])
expected_l2_error = 2.6119892736036942e-05
expected_h1_error = 0.0016032430287934746
assert( abs(l2_error - expected_l2_error) < 1.e-7)
assert( abs(h1_error - expected_h1_error) < 1.e-7)
#------------------------------------------------------------------------------
def test_poisson_2d_dir0_13_neu0_24():
solution = sin(0.5*pi*x)*sin(0.5*pi*y)
f = (1./2.)*pi**2*solution
dir_zero_boundary = get_boundaries(1, 3)
dir_nonzero_boundary = get_boundaries()
l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary,
dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2])
expected_l2_error = 2.611989253883369e-05
expected_h1_error = 0.0016032430287973409
assert( abs(l2_error - expected_l2_error) < 1.e-7)
assert( abs(h1_error - expected_h1_error) < 1.e-7)
#------------------------------------------------------------------------------
def test_poisson_2d_dir0_4_neu0_123():
solution = cos(pi*x)*cos(0.5*pi*y)
f = 5./4.*pi**2*solution
dir_zero_boundary = get_boundaries(4)
dir_nonzero_boundary = get_boundaries()
l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary,
dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2])
expected_l2_error = 0.00015494478505412876
expected_h1_error = 0.009242166414700994
assert( abs(l2_error - expected_l2_error) < 1.e-7)
assert( abs(h1_error - expected_h1_error) < 1.e-7)
#------------------------------------------------------------------------------
def test_poisson_2d_dir0_234_neui_1():
solution = sin(pi*x)*sin(pi*y)
f = 2*pi**2*solution
dir_zero_boundary = get_boundaries(2, 3, 4)
dir_nonzero_boundary = get_boundaries()
l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary,
dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2])
expected_l2_error = 0.00021786960672322118
expected_h1_error = 0.01302350067761091
assert( abs(l2_error - expected_l2_error) < 1.e-7)
assert( abs(h1_error - expected_h1_error) < 1.e-7)
#------------------------------------------------------------------------------
def test_poisson_2d_dir0_134_neui_2():
solution = sin(pi*x)*sin(pi*y)
f = 2*pi**2*solution
dir_zero_boundary = get_boundaries(1, 3, 4)
dir_nonzero_boundary = get_boundaries()
l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary,
dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2])
expected_l2_error = 0.00021786960672322118
expected_h1_error = 0.01302350067761091
assert( abs(l2_error - expected_l2_error) < 1.e-7)
assert( abs(h1_error - expected_h1_error) < 1.e-7)
#------------------------------------------------------------------------------
def test_poisson_2d_dir0_124_neui_3():
solution = sin(pi*x)*sin(pi*y)
f = 2*pi**2*solution
dir_zero_boundary = get_boundaries(1, 2, 4)
dir_nonzero_boundary = get_boundaries()
l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary,
dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2])
expected_l2_error = 0.00021786960672322118
expected_h1_error = 0.01302350067761091
assert( abs(l2_error - expected_l2_error) < 1.e-7)
assert( abs(h1_error - expected_h1_error) < 1.e-7)
#------------------------------------------------------------------------------
def test_poisson_2d_dir0_123_neui_4():
solution = sin(pi*x)*sin(pi*y)
f = 2*pi**2*solution
dir_zero_boundary = get_boundaries(1, 2, 3)
dir_nonzero_boundary = get_boundaries()
l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary,
dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2])
expected_l2_error = 0.00021786960672322118
expected_h1_error = 0.01302350067761091
assert( abs(l2_error - expected_l2_error) < 1.e-7)
assert( abs(h1_error - expected_h1_error) < 1.e-7)
#------------------------------------------------------------------------------
def test_poisson_2d_dir0_123_diri_4():
solution = sin(pi * x) * sin(0.5*pi * y)
f = 5/4*pi**2 * solution
dir_zero_boundary = get_boundaries(1, 2, 3)
dir_nonzero_boundary = get_boundaries(4)
l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary,
dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2])
expected_l2_error = 0.00015292215711784052
expected_h1_error = 0.009293161646614652
assert abs(l2_error - expected_l2_error) < 1.e-7
assert abs(h1_error - expected_h1_error) < 1.e-7
#------------------------------------------------------------------------------
def test_poisson_2d_dir0_13_diri_24():
solution = sin(3*pi/2 * x) * sin(3*pi/2 * y)
f = 9/2*pi**2 * solution
dir_zero_boundary = get_boundaries(1, 3)
dir_nonzero_boundary = get_boundaries(2, 4)
l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary,
dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2])
expected_l2_error = 0.0007786454571731944
expected_h1_error = 0.0449669071240554
assert abs(l2_error - expected_l2_error) < 1.e-7
assert abs(h1_error - expected_h1_error) < 1.e-7
#------------------------------------------------------------------------------
def test_poisson_2d_dir0_1234_user_function():
solution = sin(pi*x)*sin(pi*y)
# ...
# User provides right-hand side in the form of a callable Python function:
def f(x, y):
from numpy import pi, sin
return 2*pi**2*sin(pi*x)*sin(pi*y)
# Python function is converted to Sympy's "implemented function" and then
# called with symbolic arguments (x, y):
f = implemented_function('f', f)(x, y)
# ...
dir_zero_boundary = get_boundaries(1, 2, 3, 4)
dir_nonzero_boundary = get_boundaries()
l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary,
dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2])
expected_l2_error = 0.00021808678604760232
expected_h1_error = 0.013023570720360362
assert( abs(l2_error - expected_l2_error) < 1.e-7)
assert( abs(h1_error - expected_h1_error) < 1.e-7)
#==============================================================================
# 2D "Laplace-like" equation
#==============================================================================
def test_laplace_2d_neu0_1234():
solution = cos(pi*x)*cos(pi*y)
f = (2.*pi**2 + 1.)*solution
dir_zero_boundary = get_boundaries()
dir_nonzero_boundary = get_boundaries()
l2_error, h1_error = run_laplace_2d(solution, f, dir_zero_boundary,
dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2])
expected_l2_error = 0.0002172846538950129
expected_h1_error = 0.012984852988125026
assert( abs(l2_error - expected_l2_error) < 1.e-7)
assert( abs(h1_error - expected_h1_error) < 1.e-7)
#==============================================================================
# 2D biharmonic equation
#==============================================================================
def test_biharmonic_2d_dir0_1234():
solution = sin(pi * x)**2 * sin(pi * y)**2
f = laplace(laplace(solution))
dir_zero_boundary = get_boundaries(1, 2, 3, 4)
l2_error, h1_error, h2_error = run_biharmonic_2d_dir(solution, f,
dir_zero_boundary, ncells=[2**3, 2**3], degree=[3, 3])
expected_l2_error = 0.00019981371108040476
expected_h1_error = 0.0063205179028178295
expected_h2_error = 0.2123929568623994
assert( abs(l2_error - expected_l2_error) < 1.e-7)
assert( abs(h1_error - expected_h1_error) < 1.e-7)
assert( abs(h2_error - expected_h2_error) < 1.e-7)
#------------------------------------------------------------------------------
@pytest.mark.xfail
def test_biharmonic_2d_dir0_123_diri_4():
solution = sin(pi * x)**2 * sin(0.5*pi * y)**2
f = laplace(laplace(solution))
dir_zero_boundary = get_boundaries(1, 2, 3)
l2_error, h1_error, h2_error = run_biharmonic_2d_dir(solution, f,
dir_zero_boundary, ncells=[2**3, 2**3], degree=[3, 3])
print()
print(l2_error)
print(h1_error)
print(h2_error)
print()
assert False
#------------------------------------------------------------------------------
@pytest.mark.xfail
def test_biharmonic_2d_dir0_13_diri_24():
solution = sin(3*pi/2 * x)**2 * sin(3*pi/2 * y)**2
f = laplace(laplace(solution))
dir_zero_boundary = get_boundaries(1, 3)
l2_error, h1_error, h2_error = run_biharmonic_2d_dir(solution, f,
dir_zero_boundary, ncells=[2**3, 2**3], degree=[3, 3])
print()
print(l2_error)
print(h1_error)
print(h2_error)
print()
assert False
###############################################################################
# PARALLEL TESTS
###############################################################################
#==============================================================================
@pytest.mark.parallel
def test_poisson_2d_dir0_1234_parallel():
solution = sin(pi*x)*sin(pi*y)
f = 2*pi**2*sin(pi*x)*sin(pi*y)
dir_zero_boundary = get_boundaries(1, 2, 3, 4)
dir_nonzero_boundary = get_boundaries()
l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary,
dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2],
comm=MPI.COMM_WORLD)
expected_l2_error = 0.00021808678604760232
expected_h1_error = 0.013023570720360362
assert( abs(l2_error - expected_l2_error) < 1.e-7)
assert( abs(h1_error - expected_h1_error) < 1.e-7)
#==============================================================================
# CLEAN UP SYMPY NAMESPACE
#==============================================================================
def teardown_module():
from sympy import cache
cache.clear_cache()
def teardown_function():
from sympy import cache
cache.clear_cache()
|
python
|
# Solution of;
# Project Euler Problem 226: A Scoop of Blancmange
# https://projecteuler.net/problem=226
#
# The blancmange curve is the set of points $(x, y)$ such that $0 \le x \le 1$
# and $y = \sum \limits_{n = 0}^{\infty} {\dfrac{s(2^n x)}{2^n}}$, where
# $s(x)$ is the distance from $x$ to the nearest integer. The area under the
# blancmange curve is equal to ½, shown in pink in the diagram below. Let C be
# the circle with centre $\left ( \frac{1}{4}, \frac{1}{2} \right )$ and
# radius $\frac{1}{4}$, shown in black in the diagram. What area under the
# blancmange curve is enclosed by C?Give your answer rounded to eight decimal
# places in the form 0. abcdefgh
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 226
timed.caller(dummy, n, i, prob_id)
|
python
|
from aiohttp import ClientSession
from asyncio import get_event_loop
class ManagedHTTP:
def __init__(self):
self.session = ClientSession()
async def ensure_session(self):
if self.session.closed:
self.session = ClientSession()
async def request(self, method: str, url: str, *args, **kwargs):
await self.ensure_session()
return await self.session.request(method, url, *args, **kwargs)
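# Example usage (a minimal sketch; "https://example.com" is a placeholder URL).
# Note that the underlying aiohttp session should ultimately be closed by the caller:
#
#   async def main():
#       http = ManagedHTTP()
#       resp = await http.request("GET", "https://example.com")
#       print(resp.status)
#       await http.session.close()
#
#   get_event_loop().run_until_complete(main())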
|
python
|
"""Test the functions exposed at the top level of the module.
This isn't a full test of each method's capabilities,
just checking that the method is exposed at the top level namespace.
"""
import warnings
import pytest
import uk_politics
import uk_politics.exceptions
def test_color():
"""Check that uk_politics.color works."""
assert uk_politics.color("Liberal Democrats") == "#FAA61A"
def test_location():
"""Check that uk_politics.Location works.
Just creation and comparison here.
"""
wales = uk_politics.Location(country="Wales")
west_glamorgan = uk_politics.elections.COUNTS[0].location
assert wales >= west_glamorgan
assert not west_glamorgan >= wales
def test_find_party():
"""Check that uk_politics.find_party works."""
assert uk_politics.find_party(
"Tory", return_short_name=True) == "Conservative Party"
def test_find_empty():
"""Passing an empty string should raise assertion error."""
with pytest.raises(uk_politics.exceptions.PartyNicknameEmpty):
assert uk_politics.find_party("") == ""
def test_scottish_labour():
"""Test variations on Scottish Labour return Labour not SNP.
The name is a bit too close to two different parties.
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore", module="uk_politics")
assert uk_politics.find_party("Scottish Labour") == "Labour Party"
assert uk_politics.find_party("Scottish labour") == "Labour Party"
def test_rename_gives_warning(caplog):
"""Test that a bad name prompts a rename warning."""
uk_politics.find_party("labuor")
print(caplog.records)
expected = "Renaming 'labuor' -> 'Labour Party'"
assert expected in caplog.text
|
python
|
# Copyright 2018 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import json
import re
import uuid
from simpleeval import simple_eval
from simpleeval import InvalidExpression
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import DateTime
from sqlalchemy import Text
from sqlalchemy import Boolean
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.orm import load_only
from common import crmint_logging
from common.task import Task
from controller import inline
from controller.database import BaseModel
from controller.mailers import NotificationMailer
def _parse_num(s):
try:
return int(s)
except ValueError:
try:
return float(s)
# TODO(dulacp) should raise a ValueError exception, not silence it
except ValueError:
return 0
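# For example: _parse_num('3') == 3, _parse_num('3.5') == 3.5, and any
# unparsable string such as _parse_num('n/a') currently falls back to 0.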
class Pipeline(BaseModel):
__tablename__ = 'pipelines'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(255))
emails_for_notifications = Column(String(255))
status = Column(String(50), nullable=False, default='idle')
status_changed_at = Column(DateTime)
jobs = relationship('Job', backref='pipeline',
lazy='dynamic')
run_on_schedule = Column(Boolean, nullable=False, default=False)
schedules = relationship('Schedule', lazy='dynamic')
params = relationship('Param', lazy='dynamic', order_by='asc(Param.name)')
class STATUS(object):
IDLE = 'idle'
FAILED = 'failed'
SUCCEEDED = 'succeeded'
STOPPING = 'stopping'
RUNNING = 'running'
INACTIVE_STATUSES = [IDLE, FAILED, SUCCEEDED]
def __init__(self, name=None):
super(Pipeline, self).__init__()
self.name = name
@property
def state(self):
return self.status
@property
def has_jobs(self):
return self.jobs.count() > 0
@property
def recipients(self):
if self.emails_for_notifications:
return self.emails_for_notifications.split()
return []
def assign_attributes(self, attributes):
for key, value in attributes.items():
if key in ['schedules', 'jobs', 'params']:
continue
if key == 'run_on_schedule':
self.__setattr__(key, value == 'True')
continue
self.__setattr__(key, value)
def save_relations(self, relations):
for key, value in relations.items():
if key == 'schedules':
self.assign_schedules(value)
elif key == 'params':
self.assign_params(value)
def assign_params(self, parameters):
Param.update_list(parameters, self)
def assign_schedules(self, arg_schedules):
# Remove if records not in list ids for update
arg_schedule_ids = []
for arg_schedule in arg_schedules:
if arg_schedule.get('id') is not None:
# Updating
schedule = Schedule.find(arg_schedule.get('id'))
schedule.update(cron=arg_schedule['cron'])
arg_schedule_ids.append(arg_schedule['id'])
else:
# Creating
schedule = Schedule.create(pipeline_id=self.id,
cron=arg_schedule['cron'])
arg_schedule_ids.append(schedule.id)
# Removing
ids_for_removing = []
for schedule in self.schedules:
if schedule.id not in arg_schedule_ids:
ids_for_removing.append(schedule.id)
Schedule.destroy(*ids_for_removing)
def populate_params_runtime_values(self):
inline.open_session()
try:
global_context = {}
for param in Param.where(pipeline_id=None, job_id=None).all():
global_context[param.name] = param.populate_runtime_value()
pipeline_context = global_context.copy()
for param in self.params.all():
pipeline_context[param.name] = param.populate_runtime_value(global_context)
for job in self.jobs.all():
for param in job.params.all():
param.populate_runtime_value(pipeline_context)
inline.close_session()
return True
except (InvalidExpression, TypeError, ValueError, SyntaxError) as e:
inline.close_session()
job_id = 'N/A'
worker_class = 'N/A'
if param.job_id is not None:
job_id = param.job_id
worker_class = param.job.worker_class
message = 'Invalid job parameter "%s": %s' % (param.label, e)
elif param.pipeline_id is not None:
message = 'Invalid pipeline variable "%s": %s' % (param.label, e)
else:
message = 'Invalid global variable "%s": %s' % (param.label, e)
crmint_logging.logger.log_struct({
'labels': {
'pipeline_id': self.id,
'job_id': job_id,
'worker_class': worker_class,
},
'log_level': 'ERROR',
'message': message,
})
return False
def set_status(self, status):
self.update(status=status, status_changed_at=datetime.now())
def get_ready(self):
if not self.populate_params_runtime_values():
return False
for job in self.jobs.all():
if not job.get_ready():
return False
self.set_status(Pipeline.STATUS.RUNNING)
return True
def start(self):
if self.status not in Pipeline.STATUS.INACTIVE_STATUSES:
return False
jobs = self.jobs.all()
if len(jobs) < 1:
return False
for job in jobs:
if job.status not in Job.STATUS.INACTIVE_STATUSES:
return False
if not self.get_ready():
return False
for job in jobs:
job.start()
return True
def _cancel_all_tasks(self):
for job in self.jobs:
job.cancel_tasks()
def stop(self):
if self.status != Pipeline.STATUS.RUNNING:
return False
for job in self.jobs:
job.stop()
for job in self.jobs:
if job.status not in [Job.STATUS.FAILED, Job.STATUS.SUCCEEDED]:
job.set_status(Job.STATUS.STOPPING)
self._cancel_all_tasks()
return self.job_finished()
def start_single_job(self, job):
if self.status not in Pipeline.STATUS.INACTIVE_STATUSES:
return False
if not self.populate_params_runtime_values():
return False
if not job.get_ready():
return False
self.set_status(Pipeline.STATUS.RUNNING)
job.start_as_single()
return True
def job_finished(self):
for job in self.jobs:
if job.status == Job.STATUS.STOPPING:
job.set_status(Job.STATUS.FAILED)
for job in self.jobs:
if job.status not in Job.STATUS.INACTIVE_STATUSES:
return False
self._finish()
return True
def _finish(self):
jobs = Job.query.outerjoin((StartCondition,
Job.id == StartCondition.preceding_job_id))
jobs = jobs.filter(Job.pipeline_id == self.id)
jobs = jobs.filter(StartCondition.preceding_job_id == None)
jobs = jobs.options(load_only('status')).all()
status = Pipeline.STATUS.SUCCEEDED
for job in jobs:
# IDLE means the job has not run at all or it has been cancelled
if job.status == Job.STATUS.FAILED:
status = Pipeline.STATUS.FAILED
break
self.set_status(status)
NotificationMailer().finished_pipeline(self)
def import_data(self, data):
self.assign_params(data['params'])
self.assign_schedules(data['schedules'])
job_mapping = {}
jobs = []
if data['jobs']:
for job_data in data['jobs']:
job = Job()
job.pipeline_id = self.id
job.assign_attributes(job_data)
job.save()
job.save_relations(job_data)
jobs.append(job)
job_mapping[job_data['id']] = job.id
for job in jobs:
index = list(job_mapping.values()).index(job.id)
job_id = list(job_mapping.keys())[index]
job_data = next((j for j in data['jobs'] if j['id'] == job_id), None)
job.assign_hash_start_conditions(job_data['hash_start_conditions'],
job_mapping)
def is_blocked(self):
return (self.run_on_schedule or
self.status in [Pipeline.STATUS.RUNNING, Pipeline.STATUS.STOPPING])
def destroy(self):
sc_ids = [sc.id for sc in self.schedules]
if sc_ids:
Schedule.destroy(*sc_ids)
for job in self.jobs:
job.destroy()
param_ids = [p.id for p in self.params.all()]
if param_ids:
Param.destroy(*param_ids)
self.delete()
class Job(BaseModel):
__tablename__ = 'jobs'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(255))
status = Column(String(50), nullable=False, default='idle')
status_changed_at = Column(DateTime)
worker_class = Column(String(255))
pipeline_id = Column(Integer, ForeignKey('pipelines.id'))
params = relationship('Param', backref='job', lazy='dynamic')
start_conditions = relationship(
'StartCondition',
primaryjoin='Job.id==StartCondition.job_id')
dependent_jobs = relationship(
'Job',
secondary='start_conditions',
primaryjoin='Job.id==StartCondition.preceding_job_id',
secondaryjoin='StartCondition.job_id==Job.id')
class STATUS(object):
IDLE = 'idle'
FAILED = 'failed'
SUCCEEDED = 'succeeded'
RUNNING = 'running'
WAITING = 'waiting'
STOPPING = 'stopping'
INACTIVE_STATUSES = [IDLE, FAILED, SUCCEEDED]
def __init__(self, name=None, worker_class=None, pipeline_id=None):
super(Job, self).__init__()
self.name = name
self.worker_class = worker_class
self.pipeline_id = pipeline_id
def destroy(self):
sc_ids = [sc.id for sc in self.start_conditions]
if sc_ids:
StartCondition.destroy(*sc_ids)
dependent_job_sc_ids = [
sc.id for sc in StartCondition.where(preceding_job_id=self.id).all()]
if dependent_job_sc_ids:
StartCondition.destroy(*dependent_job_sc_ids)
param_ids = [p.id for p in self.params.all()]
if param_ids:
Param.destroy(*param_ids)
self.delete()
def get_ready(self):
if self.status not in Job.STATUS.INACTIVE_STATUSES:
return False
self.set_status(Job.STATUS.WAITING)
return True
def _get_task_namespace(self):
return 'pipeline=%s_job=%s' % (str(self.pipeline_id), str(self.id))
def _add_task_with_name(self, task_name):
task_namespace = self._get_task_namespace()
TaskEnqueued.create(task_namespace=task_namespace, task_name=task_name)
return True
def _delete_task_with_name(self, task_name):
"""
Returns: Number of remaining tasks in the DB.
"""
task_namespace = self._get_task_namespace()
TaskEnqueued.where(task_namespace=task_namespace,
task_name=task_name).delete()
return self._enqueued_task_count()
def cancel_tasks(self):
task_namespace = self._get_task_namespace()
enqueued_tasks = TaskEnqueued.where(task_namespace=task_namespace)
if enqueued_tasks:
TaskEnqueued.where(task_namespace=task_namespace).delete()
def _enqueued_task_count(self):
task_namespace = self._get_task_namespace()
return TaskEnqueued.count_in_namespace(task_namespace)
def _start_condition_is_fulfilled(self, start_condition):
preceding_job_status = start_condition.preceding_job.status
if start_condition.condition == StartCondition.CONDITION.SUCCESS:
if preceding_job_status == Job.STATUS.FAILED:
return False
elif start_condition.condition == StartCondition.CONDITION.FAIL:
if preceding_job_status == Job.STATUS.SUCCEEDED:
return False
return True
def start_as_single(self):
"""
Returns: Task object that was added to the task queue, otherwise None.
"""
if self.status != Job.STATUS.WAITING:
return None
else:
self.set_status(Job.STATUS.RUNNING)
return self.run()
def start(self):
"""
Returns: Task object that was added to the task queue, otherwise None.
"""
# Validates that preceding jobs fulfill the starting conditions.
for start_condition in self.start_conditions:
if self._start_condition_is_fulfilled(start_condition):
if start_condition.preceding_job.status not in [
Job.STATUS.SUCCEEDED,
Job.STATUS.FAILED]:
return None
else:
# pipeline failure
self.set_status(Job.STATUS.FAILED)
self.pipeline.update(status=Pipeline.STATUS.FAILED,
status_changed_at=datetime.now())
self.pipeline.stop()
return None
if self.pipeline.status == Pipeline.STATUS.FAILED:
return None
return self.start_as_single()
def run(self):
worker_params = dict([(p.name, p.worker_value) for p in self.params])
return self.enqueue(self.worker_class, worker_params)
def stop(self):
self.cancel_tasks()
if self.status == Job.STATUS.WAITING:
self.set_status(Job.STATUS.IDLE)
return True
elif self.status == Job.STATUS.RUNNING:
self.set_status(Job.STATUS.STOPPING)
return True
return False
def enqueue(self, worker_class, worker_params, delay=0):
if self.status != Job.STATUS.RUNNING:
return False
name = str(uuid.uuid4())
general_settings = {gs.name: gs.value for gs in GeneralSetting.all()}
task = Task(name, self.pipeline_id, self.id,
worker_class, worker_params, general_settings)
task.enqueue(delay)
# Keep track of running tasks.
self._add_task_with_name(name)
self.save()
return True
def _start_dependent_jobs(self):
if self.dependent_jobs:
for job in self.dependent_jobs:
job.start()
def set_status(self, status):
self.update(status=status, status_changed_at=datetime.now())
def _task_completed(self, task_name):
"""Completes task execution.
    Returns: True if it was the last task to be completed. False otherwise.
"""
remaining_tasks = self._delete_task_with_name(task_name)
return remaining_tasks == 0
def task_succeeded(self, task_name):
was_last_task = self._task_completed(task_name)
    # Updates the job database status if there are no more running tasks.
# NB: `was_last_task` acts as a concurrent lock, only one task can
# validate this condition.
if was_last_task:
# Cancel all tasks if one condition doesn't match the success status.
for job in self.dependent_jobs:
for start_condition in job.start_conditions:
success_statuses = [
StartCondition.CONDITION.SUCCESS,
StartCondition.CONDITION.WHATEVER
]
if (start_condition.preceding_job.id == self.id
and start_condition.condition not in success_statuses):
self.set_status(Job.STATUS.SUCCEEDED)
return self.pipeline.stop()
self.set_status(Job.STATUS.SUCCEEDED)
# We can safely start children jobs, because of our concurrent lock.
self._start_dependent_jobs()
self.pipeline.job_finished()
def task_failed(self, task_name):
was_last_task = self._task_completed(task_name)
# If no dependent jobs then the pipeline failed
if not self.dependent_jobs:
self.set_status(Job.STATUS.FAILED)
return self.pipeline.stop()
# Cancel all tasks if one condition doesn't match the failed status.
for job in self.dependent_jobs:
for start_condition in job.start_conditions:
failed_statuses = [
StartCondition.CONDITION.FAIL,
StartCondition.CONDITION.WHATEVER
]
if (start_condition.preceding_job.id == self.id
and start_condition.condition not in failed_statuses):
self.set_status(Job.STATUS.FAILED)
return self.pipeline.stop()
if was_last_task:
self.set_status(Job.STATUS.FAILED)
# We can safely start children jobs, because of our concurrent lock.
self._start_dependent_jobs()
self.pipeline.job_finished()
def assign_attributes(self, attributes):
for key, value in attributes.items():
if key in ['params', 'start_conditions', 'id', 'hash_start_conditions']:
continue
self.__setattr__(key, value)
def save_relations(self, relations):
for key, value in relations.items():
if key == 'params':
self.assign_params(value)
elif key == 'start_conditions':
self.assign_start_conditions(value)
def add_start_conditions(self, items):
for item in items:
self.start_conditions.append(item)
def assign_params(self, parameters):
Param.update_list(parameters, self)
def assign_hash_start_conditions(self, arg_start_conditions, job_mapping):
for arg_start_condition in arg_start_conditions:
preceding_job_id = job_mapping[arg_start_condition['preceding_job_id']]
StartCondition.create(
job_id=self.id,
preceding_job_id=preceding_job_id,
condition=arg_start_condition['condition']
)
def assign_start_conditions(self, arg_start_conditions):
scs = []
for arg_start_condition in arg_start_conditions:
scs.append(StartCondition.parse_value(arg_start_condition))
arg_sc_ids = set([sc['id'] for sc in scs])
cur_sc_ids = set([sc.preceding_job_id for sc in self.start_conditions])
sc_intersection_ids = set(arg_sc_ids) & set(cur_sc_ids)
new_sc_ids = set(arg_sc_ids) - set(cur_sc_ids)
for v in scs:
# Add new start conditions
if v['id'] in new_sc_ids:
StartCondition.create(
job_id=self.id,
preceding_job_id=v['id'],
condition=v['condition']
)
# Update current start conditions
elif v['id'] in sc_intersection_ids:
sc = StartCondition.where(
job_id=self.id,
preceding_job_id=v['id']
).first()
sc.condition = v['condition']
sc.save()
# Delete extra start conditions
delete_sc_ids = set(cur_sc_ids) - set(arg_sc_ids)
StartCondition.where(
job_id=self.id,
preceding_job_id__in=delete_sc_ids
).delete(synchronize_session=False)
class Param(BaseModel):
__tablename__ = 'params'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(255), nullable=False)
type = Column(String(50), nullable=False)
pipeline_id = Column(Integer, ForeignKey('pipelines.id'))
job_id = Column(Integer, ForeignKey('jobs.id'))
is_required = Column(Boolean, nullable=False, default=False)
description = Column(Text)
label = Column(String(255))
value = Column(Text())
runtime_value = Column(Text())
_INLINER_REGEX = re.compile(r'{%.+?%}')
def populate_runtime_value(self, context={}):
names = context.copy()
names.update({'True': True, 'False': False})
value = self.value
inliners = self._INLINER_REGEX.findall(value)
for inliner in inliners:
result = simple_eval(inliner[2:-2], functions=inline.functions,
names=names)
value = value.replace(inliner, str(result))
if self.job_id is not None:
self.update(runtime_value=value)
return value
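  # For illustration only (comment not part of the original module): a Param
  # whose value is 'prefix_{% 1 + 2 %}' has its inline expression evaluated by
  # simple_eval, yielding the runtime value 'prefix_3'.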
@property
def worker_value(self):
if self.type == 'boolean':
return self.runtime_value == '1'
if self.type == 'number':
return _parse_num(self.runtime_value)
if self.type == 'string_list':
return self.runtime_value.split('\n')
if self.type == 'number_list':
return [_parse_num(l) for l in self.runtime_value.split('\n')
if l.strip()]
return self.runtime_value
@property
def api_value(self):
if self.type == 'boolean':
return self.value == '1'
return self.value
def __init__(self, name=None, type=None):
self.name = name
self.type = type
@classmethod
def update_list(cls, parameters, obj=None):
arg_param_ids = []
for arg_param in parameters:
param = None
if arg_param.get('id') is not None:
# Updating
param = Param.find(arg_param.get('id'))
else:
# Creating
param = Param()
if obj and obj.__class__.__name__ == 'Pipeline':
param.pipeline_id = obj.id
elif obj and obj.__class__.__name__ == 'Job':
param.job_id = obj.id
param.name = arg_param['name']
try:
param.label = arg_param['label']
except KeyError:
param.label = arg_param['name']
param.type = arg_param['type']
if arg_param['type'] == 'boolean':
param.value = arg_param['value']
else:
param.value = str(arg_param['value']).encode('utf-8')
param.save()
arg_param_ids.append(param.id)
# Removing
ids_for_removing = []
params = obj.params if obj else Param.where(pipeline_id=None,
job_id=None).all()
for param in params:
if param.id not in arg_param_ids:
ids_for_removing.append(param.id)
Param.destroy(*ids_for_removing)
class StartCondition(BaseModel):
__tablename__ = 'start_conditions'
id = Column(Integer, primary_key=True, autoincrement=True)
job_id = Column(Integer, ForeignKey('jobs.id'))
preceding_job_id = Column(Integer, ForeignKey('jobs.id'))
condition = Column(String(255))
job = relationship('Job', foreign_keys=[job_id])
preceding_job = relationship('Job', foreign_keys=[preceding_job_id])
class CONDITION(object):
SUCCESS = 'success'
FAIL = 'fail'
WHATEVER = 'whatever'
def __init__(self, job_id=None, preceding_job_id=None, condition=None):
self.job_id = job_id
self.preceding_job_id = preceding_job_id
self.condition = condition
@property
def preceding_job_name(self):
return self.preceding_job.name
@property
def value(self):
return ','.join([str(self.preceding_job_id), self.condition])
@classmethod
def parse_value(cls, value):
return {
'id': int(value['preceding_job_id']),
'condition': value['condition']
}
class Schedule(BaseModel):
__tablename__ = 'schedules'
id = Column(Integer, primary_key=True, autoincrement=True)
pipeline_id = Column(Integer, ForeignKey('pipelines.id'))
cron = Column(String(255))
pipeline = relationship('Pipeline', foreign_keys=[pipeline_id])
class GeneralSetting(BaseModel):
__tablename__ = 'general_settings'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(255))
value = Column(Text())
class Stage(BaseModel):
__tablename__ = 'stages'
id = Column(Integer, primary_key=True, autoincrement=True)
sid = Column(String(255))
def assign_attributes(self, attributes):
for key, value in attributes.items():
self.__setattr__(key, value)
class TaskEnqueued(BaseModel):
__tablename__ = 'enqueued_tasks'
id = Column(Integer, primary_key=True, autoincrement=True)
task_namespace = Column(String(60), index=True)
task_name = Column(String(100), index=True, unique=True)
@classmethod
def count_in_namespace(cls, task_namespace):
count_query = cls.where(task_namespace=task_namespace)
return count_query.count()
|
python
|
# Generated by Django 3.1.2 on 2020-10-25 14:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='YandexKassaPayment',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('period', models.CharField(default='month', max_length=255)),
('status', models.CharField(default='pending', max_length=255)),
('description', models.TextField(blank=True, null=True)),
('value', models.PositiveIntegerField()),
('currency', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='yandex_kassa_payments', to=settings.AUTH_USER_MODEL)),
],
),
]
|
python
|
name = 'membercount'
aliases = ['members']
async def run(message):
'Lists the number of people that are in this server'
true_member_count = message.guild.member_count
await message.channel.send(
f'There are **{true_member_count:,}** people in this server.'
)
|
python
|
import subprocess as sp
import os
import shutil
import tempfile
import logging
logger = logging.getLogger(__name__)
class AutoLoader(object):
"""Base class for automatic loaders (e.g. Git)"""
pass
class Git(AutoLoader):
def __init__(self, url, import_as=None, branch=None):
"Creates a temporary directory full of a git repo."
self.url = url
logger.debug("Creating temporary directory")
self.path = tempfile.mkdtemp()
logger.info("Importing {} using git".format(self.url))
git_cmd ="/usr/bin/git -C {dir} clone {url}".format(dir=self.path, url=self.url).split(" ")
if import_as is not None:
git_cmd.append(import_as)
if branch is not None:
git_cmd.extend(["-b", branch])
sp.check_call(git_cmd)
def __del__(self):
logger.debug("Deleting temporary directory {}".format(self.path))
shutil.rmtree(self.path)
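# Illustrative usage (the repository URL below is a placeholder, not part of
# the original module). Cloning happens in __init__ and the temporary clone is
# removed again when the object is garbage collected:
#
#   repo = Git("https://github.com/example/project.git", branch="main")
#   print(repo.path)  # temporary directory containing the clone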
|
python
|
"""Filex."""
import random
from utils import timex
MIN_INT, MAX_INT = 10 ** 15, 10 ** 16 - 1
def read(file_name):
"""Read."""
    with open(file_name, 'r') as fin:
        content = fin.read()
    return content
def write(file_name, content, mode='w'):
"""Write."""
    with open(file_name, mode) as fout:
        fout.write(content)
def get_tmp_file():
"""Get tmp file name."""
return '/tmp/tmp.%s.%d' % (
timex.format_time(timex.get_unixtime(), '%Y%m%d%H%M%S'),
random.randint(MIN_INT, MAX_INT),
)
|
python
|
# -*- coding: utf-8 -*-
"""
fudcon.ui
------
fudcon ui application package
"""
|
python
|
from enigma import eRect, eServiceReference, iServiceInformation, iPlayableService
from Screens.Screen import Screen
from Screens.ServiceInfo import ServiceInfoList, ServiceInfoListEntry
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Pixmap import Pixmap
from Components.Label import Label
from Components.ScrollLabel import ScrollLabel
from Tools.Directories import resolveFilename, pathExists, fileExists, SCOPE_MEDIA
from Components.Sources.List import List
from Components.ServicePosition import ServicePositionGauge
from Components.ServiceEventTracker import ServiceEventTracker
from Components.Sources.StaticText import StaticText
from Components.ConfigList import ConfigList, ConfigListScreen
from Components.config import *
from Components.FileList import FileList
from _ctypes import *
import os, re
from os import path as os_path
#------------------------------------------------------------------------------------------
class MC_VideoInfoView(Screen):
skin = """
<screen position="80,130" size="560,320" title="View Video Info" >
<widget name="infolist" position="5,5" size="550,310" selectionDisabled="1" />
</screen>"""
def __init__(self, session, fullname, name, ref):
self.skin = MC_VideoInfoView.skin
Screen.__init__(self, session)
self["actions"] = ActionMap(["OkCancelActions"],
{
"cancel": self.close,
"ok": self.close
}, -1)
tlist = [ ]
self["infolist"] = ServiceInfoList(tlist)
currPlay = self.session.nav.getCurrentService()
if currPlay is not None:
stitle = currPlay.info().getInfoString(iServiceInformation.sTitle)
if stitle == "":
stitle = currPlay.info().getName().split('/')[-1]
tlist.append(ServiceInfoListEntry("Title: ", stitle))
tlist.append(ServiceInfoListEntry("sNamespace: ", currPlay.info().getInfoString(iServiceInformation.sNamespace)))
tlist.append(ServiceInfoListEntry("sProvider: ", currPlay.info().getInfoString(iServiceInformation.sProvider)))
tlist.append(ServiceInfoListEntry("sTimeCreate: ", currPlay.info().getInfoString(iServiceInformation.sTimeCreate)))
tlist.append(ServiceInfoListEntry("sVideoWidth: ", currPlay.info().getInfoString(iServiceInformation.sVideoWidth)))
tlist.append(ServiceInfoListEntry("sVideoHeight: ", currPlay.info().getInfoString(iServiceInformation.sVideoHeight)))
tlist.append(ServiceInfoListEntry("sDescription: ", currPlay.info().getInfoString(iServiceInformation.sDescription)))
class Showiframe():
def __init__(self):
lib="/usr/lib/"
if fileExists(lib +"libshowiframe.so.0.0.0"):
self.showiframe = dlopen(lib +"libshowiframe.so.0.0.0")
try:
self.showSinglePic = dlsym(self.showiframe, "showSinglePic")
self.finishShowSinglePic = dlsym(self.showiframe, "finishShowSinglePic")
except OSError, e:
self.showSinglePic = dlsym(self.showiframe, "_Z13showSinglePicPKc")
self.finishShowSinglePic = dlsym(self.showiframe, "_Z19finishShowSinglePicv")
def showStillpicture(self, pic):
call_function(self.showSinglePic, (pic, ))
def finishStillPicture(self):
call_function(self.finishShowSinglePic, ())
def shortname(movie,showing = None):
movielist = movie.split('/')
for n in movielist:
if n is not "":
movie = n
movie = movie.upper()
movieback = movie
movie = re.sub("\W720P(.*[^.]+).","",movie)
movie = re.sub("\W1080I(.*[^.]+).","",movie)
movie = re.sub("\W1080P(.*[^.]+).","",movie)
movie = re.sub("\W[(].*?[)](.*[^.]+).","",movie)
movie = re.sub("\W[[].*?[]](.*[^.]+).","",movie)
movie = re.sub("\W[0-9]{4}","",movie)
if not showing:
movie = re.sub("\WDVDRIP(.*[^.]+).","",movie)
movie = re.sub("\WAC3D(.*[^.]+).","",movie)
movie = re.sub("\WAC3(.*[^.]+).","",movie)
movie = re.sub("\WX264(.*[^.]+).","",movie)
movie = re.sub("\WXVID(.*[^.]+).","",movie)
movie = re.sub("\WBLURAY(.*[^.]+).","",movie)
movie = re.sub("\WGERMAN(.*[^.]+).","",movie)
movie = re.sub("\WCD[0-9]{2}","",movie)
movie = re.sub("\WCD[0-9]","",movie)
movie = re.sub("\WDVD[0-9]{2}","",movie)
movie = re.sub("\WDVD[0-9]","",movie)
movie = re.sub("\WDISC[0-9]{2}","",movie)
movie = re.sub("\WDISC[0-9]","",movie)
movie = re.sub("\W[0-9]{2}DISC","",movie)
movie = re.sub("\W[0-9]DISC","",movie)
# movie = re.sub("\WS[0-9]{2}","",movie)
# movie = re.sub("\WE[0-9]{2}","",movie)
movie = re.sub("\WSEASON[0-9]{2}","",movie)
movie = re.sub("\WSEASON[0-9]","",movie)
movie = re.sub("[0-9]{8} ","",movie)
movie = re.sub(" -","-",movie)
if len(movie) != 0:
if movie[0] == '-':
moviesplit = movie.split('-')[2:]
movie = "".join(moviesplit)
movie = movie[1:]
replace_list = "rar iso img avi mkv mp4 mpg mpeg mts ogm m2ts pls trp ts vdr vob wmv AC3 AC3D BDRIP BLURAY CAM CAMRIP COMPLETE CUSTOM CUT DC Directors DL DOKU DTS DVDR DVDRIP DVDSCR DVDSCREENER EXTENDED FRENCH FiNNiSH GERMAN HD HDDVD HDDVDRip HDTV INT INTERNAL Int LD LiMiTED MULTi MULTiSUBS NORDIC NTSC PAL PL R1 R5 RECUT REMASTERED REPACK RIP SCREENER SE SEE special.edition SSE STV SUBBED SWEDISH Staffel TC TELECINE TELESYNC TS UNCUT UNRATED WS XXX iTALiAN mvcd rsvcd svcd x264"
replacelist = replace_list.upper()
replacelist = replacelist.split(' ')
for n in replacelist:
movie = movie.replace(" ", ".")
movie = movie.replace(" " + n + " ", ".")
movie = movie.replace("." + n + ".", ".")
movie = movie.replace("." + n + "-", ".")
movie = movie.replace("." + n + "_", ".")
movie = movie.replace("-" + n + ".", ".")
movie = movie.replace("-" + n + "-", ".")
movie = movie.replace("-" + n + "_", ".")
movie = movie.replace("_" + n + ".", ".")
movie = movie.replace("_" + n + "-", ".")
movie = movie.replace("_" + n + "_", ".")
movie = movie.replace("..", ".")
movie = movie.replace("..", ".")
movie = movie.replace("..", ".")
movie = movie.replace("..", ".")
for n in replacelist:
if movie.upper().endswith("." + n):
if movie.__contains__("."):
while not movie.endswith("."):
movie = movie[:-1]
movie = movie[:-1]
movie = movie.replace(".", " ")
movie = movie.replace("-", " ")
movie = movie.replace("_", " ")
movie = movie.replace(":", " ")
if len(movie) == 0:
movie = movieback
return movie
|
python
|
'''
Content under Creative Commons Attribution license CC-BY 4.0,
code under MIT license (c)2018 Sergio Rojas ([email protected])
http://en.wikipedia.org/wiki/MIT_License
http://creativecommons.org/licenses/by/4.0/
Created on march, 2018
Last Modified on: may 15, 2018
'''
def myfuncPrimeFactors(n):
"""
This function finds and returns the prime factorization
of a whole number (excluding zero) via the reiterative division
method.
Example of usage:
getPrimeFactors = myfuncPrimeFactors( 716 )
print(getPrimeFactors)
"""
i = 2
factors = []
while n != 1:
if (n % i == 0):
factors = factors + [i]
n = n//i
else:
i = i+1
return factors
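# Quick sanity check of the repeated-division factorisation above
# (added for illustration): 716 = 2 * 2 * 179.
if __name__ == "__main__":
    print(myfuncPrimeFactors(716))  # expected: [2, 2, 179]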
|
python
|
import hashlib
from settings import SIZE
class Address:
def __init__(self, ip, port):
self.ip = ip
self.port = port
def __key(self):
return f"{self.ip}{self.port}".encode()
def __hash__(self):
"""
        Python randomizes its built-in hash seed to stop attackers from tar-pitting an application
        with keys designed to collide, so hash() is not stable across runs. To keep hash codes
        deterministic for this test project, a SHA-256 digest reduced modulo SIZE is used instead.
"""
m = hashlib.sha256()
m.update(self.__key())
return int(m.hexdigest(), 16) % SIZE
def __eq__(self, other):
if isinstance(other, Address):
return self.__key() == other.__key()
return NotImplemented
def __lt__(self, other):
return self.__hash__() < other.__hash__()
def __gt__(self, other):
return self.__hash__() > other.__hash__()
def __le__(self, other):
return self.__hash__() <= other.__hash__()
def __ge__(self, other):
return self.__hash__() >= other.__hash__()
def __str__(self):
return f'{self.ip}:{self.port}'
def __repr__(self):
return self.__str__()
for port in range(9000, 9020):
print(f"{port} -> {Address('127.0.0.1', port).__hash__()}")
|
python
|
# -*- coding:utf-8 -*-
import logging
from time import sleep
import bigsuds
from networkapi.plugins import exceptions as base_exceptions
from networkapi.system.facade import get_value as get_variable
log = logging.getLogger(__name__)
class Lb(object):
def __init__(self, hostname, username, password, session=True):
self._hostname = hostname
self._username = username
self._password = password
self._time_reconn = 10
try:
self._channel = bigsuds.BIGIP(
hostname=self._hostname,
username=self._username,
password=self._password
)
except Exception, e:
logging.critical("Unable to connect to BIG-IP. Details: %s" % (e))
raise base_exceptions.CommandErrorException(e)
else:
log.info('Connected in hostname:%s' % hostname)
try:
self._version = self._channel.System.SystemInfo.get_version()
                if int(self._version[8:].split('.')[0]) <= 10:
raise base_exceptions.UnsupportedVersion(
'This plugin only supports BIG-IP v11 or above')
else:
if session:
log.info('Try get new session')
session_cur = self._channel.System.Session.get_session_timeout()
log.info('Session Timeout Current: %s' % session_cur)
session_timeout = get_variable("set_session_timeout_plugin_f5", 60)
if int(session_cur) > session_timeout:
self._channel.System.Session.set_session_timeout(session_timeout)
self._channel = self.get_session()
except Exception, e:
log.error(e)
raise base_exceptions.CommandErrorException(e)
def get_session(self):
try:
channel = self._channel.with_session_id()
log.info('Session %s', channel)
except Exception, e:
if 'There are too many existing user sessions.'.lower() in str(e).lower():
self._time_reconn *= 2
log.warning(
'There are too many existing user sessions. '
'Trying again in %s seconds' % self._time_reconn)
sleep(self._time_reconn)
self.get_session()
else:
raise e
else:
return channel
|
python
|
import image, lcd, touch, gc, time  # lcd is required by showMap() below
from machine import I2C
from board import board_info
from fpioa_manager import fm
from Maix import GPIO
from machine import SPI
from micropython import const
from sx127x import SX127x
board_info=board_info()
i2c = I2C(I2C.I2C3, freq=1000*1000, scl=24, sda=27) # amigo
devices = i2c.scan()
print(devices)
touch.TouchLow.config(i2c)
tmp = touch.Touch(320, 480, 200)
whichButton = -1
message = "Welcome!"
rssi = ""
snr = ""
loraPacket = ""
myFreq = 433e6
mySF = 12
myBW = 7
myTX = 17
pingCounter = 0
squareWidth = 90
squareHeight = 70
check = [2,4,5,8]
################### config ###################
LORA_RST = const(22)
LORA_CS = const(12)
LORA_SPI_SCK = const(19)
LORA_SPI_MOSI = const(7)
LORA_SPI_MISO = const(9)
LORA_SPI_NUM = SPI.SPI1
LORA_SPI_FREQ_KHZ = const(100)
##############################################
# gpio init
fm.register(LORA_RST, fm.fpioa.GPIOHS22, force=True) # RST
fm.register(LORA_CS, fm.fpioa.GPIOHS12, force=True) # CS
# set gpiohs work mode to output mode
cs = GPIO(GPIO.GPIOHS12, GPIO.OUT)
rst = GPIO(GPIO.GPIOHS22, GPIO.IN)
spi1 = SPI(LORA_SPI_NUM, mode=SPI.MODE_MASTER, baudrate=LORA_SPI_FREQ_KHZ * 1000,
polarity=0, phase=0, bits=8, firstbit=SPI.MSB, sck=LORA_SPI_SCK,
mosi=LORA_SPI_MOSI, miso = LORA_SPI_MISO)
lora = SX127x(spi=spi1, pin_ss=cs)
def Version():
global message
version = lora.readRegister(0x42)
print("Version: 0x"+hex(version))
message = "Version: "+hex(version)
if version == 0x12:
message += " [o]"
else:
message += " [x]"
showMap()
def PING():
global pingCounter, message
payload = 'PING #{0}'.format(pingCounter)
print("Sending packet: {}".format(payload))
message = "Sent "+payload
lora.print(payload)
pingCounter += 1
showMap()
def NOP():
print("NOP")
def SF10():
global mySF, check
mySF = 10
if check.count(4)>0: #SF12
check.remove(4)
if check.count(3)==0: #SF10
check.append(3)
setParameters()
def SF12():
global mySF, check
mySF = 12
if check.count(3)>0: #SF10
check.remove(3)
if check.count(4)==0: #SF12
check.append(4)
setParameters()
def BW6():
global myBW, check
myBW = 6
if check.count(2)>0: #BW7
check.remove(2)
if check.count(1)==0: #BW6
check.append(1)
setParameters()
def BW7():
global myBW, check
myBW = 7
if check.count(1)>0: #BW6
check.remove(1)
if check.count(2)==0: #BW7
check.append(2)
setParameters()
def F433():
global myFreq, check
myFreq = 433e6
if check.count(6)>0: #868
check.remove(6)
if check.count(5)==0: #433
check.append(5)
setParameters()
def F868():
global myFreq, check
myFreq = 868e6
if check.count(5)>0: #433
check.remove(5)
if check.count(6)==0: #868
check.append(6)
setParameters()
def Tx10():
global myTX, check
myTX = 10
if check.count(8)>0: #Tx17
check.remove(8)
if check.count(7)==0: #Tx10
check.append(7)
setParameters()
def Tx17():
global myTX, check
myTX = 17
if check.count(7)>0: #Tx10
check.remove(7)
if check.count(8)==0: #Tx17
check.append(8)
setParameters()
menus = ["ping", "BW6", "BW7", "SF10", "SF12", "433", "868", "Tx10", "Tx17"]
actions = [PING, BW6, BW7, SF10, SF12, F433, F868, Tx10, Tx17]
numMenus = len(menus)
def setParameters():
global mySF, myBW, myFreq, myTX, message
# lora reset
rst.value(0)
time.sleep_ms(10)
rst.value(1)
time.sleep_ms(100)
lora.init()
fq = round(myFreq/1000000, 3)
print("Setting freq to: {0} MHz".format(fq))
lora.setFrequency(myFreq)
bins = (7.8E3, 10.4E3, 15.6E3, 20.8E3, 31.25E3, 41.7E3, 62.5E3, 125E3, 250E3, 500E3)
if myBW<0 or myBW>9:
myBW=7
BWrate = bins[myBW]
print("Setting BW to: "+str(BWrate/1e3)+" KHz / "+str(myBW))
lora.setSignalBandwidth(BWrate)
print("Setting SF to: "+str(mySF))
lora.setSpreadingFactor(mySF)
print("Setting TX power to: "+str(myTX))
lora.setTxPower(myTX)
print("------------------------")
print("Checking:")
fq = round(lora.getFrequency()/1000000.0, 3)
print("• fq: {0} MHz".format(fq))
sf = lora.getSpreadingFactor()
print("• sf: "+str(sf))
bwnum, bw = lora.getSignalBandwidth()
print("• bw: {0} ie {1} KHz".format(bwnum, (bw/1e3)))
Pout, Pmax, paboost = lora.getTxPower()
if paboost:
paboost = "PA_BOOST pin"
else:
paboost = "RFO pin"
print('Pout {0} dBm, Pmax {1}, {2}'.format(Pout, Pmax, paboost))
print("------------------------")
message = "{0} MHz SF{1} BW {2} KHz".format(fq, sf, round(bw/1e3, 1))
showMap()
def showMap():
global whichButton, message, loraPacket, rssi, snr, squareWidth, squareHeight
img = image.Image(size=(320, 480))
img.draw_rectangle(0, 0, 320, 480, color=(255, 64, 64), fill=True)
img.draw_string(140, 10, "MENU", color=(255, 255, 255), scale=2)
for i in range(0, numMenus):
x = (i % 3) * (squareWidth+10) + 10
y = int(i/3) * (squareHeight+10) + 50
if whichButton == i:
img.draw_rectangle(x, y, squareWidth, squareHeight, color=(0, 191, 191), fill=True)
img.draw_rectangle(x, y, squareWidth, squareHeight, color=(0, 0, 0), thickness=3)
        clr = (255, 255, 255)
if whichButton == i:
clr = (33, 33, 33)
offsetX = 32
offsetY = 22
if check.count(i)>0:
# check mark
img.draw_rectangle(x+3, y+3, squareWidth-6, squareHeight-6, color=(0, 0, 255), thickness=3)
dsp = menus[i]
offsetX = 45 - (8*len(dsp))
img.draw_string(x+offsetX, y+20, dsp, clr, scale=3)
py = y + squareHeight + 10
ln = len(message)
if ln > 0:
myScale = 2
myWidth = 5 * myScale
img.draw_string(int((320-ln*myWidth)/2), 470-myScale*10, message, (0, 0, 0), scale=myScale)
ln = len(loraPacket)
if ln > 0:
myScale = 2
myWidth = 5 * myScale
pieces=[]
limit = 28
while len(loraPacket)>0:
pieces.append(loraPacket[0:limit])
loraPacket=loraPacket[limit:]
pieces.append(rssi+" "+snr)
for i in pieces:
ln = len(i)
img.draw_string(6, py, i, (255, 222, 222), scale=myScale)
py += 24
lcd.rotation(1)
lcd.mirror(1)
lcd.display(img)
gc.collect()
showMap()
setParameters()
while 1:
tmp.event()
#print(tmp.state, tmp.points)
[(y0, x0, t0), (y1, x1, t1)] = tmp.points
#print(str(x0)+":"+str(y0))
if(x0!=0 and y0 != 0):
print("Touch")
while(x0!=0 and y0 != 0):
saveX = x1
saveY = y1
if saveY<50:
whichButton = -1
else:
x = int((saveX-10)/(squareWidth+10))
y = int((saveY-50)/(squareHeight+10))
whichButton = y*3+x
showMap()
tmp.event()
[(y0, x0, t0), (y1, x1, t1)] = tmp.points
print("Released")
if saveY<50:
print('abort')
else:
print(str(saveX)+":"+str(saveY))
x = int((saveX-10)/(squareWidth+10))
y = int((saveY-50)/(squareHeight+10))
index = y*3+x
if index>(numMenus-1):
print('abort')
else:
print("You selected menu: "+str(index))
actions[index]()
whichButton = -1
showMap()
gc.collect()
if lora.receivedPacket():
try:
loraPacket = lora.read_payload().decode()
rssi = "RSSI: {}".format(lora.packetRssi())
snr = "SNR: {}".format(lora.packetSNR())
print("*** Received message *** {} {} {}".format(loraPacket, rssi, snr))
message = "Incoming!"
showMap()
except Exception as e:
print(e)
gc.collect()
time.sleep_ms(30)
|
python
|
#!/usr/bin/env python
import boto3
import botocore
import argparse
import sys
parser = argparse.ArgumentParser(description='Check if the given AWS VPC exists.')
parser.add_argument('--region_name', dest='region_name', action='store', required=True, help='AWS Region name, e.g. eu-west-1')
parser.add_argument('--vpc_name', dest='vpc_name', action='store', required=True, help='AWS VPC name, e.g. backend_vpc')
args = parser.parse_args()
try:
conn_ec2 = boto3.resource('ec2', region_name=args.region_name)
except botocore.exceptions.EndpointConnectionError as e:
sys.stderr.write("EC2: Could not connect to AWS region: %s, check credentials, IAM role privileges, region name." % args.region_name)
sys.stderr.write(str(e))
sys.exit(1)
instances = conn_ec2.instances.filter(Filters=[])
all_vpc_ids = [instance.vpc_id for instance in instances]
all_vpc_ids = list(set(all_vpc_ids))
if len(all_vpc_ids) == 0:
sys.stderr.write("No VPCs found. Please verify that VPC %s exists and/or create one and try again." % args.vpc_name)
sys.exit(1)
target_vpc = []
for vpc_id in all_vpc_ids:
if vpc_id is not None:
if conn_ec2.Vpc(vpc_id).tags:
if {'Key': 'Name', 'Value': args.vpc_name} in conn_ec2.Vpc(vpc_id).tags:
target_vpc.append(vpc_id)
if len(target_vpc) == 0:
sys.stderr.write("No VPC found. Please verify that VPC %s exists and/or create one and then try again." % args.vpc_name)
sys.exit(1)
if len(target_vpc) > 1:
sys.stderr.write("More than one %s VPC found. Please investigate. There can be only one..." % args.vpc_name)
sys.exit(1)
sys.stdout.write(target_vpc[0])
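# Illustrative invocation (the script name and argument values are placeholders):
#   python check_vpc.py --region_name eu-west-1 --vpc_name backend_vpc
# On success the matching VPC id is written to stdout; every error path writes
# to stderr and exits with a non-zero status.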
|
python
|
import numpy


def arrays(arr):
    # Convert the input sequence to a float ndarray and reverse its order.
    return numpy.array(arr, float)[::-1]
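# Illustrative usage (input values are arbitrary): the strings are cast to
# float and the order is reversed, e.g. arrays(["1", "2", "3"]) returns
# array([3., 2., 1.]).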
|
python
|
# Copyright 2004-2018 Tom Rothamel <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This contains various Displayables that handle events.
from __future__ import print_function
import renpy.display
import renpy.audio
from renpy.display.render import render, Render
import pygame_sdl2 as pygame
import math
def compile_event(key, keydown):
"""
Compiles a keymap entry into a python expression.
keydown determines if we are dealing with keys going down (press),
or keys going up (release).
"""
# Lists or tuples get turned into or expressions.
if isinstance(key, (list, tuple)):
if not key:
return "(False)"
return "(" + " or ".join([compile_event(i, keydown) for i in key]) + ")"
# If it's in config.keymap, compile what's in config.keymap.
if key in renpy.config.keymap:
return compile_event(renpy.config.keymap[key], keydown)
if key in renpy.config.default_keymap:
return compile_event(renpy.config.default_keymap[key], keydown)
if key is None:
return "(False)"
part = key.split("_")
# Deal with the mouse.
if part[0] == "mousedown":
if keydown:
return "(ev.type == %d and ev.button == %d)" % (pygame.MOUSEBUTTONDOWN, int(part[1]))
else:
return "(False)"
if part[0] == "mouseup":
if keydown:
return "(ev.type == %d and ev.button == %d)" % (pygame.MOUSEBUTTONUP, int(part[1]))
else:
return "(False)"
# Deal with the Joystick / Gamepad.
if part[0] == "joy" or part[0] == "pad":
return "(False)"
# Otherwise, deal with it as a key.
if keydown:
rv = "(ev.type == %d" % pygame.KEYDOWN
else:
rv = "(ev.type == %d" % pygame.KEYUP
MODIFIERS = { "repeat", "alt", "meta", "shift", "noshift", "ctrl" }
modifiers = set()
while part[0] in MODIFIERS:
modifiers.add(part.pop(0))
key = "_".join(part)
if "repeat" in modifiers:
rv += " and (ev.repeat)"
else:
rv += " and (not ev.repeat)"
if key not in [ "K_LALT", "K_RALT" ]:
if "alt" in modifiers:
rv += " and (ev.mod & %d)" % pygame.KMOD_ALT
else:
rv += " and not (ev.mod & %d)" % pygame.KMOD_ALT
if key not in [ "K_LGUI", "K_RGUI" ]:
if "meta" in modifiers:
rv += " and (ev.mod & %d)" % pygame.KMOD_META
else:
rv += " and not (ev.mod & %d)" % pygame.KMOD_META
if key not in [ "K_LCTRL", "K_RCTRL" ]:
if "ctrl" in modifiers:
rv += " and (ev.mod & %d)" % pygame.KMOD_CTRL
else:
rv += " and not (ev.mod & %d)" % pygame.KMOD_CTRL
if key not in [ "K_LSHIFT", "K_RSHIFT" ]:
if "shift" in modifiers:
rv += " and (ev.mod & %d)" % pygame.KMOD_SHIFT
if "noshift" in modifiers:
rv += " and not (ev.mod & %d)" % pygame.KMOD_SHIFT
if len(part) == 1:
if len(part[0]) != 1:
if renpy.config.developer:
raise Exception("Invalid key specifier %s" % key)
else:
return "(False)"
rv += " and ev.unicode == %r)" % part[0]
else:
if part[0] != "K":
if renpy.config.developer:
raise Exception("Invalid key specifier %s" % key)
else:
return "(False)"
rv += " and ev.key == %d)" % (getattr(pygame.constants, key))
return rv
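# For illustration only (comment not part of the original module): compiling a
# plain keysym such as "K_RETURN" with keydown=True yields an expression
# roughly of the form
#   "(ev.type == <KEYDOWN> and (not ev.repeat)
#     and not (ev.mod & KMOD_ALT) and not (ev.mod & KMOD_META)
#     and not (ev.mod & KMOD_CTRL) and ev.key == <K_RETURN>)"
# which map_event() below wraps in a lambda and caches in event_cache.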
# These store a lambda for each compiled key in the system.
event_cache = { }
keyup_cache = { }
def clear_keymap_cache():
"""
:doc: other
Clears the keymap cache. This allows changes to :var:`config.keymap` to
take effect without restarting Ren'Py.
"""
event_cache.clear()
keyup_cache.clear()
def queue_event(name, up=False, **kwargs):
"""
:doc: other
Queues an event with the given name. `Name` should be one of the event
names in :var:`config.keymap`, or a list of such names.
`up`
This should be false when the event begins (for example, when a keyboard
button is pressed.) It should be true when the event ends (when the
button is released.)
The event is queued at the time this function is called. This function will
not work to replace an event with another - doing so will change event order.
(Use :var:`config.keymap` instead.)
This method is threadsafe.
"""
# Avoid queueing events before we're ready.
if not renpy.display.interface:
return
if not isinstance(name, (list, tuple)):
name = [ name ]
data = { "eventnames" : name, "up" : up }
data.update(kwargs)
ev = pygame.event.Event(renpy.display.core.EVENTNAME, data)
pygame.event.post(ev)
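# For illustration only (comment not part of the original module):
#   queue_event("dismiss")            # start of a synthetic "dismiss" event
#   queue_event("dismiss", up=True)   # and its matching release
# posts EVENTNAME events that map_event()/map_keyup() below will match.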
def map_event(ev, keysym):
"""
:doc: udd_utility
Returns true if the pygame event `ev` matches `keysym`
`keysym`
One of:
* The name of a keybinding in :var:`config.keymap`.
* A keysym, as documented in the :ref:`keymap` section.
* A list containing one or more keysyms.
"""
if ev.type == renpy.display.core.EVENTNAME:
if (keysym in ev.eventnames) and not ev.up:
return True
return False
check_code = event_cache.get(keysym, None)
if check_code is None:
check_code = eval("lambda ev : " + compile_event(keysym, True), globals())
event_cache[keysym] = check_code
return check_code(ev)
def map_keyup(ev, name):
"""Returns true if the event matches the named keycode being released."""
if ev.type == renpy.display.core.EVENTNAME:
if (name in ev.eventnames) and ev.up:
return True
check_code = keyup_cache.get(name, None)
if check_code is None:
check_code = eval("lambda ev : " + compile_event(name, False), globals())
keyup_cache[name] = check_code
return check_code(ev)
def skipping(ev):
"""
This handles setting skipping in response to the press of one of the
CONTROL keys. The library handles skipping in response to TAB.
"""
if not renpy.config.allow_skipping:
return
if not renpy.store._skipping:
return
if map_event(ev, "skip"):
renpy.config.skipping = "slow"
renpy.exports.restart_interaction()
if map_keyup(ev, "skip") or map_event(ev, "stop_skipping"):
renpy.config.skipping = None
renpy.exports.restart_interaction()
return
def inspector(ev):
return map_event(ev, "inspector")
##############################################################################
# Utility functions for dealing with actions.
def predict_action(var):
"""
Predicts some of the actions that may be caused by a variable.
"""
if var is None:
return
if isinstance(var, renpy.ui.Action):
var.predict()
if isinstance(var, (list, tuple)):
for i in var:
predict_action(i)
def run(action, *args, **kwargs):
"""
:doc: run
:name: renpy.run
:args: (action)
Run an action or list of actions. A single action is called with no
arguments, a list of actions is run in order using this function, and
None is ignored.
Returns the result of the first action to return a value.
"""
if action is None:
return None
if isinstance(action, (list, tuple)):
rv = None
for i in action:
new_rv = run(i, *args, **kwargs)
if new_rv is not None:
rv = new_rv
return rv
return action(*args, **kwargs)
def run_unhovered(var):
"""
Calls the unhovered method on the variable, if it exists.
"""
if var is None:
return None
if isinstance(var, (list, tuple)):
for i in var:
run_unhovered(i)
return
f = getattr(var, "unhovered", None)
if f is not None:
f()
def run_periodic(var, st):
if isinstance(var, (list, tuple)):
rv = None
for i in var:
v = run_periodic(i, st)
if rv is None or v < rv:
rv = v
return rv
if isinstance(var, renpy.ui.Action):
return var.periodic(st)
def is_selected(action):
"""
:doc: run
Returns true if `action` indicates it is selected, or false otherwise.
"""
if isinstance(action, (list, tuple)):
for i in action:
if isinstance(i, renpy.store.SelectedIf): # @UndefinedVariable
return i.get_selected()
return any(is_selected(i) for i in action)
elif isinstance(action, renpy.ui.Action):
return action.get_selected()
else:
return False
def is_sensitive(action):
"""
:doc: run
Returns true if `action` indicates it is sensitive, or False otherwise.
"""
if isinstance(action, (list, tuple)):
for i in action:
if isinstance(i, renpy.store.SensitiveIf): # @UndefinedVariable
return i.get_sensitive()
return all(is_sensitive(i) for i in action)
elif isinstance(action, renpy.ui.Action):
return action.get_sensitive()
else:
return True
def alt(clicked):
if isinstance(clicked, (list, tuple)):
rv = [ ]
for i in clicked:
t = alt(i)
if t is not None:
rv.append(t)
if rv:
return " ".join(rv)
else:
return None
if isinstance(clicked, renpy.ui.Action):
return clicked.alt
else:
return None
##############################################################################
# Special-Purpose Displayables
class Keymap(renpy.display.layout.Null):
"""
This is a behavior that maps keys to actions that are called when
the key is pressed. The keys are specified by giving the appropriate
k_constant from pygame.constants, or the unicode for the key.
"""
def __init__(self, replaces=None, activate_sound=None, **keymap):
if activate_sound is not None:
super(Keymap, self).__init__(style='default', activate_sound=activate_sound)
else:
super(Keymap, self).__init__(style='default')
self.keymap = keymap
def event(self, ev, x, y, st):
for name, action in self.keymap.iteritems():
if map_event(ev, name):
renpy.exports.play(self.style.activate_sound)
rv = run(action)
if rv is not None:
return rv
raise renpy.display.core.IgnoreEvent()
def predict_one_action(self):
for i in self.keymap.itervalues():
predict_action(i)
class RollForward(renpy.display.layout.Null):
"""
This behavior implements rollforward.
"""
def __init__(self, value, **properties):
super(RollForward, self).__init__(**properties)
self.value = value
def event(self, ev, x, y, st):
if map_event(ev, "rollforward"):
return renpy.exports.roll_forward_core(self.value)
class PauseBehavior(renpy.display.layout.Null):
"""
This is a class implementing the Pause behavior, which is to
return a value after a certain amount of time has elapsed.
"""
voice = False
def __init__(self, delay, result=False, voice=False, **properties):
super(PauseBehavior, self).__init__(**properties)
self.delay = delay
self.result = result
self.voice = voice
def event(self, ev, x, y, st):
if st >= self.delay:
if self.voice and renpy.config.nw_voice:
if (not renpy.config.afm_callback()) or renpy.display.tts.is_active():
renpy.game.interface.timeout(0.05)
return
# If we have been drawn since the timeout, simply return
# true. Otherwise, force a redraw, and return true when
# it comes back.
if renpy.game.interface.drawn_since(st - self.delay):
return self.result
else:
renpy.game.interface.force_redraw = True
renpy.game.interface.timeout(max(self.delay - st, 0))
class SoundStopBehavior(renpy.display.layout.Null):
"""
This is a class implementing the sound stop behavior,
which is to return False when a sound is no longer playing
on the named channel.
"""
def __init__(self, channel, result=False, **properties):
super(SoundStopBehavior, self).__init__(**properties)
self.channel = channel
self.result = result
def event(self, ev, x, y, st):
if not renpy.audio.music.get_playing(self.channel):
return self.result
renpy.game.interface.timeout(.025)
class SayBehavior(renpy.display.layout.Null):
"""
This is a class that implements the say behavior,
which is to return True (ending the interaction) if
the user presses space or enter, or clicks the left
mouse button.
"""
focusable = True
text = None
dismiss_unfocused = [ 'dismiss_unfocused' ]
def __init__(self, default=True, afm=None, dismiss=[ 'dismiss' ], allow_dismiss=None, dismiss_unfocused=[ 'dismiss_unfocused' ], **properties):
super(SayBehavior, self).__init__(default=default, **properties)
if not isinstance(dismiss, (list, tuple)):
dismiss = [ dismiss ]
if afm is not None:
self.afm_length = len(afm)
else:
self.afm_length = None
# What keybindings lead to dismissal?
self.dismiss = dismiss
self.allow_dismiss = allow_dismiss
def _tts_all(self):
raise renpy.display.tts.TTSRoot()
def set_text(self, text):
self.text = text
self.afm_length = max(text.end - text.start, 1)
def event(self, ev, x, y, st):
if self.afm_length and renpy.game.preferences.afm_time and renpy.game.preferences.afm_enable:
afm_delay = ( 1.0 * ( renpy.config.afm_bonus + self.afm_length ) / renpy.config.afm_characters ) * renpy.game.preferences.afm_time
if self.text is not None:
afm_delay += self.text.get_time()
if st > afm_delay:
if renpy.config.afm_callback:
if renpy.config.afm_callback() and not renpy.display.tts.is_active():
return True
else:
renpy.game.interface.timeout(0.1)
else:
return True
else:
renpy.game.interface.timeout(afm_delay - st)
dismiss = [ (i, True) for i in self.dismiss ] + [ (i, False) for i in self.dismiss_unfocused ]
for dismiss_event, check_focus in dismiss:
if map_event(ev, dismiss_event):
if check_focus and not self.is_focused():
continue
if renpy.config.skipping:
renpy.config.skipping = None
renpy.exports.restart_interaction()
raise renpy.display.core.IgnoreEvent()
if not renpy.config.enable_rollback_side:
rollback_side = "disable"
if renpy.mobile:
rollback_side = renpy.game.preferences.mobile_rollback_side
else:
rollback_side = renpy.game.preferences.desktop_rollback_side
if ev.type == pygame.MOUSEBUTTONUP:
percent = 1.0 * x / renpy.config.screen_width
if rollback_side == "left":
if percent < renpy.config.rollback_side_size:
renpy.exports.rollback()
raise renpy.display.core.IgnoreEvent()
elif rollback_side == "right":
if (1.0 - percent) < renpy.config.rollback_side_size:
renpy.exports.rollback()
raise renpy.display.core.IgnoreEvent()
if renpy.game.preferences.using_afm_enable and \
renpy.game.preferences.afm_enable and \
not renpy.game.preferences.afm_after_click:
renpy.game.preferences.afm_enable = False
renpy.exports.restart_interaction()
raise renpy.display.core.IgnoreEvent()
if self.allow_dismiss:
if not self.allow_dismiss():
raise renpy.display.core.IgnoreEvent()
return True
skip_delay = renpy.config.skip_delay / 1000.0
if renpy.config.skipping and renpy.config.allow_skipping and renpy.store._skipping:
if ev.type == renpy.display.core.TIMEEVENT and st >= skip_delay:
if renpy.game.preferences.skip_unseen:
return True
elif renpy.config.skipping == "fast":
return True
elif renpy.game.context().seen_current(True):
return True
else:
renpy.config.skipping = False
renpy.exports.restart_interaction()
else:
renpy.game.interface.timeout(skip_delay - st)
return None
##############################################################################
# Button
KEY_EVENTS = (
pygame.KEYDOWN,
pygame.KEYUP,
pygame.TEXTEDITING,
pygame.TEXTINPUT
)
class Button(renpy.display.layout.Window):
keymap = { }
action = None
alternate = None
longpress_start = None
longpress_x = None
longpress_y = None
role_parameter = None
keysym = None
alternate_keysym = None
def __init__(self, child=None, style='button', clicked=None,
hovered=None, unhovered=None, action=None, role=None,
time_policy=None, keymap={}, alternate=None,
selected=None, sensitive=None, keysym=None, alternate_keysym=None,
**properties):
if isinstance(clicked, renpy.ui.Action):
action = clicked
super(Button, self).__init__(child, style=style, **properties)
self.action = action
self.selected = selected
self.sensitive = sensitive
self.clicked = clicked
self.hovered = hovered
self.unhovered = unhovered
self.alternate = alternate
self.focusable = True # (clicked is not None) or (action is not None)
self.role_parameter = role
self.keymap = keymap
self.keysym = keysym
self.alternate_keysym = alternate_keysym
self.time_policy_data = None
self._duplicatable = False
def _duplicate(self, args):
if args and args.args:
args.extraneous()
return self
def predict_one_action(self):
predict_action(self.clicked)
predict_action(self.hovered)
predict_action(self.unhovered)
predict_action(self.alternate)
if self.keymap:
for v in self.keymap.itervalues():
predict_action(v)
def render(self, width, height, st, at):
if self.style.time_policy:
st, self.time_policy_data = self.style.time_policy(st, self.time_policy_data, self.style)
rv = super(Button, self).render(width, height, st, at)
if self.clicked:
rect = self.style.focus_rect
if rect is not None:
fx, fy, fw, fh = rect
else:
fx = self.style.left_margin
fy = self.style.top_margin
fw = rv.width - self.style.right_margin
fh = rv.height - self.style.bottom_margin
mask = self.style.focus_mask
if mask is True:
mask = rv
elif mask is not None:
try:
mask = renpy.display.render.render(mask, rv.width, rv.height, st, at)
except:
if callable(mask):
mask = mask
else:
raise Exception("Focus_mask must be None, True, a displayable, or a callable.")
if mask is not None:
fmx = 0
fmy = 0
else:
fmx = None
fmy = None
rv.add_focus(self, None,
fx, fy, fw, fh,
fmx, fmy, mask)
return rv
def focus(self, default=False):
super(Button, self).focus(default)
rv = None
if not default:
rv = run(self.hovered)
self.set_transform_event(self.role + "hover")
if self.child is not None:
self.child.set_transform_event(self.role + "hover")
return rv
def unfocus(self, default=False):
super(Button, self).unfocus(default)
self.longpress_start = None
if not default:
run_unhovered(self.hovered)
run(self.unhovered)
self.set_transform_event(self.role + "idle")
if self.child is not None:
self.child.set_transform_event(self.role + "idle")
def is_selected(self):
if self.selected is not None:
return self.selected
return is_selected(self.action)
def is_sensitive(self):
if self.sensitive is not None:
return self.sensitive
return is_sensitive(self.action)
def per_interact(self):
if self.action is not None:
if self.is_selected():
role = 'selected_'
else:
role = ''
if self.is_sensitive():
clicked = self.action
else:
clicked = None
role = ''
else:
role = ''
clicked = self.clicked
if self.role_parameter is not None:
role = self.role_parameter
if (role != self.role) or (clicked is not self.clicked):
renpy.display.render.invalidate(self)
self.role = role
self.clicked = clicked
if self.clicked is not None:
self.set_style_prefix(self.role + "idle_", True)
self.focusable = True
else:
self.set_style_prefix(self.role + "insensitive_", True)
self.focusable = False
super(Button, self).per_interact()
def event(self, ev, x, y, st):
def handle_click(action):
renpy.exports.play(self.style.activate_sound)
rv = run(action)
if rv is not None:
return rv
else:
raise renpy.display.core.IgnoreEvent()
# Call self.action.periodic()
timeout = run_periodic(self.action, st)
if timeout is not None:
renpy.game.interface.timeout(timeout)
# If we have a child, try passing the event to it. (For keyboard
# events, this only happens if we're focused.)
if (not (ev.type in KEY_EVENTS)) or self.style.key_events:
rv = super(Button, self).event(ev, x, y, st)
if rv is not None:
return rv
if (self.keysym is not None) and (self.clicked is not None):
if map_event(ev, self.keysym):
return handle_click(self.clicked)
if (self.alternate_keysym is not None) and (self.alternate is not None):
if map_event(ev, self.alternate_keysym):
return handle_click(self.alternate)
# If not focused, ignore all events.
if not self.is_focused():
return None
# Check the keymap.
for name, action in self.keymap.iteritems():
if map_event(ev, name):
return run(action)
# Handle the longpress event, if necessary.
if (self.alternate is not None) and renpy.display.touch:
if ev.type == pygame.MOUSEBUTTONDOWN and ev.button == 1:
self.longpress_start = st
self.longpress_x = x
self.longpress_y = y
renpy.game.interface.timeout(renpy.config.longpress_duration)
if self.longpress_start is not None:
if math.hypot(x - self.longpress_x, y - self.longpress_y) > renpy.config.longpress_radius:
self.longpress_start = None
elif st >= (self.longpress_start + renpy.config.longpress_duration):
renpy.exports.vibrate(renpy.config.longpress_vibrate)
renpy.display.interface.after_longpress()
return handle_click(self.alternate)
# Ignore as appropriate:
if (self.clicked is not None) and map_event(ev, "button_ignore"):
raise renpy.display.core.IgnoreEvent()
if (self.clicked is not None) and map_event(ev, "button_alternate_ignore"):
raise renpy.display.core.IgnoreEvent()
# If clicked,
if (self.clicked is not None) and map_event(ev, "button_select"):
return handle_click(self.clicked)
if (self.alternate is not None) and map_event(ev, "button_alternate"):
return handle_click(self.alternate)
return None
def set_style_prefix(self, prefix, root):
if root:
super(Button, self).set_style_prefix(prefix, root)
def _tts(self):
return ""
def _tts_all(self):
rv = self._tts_common(alt(self.action))
if self.is_selected():
rv += " " + renpy.minstore.__("selected")
return rv
# Reimplementation of the TextButton widget as a Button and a Text
# widget.
def TextButton(text, style='button', text_style='button_text',
clicked=None, **properties):
text_properties, button_properties = renpy.easy.split_properties(properties, "text_", "")
text = renpy.text.text.Text(text, style=text_style, **text_properties) # @UndefinedVariable
return Button(text, style=style, clicked=clicked, **button_properties)
class ImageButton(Button):
"""
Used to implement the guts of an image button.
"""
def __init__(self,
idle_image,
hover_image=None,
insensitive_image=None,
activate_image=None,
selected_idle_image=None,
selected_hover_image=None,
selected_insensitive_image=None,
selected_activate_image=None,
style='image_button',
clicked=None,
hovered=None,
**properties):
hover_image = hover_image or idle_image
insensitive_image = insensitive_image or idle_image
activate_image = activate_image or hover_image
selected_idle_image = selected_idle_image or idle_image
selected_hover_image = selected_hover_image or hover_image
selected_insensitive_image = selected_insensitive_image or insensitive_image
selected_activate_image = selected_activate_image or activate_image
self.state_children = dict(
idle_=renpy.easy.displayable(idle_image),
hover_=renpy.easy.displayable(hover_image),
insensitive_=renpy.easy.displayable(insensitive_image),
activate_=renpy.easy.displayable(activate_image),
selected_idle_=renpy.easy.displayable(selected_idle_image),
selected_hover_=renpy.easy.displayable(selected_hover_image),
selected_insensitive_=renpy.easy.displayable(selected_insensitive_image),
selected_activate_=renpy.easy.displayable(selected_activate_image),
)
super(ImageButton, self).__init__(None,
style=style,
clicked=clicked,
hovered=hovered,
**properties)
def visit(self):
return self.state_children.values()
def get_child(self):
return self.style.child or self.state_children[self.style.prefix]
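# A minimal usage sketch for ImageButton (illustrative only; the image
# filenames are placeholders and the old-style callable `clicked` simply
# returns a value when the button is activated):
def _example_image_button():
    return ImageButton("start_idle.png", "start_hover.png",
                       clicked=lambda: "start")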
# This is used for an input that takes its focus from a button.
class HoveredProxy(object):
def __init__(self, a, b):
self.a = a
self.b = b
def __call__(self):
self.a()
if self.b:
return self.b()
# The currently editable input value.
current_input_value = None
# Is the current input value active?
input_value_active = False
# The default input value to use if the currently editable value doesn't
# exist.
default_input_value = None
# A list of input values that exist.
input_values = [ ]
# A list of inputs that exist in the current interaction.
inputs = [ ]
def input_pre_per_interact():
global input_values
global inputs
global default_input_value
input_values = [ ]
inputs = [ ]
default_input_value = None
def input_post_per_interact():
global current_input_value
global input_value_active
for i in input_values:
if i is current_input_value:
break
else:
current_input_value = default_input_value
input_value_active = True
for i in inputs:
editable = (i.value is current_input_value) and input_value_active and i.value.editable
content = i.value.get_text()
if (i.editable != editable) or (content != i.content):
i.update_text(content, editable)
i.caret_pos = len(content)
class Input(renpy.text.text.Text): # @UndefinedVariable
"""
This is a Displayable that takes text as input.
"""
changed = None
prefix = ""
suffix = ""
caret_pos = 0
old_caret_pos = 0
pixel_width = None
default = u""
edit_text = u""
value = None
def __init__(self,
default="",
length=None,
style='input',
allow=None,
exclude=None,
prefix="",
suffix="",
changed=None,
button=None,
replaces=None,
editable=True,
pixel_width=None,
value=None,
**properties):
super(Input, self).__init__("", style=style, replaces=replaces, substitute=False, **properties)
if value:
self.value = value
changed = value.set_text
default = value.get_text()
self.default = unicode(default)
self.content = self.default
self.length = length
self.allow = allow
self.exclude = exclude
self.prefix = prefix
self.suffix = suffix
self.changed = changed
self.editable = editable
self.pixel_width = pixel_width
caretprops = { 'color' : None }
for i in properties:
if i.endswith("color"):
caretprops[i] = properties[i]
self.caret = renpy.display.image.Solid(xmaximum=1, style=style, **caretprops)
self.caret_pos = len(self.content)
self.old_caret_pos = self.caret_pos
if button:
self.editable = False
button.hovered = HoveredProxy(self.enable, button.hovered)
button.unhovered = HoveredProxy(self.disable, button.unhovered)
if isinstance(replaces, Input):
self.content = replaces.content
self.editable = replaces.editable
self.caret_pos = replaces.caret_pos
self.update_text(self.content, self.editable)
def _show(self):
if self.default != self.content:
self.content = self.default
self.caret_pos = len(self.content)
self.update_text(self.content, self.editable)
def update_text(self, new_content, editable, check_size=False):
edit = renpy.display.interface.text_editing
old_content = self.content
if new_content != self.content or editable != self.editable or edit:
renpy.display.render.redraw(self, 0)
self.editable = editable
# Choose the caret.
caret = self.style.caret
if caret is None:
caret = self.caret
# Format text being edited by the IME.
if edit:
self.edit_text = edit.text
edit_text_0 = edit.text[:edit.start]
edit_text_1 = edit.text[edit.start:edit.start + edit.length]
edit_text_2 = edit.text[edit.start + edit.length:]
edit_text = ""
if edit_text_0:
edit_text += "{u=1}" + edit_text_0.replace("{", "{{") + "{/u}"
if edit_text_1:
edit_text += "{u=2}" + edit_text_1.replace("{", "{{") + "{/u}"
if edit_text_2:
edit_text += "{u=1}" + edit_text_2.replace("{", "{{") + "{/u}"
else:
self.edit_text = ""
edit_text = ""
def set_content(content):
if content == "":
content = u"\u200b"
if editable:
l = len(content)
self.set_text([self.prefix, content[0:self.caret_pos].replace("{", "{{"), edit_text, caret,
content[self.caret_pos:l].replace("{", "{{"), self.suffix])
else:
self.set_text([self.prefix, content.replace("{", "{{"), self.suffix ])
set_content(new_content)
if check_size and self.pixel_width:
w, _h = self.size()
if w > self.pixel_width:
self.caret_pos = self.old_caret_pos
set_content(old_content)
return
if new_content != old_content:
self.content = new_content
if self.changed:
self.changed(new_content)
# This is needed to ensure the caret updates properly.
def set_style_prefix(self, prefix, root):
if prefix != self.style.prefix:
self.update_text(self.content, self.editable)
super(Input, self).set_style_prefix(prefix, root)
def enable(self):
self.update_text(self.content, True)
def disable(self):
self.update_text(self.content, False)
def per_interact(self):
global default_input_value
if self.value is not None:
inputs.append(self)
input_values.append(self.value)
if self.value.default and (default_input_value is None):
default_input_value = self.value
def event(self, ev, x, y, st):
self.old_caret_pos = self.caret_pos
if not self.editable:
return None
if (ev.type == pygame.KEYDOWN) and (pygame.key.get_mods() & pygame.KMOD_LALT) and (not ev.unicode):
return None
l = len(self.content)
raw_text = None
if map_event(ev, "input_backspace"):
if self.content and self.caret_pos > 0:
content = self.content[0:self.caret_pos-1] + self.content[self.caret_pos:l]
self.caret_pos -= 1
self.update_text(content, self.editable)
renpy.display.render.redraw(self, 0)
raise renpy.display.core.IgnoreEvent()
elif map_event(ev, "input_enter"):
content = self.content
if self.edit_text:
content = content[0:self.caret_pos] + self.edit_text + self.content[self.caret_pos:]
if self.value:
return self.value.enter()
if not self.changed:
return content
elif map_event(ev, "input_left"):
if self.caret_pos > 0:
self.caret_pos -= 1
self.update_text(self.content, self.editable)
renpy.display.render.redraw(self, 0)
raise renpy.display.core.IgnoreEvent()
elif map_event(ev, "input_right"):
if self.caret_pos < l:
self.caret_pos += 1
self.update_text(self.content, self.editable)
renpy.display.render.redraw(self, 0)
raise renpy.display.core.IgnoreEvent()
elif map_event(ev, "input_delete"):
if self.caret_pos < l:
content = self.content[0:self.caret_pos] + self.content[self.caret_pos+1:l]
self.update_text(content, self.editable)
renpy.display.render.redraw(self, 0)
raise renpy.display.core.IgnoreEvent()
elif map_event(ev, "input_home"):
self.caret_pos = 0
self.update_text(self.content, self.editable)
renpy.display.render.redraw(self, 0)
raise renpy.display.core.IgnoreEvent()
elif map_event(ev, "input_end"):
self.caret_pos = l
self.update_text(self.content, self.editable)
renpy.display.render.redraw(self, 0)
raise renpy.display.core.IgnoreEvent()
elif ev.type == pygame.TEXTEDITING:
self.update_text(self.content, self.editable, check_size=True)
raise renpy.display.core.IgnoreEvent()
elif ev.type == pygame.TEXTINPUT:
self.edit_text = ""
raw_text = ev.text
elif ev.type == pygame.KEYDOWN:
if ev.unicode and ord(ev.unicode[0]) >= 32:
raw_text = ev.unicode
elif renpy.display.interface.text_event_in_queue():
raw_text = ''
if raw_text is not None:
text = ""
for c in raw_text:
if self.allow and c not in self.allow:
continue
if self.exclude and c in self.exclude:
continue
text += c
if self.length:
remaining = self.length - len(self.content)
text = text[:remaining]
if text:
content = self.content[0:self.caret_pos] + text + self.content[self.caret_pos:l]
self.caret_pos += len(text)
self.update_text(content, self.editable, check_size=True)
raise renpy.display.core.IgnoreEvent()
def render(self, width, height, st, at):
rv = super(Input, self).render(width, height, st, at)
if self.editable:
rv.text_input = True
return rv
# A map from adjustment to lists of displayables that want to be redrawn
# if the adjustment changes.
adj_registered = { }
# This class contains information about an adjustment that can change the
# position of content.
class Adjustment(renpy.object.Object):
"""
:doc: ui
:name: ui.adjustment class
Adjustment objects represent a value that can be adjusted by a bar
or viewport. They contain information about the value, the range
of the value, and how to adjust the value in small steps and large
pages.
"""
def __init__(self, range=1, value=0, step=None, page=None, changed=None, adjustable=None, ranged=None): # @ReservedAssignment
"""
The following parameters correspond to fields or properties on
the adjustment object:
`range`
The range of the adjustment, a number.
`value`
The value of the adjustment, a number.
`step`
The step size of the adjustment, a number. If None, then
defaults to 1/10th of a page, if set. Otherwise, defaults
            to 1/20th of the range.
This is used when scrolling a viewport with the mouse wheel.
`page`
The page size of the adjustment. If None, this is set
automatically by a viewport. If never set, defaults to 1/10th
of the range.
            This is used when clicking on a scrollbar.
The following parameters control the behavior of the adjustment.
`adjustable`
If True, this adjustment can be changed by a bar. If False,
it can't.
It defaults to being adjustable if a `changed` function
is given or if the adjustment is associated with a viewport,
and not adjustable otherwise.
`changed`
This function is called with the new value when the value of
the adjustment changes.
`ranged`
This function is called with the adjustment object when
the range of the adjustment is set by a viewport.
.. method:: change(value)
Changes the value of the adjustment to `value`, updating
any bars and viewports that use the adjustment.
"""
super(Adjustment, self).__init__()
if adjustable is None:
if changed:
adjustable = True
self._value = value
self._range = range
self._page = page
self._step = step
self.changed = changed
self.adjustable = adjustable
self.ranged = ranged
def get_value(self):
if self._value > self._range:
return self._range
return self._value
def set_value(self, v):
self._value = v
value = property(get_value, set_value)
def get_range(self):
return self._range
def set_range(self, v):
self._range = v
if self.ranged:
self.ranged(self)
range = property(get_range, set_range) # @ReservedAssignment
def get_page(self):
if self._page is not None:
return self._page
return self._range / 10
def set_page(self, v):
self._page = v
page = property(get_page, set_page)
def get_step(self):
if self._step is not None:
return self._step
if self._page is not None and self.page > 0:
return self._page / 10
if isinstance(self._range, float):
return self._range / 10
else:
return 1
def set_step(self, v):
self._step = v
step = property(get_step, set_step)
# Register a displayable to be redrawn when this adjustment changes.
def register(self, d):
adj_registered.setdefault(self, [ ]).append(d)
def change(self, value):
if value < 0:
value = 0
if value > self._range:
value = self._range
if value != self._value:
self._value = value
for d in adj_registered.setdefault(self, [ ]):
renpy.display.render.redraw(d, 0)
if self.changed:
return self.changed(value)
return None
def update(self):
"""
Updates things that depend on this adjustment without firing the
changed handler.
"""
for d in adj_registered.setdefault(self, [ ]):
renpy.display.render.redraw(d, 0)
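# A minimal usage sketch for Adjustment (illustrative only; in practice
# adjustments are usually created and ranged by viewports or bars rather
# than constructed by hand):
def _example_adjustment():
    adj = Adjustment(range=100, value=0, step=5)
    adj.change(42)    # clamps to [0, range] and redraws registered displayables
    return adj.value  # -> 42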
class Bar(renpy.display.core.Displayable):
"""
Implements a bar that can display an integer value, and respond
to clicks on that value.
"""
__version__ = 2
def after_upgrade(self, version):
if version < 1:
self.adjustment = Adjustment(self.range, self.value, changed=self.changed) # E1101
self.adjustment.register(self)
del self.range # E1101
del self.value # E1101
del self.changed # E1101
if version < 2:
self.value = None
def __init__(self,
range=None, # @ReservedAssignment
value=None,
width=None,
height=None,
changed=None,
adjustment=None,
step=None,
page=None,
bar=None,
style=None,
vertical=False,
replaces=None,
hovered=None,
unhovered=None,
**properties):
self.value = None
if adjustment is None:
if isinstance(value, renpy.ui.BarValue):
if isinstance(replaces, Bar):
value.replaces(replaces.value)
self.value = value
adjustment = value.get_adjustment()
renpy.game.interface.timeout(0)
else:
adjustment = Adjustment(range, value, step=step, page=page, changed=changed)
if style is None:
if self.value is not None:
if vertical:
style = self.value.get_style()[1]
else:
style = self.value.get_style()[0]
else:
if vertical:
style = 'vbar'
else:
style = 'bar'
if width is not None:
properties['xmaximum'] = width
if height is not None:
properties['ymaximum'] = height
super(Bar, self).__init__(style=style, **properties)
self.adjustment = adjustment
self.focusable = True
# These are set when we are first rendered.
self.thumb_dim = 0
self.height = 0
self.width = 0
self.hidden = False
self.hovered = hovered
self.unhovered = unhovered
def per_interact(self):
if self.value is not None:
adjustment = self.value.get_adjustment()
if adjustment.value != self.value:
renpy.display.render.invalidate(self)
self.adjustment = adjustment
self.focusable = self.adjustment.adjustable
self.adjustment.register(self)
def visit(self):
rv = [ ]
self.style._visit_bar(rv.append)
return rv
def render(self, width, height, st, at):
# Handle redrawing.
if self.value is not None:
redraw = self.value.periodic(st)
if redraw is not None:
renpy.display.render.redraw(self, redraw)
xminimum = self.style.xminimum
yminimum = self.style.yminimum
if xminimum is not None:
width = max(width, xminimum)
height = max(height, yminimum)
# Store the width and height for the event function to use.
self.width = width
self.height = height
range = self.adjustment.range # @ReservedAssignment
value = self.adjustment.value
page = self.adjustment.page
if range <= 0:
if self.style.unscrollable == "hide":
self.hidden = True
return renpy.display.render.Render(width, height)
elif self.style.unscrollable == "insensitive":
self.set_style_prefix("insensitive_", True)
self.hidden = False
if self.style.bar_invert ^ self.style.bar_vertical:
value = range - value
bar_vertical = self.style.bar_vertical
if bar_vertical:
dimension = height
else:
dimension = width
fore_gutter = self.style.fore_gutter
aft_gutter = self.style.aft_gutter
active = dimension - fore_gutter - aft_gutter
if range:
thumb_dim = active * page / (range + page)
else:
thumb_dim = active
thumb_offset = abs(self.style.thumb_offset)
if bar_vertical:
thumb = render(self.style.thumb, width, thumb_dim, st, at)
thumb_shadow = render(self.style.thumb_shadow, width, thumb_dim, st, at)
thumb_dim = thumb.height
else:
thumb = render(self.style.thumb, thumb_dim, height, st, at)
thumb_shadow = render(self.style.thumb_shadow, thumb_dim, height, st, at)
thumb_dim = thumb.width
# Remove the offset from the thumb.
thumb_dim -= thumb_offset * 2
self.thumb_dim = thumb_dim
active -= thumb_dim
if range:
fore_size = active * value / range
else:
fore_size = active
fore_size = int(fore_size)
aft_size = active - fore_size
fore_size += fore_gutter
aft_size += aft_gutter
rv = renpy.display.render.Render(width, height)
if bar_vertical:
if self.style.bar_resizing:
foresurf = render(self.style.fore_bar, width, fore_size, st, at)
aftsurf = render(self.style.aft_bar, width, aft_size, st, at)
rv.blit(thumb_shadow, (0, fore_size - thumb_offset))
rv.blit(foresurf, (0, 0), main=False)
rv.blit(aftsurf, (0, height-aft_size), main=False)
rv.blit(thumb, (0, fore_size - thumb_offset))
else:
foresurf = render(self.style.fore_bar, width, height, st, at)
aftsurf = render(self.style.aft_bar, width, height, st, at)
rv.blit(thumb_shadow, (0, fore_size - thumb_offset))
rv.blit(foresurf.subsurface((0, 0, width, fore_size)), (0, 0), main=False)
rv.blit(aftsurf.subsurface((0, height - aft_size, width, aft_size)), (0, height - aft_size), main=False)
rv.blit(thumb, (0, fore_size - thumb_offset))
else:
if self.style.bar_resizing:
foresurf = render(self.style.fore_bar, fore_size, height, st, at)
aftsurf = render(self.style.aft_bar, aft_size, height, st, at)
rv.blit(thumb_shadow, (fore_size - thumb_offset, 0))
rv.blit(foresurf, (0, 0), main=False)
rv.blit(aftsurf, (width-aft_size, 0), main=False)
rv.blit(thumb, (fore_size - thumb_offset, 0))
else:
foresurf = render(self.style.fore_bar, width, height, st, at)
aftsurf = render(self.style.aft_bar, width, height, st, at)
rv.blit(thumb_shadow, (fore_size - thumb_offset, 0))
rv.blit(foresurf.subsurface((0, 0, fore_size, height)), (0, 0), main=False)
rv.blit(aftsurf.subsurface((width - aft_size, 0, aft_size, height)), (width-aft_size, 0), main=False)
rv.blit(thumb, (fore_size - thumb_offset, 0))
if self.focusable:
rv.add_focus(self, None, 0, 0, width, height)
return rv
def focus(self, default=False):
super(Bar, self).focus(default)
self.set_transform_event("hover")
if not default:
run(self.hovered)
def unfocus(self, default=False):
super(Bar, self).unfocus()
self.set_transform_event("idle")
if not default:
run_unhovered(self.hovered)
run(self.unhovered)
def event(self, ev, x, y, st):
if not self.focusable:
return None
if not self.is_focused():
return None
if self.hidden:
return None
range = self.adjustment.range # @ReservedAssignment
old_value = self.adjustment.value
value = old_value
vertical = self.style.bar_vertical
invert = self.style.bar_invert ^ vertical
if invert:
value = range - value
grabbed = (renpy.display.focus.get_grab() is self)
just_grabbed = False
ignore_event = False
if not grabbed and map_event(ev, "bar_activate"):
renpy.display.tts.speak(renpy.minstore.__("activate"))
renpy.display.focus.set_grab(self)
self.set_style_prefix("selected_hover_", True)
just_grabbed = True
grabbed = True
ignore_event = True
if grabbed:
if vertical:
increase = "bar_down"
decrease = "bar_up"
else:
increase = "bar_right"
decrease = "bar_left"
if map_event(ev, decrease):
renpy.display.tts.speak(renpy.minstore.__("decrease"))
value -= self.adjustment.step
ignore_event = True
if map_event(ev, increase):
renpy.display.tts.speak(renpy.minstore.__("increase"))
value += self.adjustment.step
ignore_event = True
if ev.type in (pygame.MOUSEMOTION, pygame.MOUSEBUTTONUP, pygame.MOUSEBUTTONDOWN):
if vertical:
tgutter = self.style.fore_gutter
bgutter = self.style.aft_gutter
zone_height = self.height - tgutter - bgutter - self.thumb_dim
if zone_height:
value = (y - tgutter - self.thumb_dim / 2) * range / zone_height
else:
value = 0
else:
lgutter = self.style.fore_gutter
rgutter = self.style.aft_gutter
zone_width = self.width - lgutter - rgutter - self.thumb_dim
if zone_width:
value = (x - lgutter - self.thumb_dim / 2) * range / zone_width
else:
value = 0
ignore_event = True
if isinstance(range, int):
value = int(value)
if value < 0:
renpy.display.tts.speak("")
value = 0
if value > range:
renpy.display.tts.speak("")
value = range
if invert:
value = range - value
if grabbed and not just_grabbed and map_event(ev, "bar_deactivate"):
renpy.display.tts.speak(renpy.minstore.__("deactivate"))
self.set_style_prefix("hover_", True)
renpy.display.focus.set_grab(None)
ignore_event = True
if value != old_value:
rv = self.adjustment.change(value)
if rv is not None:
return rv
if ignore_event:
raise renpy.display.core.IgnoreEvent()
else:
return None
def set_style_prefix(self, prefix, root):
if root:
super(Bar, self).set_style_prefix(prefix, root)
def _tts(self):
return ""
def _tts_all(self):
if self.value is not None:
alt = self.value.alt
else:
alt = ""
return self._tts_common(alt) + renpy.minstore.__("bar")
class Conditional(renpy.display.layout.Container):
"""
This class renders its child if and only if the condition is
true. Otherwise, it renders nothing. (Well, a Null).
Warning: the condition MUST NOT update the game state in any
way, as that would break rollback.
"""
def __init__(self, condition, *args, **properties):
super(Conditional, self).__init__(*args, **properties)
self.condition = condition
self.null = renpy.display.layout.Null()
self.state = eval(self.condition, vars(renpy.store))
def render(self, width, height, st, at):
if self.state:
return render(self.child, width, height, st, at)
else:
return render(self.null, width, height, st, at)
def event(self, ev, x, y, st):
state = eval(self.condition, vars(renpy.store))
if state != self.state:
renpy.display.render.redraw(self, 0)
self.state = state
if state:
return self.child.event(ev, x, y, st)
class TimerState(renpy.python.RevertableObject):
"""
Stores the state of the timer, which may need to be rolled back.
"""
# Prevents us from having to worry about our initialization being
# rolled back.
started = False
next_event = None
class Timer(renpy.display.layout.Null):
__version__ = 1
started = False
def after_upgrade(self, version):
if version < 1:
self.state = TimerState()
self.state.started = self.started
self.state.next_event = self.next_event
def __init__(self, delay, action=None, repeat=False, args=(), kwargs={}, replaces=None, **properties):
super(Timer, self).__init__(**properties)
if action is None:
raise Exception("A timer must have an action supplied.")
if delay <= 0:
raise Exception("A timer's delay must be > 0.")
# The delay.
self.delay = delay
# Should we repeat the event?
self.repeat = repeat
# The time the next event should occur.
self.next_event = None
# The function and its arguments.
self.function = action
self.args = args
self.kwargs = kwargs
# Did we start the timer?
self.started = False
if replaces is not None:
self.state = replaces.state
else:
self.state = TimerState()
def event(self, ev, x, y, st):
state = self.state
if not state.started:
state.started = True
state.next_event = st + self.delay
if state.next_event is None:
return
if st < state.next_event:
renpy.game.interface.timeout(state.next_event - st)
return
if not self.repeat:
state.next_event = None
else:
state.next_event = state.next_event + self.delay
if state.next_event < st:
state.next_event = st + self.delay
renpy.game.interface.timeout(state.next_event - st)
return run(self.function, *self.args, **self.kwargs)
class MouseArea(renpy.display.core.Displayable):
# The offset between st and at.
at_st_offset = 0
def __init__(self, hovered=None, unhovered=None, replaces=None, **properties):
super(MouseArea, self).__init__(**properties)
self.hovered = hovered
self.unhovered = unhovered
# Are we hovered right now?
self.is_hovered = False
if replaces is not None:
self.is_hovered = replaces.is_hovered
# Taken from the render.
self.width = 0
self.height = 0
def render(self, width, height, st, at):
self.width = width
self.height = height
self.at_st_offset = at - st
return Render(width, height)
def event(self, ev, x, y, st):
# Mouseareas should not handle events when something else is grabbing.
if renpy.display.focus.get_grab():
return
if self.style.focus_mask is not None:
crend = renpy.display.render.render(self.style.focus_mask, self.width, self.height, st, self.at_st_offset + st)
is_hovered = crend.is_pixel_opaque(x, y)
elif 0 <= x < self.width and 0 <= y < self.height:
is_hovered = True
else:
is_hovered = False
if is_hovered and not self.is_hovered:
self.is_hovered = True
return run(self.hovered)
elif not is_hovered and self.is_hovered:
self.is_hovered = False
run_unhovered(self.hovered)
run(self.unhovered)
class OnEvent(renpy.display.core.Displayable):
"""
This is a displayable that runs an action in response to a transform
event. It's used to implement the screen language on statement.
"""
def __init__(self, event, action=[ ]):
"""
`event`
A string giving the event name.
`action`
An action or list of actions that are run when the event occurs.
"""
super(OnEvent, self).__init__()
self.event_name = event
self.action = action
def _handles_event(self, event):
if self.event_name == event:
return True
else:
return False
def set_transform_event(self, event):
if event == self.event_name:
run(self.action)
def render(self, width, height, st, at):
return renpy.display.render.Render(0, 0)
|
python
|
# -*- coding: utf-8 -*-
from collections import Counter
from tests.testapp.tests.base_tests import BaseRedisTestCase
from tests.testapp.tests.multi_server_tests import MultiServerTests
from django.test import TestCase, override_settings
LOCATION = "unix://:yadayada@/tmp/redis0.sock?db=15"
LOCATIONS = [
"unix://:yadayada@/tmp/redis0.sock?db=15",
"unix://:yadayada@/tmp/redis1.sock?db=15",
"unix://:yadayada@/tmp/redis2.sock?db=15",
]
class SocketTestCase(BaseRedisTestCase, TestCase):
pass
@override_settings(
CACHES={
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': LOCATION,
'OPTIONS': {
'DB': 15,
'PASSWORD': 'yadayada',
'PARSER_CLASS': 'redis.connection.HiredisParser',
'PICKLE_VERSION': 2,
'CONNECTION_POOL_CLASS': 'redis.ConnectionPool',
'CONNECTION_POOL_CLASS_KWARGS': {
'max_connections': 2,
}
},
},
}
)
class SingleHiredisTestCase(SocketTestCase):
pass
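# Sketch of how these cache settings are exercised through Django's regular
# cache API (the key/value below are placeholders, and the redis sockets
# configured above must be running):
def _example_cache_roundtrip():
    from django.core.cache import cache
    cache.set('greeting', 'hello', timeout=60)
    return cache.get('greeting')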
@override_settings(
CACHES={
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': LOCATION,
'OPTIONS': {
'DB': 15,
'PASSWORD': 'yadayada',
'PARSER_CLASS': 'redis.connection.PythonParser',
'PICKLE_VERSION': 2,
'CONNECTION_POOL_CLASS': 'redis.ConnectionPool',
'CONNECTION_POOL_CLASS_KWARGS': {
'max_connections': 2,
}
},
},
}
)
class SinglePythonParserTestCase(SocketTestCase):
pass
@override_settings(
CACHES={
'default': {
'BACKEND': 'redis_cache.ShardedRedisCache',
'LOCATION': LOCATIONS,
'OPTIONS': {
'DB': 15,
'PASSWORD': 'yadayada',
'PARSER_CLASS': 'redis.connection.HiredisParser',
'PICKLE_VERSION': 2,
'CONNECTION_POOL_CLASS': 'redis.ConnectionPool',
'CONNECTION_POOL_CLASS_KWARGS': {
'max_connections': 2,
}
},
},
}
)
class MultipleHiredisTestCase(MultiServerTests, SocketTestCase):
def test_equal_number_of_nodes(self):
counter = Counter(
[node._node[3] for node in self.cache.sharder._nodes]
)
self.assertEqual(counter, {
'/tmp/redis0.sock': 16,
'/tmp/redis1.sock': 16,
'/tmp/redis2.sock': 16,
})
@override_settings(
CACHES={
'default': {
'BACKEND': 'redis_cache.ShardedRedisCache',
'LOCATION': LOCATIONS,
'OPTIONS': {
'DB': 15,
'PASSWORD': 'yadayada',
'PARSER_CLASS': 'redis.connection.PythonParser',
'PICKLE_VERSION': 2,
'CONNECTION_POOL_CLASS': 'redis.ConnectionPool',
'CONNECTION_POOL_CLASS_KWARGS': {
'max_connections': 2,
}
},
},
}
)
class MultiplePythonParserTestCase(MultiServerTests, SocketTestCase):
pass
|
python
|
import os
import logging
# (windows only for now)
if os.name == 'nt':
try:
logging.info('Looking for CUDA and adding it to path...')
# some python versions fail to load the path variables, so we're doing it manually here before importing tf
loaddir = "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2/bin"
os.add_dll_directory(loaddir)
logging.info('Found!')
except Exception as ex:
        logging.info(f'CUDA not found, falling back to CPU - this will be much slower\n{ex}')
import shutil
import json
import numpy as np
logging.info('Loading TensorFlow Libs...') # noqa: E402
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, GRU
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
logging.info('Done!')
# Save model weights
VERBOSE: int = 1 # 0: no visual feedback, 1: animated progress bar, 2: show number of epoch
checkpointer = ModelCheckpoint(
filepath="pred_model_weights.hdf5", verbose=VERBOSE, save_best_only=True)
# Use early stopping to exit training if validation loss is not decreasing even after certain epochs (patience)
PATIENCE: int = 20 # For early-stopping
earlystopping = EarlyStopping(
monitor='loss', mode='min', verbose=VERBOSE, patience=PATIENCE)
model_metrics = [checkpointer, earlystopping]
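# These callbacks are meant to be passed to `model.fit` via `callbacks=`.
# A sketch with placeholder arrays (x_train/y_train are assumptions; note that
# `save_best_only=True` monitors val_loss, so validation data is required):
#
#   model.fit(x_train, y_train, epochs=100, validation_split=0.1,
#             callbacks=model_metrics)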
# This function builds a sequential model (linear - only one pathway)
def build_sequential(window_len, input_columns, output_size, neurons=3000, activ_func='linear',
dropout=0.2, loss='mse', optimizer='adam'):
model = Sequential()
# model.add(Embedding(window_len, 512))
model.add(GRU(neurons, input_shape=(window_len, input_columns)))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(dropout))
model.add(Dense(512, activation='relu'))
model.add(Dropout(dropout))
model.add(Dense(512, activation='relu'))
model.add(Dropout(dropout))
model.add(Dense(256, activation='relu'))
model.add(Dropout(dropout))
model.add(Dense(256, activation='relu'))
model.add(Dropout(dropout))
model.add(Dense(units=output_size, activation=activ_func))
model.compile(loss=loss, optimizer=optimizer)
return model
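# A minimal usage sketch for build_sequential (the window length, column count
# and neuron count below are illustrative assumptions, not the project's real
# training configuration):
def _example_build_sequential():
    model = build_sequential(window_len=10, input_columns=5, output_size=1,
                             neurons=64)
    model.summary()
    return model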
# This function builds a nn model using the 'functional api':
# it's more advanced and allows to have multiple pathways (non-linear)
def build(window_len, input_columns, output_size, neurons, activ_func='linear',
dropout=0.2, loss='mse', optimizer='adam'):
inputs = keras.Input(shape=(window_len, input_columns))
# long path
gru = GRU(neurons)
x = gru(inputs)
x = Dense(neurons*2, activation="relu")(x)
x = Dropout(dropout)(x)
x = Dense(neurons*2, activation="relu")(x)
x = Dropout(dropout)(x)
x = Dense(neurons*2, activation="relu")(x)
x = Dropout(dropout)(x)
# x = Dense(neurons*2, activation="relu")(x)
# x = Dropout(dropout)(x)
# x = Dense(neurons, activation="relu")(x)
# x = Dropout(dropout)(x)
# x = Dense(neurons, activation="relu")(x)
# x = Dropout(dropout)(x)
# x = Dense(neurons, activation="relu")(x)
# x = Dropout(dropout)(x)
# x = Dense(512, activation="linear")(x)
# x = Dropout(dropout)(x)
# x = Dense(256, activation="linear")(x)
# x = Dropout(dropout)(x)
# x = Dense(128, activation="linear")(x)
# x = Dropout(dropout)(x)
# x = Dense(64, activation="linear")(x)
# x = Dropout(dropout)(x)
# x = Dense(32, activation="linear")(x)
# x = Dropout(dropout)(x)
# x = Dense(16, activation="linear")(x)
# x = Dropout(dropout)(x)
# x = Dense(output_size, activation="linear")(x)
# # short path
# gru = GRU(int(512))
# x2 = gru(inputs)
# x2 = Dense(output_size, activation="linear")(x2)
# x = Add()([x,x2])
# x = Activation('linear')(x)
outputs = Dense(output_size, activation=activ_func)(x)
model = keras.Model(inputs=inputs, outputs=outputs,
name="Price_Prediction_Model")
model.compile(loss=loss, optimizer=optimizer)
return model
def save(model, history, config):
model_json = model.to_json()
with open(f"{config.SYMBOL}_pred_model.json", "w") as json_file:
json_file.write(model_json)
if history is not None:
np.save('history.npy', history.history)
path = os.path.join('trained_models', config.MODEL_FOLDER)
    if os.path.isdir(path):
        logging.info('This model version already exists! OVERWRITING!')
        shutil.rmtree(path)
    os.mkdir(path)
params = {'optimizer': config.OPTIMIZER, 'loss': config.LOSS}
with open(os.path.join(path, 'params.json'), 'w') as json_file:
json.dump(params, json_file)
os.rename("history.npy", os.path.join(path, "history.npy"))
os.rename(f"{config.SYMBOL}_pred_model.json", os.path.join(
path, f"{config.SYMBOL}_pred_model.json"))
os.rename("pred_model_weights.hdf5", os.path.join(
path, f"{config.SYMBOL}_pred_model_weights.hdf5"))
logging.info(f'Trained model successfully saved to {path}')
def load(model_folder, optimizer=None, loss=None, metrics=None):
logging.info(f'Loading model <{model_folder}>...')
try:
with open(os.path.join('trained_models', model_folder, 'eth_pred_model.json'), 'r') as json_file:
json_saved_model = json_file.read()
except Exception as e:
logging.info(f'Failed to load the model - model not found!\n{e}')
return
model = tf.keras.models.model_from_json(json_saved_model)
# history=np.load(os.path.join('trained_models', model_folder, 'history.npy'),allow_pickle='TRUE').item()
if optimizer is None or loss is None:
logging.info(
f'No model parameters given - reading from file')
try:
with open(os.path.join('trained_models', model_folder, 'params.json')) as json_file:
params = json.load(json_file)
except Exception as e:
logging.info(f'Failed to load the params - file not found! Please provide the optimizer and loss '
f'in the function call or in a params.json file\n{e}')
return
optimizer = params['optimizer']
loss = params['loss']
try:
logging.info('Loading model weights...')
model.load_weights(os.path.join(
'trained_models', model_folder, 'eth_pred_model_weights.hdf5'))
except Exception as e:
logging.info(f'Failed to load the model - weight file not found!\n{e}')
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
logging.info(f'Successfully loaded the model')
return model
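# A minimal usage sketch for load (the folder name is a placeholder; it must
# contain the json/params/weights files written by save above):
def _example_load():
    model = load('GRU_v1', metrics=['mae'])
    if model is not None:
        model.summary()
    return model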
|
python
|
import os
import getpass
import hashlib
import shutil
os.system('cls')
print("Done...")
if 'MainDrive' in os.listdir('.'):
    shutil.rmtree("MainDrive")  # os.rmdir would fail if MainDrive still has contents
os.mkdir('MainDrive/')
os.mkdir('MainDrive/Users/')
username = input("Username: ")
password = getpass.getpass("Password (No echo): ")
encp = password.encode()
d = hashlib.sha256(encp)
hash = d.hexdigest()
os.mkdir(f'MainDrive/Users/{username}')
if 'ubin' not in os.listdir('.'):
os.mkdir('ubin')
with open(f"MainDrive/Users/{username}/usrdir", "w") as usrdir:
    usrdir.write(username)
with open(f"MainDrive/Users/{username}/pswdir", "w") as pswdir:
    pswdir.write(hash)
os.mkdir(f'MainDrive/Users/{username}/Desktop')
os.system(f'python3 bootscreen.py --noboot --nologin --username {username} --password {password}')
|
python
|
# -*- encoding: utf-8 -*-
"""
@File : Surprise_SGD.py
@Time : 2020/11/21 14:41
@Author : biao chen
@Email : [email protected]
@Software: PyCharm
"""
from surprise import Dataset
from surprise import Reader
from surprise import BaselineOnly, KNNBasic
from surprise import accuracy
from surprise.model_selection import KFold
# Load the ratings data
file_path = 'E:/python/machina/kaggle_practice/week4/data/ratings.csv'
reader = Reader(line_format='user item rating timestamp', sep=',', skip_lines=1)
data = Dataset.load_from_file(file_path, reader=reader)
train_set = data.build_full_trainset()
'''
SGD parameters:
reg: regularization term of the cost function, default 0.02.
learning_rate: learning rate, default 0.005.
n_epochs: number of iterations, default 20.
'''
# Baseline algorithm, optimized with SGD
bsl_options = {'method': 'sgd','n_epochs': 5}
algo = BaselineOnly(bsl_options=bsl_options)
# Define the K-fold cross-validation iterator, K=3
kf = KFold(n_splits=3)
for trainset, testset in kf.split(data):
algo.fit(trainset)
predictions = algo.test(testset)
accuracy.rmse(predictions, verbose=True)
uid = str(196)
iid = str(302)
pred = algo.predict(uid, iid, r_ui=4, verbose=True)
print(pred)
# Iterates faster than ALS
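# For comparison, ALS is the other optimizer BaselineOnly supports; the ALS
# variant is constructed like this (not fitted here, shown for illustration):
algo_als = BaselineOnly(bsl_options={'method': 'als', 'n_epochs': 5})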
|
python
|
########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import uuid
from datetime import datetime
from flask import current_app
from manager_rest import models, storage_manager
from manager_rest.blueprints_manager import tasks, BlueprintsManager
import manager_rest.manager_exceptions
import manager_rest.workflow_client as wf_client
from dsl_parser import constants
from handlers import (DeploymentUpdateNodeHandler,
DeploymentUpdateNodeInstanceHandler)
from validator import StepValidator
from utils import extract_ids
from constants import STATE, CHANGE_TYPE
class DeploymentUpdateManager(object):
def __init__(self):
self.sm = storage_manager.get_storage_manager()
self.workflow_client = wf_client.get_workflow_client()
self._node_handler = DeploymentUpdateNodeHandler()
self._node_instance_handler = DeploymentUpdateNodeInstanceHandler()
self._step_validator = StepValidator()
def get_deployment_update(self, deployment_update_id):
"""Return the deployment update object
:param deployment_update_id:
:return:
"""
return self.sm.get_deployment_update(deployment_update_id)
def deployment_updates_list(self, include=None, filters=None,
pagination=None, sort=None):
"""Return a list of deployment updates.
:param include:
:param filters:
:param pagination:
:param sort:
:return:
"""
return self.sm.deployment_updates_list(include=include,
filters=filters,
pagination=pagination,
sort=sort)
def stage_deployment_update(self, deployment_id, staged_blueprint):
"""Stage a deployment update
:param deployment_id: the deployment id for the update
:param staged_blueprint: the modified blueprint
:return:
"""
self._validate_no_active_updates_per_deployment(deployment_id)
deployment_update = models.DeploymentUpdate(deployment_id,
staged_blueprint)
self.sm.put_deployment_update(deployment_update)
return deployment_update
def create_deployment_update_step(self, deployment_update_id,
operation, entity_type, entity_id):
"""Create deployment update step
:param deployment_update_id:
:param operation: add/remove/modify
        :param entity_type: node/relationship
:param entity_id:
:return:
"""
step = models.DeploymentUpdateStep(operation,
entity_type,
entity_id)
dep_update = self.get_deployment_update(deployment_update_id)
self._step_validator.validate(dep_update, step)
self.sm.put_deployment_update_step(deployment_update_id, step)
return step
def commit_deployment_update(self, deployment_update_id):
"""commit the deployment update steps
:param deployment_update_id:
:return:
"""
dep_update = self.get_deployment_update(deployment_update_id)
# mark deployment update as committing
dep_update.state = STATE.COMMITTING
self.sm.update_deployment_update(dep_update)
# Update the nodes on the storage
modified_entity_ids, depup_nodes = \
self._node_handler.handle(dep_update)
# Extract changes from raw nodes
node_instance_changes = self._extract_changes(dep_update,
depup_nodes)
# Create (and update for adding step type) node instances
# according to the changes in raw_nodes
depup_node_instances = \
self._node_instance_handler.handle(dep_update,
node_instance_changes)
# Saving the needed changes back to sm for future use
# (removing entities).
dep_update.deployment_update_nodes = depup_nodes
dep_update.deployment_update_node_instances = depup_node_instances
dep_update.modified_entity_ids = modified_entity_ids.to_dict()
self.sm.update_deployment_update(dep_update)
# Execute update workflow using added and related instances
# This workflow will call a finalize_update, since removing entities
# should be done after the executions.
        # The raw_node_instances are being used only for their ids, thus
        # they should really hold the finished version for the node instance.
self._execute_update_workflow(dep_update,
depup_node_instances,
modified_entity_ids.to_dict())
return models.DeploymentUpdate(deployment_update_id,
dep_update.blueprint)
def _validate_no_active_updates_per_deployment(self, deployment_id):
"""
Validate there are no uncommitted updates for provided deployment.
raises conflict error if there are.
:param deployment_id: deployment id
"""
existing_updates = \
self.deployment_updates_list(filters={
'deployment_id': deployment_id
}).items
active_update = \
next(iter(
[u for u in existing_updates
if u.state != STATE.COMMITTED]), None)
if active_update:
raise manager_rest.manager_exceptions.ConflictError(
'deployment update {0} is not committed yet'
.format(active_update.id)
)
def _extract_changes(self, dep_update, raw_nodes):
"""Extracts the changes between the current node_instances and
the raw_nodes specified
:param dep_update:
:param raw_nodes:
        :return: a dictionary mapping modification type to the modified node instances
"""
deployment_id_filter = \
{'deployment_id': dep_update.deployment_id}
# By this point the node_instances aren't updated yet
raw_node_instances = \
[instance.to_dict() for instance in
self.sm.get_node_instances(filters=deployment_id_filter).items]
# project changes in deployment
return tasks.modify_deployment(
nodes=raw_nodes,
previous_node_instances=raw_node_instances,
modified_nodes=()
)
def _execute_update_workflow(self,
dep_update,
node_instances,
modified_entity_ids):
"""Executed the update workflow
:param dep_update:
:param node_instances: a dictionary of modification type and
modified instances
:param modified_entity_ids: the entire modified entities list (by id)
:return:
"""
added_instances = node_instances[CHANGE_TYPE.ADDED_AND_RELATED]
extended_instances = node_instances[CHANGE_TYPE.EXTENDED_AND_RELATED]
reduced_instances = node_instances[CHANGE_TYPE.REDUCED_AND_RELATED]
removed_instances = node_instances[CHANGE_TYPE.REMOVED_AND_RELATED]
instance_ids = {
# needed in order to finalize the commit
'update_id': dep_update.id,
# For any added node instance
'added_instance_ids':
extract_ids(added_instances.get(CHANGE_TYPE.AFFECTED)),
'added_target_instances_ids':
extract_ids(added_instances.get(CHANGE_TYPE.RELATED)),
# encapsulated all the change entity_ids (in a dictionary with
# 'node' and 'relationship' keys.
'modified_entity_ids': modified_entity_ids,
# Any nodes which were extended (positive modification)
'extended_instance_ids':
extract_ids(extended_instances.get(CHANGE_TYPE.AFFECTED)),
'extend_target_instance_ids':
extract_ids(extended_instances.get(CHANGE_TYPE.RELATED)),
# Any nodes which were reduced (negative modification)
'reduced_instance_ids':
extract_ids(reduced_instances.get(CHANGE_TYPE.AFFECTED)),
'reduce_target_instance_ids':
extract_ids(reduced_instances.get(CHANGE_TYPE.RELATED)),
# Any nodes which were removed as a whole
'removed_instance_ids':
extract_ids(removed_instances.get(CHANGE_TYPE.AFFECTED)),
'remove_target_instance_ids':
extract_ids(removed_instances.get(CHANGE_TYPE.RELATED))
}
return self.execute_workflow(deployment_id=dep_update.deployment_id,
workflow_id='update',
parameters=instance_ids)
def finalize_commit(self, deployment_update_id):
""" finalizes the update process by removing any removed
node/node instances and updating any reduced node
:param deployment_update_id:
:return:
"""
dep_update = self.get_deployment_update(deployment_update_id)
self._node_instance_handler.finalize(dep_update)
self._node_handler.finalize(dep_update)
# mark deployment update as committed
dep_update.state = STATE.COMMITTED
self.sm.update_deployment_update(dep_update)
return models.DeploymentUpdate(deployment_update_id,
dep_update.blueprint)
def execute_workflow(self,
deployment_id,
workflow_id,
parameters=None,
allow_custom_parameters=False,
force=False):
"""Executes the specified workflow
:param deployment_id:
:param workflow_id:
:param parameters:
:param allow_custom_parameters:
:param force:
:return:
"""
deployment = self.sm.get_deployment(deployment_id)
blueprint = self.sm.get_blueprint(deployment.blueprint_id)
if workflow_id not in deployment.workflows:
raise manager_rest.manager_exceptions.NonexistentWorkflowError(
'Workflow {0} does not exist in deployment {1}'.format(
workflow_id, deployment_id))
workflow = deployment.workflows[workflow_id]
execution_parameters = \
BlueprintsManager._merge_and_validate_execution_parameters(
workflow, workflow_id, parameters, allow_custom_parameters)
execution_id = str(uuid.uuid4())
new_execution = models.Execution(
id=execution_id,
status=models.Execution.PENDING,
created_at=str(datetime.now()),
blueprint_id=deployment.blueprint_id,
workflow_id=workflow_id,
deployment_id=deployment_id,
error='',
parameters=BlueprintsManager._get_only_user_execution_parameters(
execution_parameters),
is_system_workflow=False)
self.sm.put_execution(new_execution.id, new_execution)
# executing the user workflow
workflow_plugins = blueprint.plan[
constants.WORKFLOW_PLUGINS_TO_INSTALL]
self.workflow_client.execute_workflow(
workflow_id,
workflow,
workflow_plugins=workflow_plugins,
blueprint_id=deployment.blueprint_id,
deployment_id=deployment_id,
execution_id=execution_id,
execution_parameters=execution_parameters)
return new_execution
# What we need to access this manager in Flask
def get_deployment_updates_manager():
"""
Get the current app's deployment updates manager, create if necessary
"""
manager = current_app.config.get('deployment_updates_manager')
if not manager:
current_app.config['deployment_updates_manager'] = \
DeploymentUpdateManager()
manager = current_app.config.get('deployment_updates_manager')
return manager
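# A minimal usage sketch of the manager (illustrative; assumes it runs inside
# a Flask app/request context, since the manager is cached on
# current_app.config, and the deployment id, blueprint and entity id below
# are placeholders):
def _example_stage_and_commit(deployment_id, staged_blueprint):
    manager = get_deployment_updates_manager()
    dep_update = manager.stage_deployment_update(deployment_id,
                                                 staged_blueprint)
    manager.create_deployment_update_step(dep_update.id,
                                          operation='add',
                                          entity_type='node',
                                          entity_id='nodes:new_node')
    return manager.commit_deployment_update(dep_update.id)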
|
python
|
#########################################################################
# Copyright/License Notice (Modified BSD License) #
#########################################################################
#########################################################################
# Copyright (c) 2008, Daniel Knaggs #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: - #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the author nor the names of its contributors #
# may be used to endorse or promote products derived from this #
# software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
#########################################################################
import serial
class GSMDevice(object):
def __init__(self, port, speed, bits, parity, stop, timeout):
self.ser = serial.Serial()
self.ser.baudrate = speed
self.ser.bytesize = bits
self.ser.parity = parity
self.ser.port = port
self.ser.stopbits = stop
self.ser.timeout = timeout
self.ser.open()
def changeTimeout(self, newtimeout):
self.ser.timeout = newtimeout
def dispose(self):
self.ser.close()
self.ser = None
def getBERPercentage(self, index):
if index == 0:
return "< 0.2%"
elif index == 1:
return "0.2-0.4%"
elif index == 2:
return "0.4-0.8%"
elif index == 3:
return "0.8-1.6%"
elif index == 4:
return "1.6-3.2%"
elif index == 5:
return "3.2-6.4%"
elif index == 6:
return "6.4-12.8%"
elif index == 7:
return "> 12.8%"
elif index == 99:
return "Not Known"
def receiveChar(self, chars = 1):
return self.ser.read(chars)
def receiveDualResult(self):
blank = self.receiveLine()
ir = self.receiveLine()
blank = self.receiveLine()
frc = self.receiveLine()
return ir, frc
def receiveLine(self):
return self.ser.readline().replace("\r", "").replace("\n", "")
def receiveSingleResult(self):
blank = self.receiveLine()
frc = self.receiveLine()
return frc
def sendATCommand(self, command = "", skipcheck = False, dualcheck = False):
if skipcheck:
self.ser.write("AT%s\r" % command)
return True
else:
self.ser.write("AT%s=?\r" % command.split("=")[0].replace("?", ""))
if not dualcheck:
if self.receiveSingleResult() == "OK":
self.ser.write("AT%s\r" % command)
return True
else:
return False
else:
ir, frc = self.receiveDualResult()
if frc == "OK":
self.ser.write("AT%s\r" % command)
return True
else:
return False
def sendRawCommand(self, command, newline = True):
self.ser.write(command)
if newline:
self.ser.write("\r")
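# A minimal usage sketch for GSMDevice (illustrative; the port name and serial
# settings are assumptions - 9600 baud, 8 data bits, no parity, 1 stop bit is
# a common modem default):
def _example_signal_quality():
    gsm = GSMDevice(port="/dev/ttyUSB0", speed=9600, bits=8,
                    parity="N", stop=1, timeout=2)
    try:
        if gsm.sendATCommand("+CSQ", skipcheck=True):
            return gsm.receiveSingleResult()
    finally:
        gsm.dispose()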
|
python
|
from .test_helper import argv_kiwi_tests
import sys
import mock
from mock import patch
import azurectl
from pytest import raises
from azurectl.commands.storage_account import StorageAccountTask
from azurectl.azurectl_exceptions import AzureInvalidCommand
class TestStorageAccountTask:
def setup(self):
sys.argv = [
sys.argv[0], '--config', '../data/config',
'storage', 'account', 'list'
]
self.task = StorageAccountTask()
self.task.request_wait = mock.Mock()
azurectl.commands.storage_account.StorageAccount = mock.Mock(
return_value=mock.Mock()
)
azurectl.commands.storage_account.AzureAccount = mock.Mock(
return_value=mock.Mock()
)
azurectl.commands.storage_account.Help = mock.Mock(
return_value=mock.Mock()
)
def teardown(self):
sys.argv = argv_kiwi_tests
def __init_command_args(self):
self.task.command_args = {
'create': False,
'delete': False,
'help': False,
'list': False,
'show': False,
'update': False,
'regions': False,
'--name': None,
'--description': None,
'--label': None,
'--locally-redundant': None,
'--zone-redundant': None,
'--geo-redundant': None,
'--read-access-geo-redundant': None,
'--new-primary-key': None,
'--new-secondary-key': None,
'--wait': True
}
def test_process_storage_account_help(self):
self.__init_command_args()
self.task.command_args['help'] = True
self.task.process()
self.task.manual.show.assert_called_once_with(
'azurectl::storage::account'
)
@patch('azurectl.commands.storage_account.DataOutput')
def test_process_storage_account_list(self, mock_out):
self.__init_command_args()
self.task.command_args['list'] = True
self.task.process()
self.task.storage_account.list.assert_called_once_with()
@patch('azurectl.commands.storage_account.DataOutput')
def test_process_storage_account_show(self, mock_out):
self.__init_command_args()
self.task.command_args['show'] = True
self.task.command_args['--name'] = 'test'
self.task.process()
self.task.storage_account.show.assert_called_once_with(
self.task.command_args['--name']
)
@patch('azurectl.commands.storage_account.DataOutput')
def test_process_storage_account_create(self, mock_out):
self.__init_command_args()
self.task.command_args['create'] = True
self.task.command_args['--name'] = 'testname'
self.task.command_args['--label'] = 'test-label'
self.task.command_args['--description'] = 'test-description'
self.task.command_args['--locally-redundant'] = True
self.task.process()
self.task.storage_account.create.assert_called_once_with(
'testname',
'test-description',
'test-label',
'Standard_LRS'
)
@patch('azurectl.commands.storage_account.DataOutput')
def test_process_storage_account_update(self, mock_out):
self.__init_command_args()
self.task.command_args['update'] = True
self.task.command_args['--name'] = 'testname'
self.task.command_args['--label'] = 'test-label'
self.task.command_args['--description'] = 'test-description'
self.task.command_args['--locally-redundant'] = True
self.task.process()
self.task.storage_account.update.assert_called_once_with(
'testname',
'test-description',
'test-label',
'Standard_LRS',
None,
None
)
@patch('azurectl.commands.storage_account.DataOutput')
def test_process_storage_account_delete(self, mock_out):
self.__init_command_args()
self.task.command_args['delete'] = True
self.task.command_args['--name'] = 'test'
self.task.process()
self.task.storage_account.delete.assert_called_once_with(
self.task.command_args['--name']
)
def test_storage_account_command_invalid_caps(self):
self.__init_command_args()
self.task.command_args['--name'] = 'CAPSAREINVALID'
with raises(AzureInvalidCommand):
self.task.validate_account_name()
def test_storage_account_command_invalid_punctuation(self):
self.__init_command_args()
self.task.command_args['--name'] = 'punctuation-is.bad'
with raises(AzureInvalidCommand):
self.task.validate_account_name()
@patch('azurectl.commands.storage_account.DataOutput')
def test_process_storage_account_regions(self, mock_out):
self.__init_command_args()
self.task.command_args['regions'] = True
self.task.process()
self.task.account.locations.assert_called_once_with('Storage')
|
python
|
def mySqrt(x):
    # Newton's method for sqrt(x): starting from r = x, iterate
    # r <- (r + x / r) / 2 until r*r is within `precision` of x.
    r = x
    precision = 10 ** (-10)
    while abs(x - r * r) > precision:
        r = (r + x / r) / 2
    return r
print(mySqrt(25))
print(mySqrt(36))
|
python
|
from restkit.handlers.http_mrg_handlers import query_handler as chandler_0 # noqa
from restkit.handlers.http_mrg_handlers.http_report_handlers import report_csv_handler as chandler_1 # noqa
__all__ = [
'chandler_0',
'chandler_1',
]
|
python
|
from dart_fss.api import filings
def test_get_corp_code():
res = filings.get_corp_code()
actual = res[0].keys()
expected = ['corp_code', 'corp_name', 'stock_code', 'modify_date']
for act in actual:
assert act in expected
def test_get_corp_info():
se = filings.get_corp_info('00126380')
actual = se['est_dt']
expected = '19690113'
assert actual == expected
def test_download_document():
import tempfile
with tempfile.TemporaryDirectory() as path:
res = filings.download_document(path, '20190401004781')
assert res is not None
def test_search_filings():
f = filings.search_filings(corp_code='00126380', bgn_de='20190101', end_de='20190301', last_reprt_at='Y')
actual = f['total_count']
expected = 29
assert actual == expected
|
python
|
from urllib.parse import urlencode
import requests
from module_pipedrive.pipedrive import exceptions
from module_pipedrive.pipedrive.activities import Activities
from module_pipedrive.pipedrive.deals import Deals
from module_pipedrive.pipedrive.filters import Filters
from module_pipedrive.pipedrive.leads import Leads
from module_pipedrive.pipedrive.notes import Notes
from module_pipedrive.pipedrive.organizations import Organizations
from module_pipedrive.pipedrive.persons import Persons
from module_pipedrive.pipedrive.pipelines import Pipelines
from module_pipedrive.pipedrive.products import Products
from module_pipedrive.pipedrive.recents import Recents
from module_pipedrive.pipedrive.stages import Stages
from module_pipedrive.pipedrive.users import Users
from module_pipedrive.pipedrive.webhooks import Webhooks
class Client:
BASE_URL = 'https://api-proxy.pipedrive.com/'
OAUTH_BASE_URL = 'https://oauth.pipedrive.com/oauth/'
def __init__(self, client_id=None, client_secret=None, domain=None):
self.client_id = client_id
self.client_secret = client_secret
self.access_token = None
self.api_token = None
self.activities = Activities(self)
self.deals = Deals(self)
self.filters = Filters(self)
self.leads = Leads(self)
self.notes = Notes(self)
self.organizations = Organizations(self)
self.persons = Persons(self)
self.pipelines = Pipelines(self)
self.products = Products(self)
self.recents = Recents(self)
self.stages = Stages(self)
self.users = Users(self)
self.webhooks = Webhooks(self)
if domain:
if not domain.endswith('/'):
domain += '/'
self.BASE_URL = domain + 'v1/'
def authorization_url(self, redirect_uri, state=None):
params = {
'client_id': self.client_id,
'redirect_uri': redirect_uri,
}
if state is not None:
params['state'] = state
return self.OAUTH_BASE_URL + 'authorize?' + urlencode(params)
def exchange_code(self, redirect_uri, code):
data = {
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': redirect_uri
}
return self._post(self.OAUTH_BASE_URL + 'token', data=data, auth=(self.client_id, self.client_secret))
def refresh_token(self, refresh_token):
data = {
'grant_type': 'refresh_token',
'refresh_token': refresh_token,
}
return self._post(self.OAUTH_BASE_URL + 'token', data=data, auth=(self.client_id, self.client_secret))
def set_access_token(self, access_token):
self.access_token = access_token
def set_api_token(self, api_token):
self.api_token = api_token
def _get(self, url, params=None, **kwargs):
return self._request('get', url, params=params, **kwargs)
def _post(self, url, **kwargs):
return self._request('post', url, **kwargs)
def _put(self, url, **kwargs):
return self._request('put', url, **kwargs)
def _delete(self, url, **kwargs):
return self._request('delete', url, **kwargs)
def _request(self, method, url, headers=None, params=None, **kwargs):
_headers = {}
_params = {}
if self.access_token:
_headers['Authorization'] = 'Bearer {}'.format(self.access_token)
if self.api_token:
_params['api_token'] = self.api_token
if headers:
_headers.update(headers)
if params:
_params.update(params)
return self._parse(requests.request(method, url, headers=_headers, params=_params, **kwargs))
def _parse(self, response):
status_code = response.status_code
if 'Content-Type' in response.headers and 'application/json' in response.headers['Content-Type']:
r = response.json()
else:
return response.text
if not response.ok:
error = None
if 'error' in r:
error = r['error']
if status_code == 400:
raise exceptions.BadRequestError(error, response)
elif status_code == 401:
raise exceptions.UnauthorizedError(error, response)
elif status_code == 403:
raise exceptions.ForbiddenError(error, response)
elif status_code == 404:
raise exceptions.NotFoundError(error, response)
elif status_code == 410:
raise exceptions.GoneError(error, response)
elif status_code == 415:
raise exceptions.UnsupportedMediaTypeError(error, response)
elif status_code == 422:
raise exceptions.UnprocessableEntityError(error, response)
elif status_code == 429:
raise exceptions.TooManyRequestsError(error, response)
elif status_code == 500:
raise exceptions.InternalServerError(error, response)
elif status_code == 501:
raise exceptions.NotImplementedError(error, response)
elif status_code == 503:
raise exceptions.ServiceUnavailableError(error, response)
else:
raise exceptions.UnknownError(error, response)
return r
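# --- Usage sketch (illustrative only, not part of the original module) ---
# The credentials and domain below are placeholders.
if __name__ == '__main__':
    # OAuth flow: send the user to the authorization URL, then exchange the returned code.
    oauth_client = Client(client_id='your-client-id', client_secret='your-client-secret')
    print(oauth_client.authorization_url(redirect_uri='https://example.com/callback', state='xyz'))
    # Alternatively, authenticate with a pre-generated API token against a company domain.
    token_client = Client(domain='https://yourcompany.pipedrive.com/')
    token_client.set_api_token('your-api-token')
    # token_client.deals, token_client.persons, etc. are now ready for requests.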
|
python
|
#!/usr/bin/python
"""
Copyright (C) International Business Machines Corp., 2005
Author: Dan Smith <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; under version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
##
## These are utility functions for test cases
##
import sys
import commands
import os
import pwd
import time
import pty
import select
import signal
import re
import glob
TEST_PASS = 0
TEST_FAIL = 255
TEST_SKIP = 77
# We currently advise waiting this many seconds for the ramdisk to
# boot inside a domU
TEST_DOMU_BOOT_DELAY = 20
if os.environ.get("TEST_VERBOSE"):
verbose = True
else:
verbose = False
class TimeoutError(Exception):
def __init__(self, msg, outputSoFar):
self.msg = msg
self.output = outputSoFar
def __str__(self):
return str(self.msg)
def runWithTimeout(cmd, timeout):
args = cmd.split()
    pid, fd = pty.fork()
startTime = time.time()
if pid == 0:
os.execvp(args[0], args)
output = ""
while time.time() - startTime < timeout:
i, o, e = select.select([fd], [], [], timeout)
if fd in i:
try:
                data = os.read(fd, 1)
                output += data
except OSError, e:
exitPid, status = os.waitpid(pid, os.WNOHANG)
if exitPid == pid:
if verbose:
print "Child exited with %i" % status
return status, output
if verbose:
print "Command timed out: killing pid %i" % pid
os.kill(pid, signal.SIGINT)
raise TimeoutError("Command execution time exceeded %i seconds" % timeout,
outputSoFar=output)
def traceCommand(command, timeout=None, logOutput=True):
if verbose:
print "[dom0] Running `%s'" % command
if timeout:
status, output = runWithTimeout(command, timeout)
else:
status, output = commands.getstatusoutput(command)
if logOutput and verbose:
print output
return status, output
def getTestName():
script = sys.argv[0]
fname = os.path.basename(script)
match = re.match("([^\.]+)\.[a-z]+", fname)
if match:
tname = match.group(1)
else:
tname = "UNKNOWN"
return tname
def becomeNonRoot():
"""Become a non-root user, or FAIL if this is not possible. This call
succeeds if we are already running as a non-root user.
"""
if os.geteuid() == 0:
# Try and become "nobody". This user is commonly in place, but this
# could be extended to consider any number of users to be acceptable,
# if there are systems where "nobody" is not present.
allusers = pwd.getpwall()
for u in allusers:
if u[0] == "nobody":
os.setreuid(u[2], u[2])
break
if os.geteuid() == 0:
FAIL("Could not become a non-root user")
def FAIL(format, *args):
print "\nREASON:", (format % args)
sys.exit(TEST_FAIL)
def SKIP(format, *args):
print "\nREASON:", (format % args)
sys.exit(TEST_SKIP)
def saveLog(logText, filename=None):
if not filename:
filename = "log";
logfile = open(filename, 'w');
date = commands.getoutput("date");
logfile.write("-- BEGIN XmTest Log @" + date + "\n");
logfile.write(logText);
logfile.write("\n-- END XmTest Log\n");
logfile.close();
def waitForBoot():
if verbose:
print "[dom0] Waiting %i seconds for domU boot..." % TEST_DOMU_BOOT_DELAY
time.sleep(TEST_DOMU_BOOT_DELAY)
def timeStamp():
name = getTestName()
t = time.asctime(time.localtime())
print "*** Test %s started at %s %s" % (name, t,
time.tzname[time.daylight])
#
# Try to start a domain and attach a console to it to see if
# the console system is working
#
def isConsoleDead():
from XmTestLib import XmTestDomain, DomainError, XmConsole, ConsoleError
domain = XmTestDomain()
try:
console = domain.start()
console.runCmd("ls")
except DomainError, e:
return True
except ConsoleError, e:
domain.destroy()
return True
domain.destroy()
return False
#
# We currently can only load as many concurrent HVM domains as loop
# devices, need to find how many devices the system has.
def getMaxHVMDomains():
nodes = glob.glob("/dev/loop*")
maxd = len(nodes)
return maxd
if __name__ == "__main__":
timeStamp()
FAIL("foo")
|
python
|
r"""Analyze Traffic Images
This executable is used to annotate traffic images to highlight vehicle types and to produce stats
and graphs for the amount of time bicycle lanes and bus stops are blocked by vehicles:
Example usage:
./analyzeimages \
        -path_images ./data/rawimages/ \
        -path_labels_map data/car_label_map.pbtxt \
-save_directory data/processedimages/
"""
import sys
from matplotlib.ticker import FormatStrFormatter, FuncFormatter
sys.path.append('./models-master/research/')
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
import argparse
from argparse import RawTextHelpFormatter
import time
import numpy as np
import os
import tensorflow as tf
import csv
from datetime import datetime
import matplotlib.pyplot as plt
from collections import defaultdict
from io import StringIO
# from matplotlib import pyplot as plt
import matplotlib.path as mpltPath
from PIL import Image
import scipy.misc
def processimages(path_images_dir, path_labels_map,save_directory):
pathcpkt = 'data/output_inference_graph.pb/frozen_inference_graph.pb'
csv_file = 'data/csvfile.csv'
num_classes = 6
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(pathcpkt, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
label_map = label_map_util.load_labelmap(path_labels_map)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=num_classes,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
f = open(csv_file, 'w')
#f.write(
# 'timestamp,number cars in bike lane, number trucks in bike lane, '
# 'number cars in bus stop, number trucks in bus stop\n')
def load_image_into_numpy_array(imageconvert):
(im_width, im_height) = imageconvert.size
try:
return np.array(imageconvert.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
except ValueError:
return np.array([])
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
# Definite input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
polygon_right_lane = [(178, 122), (188, 240), (231, 240), (187, 125)]
polygon_left_lane = [(108, 143), (0, 215), (0, 233), (123, 142), (108, 97)]
polygon_bus_lane = [(200, 155), (230, 240), (292, 240), (225, 157)]
pathrightlane = mpltPath.Path(polygon_right_lane)
pathleftlane = mpltPath.Path(polygon_left_lane)
pathbuslane = mpltPath.Path(polygon_bus_lane)
for testpath in os.listdir(path_images_dir):
start_time = time.time()
timestamp = testpath.split(".jpg")[0]
try:
image = Image.open(path_images_dir + '/' + testpath)
image_np = load_image_into_numpy_array(image)
except IOError:
print("Issue opening "+testpath)
continue
if image_np.size == 0:
print("Skipping image "+testpath)
continue
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
# Actual detection.
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
min_score_thresh=0.4,
use_normalized_coordinates=True,
line_thickness=2)
scores = np.squeeze(scores)
boxes = np.squeeze(boxes)
num_cars_in_bikelane, num_cars_in_bus_stop, num_trucks_in_bike_lane, num_trucks_in_bus_stop = 0, 0, 0, 0
for i in range(boxes.shape[0]):
if scores[i] > .4:
box = tuple(boxes[i].tolist())
ymin, xmin, ymax, xmax = box
center_x = (((xmax * 352) - (xmin * 352)) / 2) + (xmin * 352)
center_y = (((ymax * 240) - (ymin * 240)) / 2) + (ymin * 240)
classes = np.squeeze(classes).astype(np.int32)
if classes[i] in category_index.keys():
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
if class_name == 'car':
points = [(center_x, center_y)]
if pathrightlane.contains_points(points) or pathleftlane.contains_points(points):
num_cars_in_bikelane += 1
elif pathbuslane.contains_points(points):
num_cars_in_bus_stop += 1
elif class_name == 'truck' or class_name == 'police' or class_name == 'ups':
points = [(center_x, center_y)]
if pathrightlane.contains_points(points) or pathleftlane.contains_points(points):
num_trucks_in_bike_lane += 1
elif pathbuslane.contains_points(points):
num_trucks_in_bus_stop += 1
# write to a csv file whenever there is a vehicle, how many and of what type with timestamp
f.write(timestamp + ',' + str(num_cars_in_bikelane) + ',' + str(num_trucks_in_bike_lane) + ',' + str(
num_cars_in_bus_stop) + ',' + str(num_trucks_in_bus_stop) + '\n')
print("Process Time " + str(time.time() - start_time))
scipy.misc.imsave(save_directory + testpath, image_np)
f.close()
return csv_file
def initialize_datastore():
blankarray = [0] * 24
alldata = [[list(blankarray), list(blankarray), list(blankarray)],
[list(blankarray), list(blankarray), list(blankarray)]]
# alldata [ [cars_blocking_bikelane[24],trucks_blocking_bikelane[24],eitherblockingbikelane[24]
# [cars_blocking_buslane[24],trucks_blocking_buslane[24],eitherblockingbuslane[24]]
weekdaydata = [[list(blankarray), list(blankarray), list(blankarray)],
[list(blankarray), list(blankarray), list(blankarray)]]
# same as alldata above but for weekdays, weekenddata same but for weekends
weekenddata = [[list(blankarray), list(blankarray), list(blankarray)],
[list(blankarray), list(blankarray), list(blankarray)]]
return [alldata, weekdaydata, weekenddata]
def weekday(datevalue):
if datevalue.weekday() < 5:
return True
else:
return False
def incrementarray(array, blockagearray, delta_time):
timestamp_string = (blockagearray[0].split(".jpg"))[0]
datetime_object = datetime.strptime(timestamp_string, '%Y-%m-%d %H:%M:%S.%f')
hour = datetime_object.hour
num_cars_in_bike_lane = int(blockagearray[1])
num_trucks_in_bike_lane = int(blockagearray[2])
num_cars_in_bus_stop = int(blockagearray[3])
num_truck_in_bus_stop = int(blockagearray[4])
if num_cars_in_bike_lane > 0:
array[0][0][hour] += delta_time
if num_trucks_in_bike_lane > 0:
array[0][1][hour] += delta_time
if num_cars_in_bike_lane > 0 or num_trucks_in_bike_lane > 0:
array[0][2][hour] += delta_time
if num_cars_in_bus_stop > 0:
array[1][0][hour] += delta_time
if num_truck_in_bus_stop > 0:
array[1][1][hour] += delta_time
if num_cars_in_bus_stop > 0 or num_truck_in_bus_stop > 0:
array[1][2][hour] += delta_time
def incrementarrays(dataarrays, blockagearray, delta_time):
alldata = dataarrays[0]
weekdaydata = dataarrays[1]
weekenddata = dataarrays[2]
datetime_object = datetime.strptime((blockagearray[0].split(".jpg"))[0], '%Y-%m-%d %H:%M:%S.%f')
incrementarray(alldata, blockagearray, delta_time)
if weekday(datetime_object):
incrementarray(weekdaydata, blockagearray, delta_time)
else:
incrementarray(weekenddata, blockagearray, delta_time)
return [alldata, weekdaydata, weekenddata]
def buildsaveplot(list_to_graph, title):
label = ['', '', '', '', '', '6 am', '',
'', '', '', '', '12 noon', '', '', '', '', '', '6 Pm', '',
'',
'', '', '', 'Midnight']
index = np.arange(len(label))
plt.bar(index, list_to_graph)
plt.xticks(index, label, fontsize=10, rotation=30)
plt.title(title)
plt.plot()
plt.ylim([0, 100.0])
ax = plt.gca()
ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f%%'))
plt.savefig("output/"+title.replace(" ", "") + ".png", bbox_inches='tight')
plt.close()
def analyzeresults(csv_file):
total_time_secs, total_time_bike_lane_blocked_secs, total_time_bus_stop_blocked_secs = 0, 0, 0
weekdaytotalseconds = [1] * 24 # where we are going to store how many seconds worth of images there are
    weekendtotalseconds = [1] * 24  # for each hour; this is necessary because we may be missing images
previous_timestamp = 0
dataarrays = initialize_datastore()
data = csv.reader(open(csv_file, 'r'))
data = sorted(data, key=lambda rowparse: datetime.strptime((rowparse[0].split(".jpg"))[0], '%Y-%m-%d %H:%M:%S.%f'))
for row in data:
datetime_object = datetime.strptime((row[0].split(".jpg"))[0], '%Y-%m-%d %H:%M:%S.%f')
timestamp = float(datetime_object.strftime('%s'))
hour = datetime_object.hour
if previous_timestamp != 0:
delta_time = timestamp - previous_timestamp
if delta_time > 30:
print("DELTA TIME LARGE")
delta_time = 30
total_time_secs += delta_time
if weekday(datetime_object):
weekdaytotalseconds[hour] += delta_time # necessary because there may be time stamps missing in images
else:
weekendtotalseconds[hour] += delta_time
dataarrays = incrementarrays(dataarrays, row, delta_time)
previous_timestamp = timestamp
weekendpercentageblocked = [[0] * 24, [0] * 24] # bike lane first array and bus lane second
weekdaypercentageblocked = [[0] * 24, [0] * 24]
for hour in range(0, 24):
total_time_bike_lane_blocked_secs += dataarrays[0][0][2][hour]
total_time_bus_stop_blocked_secs += dataarrays[0][1][2][hour]
weekdaypercentageblocked[0][hour] = 100 * (dataarrays[1][0][2][hour] / weekdaytotalseconds[hour])
weekendpercentageblocked[0][hour] = 100 * (dataarrays[2][0][2][hour] / weekendtotalseconds[hour])
weekdaypercentageblocked[1][hour] = 100 * (dataarrays[1][1][2][hour] / weekdaytotalseconds[hour])
weekendpercentageblocked[1][hour] = 100 * (dataarrays[2][1][2][hour] / weekendtotalseconds[hour])
total_time_seven2seven, blockedbikelaneseven2seven, blockedbuslaneseven2seven = 0, 0, 0
for x in range(7, 19):
total_time_seven2seven += weekdaytotalseconds[x]
blockedbikelaneseven2seven += dataarrays[1][0][2][x]
blockedbuslaneseven2seven += dataarrays[1][1][2][x]
print("RESULTS \n Total Time " + str(total_time_secs) + " blocked bike lane time " + str(
total_time_bike_lane_blocked_secs) + "blocked truck lane time" + str(total_time_bus_stop_blocked_secs))
print("Bike lane blocked " + str(100 * (total_time_bike_lane_blocked_secs / total_time_secs)) + "% of the time")
print("Bus lane blocked " + str(100 * (total_time_bus_stop_blocked_secs / total_time_secs)) + "% of the time")
print("Bike lane blocked " + str(
100 * (blockedbikelaneseven2seven / total_time_seven2seven)) + "% of the time durring weekday from 7 am to 7pm")
print("Bus lane blocked " + str(
100 * (blockedbuslaneseven2seven / total_time_seven2seven)) + "% of the time durring weekday from 7 am to 7pm")
buildsaveplot(weekdaypercentageblocked[0], 'Weekday Bike Lane Percentage Blocked by Hour')
buildsaveplot(weekdaypercentageblocked[1], 'Weekday Bus Stop Percentage Blocked by Hour')
buildsaveplot(weekendpercentageblocked[0], 'Weekend Bike Lane Percentage Blocked by Hour')
buildsaveplot(weekendpercentageblocked[1], 'Weekend Bus Stop Percentage Blocked by Hour')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='Analyze traffic images to determine rate of blocking bike '
'and bus lanes', formatter_class=RawTextHelpFormatter)
parser.add_argument('-path_images', help='the folder with all the downloaded images in it')
parser.add_argument('-path_labels_map', help='the file with the integer to label map')
parser.add_argument('-save_directory', help='the directory you want to save the annotated images to')
args = parser.parse_args()
    # To regenerate the CSV from raw images, uncomment the processimages call:
    # csv_file = processimages(args.path_images, args.path_labels_map, args.save_directory)
    # analyzeresults(csv_file)
    analyzeresults('data/analysis10days.csv')
|
python
|
import json
import random
from locoloco.models.db_orm import db
from locoloco.models.db_models import User
from locoloco.models.db_models import Country
from locoloco.models.db_models import DistributionCenter
from locoloco.models.db_models import StoreStatus
from locoloco.models.db_models import Store
from locoloco.models.db_models import StoreComponent
# For demo purposes we are only generating about 10 stores:
# - each store is randomly assigned to one of the existing distribution
#   centers (range 1 to 3); dc 4 is reserved for country LU.
# - status is randomly assigned (range 1 to 3).
# - store numbers are simply increased and assigned.
# - store components are randomly generated.
def load_users_from_json():
"""
Importing JSON data to table users
"""
json_filename = 'db/json/users.json'
with open(json_filename, 'r', encoding='utf-8') as f:
json_object = json.load(f)
users = []
for user in json_object['users']:
# Each user is a dict
users.append(User(
provider='locoloco',
social_id=User.generate_social_id(),
email_address=user.get('email_address'),
password=user.get('password'))
)
# Add data to users
db.session.add_all(users)
# Flush the remaining changes and commit the transaction
db.session.commit()
# Close the Session
db.session.close()
def load_countries_from_json():
"""
Importing JSON data to table stores
"""
json_filename = 'db/json/countries.json'
with open(json_filename, 'r', encoding='utf-8') as f:
json_object = json.load(f)
countries = []
for country in json_object['countries']:
countries.append(Country(
country_code=country.get('country_code'),
country_name=country.get('country_name'))
)
db.session.add_all(countries)
db.session.commit()
db.session.close()
def load_distribution_centers_from_json():
"""
Importing JSON data to table distribution_centers
"""
json_filename = 'db/json/distribution_centers.json'
with open(json_filename, 'r', encoding='utf-8') as f:
json_object = json.load(f)
dcs = []
for dc in json_object['distribution_centers']:
dcs.append(DistributionCenter(
country_code=dc.get('country_code'),
number=dc.get('number'),
name=dc.get('name'),
tag=dc.get('tag'))
)
db.session.add_all(dcs)
db.session.commit()
db.session.close()
def load_stores_status_from_json():
"""
Importing JSON data to table store_status
"""
json_filename = 'db/json/store_status.json'
with open(json_filename, 'r', encoding='utf-8') as f:
json_object = json.load(f)
statuses = []
for status in json_object['store_status']:
statuses.append(StoreStatus(
sequence=status.get('sequence'),
name=status.get('name'),
description=status.get('description'))
)
db.session.add_all(statuses)
db.session.commit()
db.session.close()
def load_stores_from_json():
"""
Importing JSON data to table stores.
Table dependencies: countries, distribution_centers
"""
json_filename = 'db/json/stores.json'
with open(json_filename, 'r', encoding='utf-8') as f:
json_object = json.load(f)
stores = []
number = 0
# Use default iterators/operators, no need for .keys()
for key in json_object.get('stores'):
# Default value for user_id will be 1, as there should always
# be a default user.
user_id = 1
# Retrieve country_code
country_code = json_object.get(
'stores').get(key).get('store').get('country_code')
# Retrieve dc_id using country_code and the exported dc number
# dc_number = json_object.get(
# 'stores').get(key).get('store').get('dc_number')
if country_code == 'LU':
dc_number = 4
else:
dc_number = random.randint(1, 3)
# Some countries can have no DC, so we change it to the
# relevant parent country.
# if country_code == 'LU':
# dc_id = DistributionCenter.get_id('BE', dc_number)
# else:
# dc_id = DistributionCenter.get_id(country_code, dc_number)
# Retrieve store number
# number = json_object.get(
# 'stores').get(key).get('store').get('number')
number += 1
# Add the fields to the store
stores.append(Store(
user_id=user_id,
country_code=country_code,
dc_id=dc_number,
number=int(key),
name=json_object.get(
'stores').get(key).get('store').get('name'),
status_id=random.randint(1, 3),
street_name=json_object.get(
'stores').get(key).get('store').get('street_name'),
street_number=json_object.get(
'stores').get(key).get('store').get('street_number'),
postal_code=json_object.get(
'stores').get(key).get('store').get('postal_code'),
city=json_object.get(
'stores').get(key).get('store').get('city'),
))
db.session.add_all(stores)
db.session.commit()
db.session.close()
def load_store_components_from_json():
"""
Importing JSON data to table store_components.
We only load components for stores with status Open (sequence 2).
"""
json_filename = 'db/json/stores.json'
with open(json_filename, 'r', encoding='utf-8') as f:
json_object = json.load(f)
backoffices = []
network_routers = []
network_switches = []
access_points = []
# Use default iterators/operators, no need for .keys()
for key in json_object.get('stores'):
# backoffice
i = 1
while i <= random.randint(1, 2):
country_code = json_object.get(
'stores').get(key).get('store').get('country_code')
number = int(key)
bo_hostname = 'Backoffice {}'.format(i)
backoffices.append(StoreComponent(
store_id=Store.get_id(country_code, number),
component_type='backoffice',
hostname=bo_hostname,
ip_address='127.0.0.1')
)
i += 1
# network_routers
i = 1
while i <= random.randint(1, 3):
country_code = json_object.get(
'stores').get(key).get('store').get('country_code')
number = int(key)
nr_hostname = 'Network Router {}'.format(i)
network_routers.append(StoreComponent(
store_id=Store.get_id(country_code, number),
component_type='network_routers',
hostname=nr_hostname,
ip_address='127.0.0.1')
)
i += 1
# network_switches
i = 1
while i <= random.randint(1, 2):
country_code = json_object.get(
'stores').get(key).get('store').get('country_code')
number = int(key)
ns_hostname = 'Network Switch {}'.format(i)
network_switches.append(StoreComponent(
store_id=Store.get_id(country_code, number),
component_type='network_switches',
hostname=ns_hostname,
ip_address='127.0.0.1')
)
i += 1
# network_access_points
i = 1
while i <= random.randint(1, 5):
country_code = json_object.get(
'stores').get(key).get('store').get('country_code')
number = int(key)
ap_hostname = 'Network Access Point {}'.format(i)
access_points.append(StoreComponent(
store_id=Store.get_id(country_code, number),
component_type='network_access_points',
hostname=ap_hostname,
ip_address='127.0.0.1')
)
i += 1
db.session.add_all(backoffices)
db.session.add_all(network_routers)
db.session.add_all(network_switches)
db.session.add_all(access_points)
db.session.commit()
db.session.close()
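# --- Usage sketch (illustrative only, not part of the original module) ---
# The loaders above assume an active application/database session so that
# `db.session` is bound. The order below follows the table dependencies noted
# in the docstrings (countries and distribution centers before stores, stores
# before store components).
def load_all_demo_data():
    load_users_from_json()
    load_countries_from_json()
    load_distribution_centers_from_json()
    load_stores_status_from_json()
    load_stores_from_json()
    load_store_components_from_json()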
|
python
|
from pdfrw import PdfObject, PdfReader, PdfWriter
import os
defaultlang = 'en-US'
#read all files in the folder called 'files'
files = os.listdir('files')
for file in files:
print(file)
fixlist = []
trailer = PdfReader('files\\'+file)
print("Lang: ",trailer.Root.Lang)
    if trailer.Root.Lang is None:
fixlist.append('Lang')
print("Title: ",trailer.Info.Title)
    if trailer.Info.Title is None:
fixlist.append('Title')
print(trailer.Root.MarkInfo)
    if trailer.Root.MarkInfo is None:
fixlist.append('MarkInfo')
print('')
print('Found issues with these:')
print(fixlist)
tofix = input('Do you want to fix all of these issues? y or n')
if tofix == 'y' or tofix == 'Y':
print('Fixing this:')
for fix in fixlist:
print(fix)
if fix == 'Lang':
trailer.Root.Lang = defaultlang
if fix == 'Title':
totitle = input('Do you want the title to be: '+file.split(".")[0])
if totitle == 'y' or totitle == 'Y':
title = file.split(".")[0]
else:
title = input('What does the title need to be?')
trailer.Info.Title = title
if fix == 'MarkInfo':
trailer.Root.MarkInfo = PdfObject('<</Marked true>>')
#commit the changes
PdfWriter('out\\'+file, trailer=trailer).write()
tofix = input('Do you want to fix anything else in this file? y or n')
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8; -*-
# Copyright (c) 2021, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
import oci
import os
from ads.common import utils
def api_keys(
oci_config: str = os.path.join(os.path.expanduser("~"), ".oci", "config"),
profile: str = "DEFAULT",
client_kwargs: dict = None,
) -> dict:
r"""Prepares authentication and extra arguments necessary for creating clients for different OCI services using API
Keys.
Parameters
----------
oci_config : str
OCI authentication config file location. Default is $HOME/.oci/config.
profile : str
        Profile name to select from the config file. The default is DEFAULT.
client_kwargs : dict
kwargs that are required to instantiate the Client if we need to override the defaults.
Returns
-------
dict
Contains keys - config, signer and client_kwargs.
        - The config contains the configuration loaded from `oci_config`.
- The signer contains the signer object created from the api keys.
- client_kwargs contains the `client_kwargs` that was passed in as input parameter.
Examples
--------
>>> from ads.common import auth as authutil
>>> from ads.common import oci_client as oc
>>> auth = authutil.api_keys(oci_config="/home/datascience/.oci/config", profile="TEST", client_kwargs={"timeout": 6000})
>>> oc.OCIClientFactory(**auth).object_storage # Creates Object storage client with timeout set to 6000 using API Key authentication
"""
configuration = oci.config.from_file(oci_config, profile)
return {
"config": configuration,
"signer": oci.signer.Signer(
configuration["tenancy"],
configuration["user"],
configuration["fingerprint"],
configuration["key_file"],
configuration.get("pass_phrase"),
),
"client_kwargs": client_kwargs,
}
def resource_principal(client_kwargs=None):
r"""Prepares authentication and extra arguments necessary for creating clients for different OCI services using
Resource Principals.
Parameters
----------
client_kwargs : dict
kwargs that are required to instantiate the Client if we need to override the defaults.
Returns
-------
dict
Contains keys - config, signer and client_kwargs.
        - The config contains an empty dictionary.
- The signer contains the signer object created from the resource principal.
- client_kwargs contains the `client_kwargs` that was passed in as input parameter.
Examples
--------
>>> from ads.common import auth as authutil
>>> from ads.common import oci_client as oc
>>> auth = authutil.resource_principal({"timeout": 6000})
>>> oc.OCIClientFactory(**auth).object_storage # Creates Object Storage client with timeout set to 6000 seconds using resource principal authentication
"""
return {
"config": {},
"signer": oci.auth.signers.get_resource_principals_signer(),
"client_kwargs": client_kwargs,
}
def default_signer(client_kwargs=None):
r"""Prepares authentication and extra arguments necessary for creating clients for different OCI services based on
the default authentication setting for the session. Refer ads.set_auth API for further reference.
Parameters
----------
client_kwargs : dict
kwargs that are required to instantiate the Client if we need to override the defaults.
Returns
-------
dict
Contains keys - config, signer and client_kwargs.
        - The config contains the configuration loaded from the default location if the default
          auth mode is API keys; otherwise it is an empty dictionary.
- The signer contains the signer object created from default auth mode.
- client_kwargs contains the `client_kwargs` that was passed in as input parameter.
Examples
--------
>>> from ads.common import auth as authutil
>>> from ads.common import oci_client as oc
>>> auth = authutil.default_signer()
>>> oc.OCIClientFactory(**auth).object_storage # Creates Object storage client
"""
if utils.is_resource_principal_mode():
return resource_principal(client_kwargs)
else:
return api_keys(client_kwargs=client_kwargs, profile=utils.oci_key_profile())
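# Usage sketch (illustrative): get_signer() below chooses between the helpers
# above -- API keys when both a config path and a profile are supplied, resource
# principal otherwise. Extra keyword arguments are forwarded as client kwargs.
# >>> auth = get_signer("~/.oci/config", "DEFAULT", timeout=6000)  # API keys
# >>> auth = get_signer()  # resource principal (e.g. inside an OCI notebook session)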
def get_signer(oci_config=None, oci_profile=None, **client_kwargs):
if oci_config and oci_profile:
return api_keys(oci_config, oci_profile, client_kwargs)
else:
return resource_principal(client_kwargs)
|
python
|
import sys
def print_line(to_file=None):
if to_file:
print("--------------------------------------------------",
file=to_file)
else:
print("--------------------------------------------------")
def print_header():
print_line()
print("NAS Parallel Benchmark v3.2")
print(" MG")
print("[main]: initializing...")
print_line()
print()
def print_config(app_data):
nx = ny = nz = app_data['prob_size']
print_line()
print("# Problem Settings #")
print("[main]: CLASS = \""+app_data['prob_class']+"\"")
print("[main]: top level =", app_data['lt'])
print("[main]: bottom level =", app_data['lb'])
print("[main]: grid size =", nx, "x", ny, "x", nz)
print("[main]: n-iterations =", app_data['nit'])
print()
print("# Stencil Co-efficients #")
print("[main]: a =", app_data['a'])
print("[main]: c =", app_data['c'])
verify_data = app_data['verify_data']
print()
print("# Verification Values #")
print("[main]: threshold =", verify_data['epsilon'])
print("[main]: Class \""+app_data['prob_class']+"\" " \
+ "L2 norm =", verify_data['verify_value'])
print_line()
return
def print_init_norm(app_data):
print()
print("# Initial Norms #")
print("[main]: initial norm =", app_data['rnm2'])
print("[main]: initial err =", app_data['rnmu'])
print_line()
return
|
python
|
import asyncio
from telethon import TelegramClient
TELETHON_SESSION_FILE: str = input("Please insert path to telethon session file: ")
API_ID: int = int(input("Please insert session api id: "))
API_HASH: str = input("Please insert session api hash: ")
TG_USERNAME_RECIPIENT: str = input(
    "Please insert telegram username recipient for test message: "
)
async def check_telethon():
async with TelegramClient(
TELETHON_SESSION_FILE.partition('.session')[0], api_id=API_ID, api_hash=API_HASH
) as client:
await client.send_message(TG_USERNAME_RECIPIENT, "Client success work!")
if __name__ == "__main__":
asyncio.run(check_telethon())
|
python
|
import logging
from sklearn.dummy import DummyClassifier, DummyRegressor
from amlb.benchmark import TaskConfig
from amlb.data import Dataset
from amlb.results import save_predictions
from amlb.utils import Timer, unsparsify
log = logging.getLogger(__name__)
def run(dataset: Dataset, config: TaskConfig):
log.info("\n**** Constant predictor (sklearn dummy) ****\n")
is_classification = config.type == 'classification'
predictor = DummyClassifier(strategy='prior') if is_classification else DummyRegressor(strategy='median')
encode = config.framework_params.get('_encode', False)
X_train = unsparsify(dataset.train.X_enc if encode else dataset.train.X, fmt='array')
y_train = unsparsify(dataset.train.y_enc if encode else dataset.train.y, fmt='array')
X_test = unsparsify(dataset.test.X_enc if encode else dataset.test.X, fmt='array')
y_test = unsparsify(dataset.test.y_enc if encode else dataset.test.y, fmt='array')
with Timer() as training:
predictor.fit(X_train, y_train)
with Timer() as predict:
predictions = predictor.predict(X_test)
probabilities = predictor.predict_proba(X_test) if is_classification else None
save_predictions(dataset=dataset,
output_file=config.output_predictions_file,
probabilities=probabilities,
predictions=predictions,
truth=y_test,
target_is_encoded=encode)
return dict(
models_count=1,
training_duration=training.duration,
predict_duration=predict.duration
)
|
python
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import argparse
import os
import random
import time
import distutils.util
import numpy as np
import paddle
import paddle.nn.functional as F
import paddlenlp as ppnlp
from paddlenlp.datasets import load_dataset
from paddlenlp.transformers import LinearDecayWithWarmup
from paddlenlp.experimental import FasterErnieForSequenceClassification, to_tensor
# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--save_dir", default='./checkpoint', type=str, help="The output directory where the model checkpoints will be written.")
parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. "
"Sequences longer than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--batch_size", default=32, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--epochs", default=3, type=int, help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion", default=0.0, type=float, help="Linear warmup proption over the training process.")
parser.add_argument("--init_from_ckpt", type=str, default=None, help="The path of checkpoint to be loaded.")
parser.add_argument("--seed", type=int, default=1000, help="random seed for initialization")
parser.add_argument('--device', choices=['cpu', 'gpu', 'xpu'], default="gpu", help="Select which device to train model, defaults to gpu.")
parser.add_argument("--use_amp", type=distutils.util.strtobool, default=False, help="Enable mixed precision training.")
parser.add_argument("--scale_loss", type=float, default=2**15, help="The value of scale_loss for fp16.")
parser.add_argument("--save_steps", default=100, type=int, help="The interval steps to save checkppoints.")
parser.add_argument("--logging_steps", default=10, type=int, help="The interval steps to logging.")
args = parser.parse_args()
# yapf: enable
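# Example invocation (illustrative; the script file name is an assumption):
#   python train.py --device gpu --save_dir ./checkpoint --batch_size 32 \
#       --epochs 3 --learning_rate 5e-5 --max_seq_length 128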
def set_seed(seed):
"""sets random seed"""
random.seed(seed)
np.random.seed(seed)
paddle.seed(seed)
@paddle.no_grad()
def evaluate(model, criterion, metric, data_loader):
model.eval()
metric.reset()
losses = []
for batch in data_loader:
texts, labels = batch['text'], batch['label']
texts = to_tensor(texts, "texts")
logits, predictions = model(texts)
loss = criterion(logits, labels)
losses.append(loss.numpy())
correct = metric.compute(logits, labels)
metric.update(correct)
accu = metric.accumulate()
print("eval loss: %.5f, accuracy: %.5f" % (np.mean(losses), accu))
model.train()
metric.reset()
def create_dataloader(dataset, mode='train', batch_size=1):
def trans_fn(example):
return {
"text": example["text"],
"label": np.array(
example["label"], dtype="int64")
}
dataset.map(trans_fn)
shuffle = True if mode == 'train' else False
if mode == 'train':
batch_sampler = paddle.io.DistributedBatchSampler(
dataset, batch_size=batch_size, shuffle=shuffle)
else:
batch_sampler = paddle.io.BatchSampler(
dataset, batch_size=batch_size, shuffle=shuffle)
return paddle.io.DataLoader(dataset=dataset, batch_sampler=batch_sampler)
def do_train():
paddle.set_device(args.device)
set_seed(args.seed)
train_ds, dev_ds = load_dataset("chnsenticorp", splits=["train", "dev"])
model = FasterErnieForSequenceClassification.from_pretrained(
'ernie-1.0',
num_classes=len(train_ds.label_list),
max_seq_len=args.max_seq_length)
train_data_loader = create_dataloader(
train_ds, mode='train', batch_size=args.batch_size)
dev_data_loader = create_dataloader(
dev_ds, mode='dev', batch_size=args.batch_size)
if args.init_from_ckpt and os.path.isfile(args.init_from_ckpt):
state_dict = paddle.load(args.init_from_ckpt)
model.set_dict(state_dict)
num_training_steps = len(train_data_loader) * args.epochs
lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,
args.warmup_proportion)
# Generate parameter names needed to perform weight decay.
# All bias and LayerNorm parameters are excluded.
decay_params = [
p.name for n, p in model.named_parameters()
if not any(nd in n for nd in ["bias", "norm"])
]
optimizer = paddle.optimizer.AdamW(
learning_rate=lr_scheduler,
parameters=model.parameters(),
weight_decay=args.weight_decay,
apply_decay_param_fun=lambda x: x in decay_params)
criterion = paddle.nn.loss.CrossEntropyLoss()
metric = paddle.metric.Accuracy()
if args.use_amp:
scaler = paddle.amp.GradScaler(init_loss_scaling=args.scale_loss)
global_step = 0
tic_train = time.time()
total_train_time = 0
for epoch in range(1, args.epochs + 1):
for step, batch in enumerate(train_data_loader, start=1):
texts, labels = batch["text"], batch["label"]
texts = to_tensor(texts)
with paddle.amp.auto_cast(
args.use_amp,
custom_white_list=["fused_feedforward", "fused_attention"]):
logits, predictions = model(texts)
loss = criterion(logits, labels)
probs = F.softmax(logits, axis=1)
correct = metric.compute(logits, labels)
metric.update(correct)
acc = metric.accumulate()
if args.use_amp:
scaler.scale(loss).backward()
scaler.minimize(optimizer, loss)
else:
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.clear_grad()
global_step += 1
if global_step % args.logging_steps == 0:
time_diff = time.time() - tic_train
total_train_time += time_diff
print(
"global step %d, epoch: %d, batch: %d, loss: %.5f, accuracy: %.5f, speed: %.2f step/s"
% (global_step, epoch, step, loss, acc,
args.logging_steps / time_diff))
tic_train = time.time()
if global_step % args.save_steps == 0:
save_dir = os.path.join(args.save_dir, "model_%d" % global_step)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
evaluate(model, criterion, metric, dev_data_loader)
model.save_pretrained(save_dir)
tic_train = time.time()
print("Speed: %.2f steps/s" % (global_step / total_train_time))
if __name__ == "__main__":
do_train()
|
python
|
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Persons import Person, Persons
from PLC.Auth import Auth
class DeletePerson(Method):
"""
Mark an existing account as deleted.
Users and techs can only delete themselves. PIs can only delete
    themselves and other non-PIs at their sites. Admins can delete
    anyone.
Returns 1 if successful, faults otherwise.
"""
roles = ['admin', 'pi', 'user', 'tech']
accepts = [
Auth(),
Mixed(Person.fields['person_id'],
Person.fields['email'])
]
returns = Parameter(int, '1 if successful')
def call(self, auth, person_id_or_email):
# Get account information
persons = Persons(self.api, [person_id_or_email])
if not persons:
raise PLCInvalidArgument("No such account")
person = persons[0]
if person['peer_id'] is not None:
raise PLCInvalidArgument("Not a local account")
# Authenticated function
assert self.caller is not None
# Check if we can update this account
if not self.caller.can_update(person):
raise PLCPermissionDenied("Not allowed to delete specified account")
person.delete()
# Logging variables
self.event_objects = {'Person': [person['person_id']]}
self.message = 'Person %d deleted' % person['person_id']
return 1
|
python
|
#-------------------------------------------------------------------------------
#
# An abstract base class implementation of the ITemplateDataNameItem interface
# that looks for all specified values in its input context or optionally any of
# its sub-contexts and outputs a context containing all such values found.
#
# Written by: David C. Morrill
#
# Date: 07/29/2007
#
# (c) Copyright 2007 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
""" An abstract base class implementation of the ITemplateDataNameItem interface
that looks for all specified values in its input context or optionally any
of its sub-contexts and outputs a context containing all such values found.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
from traits.api \
import HasPrivateTraits, Instance, Property, provides
from apptools.template.template_traits \
import TBool
from apptools.template.itemplate_data_context \
import ITemplateDataContext
from apptools.template.itemplate_data_name_item \
import ITemplateDataNameItem
from apptools.template.template_impl \
import Template
from .template_data_context \
import TemplateDataContext
from .helper \
import path_for
#-------------------------------------------------------------------------------
# 'AnyDataNameItem' class:
#-------------------------------------------------------------------------------
@provides( ITemplateDataNameItem )
class AnyDataNameItem ( Template ):
    """ An abstract base class implementation of the ITemplateDataNameItem
        interface that looks for all specified values in its input context or
        optionally any of its sub-contexts and outputs a context containing all
        such values found.
    """
#-- 'ITemplateDataNameItem' Interface Implementation -----------------------
# The data context which this data name item should match against:
input_data_context = Instance( ITemplateDataContext )
# The data context containing the data values and/or contexts this data
# name item matches:
output_data_context = Instance( ITemplateDataContext )
# The ITemplateChoice instance representing the current settings of the
# data name item. This value must be read/write, and must be overridden by
# sublasses.
data_name_item_choice = Property
# The alternative choices the user has for the data name item settings for
# the current input data context. The list may be empty, in which case the
# user cannot change the settings of the data name item. This value can be
# read only, and must be overridden by subclasses.
data_name_item_choices = Property
#-- Public Traits ----------------------------------------------------------
# Should all sub-contexts be included in the search:
recursive = TBool( False )
# Should included sub-contexts be flattened into a single context?
flatten = TBool( False )
#-- Private Traits ---------------------------------------------------------
# The current recursive setting:
current_recursive = TBool( False )
# The current input data context:
current_input_data_context = Property
#-- Abstract Methods (Must be overridden in subclasses) --------------------
def filter ( self, name, value ):
""" Returns **True** if the specified context data *name* and *value*
should be included in the output context; and **False** otherwise.
"""
raise NotImplementedError
#-- Property Implementations -----------------------------------------------
def _get_data_name_item_choice ( self ):
raise NotImplementedError
def _set_data_name_item_choice ( self, value ):
raise NotImplementedError
def _get_data_name_item_choices ( self ):
raise NotImplementedError
def _get_current_input_data_context ( self ):
return self.input_data_context
#-- Trait Event Handlers ---------------------------------------------------
def _recursive_changed ( self, value ):
""" Handles the primary recursive setting being changed.
"""
self.current_recursive = value
def _input_data_context_changed ( self ):
""" Handles the 'input_data_context' trait being changed.
"""
self.inputs_changed()
#-- Private Methods --------------------------------------------------------
def inputs_changed ( self ):
""" Handles any input value being changed. This method should be called
by subclasses when any of their input values change.
"""
        output_context = None
        input_context  = self.current_input_data_context
        if input_context is not None:
            if self.current_recursive and (not self.flatten):
                # Recursive, non-flattened search: clone the context tree,
                # keeping only the values accepted by the filter.
                output_context = self._copy_context( input_context )
            else:
                values = {}
                if self.current_recursive:
                    self._add_context( input_context, values )
                else:
                    self._add_values( input_context, values, '' )
                if len( values ) > 0:
                    output_context = TemplateDataContext(
                        data_context_path = input_context.data_context_path,
                        data_context_name = input_context.data_context_name,
                        values            = values )
        self.output_data_context = output_context
def _add_values ( self, input_context, values, path = '' ):
""" Adds all of the matching values in the specified *input_context* to
the specified *values* dictionary.
"""
# Filter each name/value in the current input context to see if it
# should be added to the output values:
gdcv = input_context.get_data_context_value
for name in input_context.data_context_values:
value = gdcv( name )
if self.filter( name, value ):
values[ path_for( path, name ) ] = value
def _add_context ( self, input_context, values, path = '' ):
""" Adds all of the matching values in the specified *input_context* to
the specified *output_context*, and then applies itself recursively
to all contexts contained in the specified *input_context*.
"""
# Add all of the filtered values in the specified input context:
self._add_values( input_context, values, path )
# Now process all of the input context's sub-contexts:
gdc = input_context.get_data_context
for name in input_context.data_contexts:
self._add_context( gdc( name ), values, path_for( path,
input_context.data_context_name ) )
def _copy_context ( self, input_context ):
""" Clone the input context so that the result only contains values and
contexts which contain valid values and are not empty.
"""
values = {}
contexts = {}
# Add all of the filtered values in the specified input context:
self._add_values( input_context, values )
# Now process all of the input context's sub-contexts:
gdc = input_context.get_data_context
for name in input_context.data_contexts:
context = self._copy_context( gdc( name ) )
if context is not None:
contexts[ name ] = context
if (len( values ) == 0) and (len( contexts ) == 0):
return None
return TemplateDataContext(
data_context_path = input_context.data_context_path,
data_context_name = input_context.data_context_name,
values = values,
contexts = contexts )
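#-------------------------------------------------------------------------------
#  Example (illustrative sketch, not part of the original module):
#-------------------------------------------------------------------------------
# A concrete subclass only needs to supply the filter() predicate (plus the two
# choice properties when user-editable settings are required). The hypothetical
# subclass below keeps just the numeric values found in the input context:
class NumericDataNameItem ( AnyDataNameItem ):
    """ Matches only the numeric values of the input data context. """
    def filter ( self, name, value ):
        return isinstance( value, ( int, float ) )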
|
python
|
import math
def Solution():
    T = int(input("Enter the number of test cases: "))  # count
    for i in range(0, T):
        N = int(input("Number of values: "))
        sData = str(input("Enter the values (space separated):\n"))
        ans = math.trunc(sortMaxMin(sData, N))
        print(ans)
def sortMaxMin(inputData: str, N: int):
    # Track the largest and smallest of the first N space-separated values
    # and return their difference.
    values = inputData.split(' ')
    maxNumber = float(values[0])
    minNumber = float(values[0])
    for i in range(0, N):
        if maxNumber < float(values[i]):
            maxNumber = float(values[i])
        if minNumber > float(values[i]):
            minNumber = float(values[i])
    return maxNumber - minNumber
if __name__ == "__main__":
    Solution()
|
python
|
#!/usr/bin/python3
"""
We want to use quad trees to store an N x N boolean grid. Each cell in the grid
can only be true or false. The root node represents the whole grid. For each
node, it will be subdivided into four children nodes until the values in the
region it represents are all the same.
Each node has another two boolean attributes : isLeaf and val. isLeaf is true if
and only if the node is a leaf node. The val attribute for a leaf node contains
the value of the region it represents.
"""
__author__ = 'Danyang'
# Definition for a QuadTree node.
class Node:
def __init__(self, val, isLeaf, topLeft, topRight, bottomLeft, bottomRight):
self.val = val
self.isLeaf = isLeaf
self.topLeft = topLeft
self.topRight = topRight
self.bottomLeft = bottomLeft
self.bottomRight = bottomRight
class Solution:
def construct(self, grid):
"""
        DFS, check 4 children then merge
:type grid: List[List[int]]
:rtype: Node
"""
l = len(grid)
return self._construct(grid, 0, 0, l)
def _construct(self, grid, row, col, l):
"""
        Use row/col for the matrix rather than x/y coordinates, since the
        direction is error-prone
"""
if l == 1:
return Node(grid[row][col], True, None, None, None, None)
l_child = l // 2
topLeft = self._construct(grid, row, col, l_child)
topRight = self._construct(grid, row, col + l_child, l_child)
bottomLeft = self._construct(grid, row + l_child, col, l_child)
bottomRight = self._construct(grid, row + l_child, col + l_child, l_child)
is_leaf = (
topLeft.val == topRight.val == bottomLeft.val == bottomRight.val
!= "*"
)
if is_leaf:
return Node(grid[row][col], True, None, None, None, None)
return Node("*", False, topLeft, topRight, bottomLeft, bottomRight)
|
python
|
#!/usr/bin/env python
# Small web app to allow a user to top up their personal PaperCut balance
# Add a custom URL to the PaperCut user web page, which is used by end users
# when they want to add credit to their PaperCut personal account. The url
# should refer to this small web app When the user clicks on the URL link
# (in the PaperCut user web page) to the web app, the user identification details
# are passed as part of the URL. This is explained at:
# https://www.papercut.com/products/ng/manual/common/topics/customize-user-web-pages.html#customize-user-web-pages-nav-links
# The URL needs to be something like http://localhost:8081/simpleTopUpBalance/?user=%user%&return_url=%return_url%
# Generally additional security should be provided. For example if the URL is http://localhost:8081/promptForPassword/?user=%user%&return_url=%return_url%
# then the user will need to enter their PaperCut password to access the payment system
# Handy Tip: By default the link will open in a separate window. You can edit the advanced config property user.web.custom-links and
# change "_body" to "_self". You should then use the %return_url% to return the user to the PaperCut MF/NG web interface
# This code is a basic example only. It should not be used for production
import xmlrpc.client
import sys
from json import load as loadjs
import logging
import traceback
# Bottle does not depend on any external libraries.
# You can just download bottle.py into your project directory and using
# $ wget http://bottlepy.org/bottle.py
from bottle import route, run, template, request, debug, response
# Prefer HTTPS connection
# If not localhost then this address will need to be whitelisted in PaperCut
host = "http://localhost:9191/rpc/api/xmlrpc"
auth = "token" # Value defined in advanced config property "auth.webservices.auth-token". Should be random
proxy = xmlrpc.client.ServerProxy(host)
# For more information on this user database file refer to the custom auth and sync demo
paperCutAccountInfoFile = 'c:\\Program Files\\PaperCut MF\\server\\custom\\config.json'
paperCutAccountData = {}
# The user is sent back to the Summary page as if they had just logged in,
# assuming their session has not timed out
# Therefore return url should be consistent
redirect_url = ''
@route('/')
def wrongUrl():
return("Please log into PaperCut and set top up your account from there")
@route('/promptForPassword/')
def prompForPassword():
user = request.query.user or ""
try:
if len(user) == 0 or not proxy.api.isUserExists(auth, user):
return( "Can't find user {}".format(user))
except Exception as e:
logging.error(traceback.format_exc())
return_url = request.query.return_url or ""
return template( 'promptForPassword', user=user, return_url=return_url)
@route('/simpleTopUpBalance/', method='GET')
def promptUser():
user = request.query.user or ""
return_url = request.query.return_url or ""
password = request.query.password or ""
if paperCutAccountData is None or paperCutAccountData['userdata'][user]['password'] == password:
return template('promptForDeposit',user=user, return_url=return_url)
# Password validation failed
return template( 'promptForPassword', user=user, error_text="Invalid password entered", return_url=return_url)
@route("/topUp/")
def topUp(method="GET"):
return_url = request.query.return_url or None
if request.query.cancel == "cancel":
if return_url is None:
return "Cancelled. Please close this tab/window and return to PaperCut"
else:
response.set_header("Refresh", "5; url={}".format(return_url))
return "Cancelled. You will be returned to PaperCut in 5s"
user = request.query.user
amount = float(request.query.amount)
if not amount > 0.0: # Example of data validation -- not used because our form already does this one
return template('promptForDeposit',user=user, return_url=return_url, error_text="Invalid amount \"{}\" entered".format(amount))
proxy.api.adjustUserAccountBalance(
auth, user, amount, "Money added by the Simple Top Up Page")
    if not return_url:
return "Updated balance is now {}<br><br>Please close this tab/window and return to PaperCut".format(
proxy.api.getUserAccountBalance(auth,user))
# Add refresh with 5s timeout back to PaperCut MF/NG
response.set_header("Refresh", "5; url={}".format(return_url))
return "Updated balance is now {}<br><br>You will be returned to PaperCcut in 5s".format(
proxy.api.getUserAccountBalance(auth,user))
try:
with open(paperCutAccountInfoFile) as f:
paperCutAccountData = loadjs(f)
except OSError:
paperCutAccountData = None
run(host='localhost', port=8081, debug=True, reloader=True)
|
python
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
from distutils.core import setup, Extension
import os
import sys
prefix = os.environ.get("prefix", "/usr")
import subprocess as S
setup(name="polkit",
version="1.0.2",
description="Python bindings for polkit-1",
long_description="Python bindings for polkit-1",
license="GNU GPL2",
author="Bahadır Kandemir",
author_email="[email protected]",
url="http://github.com/Pardus-Linux/python-polkit/",
py_modules = ["polkit"],
ext_modules = [Extension('_polkit',
sources=['pypolkit.c'],
include_dirs=["/usr/include/polkit-1", "/usr/include/glib-2.0", "/usr/lib/glib-2.0/include"],
libraries=["polkit-gobject-1", "gio-2.0", "gobject-2.0", "gmodule-2.0", "gthread-2.0", "pthread", "rt", "glib-2.0"],
library_dirs=[],
)],
)
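# Typical build/install invocation for a distutils-based setup script (the prefix
# below is illustrative; adjust it to your distribution's layout):
#   python setup.py build
#   python setup.py install --prefix=/usr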
|
python
|
"""
Loading data and events submodule.
"""
from ..signal import find_events
#from .eeg_preprocessing import *
import numpy as np
import pandas as pd
import mne
import os
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def read_eeg(filename, path="", eog=('HEOG', 'VEOG'), misc="auto", reference=None, montage="easycap-M1", preload=True, verbose="CRITICAL"):
"""
Load EEG data into mne.io.Raw file.
Parameters
----------
filename : str
Filename.
path : str
File's path.
eog : list
Names of channels or list of indices that should be designated EOG channels. Values should correspond to the vhdr file. Default is ('HEOG', 'VEOG'), but MNE's default is ('HEOGL', 'HEOGR', 'VEOGb').
misc : list
Names of channels or list of indices that should be designated MISC channels. Values should correspond to the electrodes in the vhdr file. If 'auto', units in vhdr file are used for inferring misc channels. Default is 'auto'.
reference : str or list
re-reference using specific sensors.
montage : str
Path or instance of montage containing electrode positions. If None, sensor locations are (0,0,0). See the documentation of mne.channels.read_montage() for more information.
preload : bool
If True, all data are loaded at initialization. If False, data are not read until save.
verbose : str
Level of verbosity. "DEBUG", "INFO", "WARNING", "ERROR" or "CRITICAL".
Returns
----------
raw : mne.io.Raw
Raw data in FIF format.
Example
----------
>>> import neurokit as nk
>>> raw = nk.read_eeg("filename")
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- mne
*See Also*
- mne package: http://martinos.org/mne/dev/index.html
"""
file = path + filename
# Find correct file
extension = filename.split(".")
if len(extension) == 1:
extension = None
else:
extension = "." + extension[-1]
if extension in [".vhdr", ".raw", ".set", ".fif", ".edf"]:
file = file.split(".")[0]
else:
if extension is None:
extension = ".vhdr"
if os.path.exists(file + extension) is False:
extension = ".raw"
if os.path.exists(file + extension) is False:
extension = ".set"
if os.path.exists(file + extension) is False:
extension = ".fif"
if os.path.exists(file + extension) is False:
extension = ".edf"
if os.path.exists(file + extension) is False:
print("NeuroKit Error: read_eeg(): couldn't find compatible format of data.")
return()
# Load the data
try:
if extension == ".vhdr":
raw = mne.io.read_raw_brainvision(file + extension, eog=eog, misc=misc, montage=montage, preload=preload, verbose=verbose)
elif extension == ".raw":
raw = mne.io.read_raw_egi(file + extension, eog=eog, misc=misc, montage=montage, preload=preload, verbose=verbose)
elif extension == ".set":
raw = mne.io.read_raw_eeglab(file + extension, eog=eog, misc=misc, montage=montage, preload=preload, verbose=verbose)
elif extension == ".fif":
raw = mne.io.read_raw_fif(file + extension, preload=preload, verbose=verbose)
elif extension == ".edf":
raw = mne.io.read_raw_edf(file + extension, preload=preload, verbose=verbose)
        else:
            print("NeuroKit Error: read_eeg(): couldn't find compatible reader of data. Try to do it manually using mne.")
            return()
# Re-reference if needed and if not MEG data
if True not in ["MEG" in chan for chan in raw.info["ch_names"]]:
if reference is None:
raw.set_eeg_reference()
else:
raw.set_eeg_reference(reference)
except KeyError:
print("NeuroKit Error: read_eeg(): something went wrong. This might be because you have channel names that are missing from the montage definition. Try do read data manually using mne.")
except FileNotFoundError:
print("NeuroKit Error: read_eeg(): something went wrong, check the file names that are inside your info files (.vhdr, .vmrk, ...)")
except:
print("NeuroKit Error: read_eeg(): error in data loading. Try to do it manually using mne.")
return(raw)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def eeg_add_channel(raw, channel, sync_index_raw=0, sync_index_channel=0, channel_type=None, channel_name=None):
"""
Add a channel to a raw m/eeg file.
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
channel : list or numpy.array
The channel to be added.
sync_index_raw : int or list
The index by which to align the two inputs.
sync_index_channel : int or list
The index by which to align the two inputs.
channel_type : str
Channel type. Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc', 'seeg', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr' or 'hbo'.
Returns
----------
raw : mne.io.Raw
Raw data in FIF format.
Example
----------
>>> import neurokit as nk
>>> raw = nk.eeg_add_channel(raw, ecg, channel_type="ecg")
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- mne
*See Also*
- mne: http://martinos.org/mne/dev/index.html
"""
if channel_name is None:
if isinstance(channel, pd.core.series.Series):
if channel.name is not None:
channel_name = channel.name
else:
channel_name = "Added_Channel"
else:
channel_name = "Added_Channel"
# Compute the distance between the two signals
diff = sync_index_channel - sync_index_raw
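    # For example (illustrative numbers): sync_index_raw=100 and sync_index_channel=150
    # give diff=50, so the first 50 samples of `channel` are dropped and 50 NaNs are
    # appended; a negative diff instead pads NaNs at the start of `channel`.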
if diff > 0:
channel = list(channel)[diff:len(channel)]
channel = channel + [np.nan]*diff
    if diff < 0:
        channel = [np.nan]*abs(diff) + list(channel)
# Adjust to raw size
if len(channel) < len(raw):
channel = list(channel) + [np.nan]*(len(raw)-len(channel))
else:
channel = list(channel)[0:len(raw)] # Crop to fit the raw data
info = mne.create_info([channel_name], raw.info["sfreq"], ch_types=channel_type)
channel = mne.io.RawArray([channel], info)
raw.add_channels([channel], force_update_info=True)
return(raw)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def eeg_select_channels(raw, channel_names):
"""
Select one or several channels by name and returns them in a dataframe.
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
channel_names : str or list
Channel's name(s).
Returns
----------
channels : pd.DataFrame
Channel.
Example
----------
>>> import neurokit as nk
>>> raw = nk.eeg_select_channel(raw, "TP7")
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- mne
*See Also*
- mne package: http://martinos.org/mne/dev/index.html
"""
if isinstance(channel_names, list) is False:
channel_names = [channel_names]
channels, time_index = raw.copy().pick_channels(channel_names)[:]
if len(channel_names) > 1:
channels = pd.DataFrame(channels.T, columns=channel_names)
else:
channels = pd.Series(channels[0])
channels.name = channel_names[0]
return(channels)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def eeg_create_events(onsets, conditions=None):
"""
Create MNE compatible events.
Parameters
----------
onsets : list or array
Events onsets.
conditions : list
A list of equal length containing the stimuli types/conditions.
Returns
----------
(events, event_id) : tuple
MNE-formated events and a dictionary with event's names.
Example
----------
>>> import neurokit as nk
>>> events, event_id = nk.create_mne_events(events_onset, trigger_list)
Authors
----------
Dominique Makowski
Dependencies
----------
None
"""
event_id = {}
    if conditions is None:
        conditions = ["Event"] * len(onsets)
    # Sanity check
    if len(conditions) != len(onsets):
        print("NeuroKit Warning: eeg_create_events(): conditions parameter of different length than onsets. Aborting.")
        return()
event_names = list(set(conditions))
# event_index = [1, 2, 3, 4, 5, 32, 64, 128]
event_index = list(range(len(event_names)))
for i in enumerate(event_names):
conditions = [event_index[i[0]] if x==i[1] else x for x in conditions]
event_id[i[1]] = event_index[i[0]]
events = np.array([onsets, [0]*len(onsets), conditions])
return(events, event_id)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def eeg_add_events(raw, events_channel, conditions=None, treshold="auto", cut="higher", time_index=None, number="all", after=0, before=None, min_duration=1):
"""
Create MNE compatible events.
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
events_channel : str or array
Name of the trigger channel if in the raw, or array of equal length if externally supplied.
conditions : list
List containing the stimuli types/conditions.
treshold : float
The treshold value by which to select the events. If "auto", takes the value between the max and the min.
cut : str
"higher" or "lower", define the events as above or under the treshold. For photosensors, a white screen corresponds usually to higher values. Therefore, if your events were signalled by a black colour, events values would be the lower ones, and you should set the cut to "lower".
    time_index : array or list
        Add a corresponding datetime index; will return an additional array with the onsets as datetimes.
number : str or int
How many events should it select.
after : int
If number different than "all", then at what time should it start selecting the events.
before : int
If number different than "all", before what time should it select the events.
min_duration : int
The minimum duration of an event (in timepoints).
Returns
----------
(raw, events, event_id) : tuple
The raw file with events, the mne-formatted events and event_id.
Example
----------
>>> import neurokit as nk
>>>
>>> raw, events, event_id = nk.eeg_add_events(raw, events_channel, conditions)
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- pandas
*See Also*
- mne: http://martinos.org/mne/dev/index.html
References
-----------
- None
"""
# Extract the events_channel from raw if needed
if isinstance(events_channel, str):
try:
events_channel = eeg_select_channels(raw, events_channel)
except:
print("NeuroKit error: eeg_add_events(): Wrong events_channel name provided.")
# Find event onsets
events = find_events(events_channel, treshold=treshold, cut=cut, time_index=time_index, number=number, after=after, before=before, min_duration=min_duration)
# Create mne compatible events
events, event_id = eeg_create_events(events["onsets"], conditions)
# Add them
raw.add_events(events)
return(raw, events, event_id)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def eeg_epochs_to_dict(epochs, include="all", exclude=None, hemisphere="both", include_central=True):
"""
Convert mne.Epochs object to Python dict.
"""
data = {}
for index, epoch in enumerate(epochs.get_data()):
epoch = pd.DataFrame(epoch.T)
epoch.columns = epochs.ch_names
selection = eeg_select_sensor_area(include=include, exclude=exclude, hemisphere=hemisphere, include_central=include_central)
data[index] = epoch[selection]
    return(data)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def eeg_select_sensor_area(include="all", exclude=None, hemisphere="both", include_central=True):
"""
Returns list of electrodes names (according to a 10-20 EEG montage). This function is probably not very flexibile. Looking for help to improve it.
Parameters
----------
include : str
Sensor area to include.
exclude : str or None
Sensor area to exclude.
hemisphere : str
Select both hemispheres? "both", "left" or "right".
include_central : bool
if `hemisphere != "both"`, select the central line?
Returns
----------
sensors : list
List of sensors corresponding to the selected area.
Example
----------
>>> import neurokit as nk
>>> nk.eeg_select_sensor_area(include="F", exclude="C")
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
References
------------
- None
"""
sensors = ['AF3', 'AF4', 'AF7', 'AF8', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'CP1', 'CP2', 'CP3', 'CP4', 'CP5', 'CP6', 'CPz', 'Cz', 'F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'FC1', 'FC2', 'FC3', 'FC4', 'FC5', 'FC6', 'Fp1', 'Fp2', 'FT10', 'FT7', 'FT8', 'FT9', 'O1', 'O2', 'Oz', 'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'PO3', 'PO4', 'PO7', 'PO8', 'POz', 'Pz', 'FCz', 'T7', 'T8', 'TP10', 'TP7', 'TP8', 'TP9', 'AFz']
if include != "all":
sensors = [s for s in sensors if include in s]
if exclude != None:
if isinstance(exclude, str):
exclude = [exclude]
for to_exclude in exclude:
sensors = [s for s in sensors if to_exclude not in s]
if hemisphere != "both":
if include_central == False:
if hemisphere == "left":
sensors = [s for s in sensors if "1" in s or "3" in s or "5" in s or "7" in s or "9" in s]
if hemisphere == "right":
sensors = [s for s in sensors if "2" in s or "4" in s or "6" in s or "8" in s or "10" in s]
else:
if hemisphere == "left":
sensors = [s for s in sensors if "1" in s or "3" in s or "5" in s or "7" in s or "9" in s or "z" in s]
if hemisphere == "right":
sensors = [s for s in sensors if "2" in s or "4" in s or "6" in s or "8" in s or "10" in s or "z" in s]
return(sensors)
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
#def eeg_create_raws(filename, path, participants=None, runs=None, lowpass_filter=None, highpass_filter=None, notch_filter=False, ica_eog=False, ica_ecg=False, resample=False):
# """
# """
# if participants is None:
# participants = os.listdir(path)
#
# raws = {} # Initialize empty dic
# for participant in participants:
#
# if runs is None:
# runs = os.listdir(path + "/" + participant + "/")
#
# raws[participant] = {}
# for run in runs:
# # Load the participant's file into a raw object
# raw = eeg_load_raw(filename=filename, path=path + "/" + participant + "/" + run + "/")
# # Filter and downsample
# raw = eeg_filter(raw, lowpass=lowpass_filter, highpass=highpass_filter, notch=notch_filter)
#
# # Apply ICA to remove EOG and ECG artifacts
# raw, ica = eeg_ica(raw, eog=ica_eog, ecg=ica_ecg)
#
# # Resample to 125 points/s
# raw = raw.resample(resample)
#
# # Add data to dict
# raws[participant][run] = raw
#
# return(raws)
#
|
python
|
import json
your_json = '["foo", {"bar":["baz", null, 1.0, 2]}]'
parsed = json.loads(your_json)
print(type(your_json))
print(type(parsed))
#print(json.dumps(parsed, indent=4, sort_keys=True))
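# Round-trip sketch: serialize the parsed object back into a JSON string
print(json.dumps(parsed))  # -> ["foo", {"bar": ["baz", null, 1.0, 2]}]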
|
python
|
# Time complexity: O(n)
# Approach: Implementation using 2 arrays.
class MinStack:
def __init__(self):
"""
initialize your data structure here.
"""
self.stack = []
self.minStack = []
def push(self, val: int) -> None:
if len(self.minStack)==0:
self.stack.append(val)
self.minStack.append(val)
else:
if val > self.minStack[-1]:
self.minStack.append(self.minStack[-1])
else:
self.minStack.append(val)
self.stack.append(val)
def pop(self) -> None:
self.stack.pop()
self.minStack.pop()
def top(self) -> int:
return self.stack[-1]
def getMin(self) -> int:
return self.minStack[-1]
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(val)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
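# Minimal self-test sketch for the class above (the values pushed are arbitrary):
if __name__ == "__main__":
    s = MinStack()
    s.push(3)
    s.push(1)
    s.push(2)
    assert s.getMin() == 1
    s.pop()
    assert s.top() == 1 and s.getMin() == 1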
|
python
|
from celery.utils.log import get_task_logger
from flask.ext.celery import Celery
from datetime import datetime, timedelta
import time
from app import app, db
from models import Agency, Prediction
from nextbus import Nextbus
"""
Celery is a task queue for background task processing. We're using it
for scheduled tasks, which are configured in this file.
The task execution schedule can be found/tweaked in config.py.
"""
# Create new Celery object with configured broker; get other cfg params
celery = Celery(app)
celery.conf.update(app.config)
# This wraps task execution in an app context (for db session, etc)
TaskBase = celery.Task
class ContextTask(TaskBase):
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
celery.Task = ContextTask
logger = get_task_logger(__name__)
# Task definitions:
@celery.task()
def update_agencies():
"""
Refresh our list of Agencies from NextBus
"""
Nextbus.get_agencies(truncate=True)
@celery.task()
def update_routes(agencies=None):
"""
Refresh our list of Routes, Stops, and Directions from Nextbus
"""
if not agencies:
agencies = app.config['AGENCIES']
route_count = 0
for agency_tag in agencies:
route_count += len(Nextbus.get_routes(agency_tag, truncate=True))
print("update_routes: Got {0} routes for {1} agencies"\
.format(route_count, len(agencies)))
@celery.task()
def update_predictions(agencies=None):
"""
Get the latest vehicle arrival predictions from Nextbus
"""
start = time.time()
if not agencies:
agencies = app.config['AGENCIES']
prediction_count = len(Nextbus.get_predictions(agencies, truncate=False))
elapsed = time.time() - start
print("Got {0} predictions for {1} agencies in {2:0.2f} sec."\
.format(prediction_count, len(agencies), elapsed))
@celery.task()
def update_vehicle_locations(agencies=None):
"""
Get the latest vehicle locations (coords/speed/heading) from NextBus
"""
start = time.time()
if not agencies:
agencies = app.config['AGENCIES']
vl_count = len(Nextbus.get_vehicle_locations(agencies, truncate=False))
elapsed = time.time() - start
print("Got {0} vehicle locations for {1} agencies in {2:0.2f} seconds."\
.format(vl_count, len(agencies), elapsed))
@celery.task()
def delete_stale_predictions():
"""
Delete predictions older than PREDICTIONS_MAX_AGE.
"""
delete = Nextbus.delete_stale_predictions()
print("{0} stale predictions deleted".format(delete))
@celery.task()
def delete_stale_vehicle_locations():
"""
Delete vehicle locations older than LOCATIONS_MAX_AGE.
"""
delete = Nextbus.delete_stale_vehicle_locations()
print("{0} stale vehicle locations deleted".format(delete))
|
python
|
"""
Context processor for menus.
"""
from .utils import get_menus
def menu_processor(request):
"""
    Context processor that makes every menu on the site available to templates.
    Menus usually appear on several pages, which is why this logic is factored out here.
"""
current_path = request.path
context = {
'menus': get_menus(current_path),
}
return context
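# Hedged registration sketch, assuming a Django-style project; the dotted path below
# is a placeholder for wherever this module actually lives:
# TEMPLATES[0]["OPTIONS"]["context_processors"].append("menus.context_processors.menu_processor")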
|
python
|
from typing import Union
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch import Tensor
from .functions import ActivationModule
from activations.utils.utils import _get_auto_axis_layout
def tent_activation(x, delta):
"""
Functional implementation of TentActivation.
"""
return torch.clamp(delta - torch.abs(x), min=0)
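# Quick sanity sketch for the functional form (input values are illustrative):
# tent_activation(torch.tensor([-2.0, 0.0, 0.5]), delta=1.0) -> tensor([0.0, 1.0, 0.5])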
class TentActivation(ActivationModule):
    distribution_display_mode = "kde"
    list = []
    _step = 0  # running counter used when logging figures to tensorboardX
def __init__(self, delta: Union[torch.Tensor, float] = 1.0, lb=0.0, ub=500.0, learnable: bool = False):
"""
Applies element-wise Tent(x) = max(0, delta - |x|)
:param delta: The delta which is used as initialization
:param lb: The lower bound of the possible delta values
:param ub: The upper bound of the possible delta values
"""
super().__init__("tent")
if torch.is_tensor(delta):
self.delta = nn.Parameter(delta, requires_grad=learnable)
else:
self.delta = nn.Parameter(torch.tensor(delta), requires_grad=learnable)
# self.delta.requires_grad = learnable
self.lb = lb
self.ub = ub
self.learnable = learnable
self.list.append(self)
def forward(self, x: Tensor) -> Tensor:
return tent_activation(x, self.delta)
def extra_repr(self) -> str:
return f'delta={self.delta}, lb={self.lb}, ub={self.ub}, learnable={self.learnable}'
def __str__(self):
return "Tent"
@classmethod
def show_all(cls, x=None, fitted_function=True, other_func=None,
display=True, tolerance=0.001, title=None, axes=None,
layout="auto", writer=None, step=None, colors="#1f77b4"):
"""
        Shows a graph of all the instantiated tent activation functions (or returns \
        it if ``display`` is ``False``).
Arguments:
x (range):
The range to print the function on.\n
Default ``None``
fitted_function (bool):
If ``True``, displays the best fitted function if searched.
Otherwise, returns it. \n
Default ``True``
other_funcs (callable):
another function to be plotted or a list of other callable
functions or a dictionary with the function name as key
and the callable as value.
display (bool):
If ``True``, displays the plot.
Otherwise, returns the figure. \n
Default ``False``
tolerance (float):
If the input histogram is used, it will be pruned. \n
                Every bin containing less than `tolerance` of the total \
input is pruned out.
(Reduces noise).
Default ``0.001``
title (str):
If not None, a title for the figure
Default ``None``
axes (matplotlib.pyplot.axis):
On ax or a list of axes to be plotted on. \n
If None, creates them automatically (see `layout`). \n
Default ``None``
layout (tuple or 'auto'):
Grid layout of the figure. If "auto", one is generated.\n
Default ``"auto"``
writer (tensorboardX.SummaryWriter):
A tensorboardX writer to give the image to, in case of
debugging.
Default ``None``
step (int):
A step/epoch for tensorboardX writer.
If None, incrementing itself.
Default ``None``
"""
if axes is None:
if layout == "auto":
total = len(cls.list)
layout = _get_auto_axis_layout(total)
if len(layout) != 2:
msg = 'layout should be either "auto" or a tuple of size 2'
raise TypeError(msg)
figs = tuple(np.flip(np.array(layout)* (2, 3)))
try:
import seaborn as sns
with sns.axes_style("whitegrid"):
fig, axes = plt.subplots(*layout, figsize=figs)
            except ImportError:
                # seaborn is optional; fall back to plain matplotlib styling
                fig, axes = plt.subplots(*layout, figsize=figs)
if isinstance(axes, plt.Axes):
axes = np.array([axes])
# if display:
for ax in axes.flatten()[len(cls.list):]:
ax.remove()
axes = axes[:len(cls.list)]
elif isinstance(axes, plt.Axes):
axes = np.array([axes for _ in range(len(cls.list))])
fig = plt.gcf()
if isinstance(colors, str):
colors = [colors]*len(axes.flatten())
if isinstance(x, list):
for rat, ax, x_rat, color in zip(cls.list, axes.flatten(), x, colors):
rat.show(x_rat, fitted_function, other_func, False, tolerance,
title, axis=ax, writer=None, step=step,
color=color)
else:
for rat, ax, color in zip(cls.list, axes.flatten(), colors):
rat.show(x, fitted_function, other_func, False, tolerance,
title, axis=ax, writer=None, step=step,
color=color)
if title is not None:
fig.suptitle(title, y=0.95)
fig = plt.gcf()
fig.tight_layout()
if writer is not None:
if step is None:
step = cls._step
cls._step += 1
writer.add_figure(title, fig, step)
elif display:
# plt.legend()
plt.show()
else:
return fig
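# Minimal usage sketch of the module form (shapes and values are illustrative):
# act = TentActivation(delta=1.0, learnable=True)
# y = act(torch.randn(8, 16))  # element-wise max(0, delta - |x|)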
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import numpy as np
import operator as op
from NumPyNet.exception import LayerError
from NumPyNet.utils import check_is_fitted
from NumPyNet.layers.base import BaseLayer
__author__ = ['Mattia Ceccarelli', 'Nico Curti']
__email__ = ['[email protected]', '[email protected]']
class Route_layer(BaseLayer):
def __init__(self, input_layers, by_channels=True, **kwargs):
'''
Route layer
    The idea is: it takes the selected layers' outputs and concatenates
    them along the batch axis OR the channels axis.
    The YOLOv3 implementation always concatenates by channels.
By definition, this layer can't be used without a Network model.
Parameters
----------
    input_layers: iterable of integers or a single integer, indices of the layers in the network whose
      outputs have to be concatenated.
by_channels : bool, default True. It determines along
      which dimension the concatenation is performed. For example, if two
      inputs with size (b1, w, h, c) and (b2, w, h, c) are concatenated with by_channels=False,
then the final output shape will be (b1 + b2, w, h, c).
Otherwise, if the shapes are (b, w, h, c1) and (b, w, h, c2) and axis=3, the final output size
will be (b, w, h, c1 + c2) (YOLOv3 model)
'''
self.axis = 3 if by_channels else 0
if isinstance(input_layers, int):
      self.input_layers = (input_layers, )
elif hasattr(input_layers, '__iter__'):
self.input_layers = tuple(input_layers)
else :
      raise ValueError('Route Layer : parameter "input_layers" is neither iterable nor an integer')
super(Route_layer, self).__init__()
def __str__(self):
return 'route {}'.format(list(self.input_layers))
def _build(self, previous_layer):
out_shapes = [x.out_shape for x in previous_layer]
self.input_shape = list(out_shapes[-1])
if self.axis:
      print(np.sum(list(map(op.itemgetter(self.axis), out_shapes))))
self.input_shape[-1] = np.sum(list(map(op.itemgetter(self.axis), out_shapes)))
else:
self.input_shape[0] = np.sum(list(map(op.itemgetter(self.axis), out_shapes)))
self.input_shape = tuple(self.input_shape)
return self
def __call__(self, previous_layer):
for prev in previous_layer:
if prev.out_shape is None:
class_name = self.__class__.__name__
prev_name = prev.__class__.__name__
raise LayerError('Incorrect shapes found. Layer {0} cannot be connected to the previous {1} layer.'.format(class_name, prev_name))
self._build(previous_layer)
return self
def forward(self, network):
'''
Concatenate along chosen axis the outputs of selected network layers
    In main CNN applications, like YOLOv3, the concatenation happens along the channels axis
Parameters
----------
network : Network object type.
Returns
-------
Route Layer object
'''
self.output = np.concatenate([network[layer_idx].output for layer_idx in self.input_layers], axis=self.axis)
self.delta = np.zeros(shape=self.out_shape, dtype=float)
return self
def backward(self, delta, network):
'''
Sum self.delta to the correct layer delta on the network
Parameters
----------
delta : 4-d numpy array, network delta to be backpropagated
network: Network object type.
Returns
-------
Route layer object
'''
check_is_fitted(self, 'delta')
# NumPyNet implementation
if self.axis == 3: # this works for concatenation by channels axis
channels_sum = 0
for idx in self.input_layers:
channels = network[idx].out_shape[3]
network[idx].delta += self.delta[..., channels_sum : channels_sum + channels]
channels_sum += channels
elif self.axis == 0: # this works for concatenation by batch axis
batch_sum = 0
      for idx in self.input_layers:
batches = network[idx].out_shape[0]
network[idx].delta += self.delta[batch_sum : batch_sum + batches, ...]
batch_sum += batches
return self
if __name__ == '__main__':
layer = Route_layer((1, 2))
print(layer)
print(layer.out_shape)
# TODO the idea is to create a toy model for numpynet and keras, and try some
# concatenation (mainly by channel, since the batch implementation doesn't really
# make sense to me)
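# A toy numpy sketch of the channel-wise concatenation this layer performs
# (shapes below are made up for illustration):
#   a = np.ones((1, 4, 4, 3)); b = np.zeros((1, 4, 4, 2))
#   np.concatenate([a, b], axis=3).shape  # -> (1, 4, 4, 5)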
|
python
|
try:
from libs.layers import *
from libs.utils_ft import *
except:
from layers import *
from utils_ft import *
import copy
import os
import sys
from collections import defaultdict
from typing import Optional
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn import MultiheadAttention, TransformerEncoderLayer
from torch.nn.init import constant_, xavier_uniform_
from torchinfo import summary
current_path = os.path.dirname(os.path.abspath(__file__))
SRC_ROOT = os.path.dirname(current_path)
sys.path.append(SRC_ROOT)
ADDITIONAL_ATTR = ['normalizer', 'raw_laplacian', 'return_latent',
'residual_type', 'norm_type', 'norm_eps', 'boundary_condition',
'upscaler_size', 'downscaler_size', 'spacial_dim', 'spacial_fc',
'regressor_activation', 'attn_activation',
'downscaler_activation', 'upscaler_activation',
'encoder_dropout', 'decoder_dropout', 'ffn_dropout']
class SimpleTransformerEncoderLayer(nn.Module):
def __init__(self,
d_model=96,
pos_dim=1,
n_head=2,
dim_feedforward=512,
attention_type='fourier',
pos_emb=False,
layer_norm=True,
attn_norm=None,
norm_type='layer',
norm_eps=None,
batch_norm=False,
attn_weight=False,
xavier_init: float=1e-2,
diagonal_weight: float=1e-2,
symmetric_init=False,
residual_type='add',
activation_type='relu',
dropout=0.1,
ffn_dropout=None,
debug=False,
):
super(SimpleTransformerEncoderLayer, self).__init__()
dropout = default(dropout, 0.05)
if attention_type in ['linear', 'softmax']:
dropout = 0.1
ffn_dropout = default(ffn_dropout, dropout)
norm_eps = default(norm_eps, 1e-5)
attn_norm = default(attn_norm, not layer_norm)
if (not layer_norm) and (not attn_norm):
attn_norm = True
norm_type = default(norm_type, 'layer')
self.attn = SimpleAttention(n_head=n_head,
d_model=d_model,
attention_type=attention_type,
diagonal_weight=diagonal_weight,
xavier_init=xavier_init,
symmetric_init=symmetric_init,
pos_dim=pos_dim,
norm=attn_norm,
norm_type=norm_type,
eps=norm_eps,
dropout=dropout)
self.d_model = d_model
self.n_head = n_head
self.pos_dim = pos_dim
self.add_layer_norm = layer_norm
if layer_norm:
self.layer_norm1 = nn.LayerNorm(d_model, eps=norm_eps)
self.layer_norm2 = nn.LayerNorm(d_model, eps=norm_eps)
dim_feedforward = default(dim_feedforward, 2*d_model)
self.ff = FeedForward(in_dim=d_model,
dim_feedforward=dim_feedforward,
batch_norm=batch_norm,
activation=activation_type,
dropout=ffn_dropout,
)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.residual_type = residual_type # plus or minus
self.add_pos_emb = pos_emb
if self.add_pos_emb:
self.pos_emb = PositionalEncoding(d_model)
self.debug = debug
self.attn_weight = attn_weight
self.__name__ = attention_type.capitalize() + 'TransformerEncoderLayer'
def forward(self, x, pos=None, weight=None):
'''
- x: node feature, (batch_size, seq_len, n_feats)
- pos: position coords, needed in every head
Remark:
- for n_head=1, no need to encode positional
information if coords are in features
'''
if self.add_pos_emb:
x = x.permute((1, 0, 2))
x = self.pos_emb(x)
x = x.permute((1, 0, 2))
if pos is not None and self.pos_dim > 0:
att_output, attn_weight = self.attn(
x, x, x, pos=pos, weight=weight) # encoder no mask
else:
att_output, attn_weight = self.attn(x, x, x, weight=weight)
if self.residual_type in ['add', 'plus'] or self.residual_type is None:
x = x + self.dropout1(att_output)
else:
x = x - self.dropout1(att_output)
if self.add_layer_norm:
x = self.layer_norm1(x)
x1 = self.ff(x)
x = x + self.dropout2(x1)
if self.add_layer_norm:
x = self.layer_norm2(x)
if self.attn_weight:
return x, attn_weight
else:
return x
class GalerkinTransformerDecoderLayer(nn.Module):
r"""
A lite implementation of the decoder layer based on linear causal attention
adapted from the TransformerDecoderLayer in PyTorch
https://github.com/pytorch/pytorch/blob/afc1d1b3d6dad5f9f56b1a4cb335de109adb6018/torch/nn/modules/transformer.py#L359
"""
def __init__(self, d_model,
nhead,
pos_dim = 1,
dim_feedforward=512,
attention_type='galerkin',
layer_norm=True,
attn_norm=None,
norm_type='layer',
norm_eps=1e-5,
xavier_init: float=1e-2,
diagonal_weight: float = 1e-2,
dropout=0.05,
ffn_dropout=None,
activation_type='relu',
device=None,
dtype=None,
debug=False,) -> None:
factory_kwargs = {'device': device, 'dtype': dtype, }
super(GalerkinTransformerDecoderLayer, self).__init__()
ffn_dropout = default(ffn_dropout, dropout)
self.debug = debug
self.self_attn = SimpleAttention(nhead, d_model,
attention_type=attention_type,
pos_dim=pos_dim,
norm=attn_norm,
eps=norm_eps,
norm_type=norm_type,
diagonal_weight=diagonal_weight,
xavier_init=xavier_init,
dropout=dropout,)
self.multihead_attn = SimpleAttention(nhead, d_model,
attention_type='causal',
pos_dim=pos_dim,
norm=attn_norm,
eps=norm_eps,
norm_type=norm_type,
diagonal_weight=diagonal_weight,
xavier_init=xavier_init,
dropout=dropout,)
dim_feedforward = default(dim_feedforward, 2*d_model)
self.ff = FeedForward(in_dim=d_model,
dim_feedforward=dim_feedforward,
activation=activation_type,
dropout=ffn_dropout,
)
self.dropout = nn.Dropout(ffn_dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs)
self.add_layer_norm = layer_norm
if self.add_layer_norm:
self.norm1 = nn.LayerNorm(d_model, eps=norm_eps, **factory_kwargs)
self.norm2 = nn.LayerNorm(d_model, eps=norm_eps, **factory_kwargs)
self.norm3 = nn.LayerNorm(d_model, eps=norm_eps, **factory_kwargs)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = F.relu
def forward(self, x: Tensor, memory: Tensor,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,) -> Tensor:
r"""Pass the inputs (and mask) through the decoder layer.
Args:
tgt: the sequence to the decoder layer (required).
memory: the sequence from the last layer of the encoder (required).
tgt_mask: the mask for the tgt sequence (optional).
memory_mask: the mask for the memory sequence (optional).
Shape:
see the docs in Transformer class.
"""
if self.add_layer_norm:
x = self.norm1(x + self._sa_block(x, tgt_mask))
x = self.norm2(x + self._mha_block(x, memory, memory_mask))
x = self.norm3(x + self._ff_block(x))
else:
x = x + self._sa_block(x, tgt_mask)
x = x + self._mha_block(x, memory, memory_mask)
x = x + self._ff_block(x)
return x
# self-attention block
def _sa_block(self, x: Tensor, attn_mask: Optional[Tensor]) -> Tensor:
x = self.self_attn(x, x, x, attn_mask=attn_mask,)[0]
return self.dropout1(x)
# multihead attention block
def _mha_block(self, x: Tensor, mem: Tensor, attn_mask: Optional[Tensor]) -> Tensor:
x = self.multihead_attn(x, mem, mem, mask=attn_mask,)[0]
return self.dropout2(x)
# feed forward block
def _ff_block(self, x: Tensor) -> Tensor:
x = self.ff(x)
return self.dropout(x)
class _TransformerEncoderLayer(nn.Module):
r"""
Taken from official torch implementation:
https://pytorch.org/docs/stable/_modules/torch/nn/modules/transformer.html#TransformerEncoderLayer
- add a layer norm switch
- add an attn_weight output switch
- batch first
batch_first has been added in PyTorch 1.9.0
https://github.com/pytorch/pytorch/pull/55285
"""
def __init__(self, d_model, nhead,
dim_feedforward=2048,
dropout=0.1,
layer_norm=True,
attn_weight=False,
):
super(_TransformerEncoderLayer, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.add_layer_norm = layer_norm
self.attn_weight = attn_weight
self.activation = nn.ReLU()
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(_TransformerEncoderLayer, self).__setstate__(state)
def forward(self, src: Tensor,
pos: Optional[Tensor] = None,
weight: Optional[Tensor] = None,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
r"""Pass the input through the encoder layer.
Args (modified from torch):
src: the sequence to the encoder layer (required): (batch_size, seq_len, d_model)
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
Remark:
PyTorch official implementation: (seq_len, n_batch, d_model) as input
here we permute the first two dims as input
so in the first line the dim needs to be permuted then permuted back
"""
if pos is not None:
src = torch.cat([pos, src], dim=-1)
src = src.permute(1, 0, 2)
if (src_mask is None) or (src_key_padding_mask is None):
src2, attn_weight = self.self_attn(src, src, src)
else:
src2, attn_weight = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)
src = src + self.dropout1(src2)
if self.add_layer_norm:
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
if self.add_layer_norm:
src = self.norm2(src)
src = src.permute(1, 0, 2)
if self.attn_weight:
return src, attn_weight
else:
return src
class TransformerEncoderWrapper(nn.Module):
r"""TransformerEncoder is a stack of N encoder layers
Modified from pytorch official implementation
TransformerEncoder's input and output shapes follow
    those of the encoder_layer fed in, as this is essentially a wrapper
Args:
encoder_layer: an instance of the TransformerEncoderLayer() class (required).
num_layers: the number of sub-encoder-layers in the encoder (required).
norm: the layer normalization component (optional).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
>>> src = torch.rand(10, 32, 512)
>>> out = transformer_encoder(src)
"""
__constants__ = ['norm']
def __init__(self, encoder_layer, num_layers,
norm=None,):
super(TransformerEncoderWrapper, self).__init__()
self.layers = nn.ModuleList(
[copy.deepcopy(encoder_layer) for i in range(num_layers)])
self.num_layers = num_layers
self.norm = norm
def forward(self, src: Tensor,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
r"""Pass the input through the encoder layers in turn.
Args:
src: the sequence to the encoder (required).
mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
output = src
for mod in self.layers:
output = mod(output, src_mask=mask,
src_key_padding_mask=src_key_padding_mask)
if self.norm is not None:
output = self.norm(output)
return output
class GCN(nn.Module):
def __init__(self,
node_feats=4,
out_features=96,
num_gcn_layers=2,
edge_feats=6,
activation=True,
raw_laplacian=False,
dropout=0.1,
debug=False):
super(GCN, self).__init__()
'''
        A simple GCN, a wrapper for Kipf and Welling's code
learnable edge features similar to
Graph Transformer https://arxiv.org/abs/1911.06455
but using neighbor agg
'''
self.edge_learner = EdgeEncoder(out_dim=out_features,
edge_feats=edge_feats,
raw_laplacian=raw_laplacian
)
self.gcn_layer0 = GraphConvolution(in_features=node_feats, # hard coded
out_features=out_features,
debug=debug,
)
self.gcn_layers = nn.ModuleList([copy.deepcopy(GraphConvolution(
in_features=out_features, # hard coded
out_features=out_features,
debug=debug
)) for _ in range(1, num_gcn_layers)])
self.activation = activation
self.relu = nn.ReLU()
self.dropout = nn.Dropout(dropout)
self.edge_feats = edge_feats
self.debug = debug
def forward(self, x, edge):
x = x.permute(0, 2, 1).contiguous()
edge = edge.permute([0, 3, 1, 2]).contiguous()
assert edge.size(1) == self.edge_feats
edge = self.edge_learner(edge)
out = self.gcn_layer0(x, edge)
for gc in self.gcn_layers[:-1]:
out = gc(out, edge)
if self.activation:
out = self.relu(out)
# last layer no activation
out = self.gcn_layers[-1](out, edge)
return out.permute(0, 2, 1)
class GAT(nn.Module):
def __init__(self,
node_feats=4,
out_features=96,
num_gcn_layers=2,
edge_feats=None,
activation=False,
debug=False):
super(GAT, self).__init__()
'''
A simple GAT: modified from the official implementation
'''
self.gat_layer0 = GraphAttention(in_features=node_feats,
out_features=out_features,
)
self.gat_layers = nn.ModuleList([copy.deepcopy(GraphAttention(
in_features=out_features,
out_features=out_features,
)) for _ in range(1, num_gcn_layers)])
self.activation = activation
self.relu = nn.ReLU()
self.debug = debug
def forward(self, x, edge):
'''
input: node feats (-1, seq_len, n_feats)
edge only takes adj (-1, seq_len, seq_len)
edge matrix first one in the last dim is graph Lap.
'''
edge = edge[..., 0].contiguous()
out = self.gat_layer0(x, edge)
for layer in self.gat_layers[:-1]:
out = layer(out, edge)
if self.activation:
out = self.relu(out)
# last layer no activation
return self.gat_layers[-1](out, edge)
class PointwiseRegressor(nn.Module):
def __init__(self, in_dim, # input dimension
n_hidden,
out_dim, # number of target dim
num_layers: int = 2,
spacial_fc: bool = False,
spacial_dim=1,
dropout=0.1,
activation='silu',
return_latent=False,
debug=False):
super(PointwiseRegressor, self).__init__()
'''
A wrapper for a simple pointwise linear layers
'''
dropout = default(dropout, 0.1)
self.spacial_fc = spacial_fc
activ = nn.SiLU() if activation == 'silu' else nn.ReLU()
if self.spacial_fc:
in_dim = in_dim + spacial_dim
self.fc = nn.Linear(in_dim, n_hidden)
self.ff = nn.ModuleList([nn.Sequential(
nn.Linear(n_hidden, n_hidden),
activ,
)])
for _ in range(num_layers - 1):
self.ff.append(nn.Sequential(
nn.Linear(n_hidden, n_hidden),
activ,
))
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(n_hidden, out_dim)
self.return_latent = return_latent
self.debug = debug
def forward(self, x, grid=None):
'''
2D:
Input: (-1, n, n, in_features)
Output: (-1, n, n, n_targets)
1D:
Input: (-1, n, in_features)
Output: (-1, n, n_targets)
'''
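        # For instance, with spacial_fc=True and spacial_dim=2, an input of shape
        # (-1, n, n, in_dim) is concatenated with the grid of shape (-1, n, n, 2)
        # below before the first linear layer; the MLP then acts pointwise on the
        # last dimension.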
if self.spacial_fc:
x = torch.cat([x, grid], dim=-1)
x = self.fc(x)
for layer in self.ff:
x = layer(x)
x = self.dropout(x)
x = self.out(x)
if self.return_latent:
return x, None
else:
return x
class SpectralRegressor(nn.Module):
def __init__(self, in_dim,
n_hidden,
freq_dim,
out_dim,
modes: int,
num_spectral_layers: int = 2,
n_grid=None,
dim_feedforward=None,
spacial_fc=False,
spacial_dim=2,
return_freq=False,
return_latent=False,
normalizer=None,
activation='silu',
last_activation=True,
dropout=0.1,
debug=False):
super(SpectralRegressor, self).__init__()
'''
A wrapper for both SpectralConv1d and SpectralConv2d
Ref: Li et 2020 FNO paper
https://github.com/zongyi-li/fourier_neural_operator/blob/master/fourier_2d.py
        A new implementation incorporating all spacial-based FNO
in_dim: input dimension, (either n_hidden or spacial dim)
n_hidden: number of hidden features out from attention to the fourier conv
'''
if spacial_dim == 2: # 2d, function + (x,y)
spectral_conv = SpectralConv2d
elif spacial_dim == 1: # 1d, function + x
spectral_conv = SpectralConv1d
else:
raise NotImplementedError("3D not implemented.")
activation = default(activation, 'silu')
self.activation = nn.SiLU() if activation == 'silu' else nn.ReLU()
dropout = default(dropout, 0.1)
self.spacial_fc = spacial_fc # False in Transformer
if self.spacial_fc:
self.fc = nn.Linear(in_dim + spacial_dim, n_hidden)
self.spectral_conv = nn.ModuleList([spectral_conv(in_dim=n_hidden,
out_dim=freq_dim,
n_grid=n_grid,
modes=modes,
dropout=dropout,
activation=activation,
return_freq=return_freq,
debug=debug)])
for _ in range(num_spectral_layers - 1):
self.spectral_conv.append(spectral_conv(in_dim=freq_dim,
out_dim=freq_dim,
n_grid=n_grid,
modes=modes,
dropout=dropout,
activation=activation,
return_freq=return_freq,
debug=debug))
if not last_activation:
self.spectral_conv[-1].activation = Identity()
self.n_grid = n_grid # dummy for debug
self.dim_feedforward = default(dim_feedforward, 2*spacial_dim*freq_dim)
self.regressor = nn.Sequential(
nn.Linear(freq_dim, self.dim_feedforward),
self.activation,
nn.Linear(self.dim_feedforward, out_dim),
)
self.normalizer = normalizer
self.return_freq = return_freq
self.return_latent = return_latent
self.debug = debug
def forward(self, x, edge=None, pos=None, grid=None):
'''
2D:
Input: (-1, n, n, in_features)
Output: (-1, n, n, n_targets)
1D:
Input: (-1, n, in_features)
Output: (-1, n, n_targets)
'''
x_latent = []
x_fts = []
if self.spacial_fc:
x = torch.cat([x, grid], dim=-1)
x = self.fc(x)
for layer in self.spectral_conv:
if self.return_freq:
x, x_ft = layer(x)
x_fts.append(x_ft.contiguous())
else:
x = layer(x)
if self.return_latent:
x_latent.append(x.contiguous())
x = self.regressor(x)
if self.normalizer:
x = self.normalizer.inverse_transform(x)
if self.return_freq or self.return_latent:
return x, dict(preds_freq=x_fts, preds_latent=x_latent)
else:
return x
class DownScaler(nn.Module):
def __init__(self, in_dim,
out_dim,
dropout=0.1,
padding=5,
downsample_mode='conv',
activation_type='silu',
interp_size=None,
debug=False):
super(DownScaler, self).__init__()
'''
A wrapper for conv2d/interp downscaler
'''
if downsample_mode == 'conv':
self.downsample = nn.Sequential(Conv2dEncoder(in_dim=in_dim,
out_dim=out_dim,
activation_type=activation_type,
debug=debug),
Conv2dEncoder(in_dim=out_dim,
out_dim=out_dim,
padding=padding,
activation_type=activation_type,
debug=debug))
elif downsample_mode == 'interp':
self.downsample = Interp2dEncoder(in_dim=in_dim,
out_dim=out_dim,
interp_size=interp_size,
activation_type=activation_type,
dropout=dropout,
debug=debug)
else:
raise NotImplementedError("downsample mode not implemented.")
self.in_dim = in_dim
self.out_dim = out_dim
def forward(self, x):
'''
2D:
Input: (-1, n, n, in_dim)
Output: (-1, n_s, n_s, out_dim)
'''
n_grid = x.size(1)
bsz = x.size(0)
x = x.view(bsz, n_grid, n_grid, self.in_dim)
x = x.permute(0, 3, 1, 2)
x = self.downsample(x)
x = x.permute(0, 2, 3, 1)
return x
class UpScaler(nn.Module):
def __init__(self, in_dim: int,
out_dim: int,
hidden_dim=None,
padding=2,
output_padding=0,
dropout=0.1,
upsample_mode='conv',
activation_type='silu',
interp_mode='bilinear',
interp_size=None,
debug=False):
super(UpScaler, self).__init__()
'''
A wrapper for DeConv2d upscaler or interpolation upscaler
Deconv: Conv1dTranspose
Interp: interp->conv->interp
'''
hidden_dim = default(hidden_dim, in_dim)
if upsample_mode in ['conv', 'deconv']:
self.upsample = nn.Sequential(
DeConv2dBlock(in_dim=in_dim,
out_dim=out_dim,
hidden_dim=hidden_dim,
padding=padding,
output_padding=output_padding,
dropout=dropout,
activation_type=activation_type,
debug=debug),
DeConv2dBlock(in_dim=in_dim,
out_dim=out_dim,
hidden_dim=hidden_dim,
padding=padding*2,
output_padding=output_padding,
dropout=dropout,
activation_type=activation_type,
debug=debug))
elif upsample_mode == 'interp':
self.upsample = Interp2dUpsample(in_dim=in_dim,
out_dim=out_dim,
interp_mode=interp_mode,
interp_size=interp_size,
dropout=dropout,
activation_type=activation_type,
debug=debug)
else:
raise NotImplementedError("upsample mode not implemented.")
self.in_dim = in_dim
self.out_dim = out_dim
def forward(self, x):
'''
2D:
Input: (-1, n_s, n_s, in_dim)
Output: (-1, n, n, out_dim)
'''
x = x.permute(0, 3, 1, 2)
x = self.upsample(x)
x = x.permute(0, 2, 3, 1)
return x
class SimpleTransformer(nn.Module):
def __init__(self, **kwargs):
super(SimpleTransformer, self).__init__()
self.config = defaultdict(lambda: None, **kwargs)
self._get_setting()
self._initialize()
self.__name__ = self.attention_type.capitalize() + 'Transformer'
def forward(self, node, edge, pos, grid=None, weight=None):
'''
seq_len: n, number of grid points
node_feats: number of features of the inputs
edge_feats: number of Laplacian matrices (including learned)
pos_dim: dimension of the Euclidean space
- node: (batch_size, seq_len, node_feats)
- pos: (batch_size, seq_len, pos_dim)
- edge: (batch_size, seq_len, seq_len, edge_feats)
        - weight: (batch_size, seq_len, seq_len): mass matrix preferred
or (batch_size, seq_len) when mass matrices are not provided
Remark:
for classic Transformer: pos_dim = n_hidden = 512
pos encodings is added to the latent representation
'''
x_latent = []
attn_weights = []
x = self.feat_extract(node, edge)
if self.spacial_residual or self.return_latent:
res = x.contiguous()
x_latent.append(res)
for encoder in self.encoder_layers:
if self.return_attn_weight:
x, attn_weight = encoder(x, pos, weight)
attn_weights.append(attn_weight)
else:
x = encoder(x, pos, weight)
if self.return_latent:
x_latent.append(x.contiguous())
if self.spacial_residual:
x = res + x
x_freq = self.freq_regressor(
x)[:, :self.pred_len, :] if self.n_freq_targets > 0 else None
x = self.dpo(x)
x = self.regressor(x, grid=grid)
return dict(preds=x,
preds_freq=x_freq,
preds_latent=x_latent,
attn_weights=attn_weights)
def _initialize(self):
self._get_feature()
self._get_encoder()
if self.n_freq_targets > 0:
self._get_freq_regressor()
self._get_regressor()
if self.decoder_type in ['pointwise', 'convolution']:
self._initialize_layer(self.regressor)
self.config = dict(self.config)
@staticmethod
def _initialize_layer(layer, gain=1e-2):
for param in layer.parameters():
if param.ndim > 1:
xavier_uniform_(param, gain=gain)
else:
constant_(param, 0)
def _get_setting(self):
all_attr = list(self.config.keys()) + ADDITIONAL_ATTR
for key in all_attr:
setattr(self, key, self.config[key])
self.dim_feedforward = default(self.dim_feedforward, 2*self.n_hidden)
self.spacial_dim = default(self.spacial_dim, self.pos_dim)
self.spacial_fc = default(self.spacial_fc, False)
self.dropout = default(self.dropout, 0.05)
self.dpo = nn.Dropout(self.dropout)
if self.decoder_type == 'attention':
self.num_encoder_layers += 1
self.attention_types = ['fourier', 'integral',
'cosine', 'galerkin', 'linear', 'softmax']
def _get_feature(self):
if self.num_feat_layers > 0 and self.feat_extract_type == 'gcn':
self.feat_extract = GCN(node_feats=self.node_feats,
edge_feats=self.edge_feats,
num_gcn_layers=self.num_feat_layers,
out_features=self.n_hidden,
activation=self.graph_activation,
raw_laplacian=self.raw_laplacian,
debug=self.debug,
)
elif self.num_feat_layers > 0 and self.feat_extract_type == 'gat':
self.feat_extract = GAT(node_feats=self.node_feats,
out_features=self.n_hidden,
num_gcn_layers=self.num_feat_layers,
activation=self.graph_activation,
debug=self.debug,
)
else:
self.feat_extract = Identity(in_features=self.node_feats,
out_features=self.n_hidden)
def _get_encoder(self):
if self.attention_type in self.attention_types:
encoder_layer = SimpleTransformerEncoderLayer(d_model=self.n_hidden,
n_head=self.n_head,
attention_type=self.attention_type,
dim_feedforward=self.dim_feedforward,
layer_norm=self.layer_norm,
attn_norm=self.attn_norm,
norm_type=self.norm_type,
batch_norm=self.batch_norm,
pos_dim=self.pos_dim,
xavier_init=self.xavier_init,
diagonal_weight=self.diagonal_weight,
symmetric_init=self.symmetric_init,
attn_weight=self.return_attn_weight,
residual_type=self.residual_type,
activation_type=self.attn_activation,
dropout=self.encoder_dropout,
ffn_dropout=self.ffn_dropout,
debug=self.debug)
else:
encoder_layer = _TransformerEncoderLayer(d_model=self.n_hidden,
nhead=self.n_head,
dim_feedforward=self.dim_feedforward,
layer_norm=self.layer_norm,
attn_weight=self.return_attn_weight,
dropout=self.encoder_dropout
)
self.encoder_layers = nn.ModuleList(
[copy.deepcopy(encoder_layer) for _ in range(self.num_encoder_layers)])
def _get_freq_regressor(self):
if self.bulk_regression:
self.freq_regressor = BulkRegressor(in_dim=self.seq_len,
n_feats=self.n_hidden,
n_targets=self.n_freq_targets,
pred_len=self.pred_len)
else:
self.freq_regressor = nn.Sequential(
nn.Linear(self.n_hidden, self.n_hidden),
nn.ReLU(),
nn.Linear(self.n_hidden, self.n_freq_targets),
)
def _get_regressor(self):
if self.decoder_type == 'pointwise':
self.regressor = PointwiseRegressor(in_dim=self.n_hidden,
n_hidden=self.n_hidden,
out_dim=self.n_targets,
spacial_fc=self.spacial_fc,
spacial_dim=self.spacial_dim,
activation=self.regressor_activation,
dropout=self.decoder_dropout,
debug=self.debug)
elif self.decoder_type == 'ifft':
self.regressor = SpectralRegressor(in_dim=self.n_hidden,
n_hidden=self.n_hidden,
freq_dim=self.freq_dim,
out_dim=self.n_targets,
num_spectral_layers=self.num_regressor_layers,
modes=self.fourier_modes,
spacial_dim=self.spacial_dim,
spacial_fc=self.spacial_fc,
dim_feedforward=self.freq_dim,
activation=self.regressor_activation,
dropout=self.decoder_dropout,
)
else:
raise NotImplementedError("Decoder type not implemented")
def get_graph(self):
        return self.graph
def get_encoder(self):
return self.encoder_layers
class FourierTransformer2D(nn.Module):
def __init__(self, **kwargs):
super(FourierTransformer2D, self).__init__()
self.config = defaultdict(lambda: None, **kwargs)
self._get_setting()
self._initialize()
self.__name__ = self.attention_type.capitalize() + 'Transformer2D'
def forward(self, node, edge, pos, grid, weight=None, boundary_value=None):
'''
- node: (batch_size, n, n, node_feats)
- pos: (batch_size, n_s*n_s, pos_dim)
- edge: (batch_size, n_s*n_s, n_s*n_s, edge_feats)
        - weight: (batch_size, n_s*n_s, n_s*n_s): mass matrix preferred
or (batch_size, n_s*n_s) when mass matrices are not provided (lumped mass)
- grid: (batch_size, n-2, n-2, 2) excluding boundary
'''
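        # Rough shape walk-through (sizes are illustrative assumptions, not taken
        # from any particular config): node lives on the fine n x n grid and pos
        # on the coarse n_s x n_s grid; the downscaler maps node to
        # (bsz, n_s, n_s, n_hidden), the attention encoders act on the flattened
        # (bsz, n_s*n_s, n_hidden) sequence, the upscaler maps back towards the
        # fine grid, and the regressor produces preds matching the interior
        # points passed in `grid`.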
bsz = node.size(0)
n_s = int(pos.size(1)**(0.5))
x_latent = []
attn_weights = []
if not self.downscaler_size:
node = torch.cat(
[node, pos.contiguous().view(bsz, n_s, n_s, -1)], dim=-1)
x = self.downscaler(node)
x = x.view(bsz, -1, self.n_hidden)
x = self.feat_extract(x, edge)
x = self.dpo(x)
for encoder in self.encoder_layers:
if self.return_attn_weight and self.attention_type != 'official':
x, attn_weight = encoder(x, pos, weight)
attn_weights.append(attn_weight)
elif self.attention_type != 'official':
x = encoder(x, pos, weight)
else:
out_dim = self.n_head*self.pos_dim + self.n_hidden
x = x.view(bsz, -1, self.n_head, self.n_hidden//self.n_head).transpose(1, 2)
x = torch.cat([pos.repeat([1, self.n_head, 1, 1]), x], dim=-1)
x = x.transpose(1, 2).contiguous().view(bsz, -1, out_dim)
x = encoder(x)
if self.return_latent:
x_latent.append(x.contiguous())
x = x.view(bsz, n_s, n_s, self.n_hidden)
x = self.upscaler(x)
if self.return_latent:
x_latent.append(x.contiguous())
x = self.dpo(x)
if self.return_latent:
x, xr_latent = self.regressor(x, grid=grid)
x_latent.append(xr_latent)
else:
x = self.regressor(x, grid=grid)
if self.normalizer:
x = self.normalizer.inverse_transform(x)
if self.boundary_condition == 'dirichlet':
x = x[:, 1:-1, 1:-1].contiguous()
x = F.pad(x, (0, 0, 1, 1, 1, 1), "constant", 0)
if boundary_value is not None:
assert x.size() == boundary_value.size()
x += boundary_value
return dict(preds=x,
preds_latent=x_latent,
attn_weights=attn_weights)
def _initialize(self):
self._get_feature()
self._get_scaler()
self._get_encoder()
self._get_regressor()
self.config = dict(self.config)
def cuda(self, device=None):
self = super().cuda(device)
if self.normalizer:
self.normalizer = self.normalizer.cuda(device)
return self
def cpu(self):
self = super().cpu()
if self.normalizer:
self.normalizer = self.normalizer.cpu()
return self
def to(self, *args, **kwargs):
self = super().to(*args, **kwargs)
if self.normalizer:
self.normalizer = self.normalizer.to(*args, **kwargs)
return self
def print_config(self):
for a in self.config.keys():
if not a.startswith('__'):
print(f"{a}: \t", getattr(self, a))
@staticmethod
def _initialize_layer(layer, gain=1e-2):
for param in layer.parameters():
if param.ndim > 1:
xavier_uniform_(param, gain=gain)
else:
constant_(param, 0)
@staticmethod
def _get_pos(pos, downsample):
'''
get the downscaled position in 2d
'''
bsz = pos.size(0)
n_grid = pos.size(1)
x, y = pos[..., 0], pos[..., 1]
x = x.view(bsz, n_grid, n_grid)
y = y.view(bsz, n_grid, n_grid)
x = x[:, ::downsample, ::downsample].contiguous()
y = y[:, ::downsample, ::downsample].contiguous()
return torch.stack([x, y], dim=-1)
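    # Example of the strided slicing above (hypothetical sizes): for pos of
    # shape (bsz, 7, 7, 2) and downsample=2, indices 0, 2, 4, 6 are kept along
    # each spatial axis, giving an output of shape (bsz, 4, 4, 2).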
def _get_setting(self):
all_attr = list(self.config.keys()) + ADDITIONAL_ATTR
for key in all_attr:
setattr(self, key, self.config[key])
self.dim_feedforward = default(self.dim_feedforward, 2*self.n_hidden)
self.dropout = default(self.dropout, 0.05)
self.dpo = nn.Dropout(self.dropout)
if self.decoder_type == 'attention':
self.num_encoder_layers += 1
self.attention_types = ['fourier', 'integral', 'local', 'global',
'cosine', 'galerkin', 'linear', 'softmax']
def _get_feature(self):
if self.feat_extract_type == 'gcn' and self.num_feat_layers > 0:
self.feat_extract = GCN(node_feats=self.n_hidden,
edge_feats=self.edge_feats,
num_gcn_layers=self.num_feat_layers,
out_features=self.n_hidden,
activation=self.graph_activation,
raw_laplacian=self.raw_laplacian,
debug=self.debug,
)
elif self.feat_extract_type == 'gat' and self.num_feat_layers > 0:
self.feat_extract = GAT(node_feats=self.n_hidden,
out_features=self.n_hidden,
num_gcn_layers=self.num_feat_layers,
activation=self.graph_activation,
debug=self.debug,
)
else:
self.feat_extract = Identity()
def _get_scaler(self):
if self.downscaler_size:
self.downscaler = DownScaler(in_dim=self.node_feats,
out_dim=self.n_hidden,
downsample_mode=self.downsample_mode,
interp_size=self.downscaler_size,
dropout=self.downscaler_dropout,
activation_type=self.downscaler_activation)
else:
self.downscaler = Identity(in_features=self.node_feats+self.spacial_dim,
out_features=self.n_hidden)
if self.upscaler_size:
self.upscaler = UpScaler(in_dim=self.n_hidden,
out_dim=self.n_hidden,
upsample_mode=self.upsample_mode,
interp_size=self.upscaler_size,
dropout=self.upscaler_dropout,
activation_type=self.upscaler_activation)
else:
self.upscaler = Identity()
def _get_encoder(self):
if self.attention_type in self.attention_types:
encoder_layer = SimpleTransformerEncoderLayer(d_model=self.n_hidden,
n_head=self.n_head,
attention_type=self.attention_type,
dim_feedforward=self.dim_feedforward,
layer_norm=self.layer_norm,
attn_norm=self.attn_norm,
batch_norm=self.batch_norm,
pos_dim=self.pos_dim,
xavier_init=self.xavier_init,
diagonal_weight=self.diagonal_weight,
symmetric_init=self.symmetric_init,
attn_weight=self.return_attn_weight,
dropout=self.encoder_dropout,
ffn_dropout=self.ffn_dropout,
norm_eps=self.norm_eps,
debug=self.debug)
elif self.attention_type == 'official':
encoder_layer = TransformerEncoderLayer(d_model=self.n_hidden+self.pos_dim*self.n_head,
nhead=self.n_head,
dim_feedforward=self.dim_feedforward,
dropout=self.encoder_dropout,
batch_first=True,
layer_norm_eps=self.norm_eps,
)
else:
raise NotImplementedError("encoder type not implemented.")
self.encoder_layers = nn.ModuleList(
[copy.deepcopy(encoder_layer) for _ in range(self.num_encoder_layers)])
def _get_regressor(self):
if self.decoder_type == 'pointwise':
self.regressor = PointwiseRegressor(in_dim=self.n_hidden,
n_hidden=self.n_hidden,
out_dim=self.n_targets,
num_layers=self.num_regressor_layers,
spacial_fc=self.spacial_fc,
spacial_dim=self.spacial_dim,
activation=self.regressor_activation,
dropout=self.decoder_dropout,
return_latent=self.return_latent,
debug=self.debug)
elif self.decoder_type == 'ifft2':
self.regressor = SpectralRegressor(in_dim=self.n_hidden,
n_hidden=self.freq_dim,
freq_dim=self.freq_dim,
out_dim=self.n_targets,
num_spectral_layers=self.num_regressor_layers,
modes=self.fourier_modes,
spacial_dim=self.spacial_dim,
spacial_fc=self.spacial_fc,
activation=self.regressor_activation,
last_activation=self.last_activation,
dropout=self.decoder_dropout,
return_latent=self.return_latent,
debug=self.debug
)
else:
raise NotImplementedError("Decoder type not implemented")
class FourierTransformer2DLite(nn.Module):
'''
A lite model of the Fourier/Galerkin Transformer
'''
def __init__(self, **kwargs):
super(FourierTransformer2DLite, self).__init__()
self.config = defaultdict(lambda: None, **kwargs)
self._get_setting()
self._initialize()
def forward(self, node, edge, pos, grid=None):
'''
seq_len: n, number of grid points
node_feats: number of features of the inputs
pos_dim: dimension of the Euclidean space
- node: (batch_size, n*n, node_feats)
- pos: (batch_size, n*n, pos_dim)
- grid: (batch_size, n, n, pos_dim)
Remark:
        for the classic Transformer: pos_dim = n_hidden = 512,
        and the positional encoding is added to the latent representation
'''
bsz = node.size(0)
input_dim = node.size(-1)
n_grid = grid.size(1)
node = torch.cat([node.view(bsz, -1, input_dim), pos],
dim=-1)
x = self.feat_extract(node, edge)
for encoder in self.encoder_layers:
x = encoder(x, pos)
x = self.dpo(x)
x = x.view(bsz, n_grid, n_grid, -1)
x = self.regressor(x, grid=grid)
return dict(preds=x,
preds_freq=None,
preds_latent=None,
attn_weights=None)
def _initialize(self):
self._get_feature()
self._get_encoder()
self._get_regressor()
self.config = dict(self.config)
def _get_setting(self):
all_attr = list(self.config.keys()) + ADDITIONAL_ATTR
for key in all_attr:
setattr(self, key, self.config[key])
self.dim_feedforward = default(self.dim_feedforward, 2*self.n_hidden)
self.spacial_dim = default(self.spacial_dim, self.pos_dim)
self.spacial_fc = default(self.spacial_fc, False)
self.dropout = default(self.dropout, 0.05)
self.dpo = nn.Dropout(self.dropout)
if self.decoder_type == 'attention':
self.num_encoder_layers += 1
self.attention_types = ['fourier', 'integral',
'cosine', 'galerkin', 'linear', 'softmax']
def _get_feature(self):
self.feat_extract = Identity(in_features=self.node_feats,
out_features=self.n_hidden)
def _get_encoder(self):
encoder_layer = SimpleTransformerEncoderLayer(d_model=self.n_hidden,
n_head=self.n_head,
dim_feedforward=self.dim_feedforward,
layer_norm=self.layer_norm,
attention_type=self.attention_type,
attn_norm=self.attn_norm,
norm_type=self.norm_type,
xavier_init=self.xavier_init,
diagonal_weight=self.diagonal_weight,
dropout=self.encoder_dropout,
ffn_dropout=self.ffn_dropout,
pos_dim=self.pos_dim,
debug=self.debug)
self.encoder_layers = nn.ModuleList(
[copy.deepcopy(encoder_layer) for _ in range(self.num_encoder_layers)])
def _get_regressor(self):
self.regressor = SpectralRegressor(in_dim=self.n_hidden,
n_hidden=self.n_hidden,
freq_dim=self.freq_dim,
out_dim=self.n_targets,
num_spectral_layers=self.num_regressor_layers,
modes=self.fourier_modes,
spacial_dim=self.spacial_dim,
spacial_fc=self.spacial_fc,
dim_feedforward=self.freq_dim,
activation=self.regressor_activation,
dropout=self.decoder_dropout,
)
if __name__ == '__main__':
for graph in ['gcn', 'gat']:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
config = defaultdict(lambda: None,
node_feats=1,
edge_feats=5,
pos_dim=1,
n_targets=1,
n_hidden=96,
num_feat_layers=2,
num_encoder_layers=2,
n_head=2,
pred_len=0,
n_freq_targets=0,
dim_feedforward=96*2,
feat_extract_type=graph,
graph_activation=True,
raw_laplacian=True,
attention_type='fourier', # no softmax
xavier_init=1e-4,
diagonal_weight=1e-2,
symmetric_init=False,
layer_norm=True,
attn_norm=False,
batch_norm=False,
spacial_residual=False,
return_attn_weight=True,
seq_len=None,
bulk_regression=False,
decoder_type='ifft',
freq_dim=64,
num_regressor_layers=2,
fourier_modes=16,
spacial_dim=1,
spacial_fc=True,
dropout=0.1,
debug=False,
)
ft = SimpleTransformer(**config)
ft.to(device)
batch_size, seq_len = 8, 512
summary(ft, input_size=[(batch_size, seq_len, 1),
(batch_size, seq_len, seq_len, 5),
(batch_size, seq_len, 1),
(batch_size, seq_len, 1)], device=device)
layer = TransformerEncoderLayer(d_model=128, nhead=4)
print(layer.__class__)
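    # A minimal sketch of how the 2D lite model could be exercised on an n x n
    # grid; the config values below are illustrative assumptions only, and the
    # dummy edge tensor is presumed to be ignored by the Identity feature
    # extractor in this configuration:
    #
    #   n = 32
    #   config2d = defaultdict(lambda: None,
    #                          node_feats=1, pos_dim=2, n_targets=1,
    #                          n_hidden=64, num_encoder_layers=2, n_head=2,
    #                          attention_type='galerkin', layer_norm=True,
    #                          freq_dim=32, num_regressor_layers=2,
    #                          fourier_modes=12, spacial_dim=2, spacial_fc=True,
    #                          dropout=0.0)
    #   lite = FourierTransformer2DLite(**config2d)
    #   node = torch.randn(4, n*n, 1)
    #   edge = torch.randn(4, n*n, n*n, 1)
    #   pos = torch.rand(4, n*n, 2)
    #   grid = pos.view(4, n, n, 2)
    #   out = lite(node, edge, pos, grid)   # out['preds']: (4, n, n, 1)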
|
python
|
from django.conf.urls import url
from authen import views
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
url(r'^api/user/(?P<pk>[0-9]+)/$', views.person_detail),
url(r'^api/add_group/(?P<pk>[0-9]+)/$', views.add_group),
url(r'^api/user/$', views.person_list),
url(r'^$', views.home),
url(r'^api/add_owner/$', views.add_owner),
url(r'^api/login/$', obtain_auth_token),
url(r'^api/user/(?P<pk>.+)/$', views.get_person_by_email),
]
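# Note: pattern order matters here. A numeric lookup such as /api/user/42/ is
# captured by the first ^api/user/ pattern (person_detail); any other value,
# e.g. an e-mail address, falls through to the final catch-all pattern and is
# resolved by get_person_by_email.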
|
python
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
from deepspeech.models.ds2_online import DeepSpeech2ModelOnline
class TestDeepSpeech2ModelOnline(unittest.TestCase):
def setUp(self):
paddle.set_device('cpu')
self.batch_size = 2
self.feat_dim = 161
max_len = 210
# (B, T, D)
audio = np.random.randn(self.batch_size, max_len, self.feat_dim)
audio_len = np.random.randint(max_len, size=self.batch_size)
audio_len[-1] = max_len
# (B, U)
text = np.array([[1, 2], [1, 2]])
text_len = np.array([2] * self.batch_size)
self.audio = paddle.to_tensor(audio, dtype='float32')
self.audio_len = paddle.to_tensor(audio_len, dtype='int64')
self.text = paddle.to_tensor(text, dtype='int32')
self.text_len = paddle.to_tensor(text_len, dtype='int64')
def test_ds2_1(self):
model = DeepSpeech2ModelOnline(
feat_size=self.feat_dim,
dict_size=10,
num_conv_layers=2,
num_rnn_layers=3,
rnn_size=1024,
num_fc_layers=2,
fc_layers_size_list=[512, 256],
use_gru=False)
loss = model(self.audio, self.audio_len, self.text, self.text_len)
self.assertEqual(loss.numel(), 1)
def test_ds2_2(self):
model = DeepSpeech2ModelOnline(
feat_size=self.feat_dim,
dict_size=10,
num_conv_layers=2,
num_rnn_layers=3,
rnn_size=1024,
num_fc_layers=2,
fc_layers_size_list=[512, 256],
use_gru=True)
loss = model(self.audio, self.audio_len, self.text, self.text_len)
self.assertEqual(loss.numel(), 1)
def test_ds2_3(self):
model = DeepSpeech2ModelOnline(
feat_size=self.feat_dim,
dict_size=10,
num_conv_layers=2,
num_rnn_layers=3,
rnn_size=1024,
num_fc_layers=2,
fc_layers_size_list=[512, 256],
use_gru=False)
loss = model(self.audio, self.audio_len, self.text, self.text_len)
self.assertEqual(loss.numel(), 1)
def test_ds2_4(self):
model = DeepSpeech2ModelOnline(
feat_size=self.feat_dim,
dict_size=10,
num_conv_layers=2,
num_rnn_layers=3,
rnn_size=1024,
num_fc_layers=2,
fc_layers_size_list=[512, 256],
use_gru=True)
loss = model(self.audio, self.audio_len, self.text, self.text_len)
self.assertEqual(loss.numel(), 1)
def test_ds2_5(self):
model = DeepSpeech2ModelOnline(
feat_size=self.feat_dim,
dict_size=10,
num_conv_layers=2,
num_rnn_layers=3,
rnn_size=1024,
num_fc_layers=2,
fc_layers_size_list=[512, 256],
use_gru=False)
loss = model(self.audio, self.audio_len, self.text, self.text_len)
self.assertEqual(loss.numel(), 1)
def test_ds2_6(self):
model = DeepSpeech2ModelOnline(
feat_size=self.feat_dim,
dict_size=10,
num_conv_layers=2,
num_rnn_layers=3,
rnn_size=1024,
rnn_direction='bidirect',
num_fc_layers=2,
fc_layers_size_list=[512, 256],
use_gru=False)
loss = model(self.audio, self.audio_len, self.text, self.text_len)
self.assertEqual(loss.numel(), 1)
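    # The two tests below compare streaming (chunk-by-chunk) encoding against
    # encoding the whole utterance at once: the concatenated per-chunk encoder
    # outputs and the final hidden states are expected to match the
    # full-utterance results.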
def test_ds2_7(self):
use_gru = False
model = DeepSpeech2ModelOnline(
feat_size=self.feat_dim,
dict_size=10,
num_conv_layers=2,
num_rnn_layers=1,
rnn_size=1024,
rnn_direction='forward',
num_fc_layers=2,
fc_layers_size_list=[512, 256],
use_gru=use_gru)
model.eval()
paddle.device.set_device("cpu")
de_ch_size = 8
eouts, eouts_lens, final_state_h_box, final_state_c_box = model.encoder(
self.audio, self.audio_len)
eouts_by_chk_list, eouts_lens_by_chk_list, final_state_h_box_chk, final_state_c_box_chk = model.encoder.forward_chunk_by_chunk(
self.audio, self.audio_len, de_ch_size)
eouts_by_chk = paddle.concat(eouts_by_chk_list, axis=1)
eouts_lens_by_chk = paddle.add_n(eouts_lens_by_chk_list)
decode_max_len = eouts.shape[1]
eouts_by_chk = eouts_by_chk[:, :decode_max_len, :]
self.assertEqual(paddle.allclose(eouts_by_chk, eouts), True)
self.assertEqual(
paddle.allclose(final_state_h_box, final_state_h_box_chk), True)
if use_gru is False:
self.assertEqual(
paddle.allclose(final_state_c_box, final_state_c_box_chk), True)
def test_ds2_8(self):
use_gru = True
model = DeepSpeech2ModelOnline(
feat_size=self.feat_dim,
dict_size=10,
num_conv_layers=2,
num_rnn_layers=1,
rnn_size=1024,
rnn_direction='forward',
num_fc_layers=2,
fc_layers_size_list=[512, 256],
use_gru=use_gru)
model.eval()
paddle.device.set_device("cpu")
de_ch_size = 8
eouts, eouts_lens, final_state_h_box, final_state_c_box = model.encoder(
self.audio, self.audio_len)
eouts_by_chk_list, eouts_lens_by_chk_list, final_state_h_box_chk, final_state_c_box_chk = model.encoder.forward_chunk_by_chunk(
self.audio, self.audio_len, de_ch_size)
eouts_by_chk = paddle.concat(eouts_by_chk_list, axis=1)
eouts_lens_by_chk = paddle.add_n(eouts_lens_by_chk_list)
decode_max_len = eouts.shape[1]
eouts_by_chk = eouts_by_chk[:, :decode_max_len, :]
self.assertEqual(paddle.allclose(eouts_by_chk, eouts), True)
self.assertEqual(
paddle.allclose(final_state_h_box, final_state_h_box_chk), True)
if use_gru is False:
self.assertEqual(
paddle.allclose(final_state_c_box, final_state_c_box_chk), True)
if __name__ == '__main__':
unittest.main()
|
python
|
# Copyright (c) 2021, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
import os
import unittest
import numpy as np
import torch
from warp_drive.managers.data_manager import CUDADataManager
from warp_drive.managers.function_manager import CUDAFunctionManager, CUDASampler
from warp_drive.utils.common import get_project_root
from warp_drive.utils.constants import Constants
from warp_drive.utils.data_feed import DataFeed
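# Allocating a small CUDA tensor up front is presumably meant to force PyTorch
# to initialize its CUDA context (and fail fast if no GPU is available) before
# the CUDA data/function managers below are exercised.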
pytorch_cuda_init_success = torch.cuda.FloatTensor(8)
_CUBIN_FILEPATH = f"{get_project_root()}/warp_drive/cuda_bin"
_ACTIONS = Constants.ACTIONS
class TestActionSampler(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dm = CUDADataManager(num_agents=5, episode_length=1, num_envs=2)
self.fm = CUDAFunctionManager(
num_agents=int(self.dm.meta_info("n_agents")),
num_envs=int(self.dm.meta_info("n_envs")),
)
self.fm.load_cuda_from_binary_file(f"{_CUBIN_FILEPATH}/test_build.fatbin")
self.sampler = CUDASampler(function_manager=self.fm)
self.sampler.init_random(seed=None)
def test_agent_action_distribution(self):
tensor = DataFeed()
tensor.add_data(name=f"{_ACTIONS}_a", data=[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
self.dm.push_data_to_device(tensor, torch_accessible=True)
self.assertTrue(self.dm.is_data_on_device_via_torch(f"{_ACTIONS}_a"))
self.sampler.register_actions(self.dm, f"{_ACTIONS}_a", 3)
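        # Per-env, per-agent categorical distributions over the 3 registered
        # actions: shape (n_envs=2, n_agents=5, num_actions=3).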
agent_distribution = np.array(
[
[
[0.333, 0.333, 0.333],
[0.2, 0.5, 0.3],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
],
[
[0.1, 0.7, 0.2],
[0.7, 0.2, 0.1],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
],
]
)
agent_distribution = torch.from_numpy(agent_distribution)
agent_distribution = agent_distribution.float().cuda()
# run 10000 times to collect statistics
actions_a_cuda = torch.from_numpy(
np.empty((10000, 2, 5), dtype=np.int32)
).cuda()
for i in range(10000):
self.sampler.sample(
self.dm, agent_distribution, action_name=f"{_ACTIONS}_a"
)
actions_a_cuda[i] = self.dm.data_on_device_via_torch(f"{_ACTIONS}_a")
actions_a = actions_a_cuda.cpu().numpy()
actions_a_env_0 = actions_a[:, 0]
actions_a_env_1 = actions_a[:, 1]
        # The sampler draws from the given distribution; we check that each
        # empirical frequency matches its target probability within ~10%
self.assertAlmostEqual(
(actions_a_env_0[:, 0] == 0).sum() / 10000.0, 0.333, delta=0.03
)
self.assertAlmostEqual(
(actions_a_env_0[:, 0] == 1).sum() / 10000.0, 0.333, delta=0.03
)
self.assertAlmostEqual(
(actions_a_env_0[:, 0] == 2).sum() / 10000.0, 0.333, delta=0.03
)
self.assertAlmostEqual(
(actions_a_env_0[:, 1] == 0).sum() / 10000.0, 0.2, delta=0.02
)
self.assertAlmostEqual(
(actions_a_env_0[:, 1] == 1).sum() / 10000.0, 0.5, delta=0.05
)
self.assertAlmostEqual(
(actions_a_env_0[:, 1] == 2).sum() / 10000.0, 0.3, delta=0.03
)
self.assertEqual((actions_a_env_0[:, 2] == 0).sum(), 10000)
self.assertEqual((actions_a_env_0[:, 3] == 1).sum(), 10000)
self.assertEqual((actions_a_env_0[:, 4] == 2).sum(), 10000)
self.assertAlmostEqual(
(actions_a_env_1[:, 0] == 0).sum() / 10000.0, 0.1, delta=0.01
)
self.assertAlmostEqual(
(actions_a_env_1[:, 0] == 1).sum() / 10000.0, 0.7, delta=0.07
)
self.assertAlmostEqual(
(actions_a_env_1[:, 0] == 2).sum() / 10000.0, 0.2, delta=0.02
)
self.assertAlmostEqual(
(actions_a_env_1[:, 1] == 0).sum() / 10000.0, 0.7, delta=0.07
)
self.assertAlmostEqual(
(actions_a_env_1[:, 1] == 1).sum() / 10000.0, 0.2, delta=0.02
)
self.assertAlmostEqual(
(actions_a_env_1[:, 1] == 2).sum() / 10000.0, 0.1, delta=0.01
)
self.assertAlmostEqual(
(actions_a_env_1[:, 2] == 0).sum() / 10000.0, 0.5, delta=0.05
)
self.assertAlmostEqual(
(actions_a_env_1[:, 2] == 1).sum() / 10000.0, 0.5, delta=0.05
)
self.assertEqual((actions_a_env_1[:, 2] == 2).sum(), 0)
self.assertEqual((actions_a_env_1[:, 3] == 0).sum(), 0)
self.assertAlmostEqual(
(actions_a_env_1[:, 3] == 1).sum() / 10000.0, 0.5, delta=0.05
)
self.assertAlmostEqual(
(actions_a_env_1[:, 3] == 2).sum() / 10000.0, 0.5, delta=0.05
)
self.assertAlmostEqual(
(actions_a_env_1[:, 4] == 0).sum() / 10000.0, 0.5, delta=0.05
)
self.assertEqual((actions_a_env_1[:, 4] == 1).sum(), 0)
self.assertAlmostEqual(
(actions_a_env_1[:, 4] == 2).sum() / 10000.0, 0.5, delta=0.05
)
def test_planner_action_distribution(self):
tensor = DataFeed()
tensor.add_data(name=f"{_ACTIONS}_p", data=[[0], [0]])
self.dm.push_data_to_device(tensor, torch_accessible=True)
self.assertTrue(self.dm.is_data_on_device_via_torch(f"{_ACTIONS}_p"))
self.sampler.register_actions(self.dm, f"{_ACTIONS}_p", 4)
planner_distribution = np.array(
[[[0.25, 0.25, 0.25, 0.25]], [[0.10, 0.60, 0.15, 0.15]]]
)
planner_distribution = torch.from_numpy(planner_distribution)
planner_distribution = planner_distribution.float().cuda()
# run 10000 times to collect statistics
actions_p_cuda = torch.from_numpy(
np.empty((10000, 2, 1), dtype=np.int32)
).cuda()
for i in range(10000):
self.sampler.sample(
self.dm, planner_distribution, action_name=f"{_ACTIONS}_p"
)
actions_p_cuda[i] = self.dm.data_on_device_via_torch(f"{_ACTIONS}_p")
actions_p = actions_p_cuda.cpu().numpy()
actions_p_env_0 = actions_p[:, 0]
actions_p_env_1 = actions_p[:, 1]
self.assertAlmostEqual(
(actions_p_env_0[:, 0] == 0).sum() / 10000.0, 0.25, delta=0.03
)
self.assertAlmostEqual(
(actions_p_env_0[:, 0] == 1).sum() / 10000.0, 0.25, delta=0.03
)
self.assertAlmostEqual(
(actions_p_env_0[:, 0] == 2).sum() / 10000.0, 0.25, delta=0.03
)
self.assertAlmostEqual(
(actions_p_env_0[:, 0] == 3).sum() / 10000.0, 0.25, delta=0.03
)
self.assertAlmostEqual(
(actions_p_env_1[:, 0] == 0).sum() / 10000.0, 0.1, delta=0.01
)
self.assertAlmostEqual(
(actions_p_env_1[:, 0] == 1).sum() / 10000.0, 0.6, delta=0.06
)
self.assertAlmostEqual(
(actions_p_env_1[:, 0] == 2).sum() / 10000.0, 0.15, delta=0.015
)
self.assertAlmostEqual(
(actions_p_env_1[:, 0] == 3).sum() / 10000.0, 0.15, delta=0.015
)
def test_seed_randomness_across_threads(self):
tensor = DataFeed()
tensor.add_data(name=f"{_ACTIONS}_s", data=[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
self.dm.push_data_to_device(tensor, torch_accessible=True)
self.assertTrue(self.dm.is_data_on_device_via_torch(f"{_ACTIONS}_s"))
self.sampler.register_actions(self.dm, f"{_ACTIONS}_s", 4)
agent_distribution = np.array(
[
[
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
],
[
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
],
]
)
agent_distribution = torch.from_numpy(agent_distribution)
agent_distribution = agent_distribution.float().cuda()
        # run 10000 times to collect statistics
actions_s_cuda = torch.from_numpy(
np.empty((10000, 2, 5), dtype=np.int32)
).cuda()
for i in range(10000):
self.sampler.sample(
self.dm, agent_distribution, action_name=f"{_ACTIONS}_s"
)
actions_s_cuda[i] = self.dm.data_on_device_via_torch(f"{_ACTIONS}_s")
actions_s = actions_s_cuda.cpu().numpy()
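        # With independent uniform draws over 4 actions, the std across the 5
        # agents within a single sample should be well above zero on average;
        # a value near zero would suggest the per-thread random streams are
        # identical rather than independently seeded.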
self.assertTrue(actions_s.std(axis=-1).reshape(-1).mean() > 0.9)
|
python
|
# Copyright (c) 2017 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from vmware_nsx.db import nsxv_db
from vmware_nsx.db import nsxv_models
from vmware_nsx.plugins.nsx_v.vshield import edge_utils
from vmware_nsx.tests.unit.nsx_v import test_plugin
PLUGIN_NAME = 'vmware_nsx.plugin.NsxVPlugin'
# Run all relevant plugin tests when the metadata proxy is enabled.
# These tests do not specifically exercise the md_proxy; they just verify
# that nothing gets broken.
class NsxVPluginWithMdV2TestCase(test_plugin.NsxVPluginV2TestCase):
def setUp(self, plugin=PLUGIN_NAME,
ext_mgr=None,
service_plugins=None):
# Add the metadata configuration
cfg.CONF.set_override('mgt_net_moid', 'net-1', group="nsxv")
cfg.CONF.set_override('mgt_net_proxy_ips', ['2.2.2.2'], group="nsxv")
cfg.CONF.set_override('mgt_net_proxy_netmask', '255.255.255.0',
group="nsxv")
cfg.CONF.set_override('mgt_net_default_gateway', '1.1.1.1',
group="nsxv")
cfg.CONF.set_override('nova_metadata_ips', ['3.3.3.3'], group="nsxv")
# Add some mocks required for the md code
mock_alloc_vnic = mock.patch.object(nsxv_db, 'allocate_edge_vnic')
mock_alloc_vnic_inst = mock_alloc_vnic.start()
mock_alloc_vnic_inst.return_value = nsxv_models.NsxvEdgeVnicBinding
mock.patch.object(edge_utils, "update_internal_interface").start()
super(NsxVPluginWithMdV2TestCase, self).setUp(
plugin=plugin, ext_mgr=ext_mgr,
service_plugins=service_plugins)
class TestNetworksWithMdV2(test_plugin.TestNetworksV2,
NsxVPluginWithMdV2TestCase):
# Skip all the tests that count networks, as there is an
# additional internal network for metadata.
def test_list_networks_with_sort_native(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_list_networks_without_pk_in_fields_pagination_emulated(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_list_networks_with_sort_emulated(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_list_networks_with_shared(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_list_networks_without_pk_in_fields_pagination_native(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_list_networks_with_parameters(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_list_networks_with_pagination_native(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_list_networks_with_pagination_reverse_emulated(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_list_networks(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_list_networks_with_pagination_emulated(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_list_networks_with_pagination_reverse_native(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_list_networks_with_fields(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_create_networks_bulk_wrong_input(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_create_networks_bulk_native_plugin_failure(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_create_networks_bulk_native_quotas(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_create_networks_bulk_emulated_plugin_failure(self):
self.skipTest("The test is not suitable for the metadata test case")
class TestSubnetsWithMdV2(test_plugin.TestSubnetsV2,
NsxVPluginWithMdV2TestCase):
# Skip all the tests that count subnets, as there is an
# additional internal subnet for metadata.
def test_list_subnets_with_sort_native(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_list_subnets_with_sort_emulated(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_list_subnets_with_pagination_native(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_list_subnets_with_parameter(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_list_subnets_with_pagination_emulated(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_list_subnets_shared(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_list_subnets(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_create_subnets_bulk_native_plugin_failure(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_create_subnets_bulk_native_quotas(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_create_subnets_bulk_emulated_plugin_failure(self):
self.skipTest("The test is not suitable for the metadata test case")
class TestExclusiveRouterWithMdTestCase(
test_plugin.TestExclusiveRouterTestCase,
NsxVPluginWithMdV2TestCase):
# Skip all the tests that count firewall rules, as there are
# some MD specific rules
def test_router_set_gateway_with_nosnat(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_interfaces_different_tenants_update_firewall(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_interfaces_with_update_firewall(self):
self.skipTest("The test is not suitable for the metadata test case")
# Skip all the tests that count routers or ports, as there is
# an additional router for the md proxy
def test_router_list_with_pagination_reverse(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_list_with_sort(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_list_with_pagination(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_list(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_add_interface_delete_port_after_failure(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_create_router_fail_at_the_backend(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_floatingip_delete_router_intf_with_subnet_id_returns_409(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_floatingip_delete_router_intf_with_port_id_returns_409(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_address_scope_snat_rules(self):
self.skipTest("The test is not suitable for the metadata test case")
class TestVdrWithMdTestCase(test_plugin.TestVdrTestCase,
NsxVPluginWithMdV2TestCase):
# Skip all the tests that count firewall rules, as there are
# some MD specific rules
def test_router_set_gateway_with_nosnat(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_interfaces_different_tenants_update_firewall(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_interfaces_with_update_firewall(self):
self.skipTest("The test is not suitable for the metadata test case")
# Skip all the tests that count routers or ports, as there is
# an additional router for the md proxy
def test_router_list_with_pagination_reverse(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_list_with_sort(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_list_with_pagination(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_list(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_add_interface_delete_port_after_failure(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_create_router_fail_at_the_backend(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_floatingip_delete_router_intf_with_subnet_id_returns_409(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_floatingip_delete_router_intf_with_port_id_returns_409(self):
self.skipTest("The test is not suitable for the metadata test case")
#TODO(asarfaty): fix some mocks so those tests will pass
def test_router_plr_binding_default_size(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_plr_binding_configured_size(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_plr_binding_default_az(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_plr_binding_with_az(self):
self.skipTest("The test is not suitable for the metadata test case")
class TestSharedRouterWithMdTestCase(test_plugin.TestSharedRouterTestCase,
NsxVPluginWithMdV2TestCase):
# Skip all the tests that count firewall rules, as there are
# some MD specific rules
def test_router_set_gateway_with_nosnat(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_routers_set_gateway_with_nosnat(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_interfaces_different_tenants_update_firewall(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_interfaces_with_update_firewall(self):
self.skipTest("The test is not suitable for the metadata test case")
# Skip all the tests that count routers or ports, as there is
# an additional router for the md proxy
def test_router_list_with_pagination_reverse(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_list_with_sort(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_list_with_pagination(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_list(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_router_add_interface_delete_port_after_failure(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_create_router_fail_at_the_backend(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_floatingip_delete_router_intf_with_subnet_id_returns_409(self):
self.skipTest("The test is not suitable for the metadata test case")
def test_floatingip_delete_router_intf_with_port_id_returns_409(self):
self.skipTest("The test is not suitable for the metadata test case")
|
python
|
import re
from model.contact import Contact
def test_contact_info_from_home_page(app, db):
app.navigation.open_home_page()
contact_from_home_page = sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
def clean(contact):
return Contact(id=contact.id, firstname=contact.firstname.strip(), lastname=contact.lastname.strip(),
address=contact.address.strip(),
                       home=contact.home, mobile=contact.mobile, work=contact.work, phone2=contact.phone2,
email=contact.email, email2=contact.email2, email3=contact.email3)
contact_from_db_list = list(map(clean, db.get_contact_list()))
print("Contacts_from_home_page>>>>", contact_from_home_page)
print("Contacts_from_DB>>>>", contact_from_db_list)
i = 0
for item in contact_from_home_page:
assert item.address == contact_from_db_list[i].address
assert item.lastname == contact_from_db_list[i].lastname.strip()
assert item.firstname == contact_from_db_list[i].firstname.strip()
assert item.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_db_list[i])
assert item.all_emails_from_home_page == merge_emails_like_on_home_page(contact_from_db_list[i])
i += 1
def clear(s):
return re.sub("[() -]", "", s)
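# e.g. clear("+7 (812) 123-45-67") returns "+78121234567"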
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.home, contact.mobile, contact.work, contact.phone2]))))
def merge_emails_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None, [contact.email, contact.email2, contact.email3]))))
# def test_contacts(app, ormdb):
# random_index = randrange(app.contact.count())
# # get all contacts from the home page
# contact_from_home_page = app.contact.get_contact_list()
# # get all contact records from the database
# contact_from_db = ormdb.get_contact_list()
# # compare the lists after sorting
# assert sorted(contact_from_home_page, key=Contact.id_or_max) == sorted(contact_from_db, key=Contact.id_or_max)
# def test_contact_info_on_main_page(app):
# if app.contact.amount() == 0:
# app.contact.create(
# Contact(firstname="TestTest", middlename="Test", lastname="Testing", nickname="testing",
# title="test", company="Test test", address="Spb", home="000222111",
# mobile="444555222", work="99966655", fax="11122255", email="[email protected]",
# email2="[email protected]", email3="[email protected]", homepage="www.test.ru", bday="15",
# bmonth="May", byear="1985", aday="14", amonth="June", ayear="1985",
# address2="Spb", phone2="111111", notes="Friend"))
# random_index = randrange(app.contact.amount())
# contact_from_home_page = app.contact.get_contact_list()[random_index]
# contact_from_edit_page = app.contact.get_contact_info_from_edit_page(random_index)
# assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
# assert contact_from_home_page.firstname == contact_from_edit_page.firstname
# assert contact_from_home_page.lastname == contact_from_edit_page.lastname
# assert contact_from_home_page.address == contact_from_edit_page.address
# assert contact_from_home_page.all_emails_from_home_page == merge_emails_like_on_home_page(contact_from_edit_page)
|
python
|