file_name (string, 3–137 chars) | prefix (string, 0–918k chars) | suffix (string, 0–962k chars) | middle (string, 0–812k chars)
---|---|---|---|
util.rs
|
// Copyright 2022 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
pub mod stringify {
use std::{fmt::Display, marker::PhantomData, str::FromStr};
use serde::{de::Visitor, Deserializer, Serializer};
/// Deserialize T using [`FromStr`]
pub fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error>
where
D: Deserializer<'de>,
T: FromStr,
T::Err: Display,
{
struct Helper<S>(PhantomData<S>);
impl<'de, S> Visitor<'de> for Helper<S>
where
S: FromStr,
<S as FromStr>::Err: Display,
{
type Value = S;
fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result
|
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
value.parse::<Self::Value>().map_err(serde::de::Error::custom)
}
}
deserializer.deserialize_str(Helper(PhantomData))
}
/// Serialize T using [`Display`]
pub fn serialize<T, S>(value: &T, serializer: S) -> Result<S::Ok, S::Error>
where
T: Display,
S: Serializer,
{
serializer.collect_str(&value)
}
}
/// `serde_bytes` cannot be used with sized arrays, so this works around that limitation.
pub mod bytify {
use std::marker::PhantomData;
use serde::{de::Visitor, Deserializer, Serializer};
/// Deserialize T from bytes
pub fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error>
where
D: Deserializer<'de>,
T: for<'a> TryFrom<&'a [u8]>,
{
struct Helper<S>(PhantomData<S>);
impl<'de, S> Visitor<'de> for Helper<S>
where
S: for<'a> TryFrom<&'a [u8]>,
{
type Value = S;
fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(formatter, "bytes")
}
fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
v.try_into().map_err(|_| serde::de::Error::custom("invalid bytes"))
}
}
deserializer.deserialize_bytes(Helper(PhantomData))
}
/// Serialize T as bytes
pub fn serialize<T, S>(value: &T, serializer: S) -> Result<S::Ok, S::Error>
where
T: AsRef<[u8]>,
S: Serializer,
{
serde_bytes::Serialize::serialize(value.as_ref(), serializer)
}
}
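// Illustrative usage sketch (the `Example` struct below is hypothetical, not part
// of this module): both helpers are designed to plug into serde's `with` attribute,
// which routes a field through the module's `serialize`/`deserialize` functions.
#[derive(serde::Serialize, serde::Deserialize)]
struct Example {
/// Stored as a string via `Display`/`FromStr`.
#[serde(with = "stringify")]
value: u128,
/// Stored as raw bytes; `[u8; 32]` implements `TryFrom<&[u8]>` and `AsRef<[u8]>`.
#[serde(with = "bytify")]
digest: [u8; 32],
}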
|
{
write!(formatter, "a string")
}
|
main.go
|
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/*
Package main provides a client used for benchmarking. Before running the
client, the user would need to launch the grpc server.
To start the server before running the client, you can look for the command
under the following file:
benchmark/server/main.go
After starting the server, the client can be run. An example of how to run this
command is:
go run benchmark/client/main.go -test_name=grpc_test
If the server is running on a different port than 50051, then use the port flag
for the client to hit the server on the correct port.
An example for how to run this command on a different port can be found here:
go run benchmark/client/main.go -test_name=grpc_test -port=8080
*/
package main
import (
"context"
"flag"
"fmt"
"os"
"runtime"
"runtime/pprof"
"sync"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/benchmark"
testpb "google.golang.org/grpc/benchmark/grpc_testing"
"google.golang.org/grpc/benchmark/stats"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/syscall"
)
var (
port = flag.String("port", "50051", "Localhost port to connect to.")
numRPC = flag.Int("r", 1, "The number of concurrent RPCs on each connection.")
numConn = flag.Int("c", 1, "The number of parallel connections.")
warmupDur = flag.Int("w", 10, "Warm-up duration in seconds")
duration = flag.Int("d", 60, "Benchmark duration in seconds")
rqSize = flag.Int("req", 1, "Request message size in bytes.")
rspSize = flag.Int("resp", 1, "Response message size in bytes.")
rpcType = flag.String("rpc_type", "unary",
`Configure different client rpc type. Valid options are:
unary;
streaming.`)
testName = flag.String("test_name", "", "Name of the test used for creating profiles.")
wg sync.WaitGroup
hopts = stats.HistogramOptions{
NumBuckets: 2495,
GrowthFactor: .01,
}
mu sync.Mutex
hists []*stats.Histogram
logger = grpclog.Component("benchmark")
)
func main() {
flag.Parse()
if *testName == "" {
logger.Fatalf("test_name not set")
}
req := &testpb.SimpleRequest{
ResponseType: testpb.PayloadType_COMPRESSABLE,
ResponseSize: int32(*rspSize),
Payload: &testpb.Payload{
Type: testpb.PayloadType_COMPRESSABLE,
Body: make([]byte, *rqSize),
},
}
connectCtx, connectCancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
defer connectCancel()
ccs := buildConnections(connectCtx)
warmDeadline := time.Now().Add(time.Duration(*warmupDur) * time.Second)
endDeadline := warmDeadline.Add(time.Duration(*duration) * time.Second)
cf, err := os.Create("/tmp/" + *testName + ".cpu")
if err != nil {
logger.Fatalf("Error creating file: %v", err)
}
defer cf.Close()
pprof.StartCPUProfile(cf)
cpuBeg := syscall.GetCPUTime()
for _, cc := range ccs {
runWithConn(cc, req, warmDeadline, endDeadline)
}
wg.Wait()
cpu := time.Duration(syscall.GetCPUTime() - cpuBeg)
pprof.StopCPUProfile()
mf, err := os.Create("/tmp/" + *testName + ".mem")
if err != nil {
logger.Fatalf("Error creating file: %v", err)
}
defer mf.Close()
runtime.GC() // materialize all statistics
if err := pprof.WriteHeapProfile(mf); err != nil {
logger.Fatalf("Error writing memory profile: %v", err)
}
hist := stats.NewHistogram(hopts)
for _, h := range hists {
hist.Merge(h)
}
parseHist(hist)
fmt.Println("Client CPU utilization:", cpu)
fmt.Println("Client CPU profile:", cf.Name())
fmt.Println("Client Mem Profile:", mf.Name())
}
func buildConnections(ctx context.Context) []*grpc.ClientConn {
ccs := make([]*grpc.ClientConn, *numConn)
for i := range ccs {
ccs[i] = benchmark.NewClientConnWithContext(ctx, "localhost:"+*port, grpc.WithInsecure(), grpc.WithBlock())
}
return ccs
}
func
|
(cc *grpc.ClientConn, req *testpb.SimpleRequest, warmDeadline, endDeadline time.Time) {
for i := 0; i < *numRPC; i++ {
wg.Add(1)
go func() {
defer wg.Done()
caller := makeCaller(cc, req)
hist := stats.NewHistogram(hopts)
for {
start := time.Now()
if start.After(endDeadline) {
mu.Lock()
hists = append(hists, hist)
mu.Unlock()
return
}
caller()
elapsed := time.Since(start)
if start.After(warmDeadline) {
hist.Add(elapsed.Nanoseconds())
}
}
}()
}
}
func makeCaller(cc *grpc.ClientConn, req *testpb.SimpleRequest) func() {
client := testpb.NewBenchmarkServiceClient(cc)
if *rpcType == "unary" {
return func() {
if _, err := client.UnaryCall(context.Background(), req); err != nil {
logger.Fatalf("RPC failed: %v", err)
}
}
}
stream, err := client.StreamingCall(context.Background())
if err != nil {
logger.Fatalf("RPC failed: %v", err)
}
return func() {
if err := stream.Send(req); err != nil {
logger.Fatalf("Streaming RPC failed to send: %v", err)
}
if _, err := stream.Recv(); err != nil {
logger.Fatalf("Streaming RPC failed to read: %v", err)
}
}
}
func parseHist(hist *stats.Histogram) {
fmt.Println("qps:", float64(hist.Count)/float64(*duration))
fmt.Printf("Latency: (50/90/99 %%ile): %v/%v/%v\n",
time.Duration(median(.5, hist)),
time.Duration(median(.9, hist)),
time.Duration(median(.99, hist)))
}
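// median walks the histogram buckets until the cumulative count reaches the
// requested percentile, then linearly interpolates between that bucket's lower
// bound and the next bucket's lower bound (LowBound * (1 + GrowthFactor)).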
func median(percentile float64, h *stats.Histogram) int64 {
need := int64(float64(h.Count) * percentile)
have := int64(0)
for _, bucket := range h.Buckets {
count := bucket.Count
if have+count >= need {
percent := float64(need-have) / float64(count)
return int64((1.0-percent)*bucket.LowBound + percent*bucket.LowBound*(1.0+hopts.GrowthFactor))
}
have += bucket.Count
}
panic("should have found a bound")
}
|
runWithConn
|
verifycode.py
|
"""Models about verify
"""
from django.db import models
from utils import getdate_now, randkey
from utils.checker import UserInfoChecker
class
|
(models.Model):
"""VerifyCode
"""
session_id = models.IntegerField()
phone = models.CharField(max_length=11)
code = models.CharField(max_length=8)
send_time = models.DateTimeField()
class Meta:
verbose_name = 'verifycode'
verbose_name_plural = 'verifycodes'
get_latest_by = 'id'
class VerifyHelper:
"""User Helper for pysat-server
It contains some functions about user operation.
"""
@staticmethod
def get_latest_code(session_id, phone):
"""get the latest code
"""
if not isinstance(session_id, int):
return None
if UserInfoChecker.check_phone(phone) is not True:
return None
logs = VerifyCode.objects.filter(
session_id=session_id,
phone=phone
).filter(~models.Q(code=''))
if logs.exists():
log = logs.last()
return {
'code' : log.code,
'time' : log.send_time
}
return None
@staticmethod
def del_codes(session_id, phone):
"""delete all codes with `seesion_id` and `phone`.
"""
if not isinstance(session_id, int):
return False
if UserInfoChecker.check_phone(phone) is not True:
return False
logs = VerifyCode.objects.filter(
session_id=session_id,
phone=phone
)
for log in logs:
log.code = ''
log.save()
return True
@staticmethod
def add_code(session_id, phone, default_code='GUXYNB'):
"""get the EntryLog by session_id.
"""
if not VerifyHelper.del_codes(session_id, phone):
return None
if default_code is None:
code = randkey(length=6)
else:
code = default_code
VerifyCode(
session_id=session_id,
phone=phone,
code=code,
send_time=getdate_now()
).save()
return code
@staticmethod
def check_code(session_id, phone, code):
"""check the verify_code
"""
if not isinstance(session_id, int):
return False
if not isinstance(code, str) or code == '':
return False
if UserInfoChecker.check_phone(phone) is not True:
return False
logs = VerifyCode.objects.filter(
session_id=session_id,
phone=phone,
code=code
)
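# A matched code is cleared immediately below so it cannot be reused.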
if logs.exists():
log = logs.last()
log.code = ''
log.save()
return True
return False
|
VerifyCode
|
sshclient.py
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""High level ssh library.
Usage examples:
Execute command and get output:
ssh = sshclient.SSH("root", "example.com", port=33)
status, stdout, stderr = ssh.execute("ps ax")
if status:
raise Exception("Command failed with non-zero status.")
print(stdout.splitlines())
Execute command with huge output:
class PseudoFile(object):
def write(self, chunk):
if "error" in chunk:
email_admin(chunk)
ssh = sshclient.SSH("root", "example.com")
ssh.run("tail -f /var/log/syslog", stdout=PseudoFile(), timeout=False)
Execute local script on remote side:
ssh = sshclient.SSH("user", "example.com")
status, out, err = ssh.execute("/bin/sh -s arg1 arg2",
stdin=open("~/myscript.sh", "r"))
Upload file:
ssh = sshclient.SSH("user", "example.com")
ssh.run("cat > ~/upload/file.gz", stdin=open("/store/file.gz", "rb"))
Eventlet:
eventlet.monkey_patch(select=True, time=True)
or
eventlet.monkey_patch()
or
sshclient = eventlet.import_patched("openstack.common.sshclient")
"""
import os
import select
import socket
import time
import paramiko
import six
from heat.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class SSHError(Exception):
pass
class
|
(SSHError):
pass
class SSH(object):
"""Represent ssh connection."""
def __init__(self, user, host, port=22, pkey=None,
key_filename=None, password=None):
"""Initialize SSH client.
:param user: ssh username
:param host: hostname or ip address of remote ssh server
:param port: remote ssh port
:param pkey: RSA or DSS private key string or file object
:param key_filename: private key filename
:param password: password
"""
self.user = user
self.host = host
self.port = port
self.pkey = self._get_pkey(pkey) if pkey else None
self.password = password
self.key_filename = key_filename
self._client = False
def _get_pkey(self, key):
if isinstance(key, six.string_types):
key = six.moves.StringIO(key)
errors = []
for key_class in (paramiko.rsakey.RSAKey, paramiko.dsskey.DSSKey):
try:
return key_class.from_private_key(key)
except paramiko.SSHException as e:
errors.append(e)
raise SSHError("Invalid pkey: %s" % errors)
def _get_client(self):
if self._client:
return self._client
try:
self._client = paramiko.SSHClient()
self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self._client.connect(self.host, username=self.user,
port=self.port, pkey=self.pkey,
key_filename=self.key_filename,
password=self.password, timeout=5)
return self._client
except Exception as e:
message = ("Exception %(exception_type)s was raised "
"during connect to %(user)s@%(host)s:%(port)s. "
"Exception value is: %(exception)r")
self._client = False
raise SSHError(message % {"exception": e,
"user": self.user,
"host": self.host,
"port": self.port,
"exception_type": type(e)})
def close(self):
if self._client:
self._client.close()
self._client = False
def run(self, cmd, stdin=None, stdout=None, stderr=None,
raise_on_error=True, timeout=3600):
"""Execute specified command on the server.
:param cmd: Command to be executed.
:param stdin: Open file or string to pass to stdin.
:param stdout: Open file to connect to stdout.
:param stderr: Open file to connect to stderr.
:param raise_on_error: If False then the exit code is returned. If True
then an exception is raised on a non-zero exit code.
:param timeout: Timeout in seconds for command execution.
Default 1 hour. No timeout if set to 0.
"""
client = self._get_client()
if isinstance(stdin, six.string_types):
stdin = six.moves.StringIO(stdin)
return self._run(client, cmd, stdin=stdin, stdout=stdout,
stderr=stderr, raise_on_error=raise_on_error,
timeout=timeout)
def _run(self, client, cmd, stdin=None, stdout=None, stderr=None,
raise_on_error=True, timeout=3600):
if isinstance(cmd, (list, tuple)):
cmd = " ".join(six.moves.shlex_quote(str(p)) for p in cmd)
transport = client.get_transport()
session = transport.open_session()
session.exec_command(cmd)
start_time = time.time()
data_to_send = ""
stderr_data = None
# If we have data to be sent to stdin then `select' should also
# check for stdin availability.
if stdin and not stdin.closed:
writes = [session]
else:
writes = []
while True:
# Block until data can be read/write.
r, w, e = select.select([session], writes, [session], 1)
if session.recv_ready():
data = session.recv(4096)
LOG.debug("stdout: %r" % data)
if stdout is not None:
stdout.write(data)
continue
if session.recv_stderr_ready():
stderr_data = session.recv_stderr(4096)
LOG.debug("stderr: %r" % stderr_data)
if stderr is not None:
stderr.write(stderr_data)
continue
if session.send_ready():
if stdin is not None and not stdin.closed:
if not data_to_send:
data_to_send = stdin.read(4096)
if not data_to_send:
stdin.close()
session.shutdown_write()
writes = []
continue
sent_bytes = session.send(data_to_send)
LOG.debug("sent: %s" % data_to_send[:sent_bytes])
data_to_send = data_to_send[sent_bytes:]
if session.exit_status_ready():
break
if timeout and (time.time() - timeout) > start_time:
args = {"cmd": cmd, "host": self.host}
raise SSHTimeout(("Timeout executing command "
"'%(cmd)s' on host %(host)s") % args)
if e:
raise SSHError("Socket error.")
exit_status = session.recv_exit_status()
if 0 != exit_status and raise_on_error:
fmt = ("Command '%(cmd)s' failed with exit_status %(status)d.")
details = fmt % {"cmd": cmd, "status": exit_status}
if stderr_data:
details += (" Last stderr data: '%s'.") % stderr_data
raise SSHError(details)
return exit_status
def execute(self, cmd, stdin=None, timeout=3600):
"""Execute the specified command on the server.
:param cmd: Command to be executed, can be a list.
:param stdin: Open file to be sent on process stdin.
:param timeout: Timeout for execution of the command.
:returns: tuple (exit_status, stdout, stderr)
"""
stdout = six.moves.StringIO()
stderr = six.moves.StringIO()
exit_status = self.run(cmd, stderr=stderr,
stdout=stdout, stdin=stdin,
timeout=timeout, raise_on_error=False)
stdout.seek(0)
stderr.seek(0)
return exit_status, stdout.read(), stderr.read()
def wait(self, timeout=120, interval=1):
"""Wait for the host will be available via ssh."""
start_time = time.time()
while True:
try:
return self.execute("uname")
except (socket.error, SSHError) as e:
LOG.debug("Ssh is still unavailable: %r" % e)
time.sleep(interval)
if time.time() > (start_time + timeout):
raise SSHTimeout(("Timeout waiting for '%s'") % self.host)
def _put_file_sftp(self, localpath, remotepath, mode=None):
client = self._get_client()
sftp = client.open_sftp()
sftp.put(localpath, remotepath)
if mode is None:
mode = 0o777 & os.stat(localpath).st_mode
sftp.chmod(remotepath, mode)
def _put_file_shell(self, localpath, remotepath, mode=None):
cmd = ["cat > %s" % remotepath]
if mode is not None:
cmd.append("chmod 0%o %s" % (mode, remotepath))
with open(localpath, "rb") as localfile:
cmd = "; ".join(cmd)
self.run(cmd, stdin=localfile)
def put_file(self, localpath, remotepath, mode=None):
"""Copy specified local file to the server.
:param localpath: Local filename.
:param remotepath: Remote filename.
:param mode: Permissions to set after upload
"""
try:
self._put_file_sftp(localpath, remotepath, mode=mode)
except paramiko.SSHException:
self._put_file_shell(localpath, remotepath, mode=mode)
|
SSHTimeout
|
load.py
|
from builtins import zip
from builtins import range
from builtins import object
import re
import csv
import unicodecsv
from bs4 import BeautifulSoup
from openelex.base.load import BaseLoader
from openelex.models import RawResult
from openelex.lib.text import ocd_type_id, slugify
from .datasource import Datasource
class LoadResults(object):
"""Entry point for data loading.
Determines appropriate loader for file and triggers load process.
"""
def run(self, mapping):
election_id = mapping['election']
if '2002' in election_id:
loader = MDLoader2002()
elif '2000' in election_id and 'primary' in election_id:
loader = MDLoader2000Primary()
elif '2008' in election_id and 'special' in election_id:
loader = MDLoader2008Special()
else:
loader = MDLoader()
loader.run(mapping)
class CountyOCDMixin(object):
"""
Loader mixin that adds convenience method for generating county-level
OCD IDs
"""
def _get_county_ocd_id(self, jurisdiction):
"""
Build an OCD ID for a county-level jurisdiction when the mapping
reflects the state OCD ID.
"""
# Baltimore City is treated like a county in the results, but we
# should use the city's OCD ID
if jurisdiction == "Baltimore City":
ocd_id = "{}/place:baltimore".format(self.mapping['ocd_id'])
else:
ocd_id = "{}/county:{}".format(self.mapping['ocd_id'],
ocd_type_id(jurisdiction))
return ocd_id
class MDBaseLoader(CountyOCDMixin, BaseLoader):
datasource = Datasource()
target_offices = set([
'President - Vice Pres',
'President and Vice President of the United States',
'U.S. Senator',
'U.S. Congress',
'Representative in Congress',
'Governor / Lt. Governor',
'Comptroller',
'Attorney General',
'State Senator',
'House of Delegates',
])
district_offices = set([
'U.S. Congress',
'Representative in Congress',
'State Senator',
"House of Delegates",
])
def _skip_row(self, row):
"""
Should this row be skipped?
This should be implemented in subclasses.
"""
return False
class MDLoader(MDBaseLoader):
"""
Parse Maryland election results for the 2000 general election and
all elections after 2002.
"""
def load(self):
with self._file_handle as csvfile:
results = []
reader = unicodecsv.DictReader(csvfile)
for row in reader:
# Skip non-target offices
if self._skip_row(row):
continue
elif 'state_legislative' in self.source:
results.extend(self._prep_state_leg_results(row))
elif 'precinct' in self.source:
results.append(self._prep_precinct_result(row))
else:
results.append(self._prep_county_result(row))
RawResult.objects.insert(results)
def _skip_row(self, row):
if row['Office Name'] is None:
return True
return row['Office Name'].strip() not in self.target_offices
def _build_contest_kwargs(self, row, primary_type):
kwargs = {
'office': row['Office Name'].strip(),
'district': row['Office District'].strip(),
}
# Add party if it's a primary
#TODO: QUESTION: Should semi-closed also have party?
if primary_type == 'closed':
kwargs['primary_party'] = row['Party'].strip()
return kwargs
def _build_candidate_kwargs(self, row):
try:
full_name = row['Candidate Name'].strip()
except KeyError:
# 2000 results use "Candidate" for the column name
full_name = row['Candidate'].strip()
slug = slugify(full_name, substitute='-')
kwargs = {
'full_name': full_name,
#TODO: QUESTION: Do we need this? if so, needs a matching model field on RawResult
'name_slug': slug,
}
return kwargs
def _base_kwargs(self, row):
"Build base set of kwargs for RawResult"
# TODO: Can this just be called once?
kwargs = self._build_common_election_kwargs()
contest_kwargs = self._build_contest_kwargs(row, kwargs['primary_type'])
candidate_kwargs = self._build_candidate_kwargs(row)
kwargs.update(contest_kwargs)
kwargs.update(candidate_kwargs)
return kwargs
def _get_state_ocd_id(self):
"""
Get the state portion of the mapping's OCD ID
This is necessary because the mappings for some files have OCD IDs
like 'ocd-division/country:us/state:md/sldl:all'. We need to extract
the state portion, 'ocd-division/country:us/state:md' to build OCD
IDs for lower jurisdictions.
"""
bits = []
state_bit = "state:"+ self.state
for bit in self.mapping['ocd_id'].split('/'):
bits.append(bit)
if bit == state_bit:
break
return '/'.join(bits)
def _prep_state_leg_results(self, row):
kwargs = self._base_kwargs(row)
kwargs.update({
'reporting_level': 'state_legislative',
'winner': row['Winner'].strip(),
'write_in': self._writein(row),
'party': row['Party'].strip(),
})
try:
kwargs['write_in'] = row['Write-In?'].strip() # at the contest-level
except KeyError as e:
pass
results = []
for field, val in list(row.items()):
clean_field = field.strip()
# Legislative fields prefixed with LEGS
if not clean_field.startswith('LEGS'):
continue
kwargs.update({
'jurisdiction': clean_field,
# Remove the "LEGS " from the ocd_id. This is a somewhat
# transformy action, but do it here in order to make the OCD IDs
# as usable as possible when we bake out raw results
'ocd_id': "{}/sldl:{}".format(self._get_state_ocd_id(),
ocd_type_id(clean_field.replace("LEGS ", ""))),
'votes': self._votes(val),
})
results.append(RawResult(**kwargs))
return results
def _prep_county_result(self, row):
kwargs = self._base_kwargs(row)
vote_brkdown_fields = [
('election_day', 'Election Night Votes'),
('absentee', 'Absentees Votes'),
('provisional', 'Provisional Votes'),
('second_absentee', '2nd Absentees Votes'),
]
vote_breakdowns = {}
for field, key in vote_brkdown_fields:
try:
vote_breakdowns[field] = self._votes(row[key].strip())
except KeyError:
pass
kwargs.update({
'reporting_level': 'county',
'jurisdiction': self.mapping['name'],
'ocd_id': self.mapping['ocd_id'],
'party': row['Party'].strip(),
'votes': self._votes(row['Total Votes']),
'vote_breakdowns': vote_breakdowns,
})
if (kwargs['office'] not in self.district_offices
and kwargs['district'] != ''):
kwargs['reporting_level'] = 'congressional_district_by_county'
kwargs['reporting_district'] = kwargs['district']
del kwargs['district']
return RawResult(**kwargs)
def _prep_precinct_result(self, row):
kwargs = self._base_kwargs(row)
precinct = "%s-%s" % (row['Election District'], row['Election Precinct'].strip())
ocd_id = "{}/precinct:{}".format(self.mapping['ocd_id'],
ocd_type_id(precinct))
kwargs.update({
'reporting_level': 'precinct',
'jurisdiction': precinct,
'parent_jurisdiction': self.mapping['name'],
'ocd_id': ocd_id,
'party': row['Party'].strip(),
'votes': self._votes(row['Election Night Votes']),
'votes_type': 'election_day',
'winner': row['Winner'],
'write_in': self._writein(row),
})
return RawResult(**kwargs)
def _votes(self, val):
"""
Returns cleaned version of votes or 0 if it's a non-numeric value.
"""
if val.strip() == '':
return 0
try:
return int(float(val))
except ValueError:
# Couldn't convert value from string
return 0
def _writein(self, row):
# sometimes write-in field not present
try:
write_in = row['Write-In?'].strip()
except KeyError:
write_in = None
return write_in
class MDLoader2002(MDBaseLoader):
"""
Loads Maryland results for 2002.
Format:
Maryland results for 2002 are in a delimited text file where the delimiter
is '|'.
Fields:
0: Office
1: Office District - '-' is used to denote null values
2: County
3: Last Name - "zz998" is used for write-in candidates
4: Middle Name - "\\N" is used to denote null values
5: First Name - "Other Write-Ins" is used for write-in candidates
6: Party
7: Winner - Value is 0 or 1
8: UNKNOWN - Values are "(Vote for One)", "(Vote for No More Than Three)", etc.
9: Votes
10: UNKNOWN - Values are "\\N" for every row
Sample row:
House of Delegates |32 |Anne Arundel County |Burton |W. |Robert |Republican | 0|(Vote for No More Than Three) | 1494|\\N
Notes:
In the general election file, there are rows for judges and for
"Statewide Ballot Questions". The columns in these rows are shifted over,
but we can ignore these rows since we're not interested in these offices.
"""
def load(self):
headers = [
'office',
'district',
'jurisdiction',
'family_name',
'additional_name',
'given_name',
'party',
'winner',
'vote_type',
'votes',
'fill2'
]
self._common_kwargs = self._build_common_election_kwargs()
self._common_kwargs['reporting_level'] = 'county'
# Store result instances for bulk loading
results = []
with self._file_handle as csvfile:
reader = unicodecsv.DictReader(csvfile, fieldnames=headers, delimiter='|')
for row in reader:
if self._skip_row(row):
continue
rr_kwargs = self._common_kwargs.copy()
if rr_kwargs['primary_type'] == 'closed':
rr_kwargs['primary_party'] = row['party'].strip()
rr_kwargs.update(self._build_contest_kwargs(row))
rr_kwargs.update(self._build_candidate_kwargs(row))
jurisdiction = row['jurisdiction'].strip()
rr_kwargs.update({
'party': row['party'].strip(),
'jurisdiction': jurisdiction,
'ocd_id': self._get_county_ocd_id(jurisdiction),
'office': row['office'].strip(),
'district': row['district'].strip(),
'votes': int(row['votes'].strip()),
})
results.append(RawResult(**rr_kwargs))
RawResult.objects.insert(results)
def _skip_row(self, row):
return row['office'].strip() not in self.target_offices
def _build_contest_kwargs(self, row):
return {
'office': row['office'].strip(),
'district': row['district'].strip(),
}
def _build_candidate_kwargs(self, row):
return {
'family_name': row['family_name'].strip(),
'given_name': row['given_name'].strip(),
'additional_name': row['additional_name'].strip(),
}
class MDLoader2000Primary(MDBaseLoader):
office_choices = [
"President and Vice President of the United States",
"U.S. Senator",
"Representative in Congress",
"Judge of the Circuit Court",
"Female Delegates and Alternate to the Democratic National Convention",
"Female Delegates to the Democratic National Convention",
"Male Delegates to the Democratic National Convention",
"Male Delegates and Alternate to the Democratic National Convention",
"Delegates to the Republican National Convention",
]
def load(self):
candidates = {}
results = []
last_office = None
last_party = None
last_district = None
common_kwargs = self._build_common_election_kwargs()
with self._file_handle as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if not len(row):
continue # Skip blank lines
# determine if this is a row with an office
office, party, district = self._parse_header(row)
if office:
# It's a header row
if office in self.target_offices:
# It's an office we care about. Save the office and
# party for the next row
last_office = office
last_party = party
last_district = district
else:
last_office = None
last_party = None
last_district = None
elif last_office and row[0] == '':
# Candidate name row
candidates, winner_name = self._parse_candidates(row)
elif last_office: # has to be a county result
new_results = self._parse_results(row, last_office,
last_party, last_district,
candidates, winner_name, common_kwargs)
results.extend(new_results)
RawResult.objects.insert(results)
def _parse_header(self, row):
"""
Returns a tuple of office and party and congressional district
if the row is a header.
Returns (None, None, None) for a non-header row.
Note that the district doesn't represent the district of the office
"""
office = self._parse_office(row)
if office:
party = self._parse_party(row)
district = self._parse_district(row)
else:
party = None
district = None
return office, party, district
def _parse_office(self, row):
for o in self.office_choices:
if o in row[0]:
return o
return None
def _parse_party(self, row):
if 'Democratic' in row[0]:
return 'Democratic'
elif 'Republican' in row[0]:
return 'Republican'
else:
return None
def _parse_district(self, row):
if 'District' not in row[0]:
return None
return re.search(r'(\d+)', row[0]).groups(0)[0]
def _parse_candidates(self, row):
candidates = []
for col in row:
if col != '':
full_name = col.strip()
if 'Winner' in full_name:
# Trim winner from candidate name
full_name, remainder = full_name.split(' Winner')
winner = full_name
candidates.append(full_name)
return candidates, winner
# TODO: QUESTION: How to handle "Uncomitted to any ..." values
def _parse_results(self, row, office, party, district, candidates,
winner_name, common_kwargs):
results = []
cols = [x.strip() for x in row if x != '']
county = cols[0].strip()
cand_results = list(zip(candidates, cols[1:]))
for cand, votes in cand_results:
result_kwargs = common_kwargs.copy()
result_kwargs.update({
'jurisdiction': county,
'ocd_id': self._get_county_ocd_id(county),
'office': office,
'party': party,
'full_name': cand,
'votes': int(votes),
})
if result_kwargs['primary_type'] == 'closed':
result_kwargs['primary_party'] = party
if office == "Representative in Congress":
# In the case of U.S. representatives, the district represents
# the office district. In all other cases, it just
# represents the level of result aggregation.
result_kwargs['district'] = district
if cand == winner_name:
result_kwargs['winner'] = 'Winner'
# Try to figure out if this is a case where results are
# provided by congressional district split by county and
# record this.
result_kwargs['reporting_level'] = self._get_reporting_level(district)
if result_kwargs['reporting_level'] == 'congressional_district_by_county':
result_kwargs['reporting_district'] = district
results.append(RawResult(**result_kwargs))
return results
def _get_reporting_level(self, district):
"""
Returns the reporting level based on the value of the results' district.
This deals with the way in which results for 2000 primaries are
returned broken down by both congressional district, split by county.
"""
if district:
return "congressional_district_by_county"
else:
return "county"
class MDLoader2008Special(CountyOCDMixin, BaseLoader):
"""
Loader for the Maryland 2008 4th Congressional District Special election results
"""
datasource = Datasource()
def load(self):
table = self._get_html_table()
rows = self._parse_html_table(table)
winner_name = self._parse_winner_name(table)
candidate_attrs = self._parse_candidates_and_parties(rows[0],
winner_name)
results = self._parse_results(rows[1:3], candidate_attrs)
RawResult.objects.insert(results)
def _get_html_table(self):
soup = BeautifulSoup(self._file_handle, 'html.parser')
return soup.find(text=re.compile("Donna Edwards")).parent.parent.parent
def _parse_html_table(self, table):
rows = []
for tr in table.find_all('tr'):
rows.append(self._parse_html_table_row(tr))
return rows
def _parse_html_table_row(self, tr):
row = []
cells = tr.find_all('th') + tr.find_all('td')
for cell in cells:
row.append(cell.text.strip())
return row
def _parse_winner_name(self, table):
|
def _parse_candidates_and_parties(self, row, winner_name):
candidate_attrs = []
for cell in row[1:]:
# Skip the first cell. It's a header, "County"
attrs = {
'full_name': self._parse_name(cell),
'party': self._parse_party(cell),
'write_in': self._parse_write_in(cell),
}
if attrs['full_name'] == winner_name:
attrs['contest_winner'] = True
candidate_attrs.append(attrs)
return candidate_attrs
def _parse_name(self, s):
if s == "Other Write-Ins":
return s
# We know that all the candidate names are just first and last names
bits = re.split(r'\s', s)
return ' '.join(bits[:2])
def _parse_party(self, s):
if s == "Other Write-Ins":
return None
bits = re.split(r'\s', s)
return bits[2]
def _parse_write_in(self, s):
if s == "Other Write-Ins":
return s
elif "Write-In" in s:
return "Write-In"
else:
return ""
def _parse_results(self, rows, candidate_attrs):
# These raw result attributes will be the same for every result.
common_kwargs = self._build_common_election_kwargs()
common_kwargs.update({
'office': "Representative in Congress",
'district': '4',
'reporting_level': "county",
})
results = []
for row in rows:
county = row[0]
for i in range(1, len(row)):
kwargs = common_kwargs.copy()
kwargs.update(candidate_attrs[i-1])
kwargs['jurisdiction'] = county
kwargs['ocd_id'] = self._get_county_ocd_id(county)
kwargs['votes'] = self._parse_votes(row[i])
results.append(RawResult(**kwargs))
return results
def _parse_votes(self, s):
return int(s.split(' ')[0].replace(',', ''))
|
cell = table.select('th > img')[0].parent
return self._parse_name(cell.text.strip())
|
storage_config.go
|
package core
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"bitbucket.org/udt/wizefs/internal/globals"
"bitbucket.org/udt/wizefs/internal/tlog"
)
const (
StorageConfigVersion = 1
StorageConfigFilename = "wizedb.conf"
)
type FilesystemInfo struct {
OriginPath string `json:"originpath"`
Type globals.FSType `json:"type"`
MountpointKey string `json:"mountpoint"`
}
type MountpointInfo struct {
MountpointPath string `json:"mountpointpath"`
OriginKey string `json:"origin"`
}
type StorageConfig struct {
// header
// filesystems
Filesystems map[string]FilesystemInfo `json:"created"`
Mountpoints map[string]MountpointInfo `json:"mounted"`
filename string
mutex sync.Mutex
}
// TEST: TestWizeConfigMake
func NewStorageConfig(path string) *StorageConfig
|
// TESTS: TestWizeConfig* (several tests)
func (wc *StorageConfig) CreateFilesystem(origin, originPath string, itype globals.FSType) error {
// HACK: this fixed problems with gRPC methods (and GUI?)
wc.Load()
_, ok := wc.Filesystems[origin]
if ok {
return fmt.Errorf("This filesystem is already added!")
}
wc.Filesystems[origin] = FilesystemInfo{
OriginPath: originPath,
Type: itype,
MountpointKey: "",
}
tlog.Debug.Println("Add filesystem to the created map! ", wc)
return nil
}
func (wc *StorageConfig) DeleteFilesystem(origin string) error {
// HACK: this fixed problems with gRPC methods (and GUI?)
wc.Load()
_, ok := wc.Filesystems[origin]
if !ok {
return fmt.Errorf("This filesystem is absent!")
}
delete(wc.Filesystems, origin)
tlog.Debug.Println("Delete filesystem from the created map! ", wc)
return nil
}
func (wc *StorageConfig) MountFilesystem(origin, mountpoint, mountpointpath string) error {
// HACK: this fixed problems with gRPC methods (and GUI?)
// HACK2: for gRPC/Mount we don't need to Load() config, because it works via mount CLI app
//wc.Load()
_, ok := wc.Mountpoints[mountpoint]
if ok {
return fmt.Errorf("This filesystem is already mounted!")
}
wc.Mountpoints[mountpoint] = MountpointInfo{
MountpointPath: mountpointpath,
OriginKey: origin,
}
fsi := wc.Filesystems[origin]
wc.Filesystems[origin] = FilesystemInfo{
OriginPath: fsi.OriginPath,
Type: fsi.Type,
MountpointKey: mountpoint,
}
tlog.Debug.Println("Add filesystem to the mounted map! ", wc)
return nil
}
func (wc *StorageConfig) UnmountFilesystem(mountpoint string) error {
// HACK: this fixed problems with gRPC methods (and GUI?)
wc.Load()
mpi, ok := wc.Mountpoints[mountpoint]
if !ok {
return fmt.Errorf("This filesystem is not mounted!")
}
origin := mpi.OriginKey
delete(wc.Mountpoints, mountpoint)
fsi := wc.Filesystems[origin]
wc.Filesystems[origin] = FilesystemInfo{
OriginPath: fsi.OriginPath,
Type: fsi.Type,
MountpointKey: "",
}
tlog.Debug.Println("Delete filesystem from the mounted map! ", wc)
return nil
}
func (wc *StorageConfig) Save() error {
wc.mutex.Lock()
defer wc.mutex.Unlock()
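// Write to a temporary file first, sync it, then atomically rename it over the
// real config file so a crash cannot leave a half-written config behind.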
tmp := wc.filename + ".tmp"
// 0400 permissions: wizefs.conf should be kept secret and never be written to.
//fd, err := os.OpenFile(tmp, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0400)
// temporary solution
fd, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0644)
if err != nil {
return err
}
js, err := json.MarshalIndent(wc, "", "\t")
if err != nil {
return err
}
// For convenience for the user, add a newline at the end.
js = append(js, '\n')
_, err = fd.Write(js)
if err != nil {
return err
}
err = fd.Sync()
if err != nil {
return err
}
err = fd.Close()
if err != nil {
return err
}
err = os.Rename(tmp, wc.filename)
return err
}
func (wc *StorageConfig) Load() error {
wc.mutex.Lock()
defer wc.mutex.Unlock()
// Read from disk
js, err := ioutil.ReadFile(wc.filename)
if err != nil {
//tlog.Warn.Printf("Load config file: ReadFile: %v, %s\n", err, err.Error())
return err
}
wc.clear()
// Unmarshal
err = json.Unmarshal(js, &wc)
if err != nil {
tlog.Warn.Printf("Failed to unmarshal config file")
return err
}
return nil
}
func (wc *StorageConfig) CheckOriginGetMountpoint(origin string) (mountpointPath string, err error) {
// HACK: this fixed problems with gRPC methods (and GUI?)
wc.Load()
var ok bool
var fsinfo FilesystemInfo
fsinfo, ok = wc.Filesystems[origin]
if !ok {
tlog.Warn.Printf("Filesystem %s is not exist!", origin)
return "", errors.New("Filesystem is not exist!")
}
if fsinfo.MountpointKey == "" {
tlog.Warn.Printf("Filesystem %s is not mounted!", origin)
return "", errors.New("Filesystem is not mounted!")
}
var mpinfo MountpointInfo
mpinfo, ok = wc.Mountpoints[fsinfo.MountpointKey]
if !ok {
tlog.Warn.Printf("Mounted filesystem %s is not exist!", fsinfo.MountpointKey)
return "", errors.New("Mounted filesystem is not exist!")
}
mountpointPath = mpinfo.MountpointPath
// TODO: check mountpointPath?
return mountpointPath, nil
}
func (wc *StorageConfig) Check(origin string, shouldFindOrigin, shouldMounted bool) (extCode int, err error) {
wc.Load()
existOrigin, existMountpoint := wc.checkFilesystem(origin)
if shouldFindOrigin {
if existOrigin {
return globals.ExitOrigin,
fmt.Errorf("ORIGIN: %s is already exist in common config.", origin)
}
} else {
if !existOrigin {
return globals.ExitOrigin,
fmt.Errorf("Did not find ORIGIN: %s in common config.", origin)
}
}
if shouldMounted {
if existMountpoint {
return globals.ExitMountPoint,
fmt.Errorf("This ORIGIN: %s is already mounted", origin)
}
} else {
if !existMountpoint {
// TEST: TestUnmountNotMounted
return globals.ExitMountPoint,
fmt.Errorf("This ORIGIN: %s is not mounted yet", origin)
}
}
return 0, nil
}
// just for WizeFS UI app
func (wc *StorageConfig) GetInfoByOrigin(origin string) (fsinfo FilesystemInfo, mpinfo MountpointInfo, err error) {
// HACK: this fixed problems with gRPC methods (and GUI?)
wc.Load()
fsinfo, ok := wc.Filesystems[origin]
if !ok {
tlog.Warn.Printf("Filesystem %s is not exist!", origin)
return FilesystemInfo{}, MountpointInfo{}, errors.New("Filesystem is not exist!")
}
mpinfo, ok = wc.Mountpoints[fsinfo.MountpointKey]
if !ok {
tlog.Warn.Printf("Mounted filesystem %s is not exist!", fsinfo.MountpointKey)
return fsinfo, MountpointInfo{}, errors.New("Mounted filesystem is not exist!")
}
return fsinfo, mpinfo, nil
}
func (wc *StorageConfig) clear() {
// Just clear WizeConfig maps
for k := range wc.Filesystems {
delete(wc.Filesystems, k)
}
for k := range wc.Mountpoints {
delete(wc.Mountpoints, k)
}
}
func (wc *StorageConfig) checkFilesystem(origin string) (existOrigin bool, existMountpoint bool) {
existMountpoint = false
fsinfo, existOrigin := wc.Filesystems[origin]
if existOrigin {
if fsinfo.MountpointKey != "" {
existMountpoint = true
}
}
return
}
|
{
if path == "" {
exe, err := os.Executable()
if err != nil {
panic(err)
}
path = filepath.Dir(exe)
}
return &StorageConfig{
Filesystems: make(map[string]FilesystemInfo),
Mountpoints: make(map[string]MountpointInfo),
filename: filepath.Join(path, StorageConfigFilename),
}
}
|
document.ts
|
import { SceneDataPF2e } from './data';
import { SceneConfigPF2e } from './sheet';
import { AmbientLightDocumentPF2e, LightLevels, TokenDocumentPF2e } from '.';
export class ScenePF2e extends Scene<TokenDocumentPF2e, AmbientLightDocumentPF2e> {
/** Toggle Unrestricted Global Vision according to scene darkness level */
override prepareBaseData() {
super.prepareBaseData();
if (canvas.sight?.rulesBasedVision) {
|
}
this.data.flags.pf2e ??= { syncDarkness: 'default' };
this.data.flags.pf2e.syncDarkness ??= 'default';
}
get lightLevel(): number {
return 1 - this.data.darkness;
}
}
export interface ScenePF2e {
_sheet: SceneConfigPF2e | null;
readonly data: SceneDataPF2e<this>;
get sheet(): SceneConfigPF2e;
getFlag(scope: 'pf2e', key: 'syncDarkness'): 'enabled' | 'disabled' | 'default';
getFlag(scope: string, key: string): unknown;
}
|
this.data.globalLightThreshold = 1 - LightLevels.DARKNESS;
this.data.globalLight = true;
|
virtual_flash.rs
|
//! Virtualize writing flash.
//!
//! `MuxFlash` provides shared access to a flash interface from multiple clients
//! in the kernel. For instance, a board may wish to expose the internal MCU
//! flash for multiple uses, like allowing userland apps to write their own
//! flash space, and to provide a "scratch space" at the end of flash for all
//! apps to use. Each of these requires a capsule to support the operation, and
//! must use a `FlashUser` instance to contain the per-user state for the
//! virtualization.
//!
//! Usage
//! -----
//!
//! ```
//! // Create the mux.
//! let mux_flash = static_init!(
//! capsules::virtual_flash::MuxFlash<'static, sam4l::flashcalw::FLASHCALW>,
//! capsules::virtual_flash::MuxFlash::new(&sam4l::flashcalw::FLASH_CONTROLLER));
//! hil::flash::HasClient::set_client(&sam4l::flashcalw::FLASH_CONTROLLER, mux_flash);
//!
//! // Everything that then uses the virtualized flash must use one of these.
//! let virtual_flash = static_init!(
//! capsules::virtual_flash::FlashUser<'static, sam4l::flashcalw::FLASHCALW>,
//! capsules::virtual_flash::FlashUser::new(mux_flash));
//! ```
use core::cell::Cell;
use kernel::common::cells::{OptionalCell, TakeCell};
use kernel::common::{List, ListLink, ListNode};
use kernel::hil;
use kernel::ReturnCode;
/// Handle keeping a list of active users of flash hardware and serialize their
/// requests. After each completed request the list is checked to see if there
/// is another flash user with an outstanding read, write, or erase request.
pub struct MuxFlash<'a, F: hil::flash::Flash + 'static> {
flash: &'a F,
users: List<'a, FlashUser<'a, F>>,
inflight: OptionalCell<&'a FlashUser<'a, F>>,
}
impl<F: hil::flash::Flash> hil::flash::Client<F> for MuxFlash<'a, F> {
fn read_complete(&self, pagebuffer: &'static mut F::Page, error: hil::flash::Error) {
self.inflight.take().map(move |user| {
user.read_complete(pagebuffer, error);
});
self.do_next_op();
}
fn write_complete(&self, pagebuffer: &'static mut F::Page, error: hil::flash::Error) {
self.inflight.take().map(move |user| {
user.write_complete(pagebuffer, error);
});
self.do_next_op();
}
fn erase_complete(&self, error: hil::flash::Error)
|
}
impl<F: hil::flash::Flash> MuxFlash<'a, F> {
pub const fn new(flash: &'a F) -> MuxFlash<'a, F> {
MuxFlash {
flash: flash,
users: List::new(),
inflight: OptionalCell::empty(),
}
}
/// Scan the list of users and find the first user that has a pending
/// request, then issue that request to the flash hardware.
fn do_next_op(&self) {
if self.inflight.is_none() {
let mnode = self
.users
.iter()
.find(|node| node.operation.get() != Op::Idle);
mnode.map(|node| {
node.buffer.take().map_or_else(
|| {
// Don't need a buffer for erase.
match node.operation.get() {
Op::Erase(page_number) => {
self.flash.erase_page(page_number);
}
_ => {}
};
},
|buf| {
match node.operation.get() {
Op::Write(page_number) => {
self.flash.write_page(page_number, buf);
}
Op::Read(page_number) => {
self.flash.read_page(page_number, buf);
}
Op::Erase(page_number) => {
self.flash.erase_page(page_number);
}
Op::Idle => {} // Can't get here...
}
},
);
node.operation.set(Op::Idle);
self.inflight.set(node);
});
}
}
}
#[derive(Copy, Clone, PartialEq)]
enum Op {
Idle,
Write(usize),
Read(usize),
Erase(usize),
}
/// Keep state for each flash user. All uses of the virtualized flash interface
/// need to create one of these to be a user of the flash. The `new()` function
/// handles most of the work, a user only has to pass in a reference to the
/// MuxFlash object.
pub struct FlashUser<'a, F: hil::flash::Flash + 'static> {
mux: &'a MuxFlash<'a, F>,
buffer: TakeCell<'static, F::Page>,
operation: Cell<Op>,
next: ListLink<'a, FlashUser<'a, F>>,
client: OptionalCell<&'a dyn hil::flash::Client<FlashUser<'a, F>>>,
}
impl<F: hil::flash::Flash> FlashUser<'a, F> {
pub const fn new(mux: &'a MuxFlash<'a, F>) -> FlashUser<'a, F> {
FlashUser {
mux: mux,
buffer: TakeCell::empty(),
operation: Cell::new(Op::Idle),
next: ListLink::empty(),
client: OptionalCell::empty(),
}
}
}
impl<F: hil::flash::Flash, C: hil::flash::Client<Self>> hil::flash::HasClient<'a, C>
for FlashUser<'a, F>
{
fn set_client(&'a self, client: &'a C) {
self.mux.users.push_head(self);
self.client.set(client);
}
}
impl<F: hil::flash::Flash> hil::flash::Client<F> for FlashUser<'a, F> {
fn read_complete(&self, pagebuffer: &'static mut F::Page, error: hil::flash::Error) {
self.client.map(move |client| {
client.read_complete(pagebuffer, error);
});
}
fn write_complete(&self, pagebuffer: &'static mut F::Page, error: hil::flash::Error) {
self.client.map(move |client| {
client.write_complete(pagebuffer, error);
});
}
fn erase_complete(&self, error: hil::flash::Error) {
self.client.map(move |client| {
client.erase_complete(error);
});
}
}
impl<F: hil::flash::Flash> ListNode<'a, FlashUser<'a, F>> for FlashUser<'a, F> {
fn next(&'a self) -> &'a ListLink<'a, FlashUser<'a, F>> {
&self.next
}
}
impl<F: hil::flash::Flash> hil::flash::Flash for FlashUser<'a, F> {
type Page = F::Page;
fn read_page(&self, page_number: usize, buf: &'static mut Self::Page) -> ReturnCode {
self.buffer.replace(buf);
self.operation.set(Op::Read(page_number));
self.mux.do_next_op();
ReturnCode::SUCCESS
}
fn write_page(&self, page_number: usize, buf: &'static mut Self::Page) -> ReturnCode {
self.buffer.replace(buf);
self.operation.set(Op::Write(page_number));
self.mux.do_next_op();
ReturnCode::SUCCESS
}
fn erase_page(&self, page_number: usize) -> ReturnCode {
self.operation.set(Op::Erase(page_number));
self.mux.do_next_op();
ReturnCode::SUCCESS
}
}
|
{
self.inflight.take().map(move |user| {
user.erase_complete(error);
});
self.do_next_op();
}
|
damgard_test.go
|
/*
* Copyright (c) 2018 XLAB d.o.o
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fullysec_test
|
"testing"
"github.com/fentec-project/gofe/data"
"github.com/fentec-project/gofe/innerprod/fullysec"
"github.com/fentec-project/gofe/sample"
"github.com/stretchr/testify/assert"
)
type damgardTestParam struct {
name string
modulusLength int
precomputed bool
}
func testFullySecDamgardDDHFromParam(t *testing.T, param damgardTestParam) {
l := 16
bound := big.NewInt(1024)
sampler := sample.NewUniformRange(new(big.Int).Add(new(big.Int).Neg(bound), big.NewInt(1)), bound)
var damgard *fullysec.Damgard
var err error
if param.precomputed {
damgard, err = fullysec.NewDamgardPrecomp(l, param.modulusLength, bound)
} else {
damgard, err = fullysec.NewDamgard(l, param.modulusLength, bound)
}
if err != nil {
t.Fatalf("Error during simple inner product creation: %v", err)
}
masterSecKey, masterPubKey, err := damgard.GenerateMasterKeys()
if err != nil {
t.Fatalf("Error during master key generation: %v", err)
}
y, err := data.NewRandomVector(l, sampler)
if err != nil {
t.Fatalf("Error during random generation: %v", err)
}
key, err := damgard.DeriveKey(masterSecKey, y)
if err != nil {
t.Fatalf("Error during key derivation: %v", err)
}
x, err := data.NewRandomVector(l, sampler)
if err != nil {
t.Fatalf("Error during random generation: %v", err)
}
// simulate the instantiation of encryptor (which should be given masterPubKey)
encryptor := fullysec.NewDamgardFromParams(damgard.Params)
xyCheck, err := x.Dot(y)
if err != nil {
t.Fatalf("Error during inner product calculation")
}
ciphertext, err := encryptor.Encrypt(x, masterPubKey)
if err != nil {
t.Fatalf("Error during encryption: %v", err)
}
decryptor := fullysec.NewDamgardFromParams(damgard.Params)
xy, err := decryptor.Decrypt(ciphertext, key, y)
if err != nil {
t.Fatalf("Error during decryption: %v", err)
}
assert.Equal(t, xy.Cmp(xyCheck), 0, "obtained incorrect inner product")
}
func TestFullySec_DamgardDDH(t *testing.T) {
params := []damgardTestParam{{name: "random", modulusLength: 512, precomputed: false},
{name: "precomputed", modulusLength: 2048, precomputed: true}}
for _, param := range params {
t.Run(param.name, func(t *testing.T) {
testFullySecDamgardDDHFromParam(t, param)
})
}
}
|
import (
"math/big"
|
api_types.go
|
/*
Copyright 2021 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"fmt"
apiextentionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
gatewayapiv1alpha1 "sigs.k8s.io/gateway-api/apis/v1alpha1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// TODO: API definition is missing kubebuilder annotations for validation, add them.
// TODO: Add proper comments for each of the API fields and Structs, so we can create proper docs.
const (
APIKind = "API"
)
type Destination struct {
Schema string `json:"schema,omitempty"`
apiextentionsv1.ServiceReference `json:"serviceReference"`
}
func (d Destination) NamespacedName() types.NamespacedName {
return types.NamespacedName{Namespace: d.Namespace, Name: d.Name}
}
type APIMappings struct {
// Inline OAS
// +optional
OAS *string `json:"OAS,omitempty"`
// Select an HTTP route by matching the HTTP request path.
// +optional
HTTPPathMatch *gatewayapiv1alpha1.HTTPPathMatch `json:"HTTPPathMatch,omitempty"`
}
// APISpec defines the desired state of API
type APISpec struct {
Destination Destination `json:"destination"`
Mappings APIMappings `json:"mappings"`
}
// APIStatus defines the observed state of API
type APIStatus struct {
Ready bool `json:"ready"`
ObservedGeneration int64 `json:"observedGeneration"`
|
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// API is the Schema for the apis API
type API struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec APISpec `json:"spec,omitempty"`
Status APIStatus `json:"status,omitempty"`
}
func APIObjectName(base, tag string) string {
return fmt.Sprintf("%s.%s", base, tag)
}
// +kubebuilder:object:root=true
// APIList contains a list of API
type APIList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []API `json:"items"`
}
func init() {
SchemeBuilder.Register(&API{}, &APIList{})
}
| |
RootHandler.js
|
/**
*
* Root handler for "/" route
* restify each path to its handler
*
**/
var $ = require('../lib/dollar').$,
BaseHandler = require('./BaseHandler'),
|
UserHandler = require('./UserHandler'),
MeHandler = require('./MeHandler');
function RootHandler(ctx) {
$('util').inherits(this, BaseHandler);
BaseHandler.call(this, ctx);
this.dispatch = function() {
// get the current step and change ctx for the next handler
var step = ctx.shift();
var method = ctx.getMethod();
switch (step) {
case '':
this[method](ctx.getContext());
break;
case 'user':
new UserHandler(ctx).dispatch();
break;
case 'me':
new MeHandler(ctx).dispatch();
break;
default:
new DefaultHandler(ctx).dispatch();
}
};
}
module.exports = RootHandler;
|
DefaultHandler = require('./DefaultHandler'),
|
datasets.py
|
import os
import glob
import random
import numpy as np
import pandas as pd
from imageio import mimread
from skimage.color import gray2rgb
from skimage import io, img_as_float32
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset
from data.augmentation import AllAugmentationTransform
def read_video(name, frame_shape):
"""
Read video which can be:
- an image of concatenated frames
- '.mp4' and '.gif'
- folder with videos
"""
if os.path.isdir(name):
frames = sorted(os.listdir(name))
num_frames = len(frames)
video_array = np.array(
[img_as_float32(io.imread(os.path.join(name, frames[idx]))) for idx in range(num_frames)])
elif name.lower().endswith('.png') or name.lower().endswith('.jpg'):
image = io.imread(name)
if len(image.shape) == 2 or image.shape[2] == 1:
image = gray2rgb(image)
if image.shape[2] == 4:
image = image[..., :3]
image = img_as_float32(image)
video_array = np.moveaxis(image, 1, 0)
video_array = video_array.reshape((-1,) + frame_shape)
video_array = np.moveaxis(video_array, 1, 2)
elif name.lower().endswith('.gif') or name.lower().endswith('.mp4') or name.lower().endswith('.mov'):
video = np.array(mimread(name))
if len(video.shape) == 3:
video = np.array([gray2rgb(frame) for frame in video])
if video.shape[-1] == 4:
video = video[..., :3]
video_array = img_as_float32(video)
else:
raise Exception("Unknown file extensions %s" % name)
return video_array
class FramesDataset(Dataset):
"""
Dataset of videos, each video can be represented as:
- an image of concatenated frames
- '.mp4' or '.gif'
- folder with all frames
"""
def __init__(self, root_dir, frame_shape=(256, 256, 3), id_sampling=False, is_train=True,
random_seed=0, pairs_list=None, augmentation_params=None):
self.root_dir = root_dir
self.videos = os.listdir(root_dir)
self.frame_shape = tuple(frame_shape)
self.pairs_list = pairs_list
self.id_sampling = id_sampling
if os.path.exists(os.path.join(root_dir, 'train')):
|
else:
print("Use random train-test split.")
train_videos, test_videos = train_test_split(self.videos, random_state=random_seed, test_size=0.2)
if is_train:
self.videos = train_videos
else:
self.videos = test_videos
self.is_train = is_train
if self.is_train:
self.transform = AllAugmentationTransform(**augmentation_params)
else:
self.transform = None
def __len__(self):
return len(self.videos)
def __getitem__(self, idx):
if self.is_train and self.id_sampling:
name = self.videos[idx]
path = np.random.choice(glob.glob(os.path.join(self.root_dir, name + '*.mp4')))
else:
name = self.videos[idx]
path = os.path.join(self.root_dir, name)
video_name = os.path.basename(path)
if self.is_train and os.path.isdir(path):
frames = os.listdir(path)
num_frames = len(frames)
frame_idx = np.sort(np.random.choice(num_frames, replace=True, size=2))
video_array = [img_as_float32(io.imread(os.path.join(path, frames[idx]))) for idx in frame_idx]
else:
video_array = read_video(path, frame_shape=self.frame_shape)
num_frames = len(video_array)
frame_idx = np.sort(np.random.choice(num_frames, replace=True, size=2)) if self.is_train else range(
num_frames)
video_array = list(video_array[frame_idx])
if self.transform is not None:
video_array = self.transform(video_array)
out = dict()
if self.is_train:
source = np.array(video_array[0], dtype='float32')
driving = np.array(video_array[1], dtype='float32')
out['source'] = source.transpose((2, 0, 1))
out['driving'] = driving.transpose((2, 0, 1))
else:
video = np.array(video_array, dtype='float32')
out['video'] = video.transpose((3, 0, 1, 2))
out['name'] = video_name
return out
class PairedDataset(Dataset):
"""Dataset of pairs for animation."""
def __init__(self, initial_dataset, number_of_pairs, seed=0):
self.initial_dataset = initial_dataset
pairs_list = self.initial_dataset.pairs_list
np.random.seed(seed)
if pairs_list is None:
max_idx = min(number_of_pairs, len(initial_dataset))
nx, ny = max_idx, max_idx
xy = np.mgrid[:nx, :ny].reshape(2, -1).T
number_of_pairs = min(xy.shape[0], number_of_pairs)
self.pairs = xy.take(np.random.choice(xy.shape[0], number_of_pairs, replace=False), axis=0)
else:
videos = self.initial_dataset.videos
name_to_index = {name: index for index, name in enumerate(videos)}
pairs = pd.read_csv(pairs_list)
pairs = pairs[np.logical_and(pairs['source'].isin(videos), pairs['driving'].isin(videos))]
number_of_pairs = min(pairs.shape[0], number_of_pairs)
self.pairs = []
self.start_frames = []
for ind in range(number_of_pairs):
self.pairs.append(
(name_to_index[pairs['driving'].iloc[ind]], name_to_index[pairs['source'].iloc[ind]]))
def __len__(self):
return len(self.pairs)
def __getitem__(self, idx):
pair = self.pairs[idx]
first = self.initial_dataset[pair[0]]
second = self.initial_dataset[pair[1]]
first = {'driving_' + key: value for key, value in first.items()}
second = {'source_' + key: value for key, value in second.items()}
return {**first, **second}
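# Illustrative usage (a minimal sketch; 'data/my_dataset' is a placeholder root
# directory, not part of this module):
#
#     dataset = FramesDataset(root_dir='data/my_dataset', is_train=False)
#     pairs = PairedDataset(dataset, number_of_pairs=10)
#     sample = pairs[0]
#     # dict with 'driving_video', 'driving_name', 'source_video', 'source_name'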
|
assert os.path.exists(os.path.join(root_dir, 'test'))
print("Use predefined train-test split.")
if id_sampling:
train_videos = {os.path.basename(video).split('#')[0] for video in
os.listdir(os.path.join(root_dir, 'train'))}
train_videos = list(train_videos)
else:
train_videos = os.listdir(os.path.join(root_dir, 'train'))
test_videos = os.listdir(os.path.join(root_dir, 'test'))
self.root_dir = os.path.join(self.root_dir, 'train' if is_train else 'test')
|
unzip.rs
|
use super::consts::*;
use chardet::{charset2encoding, detect};
use encoding::label::encoding_from_whatwg_label;
use encoding::DecoderTrap;
use filetime::{set_symlink_file_times, FileTime};
use zip::read::ZipArchive;
use zip::result::ZipError;
// https://docs.rs/filetime/ does not follow symlinks?
use std;
use std::error::Error;
use std::ffi::OsString;
use std::fs::read_dir;
use std::fs::{create_dir_all, File};
use std::io::{copy, BufReader};
use std::path::Path;
#[derive(Debug, PartialEq)]
pub enum Task {
Chardet, // Detect the charset for File's name from ZipArchive
List, // zipcs -l/--list
Unzip, // Extract files from archive with full paths
}
impl Default for Task {
fn default() -> Task {
Task::Unzip
}
}
#[derive(Debug, Default)]
pub struct Zips {
    pub charset: CharSet, //zipcs -cs/--charset //utf-8
pub outdir: String, //zipcs -o/--outdir //./
pub zips: Vec<String>, //zipcs ZipArchive0 ZipArchive1 ...
pub task: Task, // UNZIP
}
impl Zips {
pub fn check_fix(&mut self) -> Result<(), String> {
let name = "ZipArchives";
for zip in &self.zips {
let path = Path::new(&zip);
if !path.exists() {
                return Err(format!("Arguments({}): \"{:?}\" does not exist", name, path));
} else if path.is_dir() {
return Err(format!("Arguments({}): \"{:?}\" is a directory", name, path));
}
File::open(path).map_err(|e| format!("Arguments({}): \"{:?}\" is invalid({})", name, path, e.description()))?;
}
Ok(())
}
pub fn call(self) -> Result<(), String> {
debug!("Config_zip: {:?}", self);
for zip_arch_path in self.zips() {
if let Err(e) = for_zip_arch_file(zip_arch_path, &self) {
return Err(format!("{:?} -> {:?}", zip_arch_path, e));
}
}
Ok(())
}
pub fn charset(&self) -> &CharSet {
&self.charset
}
pub fn outdir(&self) -> &String {
&self.outdir
}
pub fn zips(&self) -> &[String] {
self.zips.as_slice()
}
pub fn task(&self) -> &Task {
&self.task
}
}
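// Illustrative usage (a minimal sketch; "example.zip" is a placeholder archive):
//
//     let mut zips = Zips::default();
//     zips.zips.push("example.zip".to_owned());
//     zips.task = Task::List; // list entries instead of extracting
//     zips.check_fix().expect("archive should exist");
//     zips.call().expect("listing should succeed");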
fn for_zip_arch_file(zip_arch_path: &str, config: &Zips) -> Result<(), ZipCSError> {
let zip_arch_path_ = Path::new(zip_arch_path);
let zip_arch = File::open(zip_arch_path)?;
let reader = BufReader::new(zip_arch);
let mut zip_arch = ZipArchive::new(reader)?;
// LIST
if *config.task() == Task::List {
for i in 0..zip_arch.len() {
let file = match zip_arch.by_index(i) {
Ok(o) => o,
Err(e) => {
eprintln!("{}_Error: {:?}${:?} ->{:?}", NAME, zip_arch_path, i, e);
continue;
}
};
let name = {
if let Ok(o) = config.charset().decode(file.name_raw()) {
o
} else {
file.name().to_owned()
}
};
if name.ends_with('/') {
println!("${}-> {:?}", i, name);
} else {
println!("${}-> {:?}: {:?}", i, name, file.size());
}
}
return Ok(());
}
// Chardet
if *config.task() == Task::Chardet {
for i in 0..zip_arch.len() {
let file = match zip_arch.by_index(i) {
Ok(o) => o,
Err(e) => {
eprintln!("{}_Error: {:?}${:?} ->{:?}", NAME, zip_arch_path, i, e);
continue;
}
};
let charset = detect(file.name_raw());
let name = encoding_from_whatwg_label(charset2encoding(&charset.0))
.and_then(|enc| enc.decode(file.name_raw(), DecoderTrap::Strict).ok())
.unwrap_or_else(|| file.name().to_owned());
if name.ends_with('/') {
println!("{} ${}-> {:?}", charset.0, i, name);
} else {
println!("{} ${}-> {:?}: {:?}", charset.0, i, name, file.size());
}
}
return Ok(());
}
// UNZIP
    // Get outdir
let outdir = if config.outdir.is_empty() {
zip_arch_path_
.file_stem()
.ok_or("ZipArchive's stem name is None")?
.to_os_string()
} else {
OsString::from(config.outdir())
};
    // Check and create outdir
let outdir_path = Path::new(&outdir);
if outdir_path.exists() && outdir_path.is_dir() {
let dir_item = read_dir(&outdir_path)
.map_err(|e| format!("Reading OutDir({}) occurs error: {}", outdir_path.display(), e.description()))?;
if dir_item.count() != 0 {
return Err(format!("OutDir({}) is not empty!", outdir_path.display()).into());
}
} else if outdir_path.exists() && !outdir_path.is_dir() {
return Err(format!("OutDir({}) is not a Dir!", outdir_path.display()).into());
} else {
create_dir_all(outdir_path)?;
}
for i in 0..zip_arch.len() {
let mut file = match zip_arch.by_index(i) {
Ok(o) => o,
Err(e) => {
eprintln!("{}_Error: {:?}${:?} ->{:?}", NAME, zip_arch_path, i, e);
continue;
}
};
// Get name
let name = {
if let Ok(o) = config.charset().decode(file.name_raw()) {
o
} else {
file.name().to_owned()
}
};
// Get outpath, use PathBuf.push() to concat
let mut path = outdir_path.to_path_buf();
path.push(&name);
// create dir/file
if name.ends_with('/') {
println!("${}-> {:?}", i, path.as_path());
create_dir_all(&path)?;
} else {
println!("${}-> {:?}: {:?}", i, path.as_path(), file.size());
if let Some(p) = path.parent() {
if !p.exists() {
create_dir_all(&p)?;
}
}
let mut outfile = File::create(&path)?;
copy(&mut file, &mut outfile)?;
}
// Get/Set m/atime
{
let tm = file.last_modified().to_time().to_timespec();
let tm = FileTime::from_unix_time(tm.sec, tm.nsec as u32);
set_symlink_file_times(&path, tm, tm)
.map_err(|e| {
eprintln!(
"filetime::set_symlink_file_times({}, {:?}) occurs error: {}",
path.as_path().display(),
tm,
e.description()
)
})
.ok();
}
// Get/Set permissions
#[cfg(unix)]
{
use std::fs::{set_permissions, Permissions};
use std::os::unix::fs::PermissionsExt;
if let Some(mode) = file.unix_mode() {
set_permissions(&path, Permissions::from_mode(mode))
.map_err(|e| {
eprintln!(
"fs::set_permissions({}, {:?}) occurs error: {}",
path.as_path().display(),
mode,
e.description()
)
})
.ok();
}
}
}
Ok(())
}
#[derive(Debug)]
enum ZipCSError {
IO(std::io::Error),
ZIP(ZipError),
Desc(String),
}
impl std::error::Error for ZipCSError {
fn description(&self) -> &str {
match *self {
ZipCSError::IO(ref e) => e.description(),
ZipCSError::ZIP(ref e) => e.description(),
ZipCSError::Desc(ref e) => e.as_str(),
}
}
}
use std::fmt;
use std::fmt::Formatter;
impl fmt::Display for ZipCSError {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
}
}
impl From<std::io::Error> for ZipCSError {
fn from(e: std::io::Error) -> Self {
ZipCSError::IO(e)
}
}
impl From<String> for ZipCSError {
fn from(e: String) -> Self {
ZipCSError::Desc(e)
}
}
impl<'a> From<&'a str> for ZipCSError {
fn from(e: &str) -> Self {
ZipCSError::Desc(e.to_owned())
}
}
impl From<ZipError> for ZipCSError {
fn
|
(e: ZipError) -> Self {
ZipCSError::ZIP(e)
}
}
|
from
|
utils.py
|
from functools import partial
from typing import List, Optional
from flair.data import Span
def _get_text_from_spans(text: str, spans: List[Span], tag: str) -> Optional[str]:
|
_get_type_from_spans = partial(_get_text_from_spans, tag="TYPE")
_get_brand_from_spans = partial(_get_text_from_spans, tag="BRAND")
_get_model_from_spans = partial(_get_text_from_spans, tag="MODEL")
_get_vendor_code_from_spans = partial(_get_text_from_spans, tag="ARTICLE")
|
for span in spans:
if span.tag == tag:
return text[span.start_pos : span.end_pos]
return None
|
clever-buffer-reader.js
|
const defaults = require('./defaults');
const CleverBuffer = require('./clever-buffer-common');
const ieee754ReadFn = require('ieee754').read,
ieee754Read = function (offset, isLE, mLen, nBytes) {
return ieee754ReadFn( /* buffer */ this, offset, isLE, mLen, nBytes);
};
/**
* @class
* @param {Buffer} buffer data buffer to read from
* @param {object} [options]
* @param {number} [options.offset=0]
 * @param {boolean} [options.bigEndian=false]
*/
class CleverBufferReader extends CleverBuffer {
constructor(buffer, options = {}) {
super(buffer, options);
}
/**
* Read a string
* @param {object} [options]
   * @param {number} [options.length] number of bytes to read (byte length ≠ char length depending on encoding).
   * When not specified, an empty string is returned.
   * @param {number} [options.offset] number of bytes to skip before starting to read the string.
   * Defaults to the current reader offset.
   * @param {string} [options.encoding=utf8] the character encoding of the string
* @returns {String} - the decoded string.
*/
AsString(options = {}) {
const offsetSpecified = (options.offset != null);
const {
length,
offset,
encoding
} = defaults(options, {
length: 0,
offset: this.offset,
encoding: 'utf8'
});
if (length === 0) {
return '';
}
const val = this.buffer.toString(encoding, offset, offset + length);
if (!offsetSpecified) {
this.offset += length;
}
return val;
}
/**
*
* @param {object} option see {@link AsString} but enforce `option.encoding = utf8`
*/
UTF8(option = {}) {
option.encoding = `utf8`;
return this.AsString(option);
}
Bytes(options = {}) {
const offsetSpecified = (options.offset != null);
const {
length,
offset
} = defaults(options, {
length: 0,
offset: this.offset
});
if (length === 0) {
return [];
}
const val = Array.prototype.slice.call(this.buffer, offset, offset + length);
if (!offsetSpecified) {
this.offset += length;
}
return val;
}
Float24_32(offset) {
return (this.bigEndian ? this.Float24_32BE : this.Float24_32LE).call(this, offset);
}
Float24_32BE(offset) {
return this._executeReadAndIncrement(4, ieee754Read, offset, false, 24, 4); // offset, isLE, mLen, nBytes
}
Float24_32LE(offset) {
return this._executeReadAndIncrement(4, ieee754Read, offset, true, 24, 4);
}
SFloat12_16(offset) {
return (this.bigEndian ? this.SFloat12_16BE : this.SFloat12_16LE).call(this, offset);
}
SFloat12_16BE(offset) {
return this._executeReadAndIncrement(2, ieee754Read, offset, false, 12, 2);
}
SFloat12_16LE(offset) {
return this._executeReadAndIncrement(2, ieee754Read, offset, true, 12, 2);
}
BigInt64(offset) {
return (this.bigEndian ? this.BigInt64BE : this.BigInt64LE).call(this, offset);
}
BigInt64BE(offset) {
return this._executeReadAndIncrement(8, Buffer.prototype.readBigInt64BE, offset);
}
|
BigUInt64(offset) {
return (this.bigEndian ? this.BigUInt64BE : this.BigUInt64LE).call(this, offset);
}
BigUInt64BE(offset) {
return this._executeReadAndIncrement(8, Buffer.prototype.readBigUInt64BE, offset);
}
BigUInt64LE(offset) {
return this._executeReadAndIncrement(8, Buffer.prototype.readBigUInt64LE, offset);
}
Double(offset) {
return (this.bigEndian ? this.DoubleBE : this.DoubleLE).call(this, offset);
}
DoubleBE(offset) {
    return this._executeReadAndIncrement(8, Buffer.prototype.readDoubleBE, offset);
}
DoubleLE(offset) {
    return this._executeReadAndIncrement(8, Buffer.prototype.readDoubleLE, offset);
}
Float(offset) {
return (this.bigEndian ? this.FloatBE : this.FloatLE).call(this, offset);
}
FloatBE(offset) {
return this._executeReadAndIncrement(4, Buffer.prototype.readFloatBE, offset);
}
FloatLE(offset) {
return this._executeReadAndIncrement(4, Buffer.prototype.readFloatLE, offset);
}
Int16(offset) {
return (this.bigEndian ? this.Int16BE : this.Int16LE).call(this, offset);
}
Int16BE(offset) {
return this._executeReadAndIncrement(2, Buffer.prototype.readInt16BE, offset);
}
Int16LE(offset) {
return this._executeReadAndIncrement(2, Buffer.prototype.readInt16LE, offset);
}
Int32(offset) {
return (this.bigEndian ? this.Int32BE : this.Int32LE).call(this, offset);
}
Int32BE(offset) {
return this._executeReadAndIncrement(4, Buffer.prototype.readInt32BE, offset);
}
Int32LE(offset) {
return this._executeReadAndIncrement(4, Buffer.prototype.readInt32LE, offset);
}
Int(offset, byteLength) {
return (this.bigEndian ? this.IntBE : this.IntLE).call(this, offset, byteLength);
}
IntBE(offset, byteLength) {
return this._executeReadAndIncrement(byteLength, Buffer.prototype.readIntBE, offset, byteLength);
}
IntLE(offset, byteLength) {
return this._executeReadAndIncrement(byteLength, Buffer.prototype.readIntLE, offset, byteLength);
}
Int8(offset) {
return this._executeReadAndIncrement(1, Buffer.prototype.readInt8, offset);
}
UInt16(offset) {
return (this.bigEndian ? this.UInt16BE : this.UInt16LE).call(this, offset);
}
UInt16BE(offset) {
return this._executeReadAndIncrement(2, Buffer.prototype.readUInt16BE, offset);
}
UInt16LE(offset) {
return this._executeReadAndIncrement(2, Buffer.prototype.readUInt16LE, offset);
}
UInt32(offset) {
return (this.bigEndian ? this.UInt32BE : this.UInt32LE).call(this, offset);
}
UInt32BE(offset) {
return this._executeReadAndIncrement(4, Buffer.prototype.readUInt32BE, offset);
}
UInt32LE(offset) {
return this._executeReadAndIncrement(4, Buffer.prototype.readUInt32LE, offset);
}
UInt8(offset) {
return this._executeReadAndIncrement(1, Buffer.prototype.readUInt8, offset);
}
UInt(offset, byteLength) {
return (this.bigEndian ? this.UIntBE : this.UIntLE).call(this, offset, byteLength);
}
UIntBE(offset, byteLength) {
return this._executeReadAndIncrement(byteLength, Buffer.prototype.readUIntBE, offset, byteLength);
}
UIntLE(offset, byteLength) {
return this._executeReadAndIncrement(byteLength, Buffer.prototype.readUIntLE, offset, byteLength);
}
}
// Let's build aliases of functions with lowercased names
Object.getOwnPropertyNames(CleverBufferReader.prototype)
.filter(name => !['constructor'].includes(name))
.forEach(name => CleverBufferReader.prototype[name.toLowerCase()] = CleverBufferReader.prototype[name]);
module.exports = CleverBufferReader;
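// Illustrative usage (a minimal sketch; the byte values are placeholders):
//
//     const CleverBufferReader = require('./clever-buffer-reader');
//     const reader = new CleverBufferReader(Buffer.from([0x01, 0x00, 0xff, 0xff]));
//     reader.UInt16();  // 1  (little-endian by default; advances the internal offset by 2)
//     reader.int16();   // -1 (lowercased aliases are generated above)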
|
BigInt64LE(offset) {
return this._executeReadAndIncrement(8, Buffer.prototype.readBigInt64LE, offset);
}
|
external_ocs.go
|
package storagecluster
import (
"context"
"encoding/json"
"fmt"
"time"
configv1 "github.com/openshift/api/config/v1"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
ocsv1 "github.com/red-hat-storage/ocs-operator/api/v1"
statusutil "github.com/red-hat-storage/ocs-operator/controllers/util"
externalClient "github.com/red-hat-storage/ocs-operator/services/provider/client"
)
var (
// externalOCSResources will hold the ExternalResources for storageclusters
	// ExternalResources can be accessed using the UID of a StorageCluster
externalOCSResources = map[types.UID][]ExternalResource{}
)
const (
// grpcCallNames
OnboardConsumer = "OnboardConsumer"
OffboardConsumer = "OffboardConsumer"
UpdateCapacity = "UpdateCapacity"
GetStorageConfig = "GetStorageConfig"
AcknowledgeOnboarding = "AcknowledgeOnboarding"
)
// IsOCSConsumerMode returns true if it is ocs to ocs ExternalStorage consumer cluster
func IsOCSConsumerMode(instance *ocsv1.StorageCluster) bool {
return instance.Spec.ExternalStorage.Enable && instance.Spec.ExternalStorage.StorageProviderKind == ocsv1.KindOCS
}
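// Illustrative usage (a minimal sketch; the StorageCluster literal is a placeholder):
//
//	sc := &ocsv1.StorageCluster{}
//	sc.Spec.ExternalStorage.Enable = true
//	sc.Spec.ExternalStorage.StorageProviderKind = ocsv1.KindOCS
//	_ = IsOCSConsumerMode(sc) // true, so the reconciler runs in consumer mode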
// newExternalClusterClient returns the *externalClient.OCSProviderClient
func (r *StorageClusterReconciler) newExternalClusterClient(instance *ocsv1.StorageCluster) (*externalClient.OCSProviderClient, error) {
consumerClient, err := externalClient.NewProviderClient(
context.Background(), instance.Spec.ExternalStorage.StorageProviderEndpoint, time.Second*10)
if err != nil {
return nil, err
}
return consumerClient, nil
}
// onboardConsumer makes an API call to the external storage provider cluster for onboarding
func (r *StorageClusterReconciler) onboardConsumer(instance *ocsv1.StorageCluster, externalClusterClient *externalClient.OCSProviderClient) (reconcile.Result, error) {
clusterVersion := &configv1.ClusterVersion{}
err := r.Client.Get(context.Background(), types.NamespacedName{Name: "version"}, clusterVersion)
if err != nil {
r.Log.Error(err, "External-OCS:Failed to get the clusterVersion version of the OCP cluster")
return reconcile.Result{}, err
}
name := fmt.Sprintf("storageconsumer-%s", clusterVersion.Spec.ClusterID)
response, err := externalClusterClient.OnboardConsumer(
context.Background(), instance.Spec.ExternalStorage.OnboardingTicket, name,
instance.Spec.ExternalStorage.RequestedCapacity.String())
if err != nil {
if s, ok := status.FromError(err); ok {
r.logGrpcErrorAndReportEvent(instance, OnboardConsumer, err, s.Code())
}
return reconcile.Result{}, err
}
if response.StorageConsumerUUID == "" || response.GrantedCapacity == "" {
err = fmt.Errorf("External-OCS:OnboardConsumer:response is empty")
r.Log.Error(err, "empty response")
return reconcile.Result{}, err
}
instance.Status.ExternalStorage.ConsumerID = response.StorageConsumerUUID
instance.Status.ExternalStorage.GrantedCapacity = resource.MustParse(response.GrantedCapacity)
instance.Status.Phase = statusutil.PhaseOnboarding
	r.Log.Info("External-OCS:Onboarding succeeded, will save status.")
return reconcile.Result{Requeue: true}, nil
}
func (r *StorageClusterReconciler) acknowledgeOnboarding(instance *ocsv1.StorageCluster, externalClusterClient *externalClient.OCSProviderClient) (reconcile.Result, error) {
_, err := externalClusterClient.AcknowledgeOnboarding(context.Background(), instance.Status.ExternalStorage.ConsumerID)
if err != nil {
if s, ok := status.FromError(err); ok {
r.logGrpcErrorAndReportEvent(instance, AcknowledgeOnboarding, err, s.Code())
}
r.Log.Error(err, "External-OCS:Failed to acknowledge onboarding.")
return reconcile.Result{}, err
}
	// claims should be created only once and should not be created/updated again if the user deletes/updates them.
err = r.createDefaultStorageClassClaims(instance)
if err != nil {
return reconcile.Result{}, err
}
instance.Status.Phase = statusutil.PhaseProgressing
r.Log.Info("External-OCS:Onboarding is acknowledged successfully.")
return reconcile.Result{Requeue: true}, nil
}
// offboardConsumer makes an API call to the external storage provider cluster for offboarding
func (r *StorageClusterReconciler) offboardConsumer(instance *ocsv1.StorageCluster, externalClusterClient *externalClient.OCSProviderClient) (reconcile.Result, error) {
_, err := externalClusterClient.OffboardConsumer(context.Background(), instance.Status.ExternalStorage.ConsumerID)
if err != nil {
if s, ok := status.FromError(err); ok {
r.logGrpcErrorAndReportEvent(instance, OffboardConsumer, err, s.Code())
}
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// updateConsumerCapacity makes an API call to the external storage provider cluster to update the capacity
func (r *StorageClusterReconciler) updateConsumerCapacity(instance *ocsv1.StorageCluster, externalClusterClient *externalClient.OCSProviderClient) (reconcile.Result, error) {
response, err := externalClusterClient.UpdateCapacity(
context.Background(),
instance.Status.ExternalStorage.ConsumerID,
instance.Spec.ExternalStorage.RequestedCapacity.String())
if err != nil {
if s, ok := status.FromError(err); ok {
r.logGrpcErrorAndReportEvent(instance, UpdateCapacity, err, s.Code())
}
return reconcile.Result{}, err
}
responseQuantity, err := resource.ParseQuantity(response.GrantedCapacity)
if err != nil {
r.Log.Error(err, "Failed to parse GrantedCapacity from UpdateCapacity response.", "GrantedCapacity", response.GrantedCapacity)
return reconcile.Result{}, err
}
if !instance.Spec.ExternalStorage.RequestedCapacity.Equal(responseQuantity) {
		klog.Warningf("GrantedCapacity %s is not equal to the RequestedCapacity %s in the UpdateCapacity response.",
			response.GrantedCapacity, instance.Spec.ExternalStorage.RequestedCapacity.String())
}
instance.Status.ExternalStorage.GrantedCapacity = responseQuantity
return reconcile.Result{}, nil
}
// getExternalConfigFromProvider makes an API call to the external storage provider cluster for json blob
func (r *StorageClusterReconciler) getExternalConfigFromProvider(
instance *ocsv1.StorageCluster, externalClusterClient *externalClient.OCSProviderClient) ([]ExternalResource, reconcile.Result, error) {
response, err := externalClusterClient.GetStorageConfig(context.Background(), instance.Status.ExternalStorage.ConsumerID)
if err != nil {
if s, ok := status.FromError(err); ok {
r.logGrpcErrorAndReportEvent(instance, GetStorageConfig, err, s.Code())
// storage consumer is not ready yet, requeue after some time
if s.Code() == codes.Unavailable {
return nil, reconcile.Result{RequeueAfter: time.Second * 5}, nil
}
}
return nil, reconcile.Result{}, err
}
var externalResources []ExternalResource
for _, eResource := range response.ExternalResource {
data := map[string]string{}
err = json.Unmarshal(eResource.Data, &data)
if err != nil {
r.Log.Error(err, "Failed to Unmarshal response of GetStorageConfig", "Kind", eResource.Kind, "Name", eResource.Name, "Data", eResource.Data)
return nil, reconcile.Result{}, err
}
externalResources = append(externalResources, ExternalResource{
Kind: eResource.Kind,
Data: data,
Name: eResource.Name,
})
}
return externalResources, reconcile.Result{}, nil
}
func (r *StorageClusterReconciler) logGrpcErrorAndReportEvent(instance *ocsv1.StorageCluster, grpcCallName string, err error, errCode codes.Code) {
|
if errCode == codes.InvalidArgument {
msg = "Token is invalid. Verify the token again or contact the provider admin"
eventReason = "TokenInvalid"
eventType = corev1.EventTypeWarning
} else if errCode == codes.AlreadyExists {
msg = "Token is already used. Contact provider admin for a new token"
eventReason = "TokenAlreadyUsed"
eventType = corev1.EventTypeWarning
}
} else if grpcCallName == AcknowledgeOnboarding {
if errCode == codes.NotFound {
msg = "StorageConsumer not found. Contact the provider admin"
eventReason = "NotFound"
eventType = corev1.EventTypeWarning
}
} else if grpcCallName == OffboardConsumer {
if errCode == codes.InvalidArgument {
msg = "StorageConsumer UID is not valid. Contact the provider admin"
eventReason = "UIDInvalid"
eventType = corev1.EventTypeWarning
}
} else if grpcCallName == UpdateCapacity {
if errCode == codes.InvalidArgument {
msg = "StorageConsumer UID or requested capacity is not valid. Contact the provider admin"
eventReason = "UIDorCapacityInvalid"
eventType = corev1.EventTypeWarning
} else if errCode == codes.NotFound {
msg = "StorageConsumer UID not found. Contact the provider admin"
eventReason = "UIDNotFound"
eventType = corev1.EventTypeWarning
}
} else if grpcCallName == GetStorageConfig {
if errCode == codes.InvalidArgument {
msg = "StorageConsumer UID is not valid. Contact the provider admin"
eventReason = "UIDInvalid"
eventType = corev1.EventTypeWarning
} else if errCode == codes.NotFound {
msg = "StorageConsumer UID not found. Contact the provider admin"
eventReason = "UIDNotFound"
eventType = corev1.EventTypeWarning
} else if errCode == codes.Unavailable {
			msg = "StorageConsumer is not ready yet. Will requeue after 5 seconds"
eventReason = "NotReady"
eventType = corev1.EventTypeNormal
}
}
if msg != "" {
r.Log.Error(err, "External-OCS:"+grpcCallName+":"+msg)
r.recorder.ReportIfNotPresent(instance, eventType, eventReason, msg)
}
}
|
var msg, eventReason, eventType string
if grpcCallName == OnboardConsumer {
|
update.go
|
/*
Copyright IBM Corp. 2017 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package update
import (
"bytes"
"fmt"
"github.com/golang/protobuf/proto"
cb "github.com/Matrix-Zhang/fabric-gm/protos/common"
)
func computePoliciesMapUpdate(original, updated map[string]*cb.ConfigPolicy) (readSet, writeSet, sameSet map[string]*cb.ConfigPolicy, updatedMembers bool) {
readSet = make(map[string]*cb.ConfigPolicy)
writeSet = make(map[string]*cb.ConfigPolicy)
// All modified config goes into the read/write sets, but in case the map membership changes, we retain the
// config which was the same to add to the read/write sets
sameSet = make(map[string]*cb.ConfigPolicy)
for policyName, originalPolicy := range original {
updatedPolicy, ok := updated[policyName]
if !ok {
updatedMembers = true
continue
}
if originalPolicy.ModPolicy == updatedPolicy.ModPolicy && proto.Equal(originalPolicy.Policy, updatedPolicy.Policy) {
sameSet[policyName] = &cb.ConfigPolicy{
Version: originalPolicy.Version,
}
continue
}
writeSet[policyName] = &cb.ConfigPolicy{
Version: originalPolicy.Version + 1,
ModPolicy: updatedPolicy.ModPolicy,
Policy: updatedPolicy.Policy,
}
}
for policyName, updatedPolicy := range updated {
if _, ok := original[policyName]; ok {
// If the updatedPolicy is in the original set of policies, it was already handled
continue
}
updatedMembers = true
writeSet[policyName] = &cb.ConfigPolicy{
Version: 0,
ModPolicy: updatedPolicy.ModPolicy,
Policy: updatedPolicy.Policy,
}
}
return
}
func computeValuesMapUpdate(original, updated map[string]*cb.ConfigValue) (readSet, writeSet, sameSet map[string]*cb.ConfigValue, updatedMembers bool) {
readSet = make(map[string]*cb.ConfigValue)
writeSet = make(map[string]*cb.ConfigValue)
// All modified config goes into the read/write sets, but in case the map membership changes, we retain the
// config which was the same to add to the read/write sets
sameSet = make(map[string]*cb.ConfigValue)
for valueName, originalValue := range original {
updatedValue, ok := updated[valueName]
if !ok {
updatedMembers = true
continue
}
if originalValue.ModPolicy == updatedValue.ModPolicy && bytes.Equal(originalValue.Value, updatedValue.Value) {
sameSet[valueName] = &cb.ConfigValue{
Version: originalValue.Version,
}
continue
}
writeSet[valueName] = &cb.ConfigValue{
Version: originalValue.Version + 1,
ModPolicy: updatedValue.ModPolicy,
Value: updatedValue.Value,
}
}
|
for valueName, updatedValue := range updated {
if _, ok := original[valueName]; ok {
// If the updatedValue is in the original set of values, it was already handled
continue
}
updatedMembers = true
writeSet[valueName] = &cb.ConfigValue{
Version: 0,
ModPolicy: updatedValue.ModPolicy,
Value: updatedValue.Value,
}
}
return
}
func computeGroupsMapUpdate(original, updated map[string]*cb.ConfigGroup) (readSet, writeSet, sameSet map[string]*cb.ConfigGroup, updatedMembers bool) {
readSet = make(map[string]*cb.ConfigGroup)
writeSet = make(map[string]*cb.ConfigGroup)
// All modified config goes into the read/write sets, but in case the map membership changes, we retain the
// config which was the same to add to the read/write sets
sameSet = make(map[string]*cb.ConfigGroup)
for groupName, originalGroup := range original {
updatedGroup, ok := updated[groupName]
if !ok {
updatedMembers = true
continue
}
groupReadSet, groupWriteSet, groupUpdated := computeGroupUpdate(originalGroup, updatedGroup)
if !groupUpdated {
sameSet[groupName] = groupReadSet
continue
}
readSet[groupName] = groupReadSet
writeSet[groupName] = groupWriteSet
}
for groupName, updatedGroup := range updated {
if _, ok := original[groupName]; ok {
// If the updatedGroup is in the original set of groups, it was already handled
continue
}
updatedMembers = true
_, groupWriteSet, _ := computeGroupUpdate(cb.NewConfigGroup(), updatedGroup)
writeSet[groupName] = &cb.ConfigGroup{
Version: 0,
ModPolicy: updatedGroup.ModPolicy,
Policies: groupWriteSet.Policies,
Values: groupWriteSet.Values,
Groups: groupWriteSet.Groups,
}
}
return
}
func computeGroupUpdate(original, updated *cb.ConfigGroup) (readSet, writeSet *cb.ConfigGroup, updatedGroup bool) {
readSetPolicies, writeSetPolicies, sameSetPolicies, policiesMembersUpdated := computePoliciesMapUpdate(original.Policies, updated.Policies)
readSetValues, writeSetValues, sameSetValues, valuesMembersUpdated := computeValuesMapUpdate(original.Values, updated.Values)
readSetGroups, writeSetGroups, sameSetGroups, groupsMembersUpdated := computeGroupsMapUpdate(original.Groups, updated.Groups)
	// If the original group is 'Equal' to the updated group (none of the members nor the mod policy changed)
if !(policiesMembersUpdated || valuesMembersUpdated || groupsMembersUpdated || original.ModPolicy != updated.ModPolicy) {
// If there were no modified entries in any of the policies/values/groups maps
if len(readSetPolicies) == 0 &&
len(writeSetPolicies) == 0 &&
len(readSetValues) == 0 &&
len(writeSetValues) == 0 &&
len(readSetGroups) == 0 &&
len(writeSetGroups) == 0 {
return &cb.ConfigGroup{
Version: original.Version,
}, &cb.ConfigGroup{
Version: original.Version,
}, false
}
return &cb.ConfigGroup{
Version: original.Version,
Policies: readSetPolicies,
Values: readSetValues,
Groups: readSetGroups,
}, &cb.ConfigGroup{
Version: original.Version,
Policies: writeSetPolicies,
Values: writeSetValues,
Groups: writeSetGroups,
}, true
}
for k, samePolicy := range sameSetPolicies {
readSetPolicies[k] = samePolicy
writeSetPolicies[k] = samePolicy
}
for k, sameValue := range sameSetValues {
readSetValues[k] = sameValue
writeSetValues[k] = sameValue
}
for k, sameGroup := range sameSetGroups {
readSetGroups[k] = sameGroup
writeSetGroups[k] = sameGroup
}
return &cb.ConfigGroup{
Version: original.Version,
Policies: readSetPolicies,
Values: readSetValues,
Groups: readSetGroups,
}, &cb.ConfigGroup{
Version: original.Version + 1,
Policies: writeSetPolicies,
Values: writeSetValues,
Groups: writeSetGroups,
ModPolicy: updated.ModPolicy,
}, true
}
func Compute(original, updated *cb.Config) (*cb.ConfigUpdate, error) {
if original.ChannelGroup == nil {
return nil, fmt.Errorf("no channel group included for original config")
}
if updated.ChannelGroup == nil {
return nil, fmt.Errorf("no channel group included for updated config")
}
readSet, writeSet, groupUpdated := computeGroupUpdate(original.ChannelGroup, updated.ChannelGroup)
if !groupUpdated {
return nil, fmt.Errorf("no differences detected between original and updated config")
}
return &cb.ConfigUpdate{
ReadSet: readSet,
WriteSet: writeSet,
}, nil
}
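// Illustrative usage (a minimal sketch; originalConfig and updatedConfig are
// placeholders for the current channel config and an edited deep copy of it):
//
//	configUpdate, err := Compute(originalConfig, updatedConfig)
//	if err != nil {
//		// either a config had no ChannelGroup, or no differences were detected
//	}
//	_ = configUpdate // would then be wrapped and signed before submission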
| |
general.ts
|
import { join } from "path";
import {
apply,
url,
move,
template,
mergeWith,
TemplateOptions,
branchAndMerge,
noop,
SchematicsException,
Tree,
Rule
} from "@angular-devkit/schematics";
// import { configPath, CliConfig } from '@schematics/angular/utility/config';
import { errorXplat } from "./errors";
// import * as os from 'os';
import * as fs from "fs";
import * as ts from "typescript";
export const supportedPlatforms = ["web", "nativescript", "ionic"];
export interface ITargetPlatforms {
web?: boolean;
nativescript?: boolean;
ionic?: boolean;
ssr?: boolean;
}
export const defaultPlatforms = "web,nativescript";
export type IDevMode = "web" | "nativescript" | "ionic" | "fullstack";
export interface NodeDependency {
name: string;
version: string;
type: "dependency" | "devDependency";
}
let npmScope: string;
let prefix: string;
export function getNpmScope() {
return npmScope;
}
export function getPrefix() {
return prefix;
}
export function getFileContent(tree: Tree, path: string) {
const file = tree.read(path) || "";
if (!file) {
throw new SchematicsException(`${path} could not be read.`);
}
return file.toString("utf-8");
}
export function serializeJson(json: any): string {
return `${JSON.stringify(json, null, 2)}\n`;
}
export function getJsonFromFile(tree: Tree, path: string) {
return JSON.parse(getFileContent(tree, path));
}
export function updateJsonFile(tree: Tree, path: string, jsonData: any) {
try {
// if (tree.exists(path)) {
tree.overwrite(path, JSON.stringify(jsonData, null, 2));
// }
return tree;
} catch (err) {
// console.warn(err);
throw new SchematicsException(`${path}: ${err}`);
}
}
export function updateFile(tree: Tree, path: string, content: string) {
try {
// if (tree.exists(path)) {
tree.overwrite(path, content);
// }
return tree;
} catch (err) {
// console.warn(err);
throw new SchematicsException(`${path}: ${err}`);
}
}
export function createOrUpdate(host: Tree, path: string, content: string) {
if (host.exists(path)) {
host.overwrite(path, content);
} else {
host.create(path, content);
}
}
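// Illustrative usage inside a schematic Rule (a minimal sketch; the path and
// contents below are placeholders):
//
//     const exampleRule: Rule = (tree: Tree) => {
//       createOrUpdate(tree, "tools/example.txt", "generated content\n");
//       return tree;
//     };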
export function getNxWorkspaceConfig(tree: Tree): any {
const nxConfig = getJsonFromFile(tree, "nx.json"); //, 'Project must be an angular cli generated project, missing angular.json');
const hasWorkspaceDirs = tree.exists("apps") && tree.exists("libs");
// determine if Nx workspace
if (nxConfig) {
// if (ngConfig.$schema.indexOf('@nrwl/schematics') > -1 || ngConfig.$schema.indexOf('@nstudio/schematics') > -1 || hasWorkspaceDirs) {
// return ngConfig;
// }
if (nxConfig.npmScope || hasWorkspaceDirs) {
return nxConfig;
}
}
throw new SchematicsException(
"@nstudio/schematics must be used inside an Nx workspace. Create a workspace first. https://nrwl.io/nx/guide-nx-workspace"
);
}
export const copy = (tree: Tree, from: string, to: string) => {
const file = tree.get(from);
if (!file) {
throw new SchematicsException(`File ${from} does not exist!`);
}
tree.create(to, file.content);
};
const setDependency = (
dependenciesMap: { [key: string]: string },
{ name, version }: NodeDependency
) => Object.assign(dependenciesMap, { [name]: version });
export function prerun(prefixArg?: string, init?: boolean) {
return (tree: Tree) => {
const nxJson = getNxWorkspaceConfig(tree);
if (nxJson) {
npmScope = nxJson.npmScope || "workspace";
}
const packageJson = getJsonFromFile(tree, "package.json");
if (packageJson) {
prefix = packageJson.xplat ? packageJson.xplat.prefix : "";
if (prefixArg) {
if (prefix) {
// console.warn(getPrefixWarning(prefix));
} else if (init) {
// initializing for first time
prefix = prefixArg;
}
}
if (!prefix && !init) {
        // if no prefix was found and we're not initializing, the user needs to generate xplat first
throw new SchematicsException(errorXplat);
}
}
return tree;
};
}
export function
|
(
tree: Tree,
targetPlatforms: ITargetPlatforms,
packageJson?: any
) {
const packagePath = "package.json";
if (!packageJson) {
packageJson = getJsonFromFile(tree, packagePath);
}
if (packageJson) {
const deps: NodeDependency[] = [];
let dep: NodeDependency = {
name: "@ngx-translate/core",
version: "~10.0.1",
type: "dependency"
};
deps.push(dep);
dep = {
name: "@ngx-translate/http-loader",
version: "~3.0.1",
type: "dependency"
};
deps.push(dep);
dep = {
name: `@${getNpmScope()}/scss`,
version: "file:libs/scss",
type: "dependency"
};
deps.push(dep);
dep = {
name: "reflect-metadata",
version: "^0.1.12",
type: "dependency"
};
deps.push(dep);
if (targetPlatforms.nativescript) {
dep = {
name: "nativescript-angular",
version: "~6.1.0",
type: "dependency"
};
deps.push(dep);
dep = {
name: "nativescript-ngx-fonticon",
version: "^4.2.0",
type: "dependency"
};
deps.push(dep);
dep = {
name: "nativescript-theme-core",
version: "^1.0.4",
type: "dependency"
};
deps.push(dep);
// convenience for now since some {N} plugins may not support rxjs 6.x fully yet
// remove in future
dep = {
name: "rxjs-compat",
version: "^6.2.2",
type: "dependency"
};
deps.push(dep);
dep = {
name: "tns-core-modules",
version: "~4.2.0",
type: "dependency"
};
deps.push(dep);
dep = {
name: "tns-platform-declarations",
version: "~4.2.0",
type: "devDependency"
};
deps.push(dep);
}
if (targetPlatforms.ionic) {
dep = {
name: "@ionic-native/core",
version: "^5.0.0-beta.15",
type: "dependency"
};
deps.push(dep);
dep = {
name: "@ionic-native/splash-screen",
version: "^5.0.0-beta.14",
type: "dependency"
};
deps.push(dep);
dep = {
name: "@ionic-native/status-bar",
version: "^5.0.0-beta.14",
type: "dependency"
};
deps.push(dep);
dep = {
name: "@ionic/angular",
version: "^4.0.0-beta.3",
type: "dependency"
};
deps.push(dep);
dep = {
name: "@ionic/ng-toolkit",
version: "~1.0.0",
type: "dependency"
};
deps.push(dep);
dep = {
name: "@ionic/schematics-angular",
version: "~1.0.0",
type: "dependency"
};
deps.push(dep);
dep = {
name: `@${getNpmScope()}/web`,
version: "file:xplat/web",
type: "dependency"
};
deps.push(dep);
}
const dependenciesMap = Object.assign({}, packageJson.dependencies);
const devDependenciesMap = Object.assign({}, packageJson.devDependencies);
for (const dependency of deps) {
if (dependency.type === "dependency") {
packageJson.dependencies = setDependency(dependenciesMap, dependency);
} else {
packageJson.devDependencies = setDependency(
devDependenciesMap,
dependency
);
}
}
return updateJsonFile(tree, packagePath, packageJson);
}
return tree;
}
export function updatePackageForXplat(
tree: Tree,
targetPlatforms: ITargetPlatforms
) {
const path = "package.json";
const packageJson = getJsonFromFile(tree, path);
if (packageJson) {
// TODO: track this in angular.json (or xplat.json) in future
// doing so would involve customizing Nx schema.json which unsure about right now
// Ideally would store this as 'project': { 'prefix': prefix } (or add 'xplat' key there) for entire workspace/xplat setup, however that's unsupported in schema out of the box
// prefix is important because shared code is setup with a prefix to begin with which should be known and used for all subsequent apps which are generated
packageJson.xplat = { prefix };
// core set of supported root dependencies (out of the box)
// console.log('updatePackageForXplat:', JSON.stringify(packageJson));
return addRootDeps(tree, targetPlatforms, packageJson);
}
return tree;
}
export function updatePackageForNgrx(
tree: Tree,
packagePath: string = "package.json"
) {
if (tree.exists(packagePath)) {
const packageJson = getJsonFromFile(tree, packagePath);
if (packageJson) {
// sync version with what user has store set at
let rootNgrxVersion = packageJson.dependencies["@ngrx/store"];
const deps: NodeDependency[] = [];
if (packagePath.indexOf("apps") === 0) {
// update project deps
let dep: NodeDependency = {
name: "@ngrx/entity",
version: "file:../../node_modules/@ngrx/entity",
type: "dependency"
};
deps.push(dep);
dep = {
name: "ngrx-store-freeze",
version: "file:../../node_modules/ngrx-store-freeze",
type: "dependency"
};
deps.push(dep);
dep = {
name: "@nrwl/nx",
version: "file:../../node_modules/@nrwl/nx",
type: "dependency"
};
deps.push(dep);
} else {
// update root deps
let dep: NodeDependency = {
name: "@ngrx/entity",
version: rootNgrxVersion,
type: "dependency"
};
deps.push(dep);
if (!packageJson.dependencies["@nrwl/nx"]) {
dep = {
name: "@nrwl/nx",
version: "~6.1.0",
type: "dependency"
};
deps.push(dep);
}
}
const dependenciesMap = Object.assign({}, packageJson.dependencies);
const devDependenciesMap = Object.assign({}, packageJson.devDependencies);
for (const dependency of deps) {
if (dependency.type === "dependency") {
packageJson.dependencies = setDependency(dependenciesMap, dependency);
} else {
packageJson.devDependencies = setDependency(
devDependenciesMap,
dependency
);
}
}
return updateJsonFile(tree, packagePath, packageJson);
}
}
return tree;
}
export function updateTsConfig(
tree: Tree,
callback: (data: any) => void,
targetSuffix: string = ""
) {
const tsConfigPath = `tsconfig${targetSuffix ? "." + targetSuffix : ""}.json`;
const tsConfig = getJsonFromFile(tree, tsConfigPath);
callback(tsConfig);
return updateJsonFile(tree, tsConfigPath, tsConfig);
}
export function updatePackageScripts(tree: Tree, scripts: any) {
const path = "package.json";
const packageJson = getJsonFromFile(tree, path);
const scriptsMap = Object.assign({}, packageJson.scripts);
packageJson.scripts = Object.assign(scriptsMap, scripts);
return updateJsonFile(tree, path, packageJson);
}
export function updateAngularProjects(tree: Tree, projects: any) {
const path = "angular.json";
const angularJson = getJsonFromFile(tree, path);
const projectsMap = Object.assign({}, angularJson.projects);
angularJson.projects = Object.assign(projectsMap, projects);
return updateJsonFile(tree, path, angularJson);
}
export function updateNxProjects(tree: Tree, projects: any) {
const path = "nx.json";
const nxJson = getJsonFromFile(tree, path);
const projectsMap = Object.assign({}, nxJson.projects);
nxJson.projects = Object.assign(projectsMap, projects);
return updateJsonFile(tree, path, nxJson);
}
export function updateGitIgnore() {
return (tree: Tree) => {
const gitIgnorePath = ".gitignore";
let gitIgnore = getFileContent(tree, gitIgnorePath);
if (gitIgnore) {
if (gitIgnore.indexOf("libs/**/*.js") === -1) {
gitIgnore += `
# nativescript
hooks\n
# libs
libs/**/*.js
libs/**/*.map
libs/**/*.d.ts
libs/**/*.metadata.json
libs/**/*.ngfactory.ts
libs/**/*.ngsummary.json
`;
}
if (gitIgnore.indexOf("xplat/**/*.js") === -1) {
gitIgnore += `
# xplat
xplat/**/*.js
xplat/**/*.map
xplat/**/*.d.ts
xplat/**/*.metadata.json
xplat/**/*.ngfactory.ts
xplat/**/*.ngsummary.json
`;
}
}
return updateFile(tree, gitIgnorePath, gitIgnore);
};
}
export function addReferences() {
return (tree: Tree) => {
const filename = "references.d.ts";
if (!tree.exists(filename)) {
// add references.d.ts
tree.create(
filename,
`/// <reference path="./node_modules/tns-platform-declarations/ios.d.ts" />
/// <reference path="./node_modules/tns-platform-declarations/android.d.ts" />
`
);
}
return tree;
};
}
// export function persistPrefix(prefix: string) {
// return (tree: Tree) => {
// const nxConfig = getNxWorkspaceConfig(tree);
// ngConfig.defaults.prefix = prefix;
// return updateJsonFile(tree, 'angular.json', ngConfig);
// };
// }
export function getPrefixWarning(prefix: string) {
return `A default prefix had already been set for your workspace: ${prefix}. Since xplat had already been configured we will be using '${prefix}' as the prefix.`;
}
export const addTestingFiles = (
tree: Tree,
options: any,
relativePath: string = "./"
) => {
if (tree.exists(`testing/karma.conf.js`)) {
return noop();
}
return branchAndMerge(
mergeWith(
apply(url(`${relativePath}_testing_files`), [
template(<TemplateOptions>{
...(options as any),
npmScope: getNpmScope(),
prefix: getPrefix(),
dot: ".",
utils: stringUtils
}),
move("testing")
])
)
);
};
export function updateIDESettings(
tree: Tree,
platformArg: string,
devMode?: IDevMode
) {
try {
const cwd = process.cwd();
// console.log('workspace dir:', process.cwd());
// const dirName = cwd.split('/').slice(-1);
const userUpdates: any = {};
if (!devMode || devMode === "fullstack") {
// show all
for (const p of supportedPlatforms) {
userUpdates[`**/apps/${p}-*`] = false;
userUpdates[`**/xplat/${p}`] = false;
}
} else if (platformArg) {
const platforms = platformArg.split(",");
// switch on/off platforms
for (const p of supportedPlatforms) {
const excluded = platforms.includes(p) ? false : true;
userUpdates[`**/apps/${p}-*`] = excluded;
userUpdates[`**/xplat/${p}`] = excluded;
}
}
// VS Code support
// const homedir = os.homedir();
// console.log('os.homedir():',homedir);
let userSettingsPath =
process.platform == "darwin"
? process.env.HOME +
`/Library/Application Support/Code/User/settings.json`
        : "/var/local/Code/User/settings.json";
const windowsHome = process.env.APPDATA;
if (windowsHome) {
userSettingsPath = join(windowsHome, "Code/User/settings.json");
}
// console.log('userSettingsPath:',userSettingsPath);
const isVsCode = fs.existsSync(userSettingsPath);
// console.log('isVsCode:',isVsCode);
if (isVsCode) {
const userSettings = fs.readFileSync(userSettingsPath, "UTF-8");
if (userSettings) {
const userSettingsJson = JSON.parse(userSettings);
let exclude = userSettingsJson["files.exclude"];
if (!exclude) {
exclude = {};
}
userSettingsJson["files.exclude"] = Object.assign(exclude, userUpdates);
let searchExclude = userSettingsJson["search.exclude"];
if (!searchExclude) {
searchExclude = {};
}
userSettingsJson["search.exclude"] = Object.assign(
searchExclude,
userUpdates
);
fs.writeFileSync(
userSettingsPath,
JSON.stringify(userSettingsJson, null, 2)
);
}
}
if (!devMode) {
// only when not specifying a dev mode
const workspaceUpdates: any = {
"**/node_modules": true,
"**/hooks": true,
"**/apps/nativescript-*/app/package.json": false,
"**/apps/nativescript-*/hooks": true,
"**/apps/nativescript-*/platforms": true,
"**/apps/nativescript-*/report": true,
"**/apps/nativescript-*/app/**/*.js": {
when: "$(basename).ts"
},
"**/apps/nativescript-*/app/**/*.d.ts": {
when: "$(basename).ts"
},
"**/apps/nativescript-*/app/**/*.css": {
when: "$(basename).scss"
},
"**/libs/**/*.js": {
when: "$(basename).ts"
},
"**/libs/**/*.d.ts": {
when: "$(basename).ts"
},
"**/xplat/**/*.js": {
when: "$(basename).ts"
},
"**/xplat/**/*.d.ts": {
when: "$(basename).ts"
}
};
const workspaceSettingsPath = join(cwd, ".vscode/settings.json");
// console.log('workspaceSettingsPath:',workspaceSettingsPath);
let workspaceSettingsJson: any = {};
if (fs.existsSync(workspaceSettingsPath)) {
const workspaceSettings = fs.readFileSync(
workspaceSettingsPath,
"UTF-8"
);
workspaceSettingsJson = JSON.parse(workspaceSettings);
const exclude = workspaceSettingsJson["files.exclude"];
workspaceSettingsJson["files.exclude"] = Object.assign(
exclude,
workspaceUpdates
);
} else {
// console.log('creating workspace settings...');
fs.mkdirSync(".vscode");
workspaceSettingsJson["files.exclude"] = workspaceUpdates;
}
fs.writeFileSync(
workspaceSettingsPath,
JSON.stringify(workspaceSettingsJson, null, 2)
);
}
} catch (err) {
// console.warn('IDE Settings could not be updated at this time:', err);
}
return tree;
}
/**
* Sanitizes a given string by removing all characters that
* are not letters or digits.
*
```javascript
sanitize('nativescript-app'); // 'nativescriptapp'
  sanitize('action_name'); // 'actionname'
sanitize('css-class-name'); // 'cssclassname'
sanitize('my favorite items'); // 'myfavoriteitems'
```
@method sanitize
@param {String} str The string to sanitize.
@return {String} the sanitized string.
*/
export const sanitize = (str: string): string =>
str
.split("")
.filter(char => /[a-zA-Z0-9]/.test(char))
.join("");
/**
* Cannot read property 'classify' of undefined
TypeError: Cannot read property 'classify' of undefined
at Object.<anonymous> (/Users/nathan/Documents/github/nstudio/tmp/pnp-client/node_modules/@nstudio/schematics/src/utils.js:413:35)
*/
// for some reason angular-devkit/core is not resolving
// including code here manually
// const STRING_DASHERIZE_REGEXP = (/[ _]/g);
// const STRING_DECAMELIZE_REGEXP = (/([a-z\d])([A-Z])/g);
const STRING_CAMELIZE_REGEXP = /(-|_|\.|\s)+(.)?/g;
// const STRING_UNDERSCORE_REGEXP_1 = (/([a-z\d])([A-Z]+)/g);
// const STRING_UNDERSCORE_REGEXP_2 = (/-|\s+/g);
function camelize(str) {
return str
.replace(STRING_CAMELIZE_REGEXP, (_match, _separator, chr) => {
return chr ? chr.toUpperCase() : "";
})
.replace(/^([A-Z])/, match => match.toLowerCase());
}
function capitalize(str) {
return str.charAt(0).toUpperCase() + str.substr(1);
}
function classify(str) {
return str
.split(".")
.map(part => capitalize(camelize(part)))
.join(".");
}
export const stringUtils = { sanitize, classify, capitalize, camelize };
export const toComponentClassName = (name: string) =>
`${classify(name)}Component`;
export const toNgModuleClassName = (name: string) => `${classify(name)}Module`;
|
addRootDeps
|
config.go
|
package config
import (
"errors"
"sync"
"time"
"github.com/micro/go-micro/client"
"github.com/micro/go-os/config"
proto "github.com/micro/config-srv/proto/config"
"golang.org/x/net/context"
)
var (
	// We need a path splitter since it's structured in go-os
PathSplitter = "/"
WatchTopic = "micro.config.watch"
reader config.Reader
mtx sync.RWMutex
watchers = make(map[string][]*watcher)
)
type watcher struct {
id string
exit chan bool
next chan *proto.WatchResponse
}
func (w *watcher) Next() (*proto.WatchResponse, error) {
select {
case c := <-w.next:
return c, nil
case <-w.exit:
return nil, errors.New("watcher stopped")
}
}
func (w *watcher) Stop() error {
select {
case <-w.exit:
return errors.New("already stopped")
default:
close(w.exit)
}
mtx.Lock()
var wslice []*watcher
for _, watch := range watchers[w.id] {
if watch != w {
wslice = append(wslice, watch)
}
}
watchers[w.id] = wslice
mtx.Unlock()
return nil
}
func Init() error {
reader = config.NewReader()
return nil
}
func Parse(ch ...*config.ChangeSet) (*config.ChangeSet, error)
|
func Values(ch *config.ChangeSet) (config.Values, error) {
return reader.Values(ch)
}
// Watch created by a client RPC request
func Watch(id string) (*watcher, error) {
mtx.Lock()
w := &watcher{
id: id,
exit: make(chan bool),
next: make(chan *proto.WatchResponse),
}
watchers[id] = append(watchers[id], w)
mtx.Unlock()
return w, nil
}
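// Illustrative usage (a minimal sketch; "example-config-id" is a placeholder):
//
//	w, _ := Watch("example-config-id")
//	defer w.Stop()
//	go func() {
//		for {
//			rsp, err := w.Next() // blocks until Watcher() delivers a change or Stop() is called
//			if err != nil {
//				return // watcher stopped
//			}
//			_ = rsp
//		}
//	}()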
// Used as a subscriber between config services for events
func Watcher(ctx context.Context, ch *proto.WatchResponse) error {
mtx.RLock()
for _, sub := range watchers[ch.Id] {
select {
case sub.next <- ch:
case <-time.After(time.Millisecond * 100):
}
}
mtx.RUnlock()
return nil
}
// Publish a change
func Publish(ctx context.Context, ch *proto.WatchResponse) error {
req := client.NewPublication(WatchTopic, ch)
return client.Publish(ctx, req)
}
|
{
return reader.Parse(ch...)
}
|
resolve.go
|
package event
import (
"errors"
"reflect"
"github.com/authgear/authgear-server/pkg/api/model"
"github.com/authgear/authgear-server/pkg/lib/authn/user"
"github.com/authgear/authgear-server/pkg/util/accesscontrol"
)
type ResolverUserQueries interface {
Get(id string, role accesscontrol.Role) (*model.User, error)
}
type ResolverImpl struct {
Users ResolverUserQueries
}
func (r *ResolverImpl) Resolve(anything interface{}) (err error) {
struc := reflect.ValueOf(anything).Elem()
typ := struc.Type()
fields := reflect.VisibleFields(typ)
for i, refField := range fields {
if jsonName, ok := refField.Tag.Lookup("resolve"); ok
|
}
return
}
|
{
for j, targetField := range fields {
if name, ok := targetField.Tag.Lookup("json"); ok {
if jsonName == name {
userRef := struc.Field(i).Interface().(model.UserRef)
var u *model.User
u, err = r.Users.Get(userRef.ID, accesscontrol.EmptyRole)
if errors.Is(err, user.ErrUserNotFound) {
continue
}
if err != nil {
return
}
struc.Field(j).Set(reflect.ValueOf(*u))
}
}
}
}
|
descriptor.go
|
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Code generated by ack-generate. DO NOT EDIT.
package code_signing_config
import (
ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1"
ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare"
acktypes "github.com/aws-controllers-k8s/runtime/pkg/types"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sapirt "k8s.io/apimachinery/pkg/runtime"
k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
svcapitypes "github.com/aws-controllers-k8s/lambda-controller/apis/v1alpha1"
)
const (
finalizerString = "finalizers.lambda.services.k8s.aws/CodeSigningConfig"
)
var (
GroupVersionResource = svcapitypes.GroupVersion.WithResource("codesigningconfigs")
GroupKind = metav1.GroupKind{
Group: "lambda.services.k8s.aws",
Kind: "CodeSigningConfig",
}
)
// resourceDescriptor implements the
// `aws-service-operator-k8s/pkg/types.AWSResourceDescriptor` interface
type resourceDescriptor struct {
}
// GroupKind returns a Kubernetes metav1.GroupKind struct that describes the
// API Group and Kind of CRs described by the descriptor
func (d *resourceDescriptor) GroupKind() *metav1.GroupKind {
return &GroupKind
}
// EmptyRuntimeObject returns an empty object prototype that may be used in
// apimachinery and k8s client operations
func (d *resourceDescriptor) EmptyRuntimeObject() k8sapirt.Object {
return &svcapitypes.CodeSigningConfig{}
}
// ResourceFromRuntimeObject returns an AWSResource that has been initialized
// with the supplied runtime.Object
func (d *resourceDescriptor) ResourceFromRuntimeObject(
obj k8sapirt.Object,
) acktypes.AWSResource {
return &resource{
ko: obj.(*svcapitypes.CodeSigningConfig),
}
}
// Delta returns an `ackcompare.Delta` object containing the difference between
// one `AWSResource` and another.
func (d *resourceDescriptor) Delta(a, b acktypes.AWSResource) *ackcompare.Delta {
return newResourceDelta(a.(*resource), b.(*resource))
}
// IsManaged returns true if the supplied AWSResource is under the management
// of an ACK service controller. What this means in practice is that the
// underlying custom resource (CR) in the AWSResource has had a
// resource-specific finalizer associated with it.
func (d *resourceDescriptor) IsManaged(
res acktypes.AWSResource,
) bool {
obj := res.RuntimeMetaObject()
if obj == nil {
// Should not happen. If it does, there is a bug in the code
panic("nil RuntimeMetaObject in AWSResource")
}
// Remove use of custom code once
// https://github.com/kubernetes-sigs/controller-runtime/issues/994 is
// fixed. This should be able to be:
//
// return k8sctrlutil.ContainsFinalizer(obj, finalizerString)
return containsFinalizer(obj, finalizerString)
}
// Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994
// is fixed.
func containsFinalizer(obj acktypes.RuntimeMetaObject, finalizer string) bool
|
// MarkManaged places the supplied resource under the management of ACK. What
// this typically means is that the resource manager will decorate the
// underlying custom resource (CR) with a finalizer that indicates ACK is
// managing the resource and the underlying CR may not be deleted until ACK is
// finished cleaning up any backend AWS service resources associated with the
// CR.
func (d *resourceDescriptor) MarkManaged(
res acktypes.AWSResource,
) {
obj := res.RuntimeMetaObject()
if obj == nil {
// Should not happen. If it does, there is a bug in the code
panic("nil RuntimeMetaObject in AWSResource")
}
k8sctrlutil.AddFinalizer(obj, finalizerString)
}
// MarkUnmanaged removes the supplied resource from management by ACK. What
// this typically means is that the resource manager will remove a finalizer
// underlying custom resource (CR) that indicates ACK is managing the resource.
// This will allow the Kubernetes API server to delete the underlying CR.
func (d *resourceDescriptor) MarkUnmanaged(
res acktypes.AWSResource,
) {
obj := res.RuntimeMetaObject()
if obj == nil {
// Should not happen. If it does, there is a bug in the code
panic("nil RuntimeMetaObject in AWSResource")
}
k8sctrlutil.RemoveFinalizer(obj, finalizerString)
}
// MarkAdopted places descriptors on the custom resource that indicate the
// resource was not created from within ACK.
func (d *resourceDescriptor) MarkAdopted(
res acktypes.AWSResource,
) {
obj := res.RuntimeMetaObject()
if obj == nil {
// Should not happen. If it does, there is a bug in the code
panic("nil RuntimeMetaObject in AWSResource")
}
curr := obj.GetAnnotations()
if curr == nil {
curr = make(map[string]string)
}
curr[ackv1alpha1.AnnotationAdopted] = "true"
obj.SetAnnotations(curr)
}
|
{
f := obj.GetFinalizers()
for _, e := range f {
if e == finalizer {
return true
}
}
return false
}
|
shapes.py
|
"""Functions that work on collections of shapes
"""
from __future__ import division, print_function
import numpy as np
from .convex import convex_area, convex_centroid
__all__ = ['recenter_polygon', 'centroid_for_shapes',
'centroid_for_uncomputed_shapes', 'recenter_system',
'rescale_and_recenter_system', 'rotate_polygon',
'rotate_system', 'mirror_polygon', 'mirror_system',
'find_concave_outline']
def recenter_polygon(vertices):
"""Returns a new convex polygon with centroid at (0,0)
Args:
vertices (list): list of (x,y) vertices of convex polygon
Returns:
A list just like the input with the recentered vertices (but possibly
transformed into numpy arrays)
"""
centroid = convex_centroid(vertices)
new_verts = []
for v in vertices:
v = np.array(v)
new_verts.append(v - centroid)
return new_verts
def centroid_for_shapes(centroids, areas = None):
"""Calculates the centroid for a set of shapes
Requires pre-computed centroids and areas
Args:
centroids (list): list of (x,y) centroids for each shape
areas (list): list of areas (floats) for each shape (if not given,
assumes they are all equal)
Returns:
The (x,y) position of the weighted centroid (as np.array)
"""
gc = np.zeros(2)
area = 0
if areas is None:
areas = np.ones(len(centroids))
for pc, a in zip(centroids, areas):
gc += np.array(pc)*a
area += a
gc /= area
return np.array(gc)
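# Illustrative usage (an assumption, not part of the original module): two shapes
# with centroids (0, 0) and (2, 0) and areas 1.0 and 3.0 give a weighted centroid
# of roughly (1.5, 0.0), pulled toward the larger shape:
#
#   centroid_for_shapes([(0, 0), (2, 0)], [1.0, 3.0])  # ~ array([1.5, 0.])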
def centroid_for_uncomputed_shapes(shape_list):
"""Like centroid_for_shapes but calculates centroids & areas
Args:
shape_list (list): a list of list of vertices (one for each shape)
Returns:
The (x,y) position of the weighted centroid (as np.array)
"""
centroids = []
areas = []
for s in shape_list:
centroids.append(convex_centroid(s))
areas.append(convex_area(s))
return centroid_for_shapes(centroids, areas)
def recenter_system(shape_list):
"""Recenters a set of shapes around the centroid of all of them
Args:
shape_list (list): a list of list of vertices (one for each shape)
Returns:
List of two items:
* Similar format as input, but transformed so that calculating the
centroid_for_uncomputed_shapes() on that list returns (0,0)
* The grand centroid for the system in original coordinates
"""
centroids = []
areas = []
new_shapes = []
# Decompose each of the individual shapes
for s in shape_list:
c = convex_centroid(s)
a = convex_area(s)
new_s = []
for v in s:
new_s.append(np.array(v) - c)
centroids.append(c)
areas.append(a)
new_shapes.append(new_s)
# Find the grand centroid & new centers of each shape
center = centroid_for_shapes(centroids, areas)
re_centroids = [c - center for c in centroids]
# Go back and change the vertices of each shape
final_shapes = []
for ns,c in zip(new_shapes, re_centroids):
final_shapes.append([s+c for s in ns])
return final_shapes, center
def rescale_and_recenter_system(shape_list, total_area):
"""Recenters a set of shapes and resizes them to have a total fixed area
Args:
shape_list (list): a list of list of vertices (one for each shape)
total_area (float): the area to fix the shapes to
Returns:
List of two items:
* Similar format as input, but transformed so that calculating the
`centroid_for_uncomputed_shapes()` on that list returns (0,0) and summing
the areas gets to `total_area`
* The grand centroid for the system in original coordinates
"""
centroids = []
areas = []
new_shapes = []
# Decompose each of the individual shapes
for s in shape_list:
c = convex_centroid(s)
a = convex_area(s)
new_s = []
for v in s:
new_s.append(np.array(v) - c)
centroids.append(c)
areas.append(a)
new_shapes.append(new_s)
# Find the grand centroid & new centers of each shape
center = centroid_for_shapes(centroids, areas)
re_centroids = [c - center for c in centroids]
# Find rescaling factor
tot_a = sum(areas)
dim_scale = np.sqrt(total_area / tot_a)
# Go back and change the vertices of each shape
final_shapes = []
for ns,c in zip(new_shapes, re_centroids):
final_shapes.append([(s+c)*dim_scale for s in ns])
return final_shapes, center
def rotate_polygon(vertices, angle, center_point = [0., 0.]):
"""Rotates a shape around a given point (the origin)
Args:
vertices (list): A list of (x,y) vertices
angle (float): Angle in radians to rotate counterclockwise
center_point ([float, float]): (x,y) point to rotate around
Returns:
A list of vertices rotated around the center point
"""
np_o = np.array(center_point)
np_vs = [np.array(v) - np_o for v in vertices]
rot_mat = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
return [np.dot(rot_mat, v)+np_o for v in np_vs]
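# Illustrative example (not part of the original module): rotating the single
# vertex (1, 0) by pi/2 radians about the default origin with the matrix above
# yields approximately (0, 1), up to floating-point error:
#
#   rotate_polygon([(1, 0)], np.pi / 2)  # ~ [array([0., 1.])]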
def rotate_system(shape_list, angle, center_point = None):
"""Rotates a set of shapes around a given point
    If no center point is given, assume the center of mass of the system
Args:
shape_list (list): A list of list of (x,y) vertices
angle (float): Angle in radians to rotate counterclockwise
center_point ([float, float]): (x,y) point to rotate around
Returns:
A new shape list with rotated vertices
"""
if center_point is None:
center_point = centroid_for_uncomputed_shapes(shape_list)
return [rotate_polygon(s, angle, center_point) for s in shape_list]
def mirror_polygon(vertices, axes=(False, True), center_point=None):
"""Mirrors a polygon around an x or y line
If center_point is None, mirror around the center of the shape
Args:
vertices (list): A list of (x,y) vertices
axes ([bool, bool]): Whether to mirror around the (x,y) axes
center_point ([float, float]): (x,y) point to mirror around
Returns:
        A new polygon with mirrored vertices
"""
if center_point is None:
center_point = convex_centroid(vertices)
xm = -1 if axes[0] else 1
ym = -1 if axes[1] else 1
return [np.array([xm*(v[0]-center_point[0])+center_point[0],
ym*(v[1]-center_point[1])+center_point[1]]) for v
in vertices]
def mirror_system(shape_list, axes=(False, True), center_point=None):
"""Mirrors a polygon around an x or y line
Mirrors around the center of the system if center_point is None
Args:
shape_list (list): A list of list of (x,y) vertices
axes ([bool, bool]): Whether to mirror around the (x,y) axes
center_point ([float, float]): (x,y) point to mirror around
Returns:
        A new shape list with mirrored vertices
"""
if center_point is None:
center_point = centroid_for_uncomputed_shapes(shape_list)
return [mirror_polygon(s, axes, center_point) for s in shape_list]
def _point_equal(p1, p2):
return p1[0]==p2[0] and p1[1] == p2[1]
def
|
(a1, a2):
return all(_point_equal(p1,p2) for p1, p2 in zip(a1, a2))
def find_concave_outline(shape_list):
"""Find the outline of a set of shapes
Assuming all shapes have edges in common with other shapes where they touch,
provides a set of vertices for drawing the outline
Args:
shape_list (list): A list of list of (x,y) vertices
Returns:
A list of ordered (x,y) vertices for drawing an outline
"""
    # Find the left-most point (ties broken by the lowest y value)
current_shape = shape_list[0]
current_pt = current_shape[0]
test_idx = 1
next_test_dir = 1
for s in shape_list:
for i in range(len(s)):
p = s[i]
if ((p[0] < current_pt[0]) or
(p[0] == current_pt[0] and p[1] < current_pt[1])):
# Replace
current_pt = p
current_shape = s
test_idx = (i+1) % len(s)
next_test_dir = 1
vertex_list = [current_pt]
# Keep going until you reach back to the first point
while not _point_equal(current_shape[test_idx], vertex_list[0]):
# Iterate through all the shapes to try to find a matching edge
checking = True
for s in (s for s in shape_list if not _arr_eq(s, current_shape)):
if checking: # Way to break out if match found
for i in range(len(s)):
spt = s[i]
if _point_equal(current_pt, spt):
spt_after = s[(i+1) % len(s)]
spt_before = s[(i-1) % len(s)]
test_pt = current_shape[test_idx]
if _point_equal(test_pt, spt_after):
test_idx = (i-1) % len(s)
next_test_dir = -1
current_shape = s
checking = False
elif _point_equal(test_pt, spt_before):
test_idx = (i+1) % len(s)
next_test_dir = 1
current_shape = s
checking = False
        # If no other shape shares this edge, record the vertex and keep walking the current shape
if checking:
current_pt = current_shape[test_idx]
vertex_list.append(current_pt)
test_idx += next_test_dir
test_idx %= len(current_shape)
return vertex_list
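# Illustrative usage (an assumption, not part of the original module): two unit
# squares sharing the edge x = 1,
#
#   squares = [[(0, 0), (1, 0), (1, 1), (0, 1)],
#              [(1, 0), (2, 0), (2, 1), (1, 1)]]
#   find_concave_outline(squares)
#
# should walk around the outer 2 x 1 rectangle, skipping the shared interior
# edge while keeping its endpoints (1, 0) and (1, 1) as outline vertices.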
|
_arr_eq
|
volume.py
|
#!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Cinder Volume."""
import os
import eventlet
from cinder import objects
if os.name == 'nt':
# eventlet monkey patching the os module causes subprocess.Popen to fail
# on Windows when using pipes due to missing non-blocking IO support.
eventlet.monkey_patch(os=False)
else:
eventlet.monkey_patch()
import sys
import warnings
warnings.simplefilter('once', DeprecationWarning)
from oslo_config import cfg
from oslo_log import log as logging
from cinder import i18n
i18n.enable_lazy()
# Need to register global_opts
from cinder.common import config # noqa
from cinder import service
from cinder import utils
from cinder import version
deprecated_host_opt = cfg.DeprecatedOpt('host')
host_opt = cfg.StrOpt('backend_host', help='Backend override of host value.',
deprecated_opts=[deprecated_host_opt])
cfg.CONF.register_cli_opt(host_opt)
CONF = cfg.CONF
def main():
|
objects.register_all()
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup(CONF, "cinder")
utils.monkey_patch()
launcher = service.get_launcher()
if CONF.enabled_backends:
for backend in CONF.enabled_backends:
CONF.register_opt(host_opt, group=backend)
backend_host = getattr(CONF, backend).backend_host
host = "%s@%s" % (backend_host or CONF.host, backend)
server = service.Service.create(host=host,
service_name=backend,
binary='cinder-volume')
launcher.launch_service(server)
else:
server = service.Service.create(binary='cinder-volume')
launcher.launch_service(server)
launcher.wait()
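# Illustrative note (assumed configuration, not part of the original script): with
# a cinder.conf containing, for example,
#
#   [DEFAULT]
#   enabled_backends = lvm1,lvm2
#
# the loop above launches one cinder-volume service per backend, each named
# "<backend_host or CONF.host>@<backend>", e.g. "myhost@lvm1" and "myhost@lvm2".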
|
|
Mux8Way16.rs
|
#[cfg(test)]
mod tests {
use super::super::modules::*;
#[test]
fn test_Mux8Way16(){
let mut m = Mux8Way16::new();
m.a = 0;
m.b = 0;
m.c = 0;
m.d = 0;
m.e = 0;
m.f = 0;
m.g = 0;
m.h = 0;
m.sel = 0;
m.prop();
assert_eq!(m.out, 0);
assert_eq!(m.c, 0);
assert_eq!(m.a, 0);
assert_eq!(m.d, 0);
assert_eq!(m.f, 0);
|
assert_eq!(m.b, 0);
assert_eq!(m.h, 0);
assert_eq!(m.g, 0);
assert_eq!(m.e, 0);
assert_eq!(m.sel, 0);
m.sel = 1;
m.prop();
assert_eq!(m.out, 0);
assert_eq!(m.c, 0);
assert_eq!(m.a, 0);
assert_eq!(m.d, 0);
assert_eq!(m.f, 0);
assert_eq!(m.b, 0);
assert_eq!(m.h, 0);
assert_eq!(m.g, 0);
assert_eq!(m.e, 0);
assert_eq!(m.sel, 1);
m.sel = 2;
m.prop();
assert_eq!(m.out, 0);
assert_eq!(m.c, 0);
assert_eq!(m.a, 0);
assert_eq!(m.d, 0);
assert_eq!(m.f, 0);
assert_eq!(m.b, 0);
assert_eq!(m.h, 0);
assert_eq!(m.g, 0);
assert_eq!(m.e, 0);
assert_eq!(m.sel, 2);
m.sel = 3;
m.prop();
assert_eq!(m.out, 0);
assert_eq!(m.c, 0);
assert_eq!(m.a, 0);
assert_eq!(m.d, 0);
assert_eq!(m.f, 0);
assert_eq!(m.b, 0);
assert_eq!(m.h, 0);
assert_eq!(m.g, 0);
assert_eq!(m.e, 0);
assert_eq!(m.sel, 3);
m.sel = 4;
m.prop();
assert_eq!(m.out, 0);
assert_eq!(m.c, 0);
assert_eq!(m.a, 0);
assert_eq!(m.d, 0);
assert_eq!(m.f, 0);
assert_eq!(m.b, 0);
assert_eq!(m.h, 0);
assert_eq!(m.g, 0);
assert_eq!(m.e, 0);
assert_eq!(m.sel, 4);
m.sel = 5;
m.prop();
assert_eq!(m.out, 0);
assert_eq!(m.c, 0);
assert_eq!(m.a, 0);
assert_eq!(m.d, 0);
assert_eq!(m.f, 0);
assert_eq!(m.b, 0);
assert_eq!(m.h, 0);
assert_eq!(m.g, 0);
assert_eq!(m.e, 0);
assert_eq!(m.sel, 5);
m.sel = 6;
m.prop();
assert_eq!(m.out, 0);
assert_eq!(m.c, 0);
assert_eq!(m.a, 0);
assert_eq!(m.d, 0);
assert_eq!(m.f, 0);
assert_eq!(m.b, 0);
assert_eq!(m.h, 0);
assert_eq!(m.g, 0);
assert_eq!(m.e, 0);
assert_eq!(m.sel, 6);
m.sel = 7;
m.prop();
assert_eq!(m.out, 0);
assert_eq!(m.c, 0);
assert_eq!(m.a, 0);
assert_eq!(m.d, 0);
assert_eq!(m.f, 0);
assert_eq!(m.b, 0);
assert_eq!(m.h, 0);
assert_eq!(m.g, 0);
assert_eq!(m.e, 0);
assert_eq!(m.sel, 7);
m.a = 4660;
m.b = 9029;
m.c = 13398;
m.d = 17767;
m.e = 22136;
m.f = 26505;
m.g = 30874;
m.h = 35243;
m.sel = 0;
m.prop();
assert_eq!(m.out, 4660);
assert_eq!(m.c, 13398);
assert_eq!(m.a, 4660);
assert_eq!(m.d, 17767);
assert_eq!(m.f, 26505);
assert_eq!(m.b, 9029);
assert_eq!(m.h, 35243);
assert_eq!(m.g, 30874);
assert_eq!(m.e, 22136);
assert_eq!(m.sel, 0);
m.sel = 1;
m.prop();
assert_eq!(m.out, 9029);
assert_eq!(m.c, 13398);
assert_eq!(m.a, 4660);
assert_eq!(m.d, 17767);
assert_eq!(m.f, 26505);
assert_eq!(m.b, 9029);
assert_eq!(m.h, 35243);
assert_eq!(m.g, 30874);
assert_eq!(m.e, 22136);
assert_eq!(m.sel, 1);
m.sel = 2;
m.prop();
assert_eq!(m.out, 13398);
assert_eq!(m.c, 13398);
assert_eq!(m.a, 4660);
assert_eq!(m.d, 17767);
assert_eq!(m.f, 26505);
assert_eq!(m.b, 9029);
assert_eq!(m.h, 35243);
assert_eq!(m.g, 30874);
assert_eq!(m.e, 22136);
assert_eq!(m.sel, 2);
m.sel = 3;
m.prop();
assert_eq!(m.out, 17767);
assert_eq!(m.c, 13398);
assert_eq!(m.a, 4660);
assert_eq!(m.d, 17767);
assert_eq!(m.f, 26505);
assert_eq!(m.b, 9029);
assert_eq!(m.h, 35243);
assert_eq!(m.g, 30874);
assert_eq!(m.e, 22136);
assert_eq!(m.sel, 3);
m.sel = 4;
m.prop();
assert_eq!(m.out, 22136);
assert_eq!(m.c, 13398);
assert_eq!(m.a, 4660);
assert_eq!(m.d, 17767);
assert_eq!(m.f, 26505);
assert_eq!(m.b, 9029);
assert_eq!(m.h, 35243);
assert_eq!(m.g, 30874);
assert_eq!(m.e, 22136);
assert_eq!(m.sel, 4);
m.sel = 5;
m.prop();
assert_eq!(m.out, 26505);
assert_eq!(m.c, 13398);
assert_eq!(m.a, 4660);
assert_eq!(m.d, 17767);
assert_eq!(m.f, 26505);
assert_eq!(m.b, 9029);
assert_eq!(m.h, 35243);
assert_eq!(m.g, 30874);
assert_eq!(m.e, 22136);
assert_eq!(m.sel, 5);
m.sel = 6;
m.prop();
assert_eq!(m.out, 30874);
assert_eq!(m.c, 13398);
assert_eq!(m.a, 4660);
assert_eq!(m.d, 17767);
assert_eq!(m.f, 26505);
assert_eq!(m.b, 9029);
assert_eq!(m.h, 35243);
assert_eq!(m.g, 30874);
assert_eq!(m.e, 22136);
assert_eq!(m.sel, 6);
m.sel = 7;
m.prop();
assert_eq!(m.out, 35243);
assert_eq!(m.c, 13398);
assert_eq!(m.a, 4660);
assert_eq!(m.d, 17767);
assert_eq!(m.f, 26505);
assert_eq!(m.b, 9029);
assert_eq!(m.h, 35243);
assert_eq!(m.g, 30874);
assert_eq!(m.e, 22136);
assert_eq!(m.sel, 7);
}
}
| |
binance_futures.py
|
import requests
import jesse.helpers as jh
from jesse import exceptions
from jesse.modes.import_candles_mode.drivers.interface import CandleExchange
class BinanceFutures(CandleExchange):
def __init__(self) -> None:
        # import here instead of at the top of the file to prevent a possible circular import issue
from jesse.modes.import_candles_mode.drivers.binance import Binance
super().__init__(
name='Binance Futures',
count=1000,
rate_limit_per_second=2,
backup_exchange_class=Binance
)
self.endpoint = 'https://fapi.binance.com/fapi/v1/klines'
def get_starting_time(self, symbol) -> int:
dashless_symbol = jh.dashless_symbol(symbol)
payload = {
'interval': '1d',
'symbol': dashless_symbol,
'limit': 1500,
}
response = requests.get(self.endpoint, params=payload)
# Exchange In Maintenance
if response.status_code == 502:
raise exceptions.ExchangeInMaintenance('ERROR: 502 Bad Gateway. Please try again later')
# unsupported symbol
if response.status_code == 400:
raise ValueError(response.json()['msg'])
|
raise Exception(response.content)
data = response.json()
        # since the first day's timestamp doesn't include all of its 1m
        # candles, start from the second day instead
first_timestamp = int(data[0][0])
return first_timestamp + 60_000 * 1440
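    # Worked example (illustrative): if the first daily candle Binance returns in
    # get_starting_time() opens at midnight of day N, adding 60_000 ms * 1440
    # minutes (exactly one day) starts the 1m import at midnight of day N+1, the
    # first day guaranteed to be complete.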
def fetch(self, symbol, start_timestamp):
"""
note1: unlike Bitfinex, Binance does NOT skip candles with volume=0.
note2: like Bitfinex, start_time includes the candle and so does the end_time.
"""
end_timestamp = start_timestamp + (self.count - 1) * 60000
dashless_symbol = jh.dashless_symbol(symbol)
payload = {
'interval': '1m',
'symbol': dashless_symbol,
'startTime': start_timestamp,
'endTime': end_timestamp,
'limit': self.count,
}
response = requests.get(self.endpoint, params=payload)
# Exchange In Maintenance
if response.status_code == 502:
raise exceptions.ExchangeInMaintenance('ERROR: 502 Bad Gateway. Please try again later')
# unsupported symbol
if response.status_code == 400:
raise ValueError(response.json()['msg'])
if response.status_code != 200:
return
data = response.json()
return [{
'id': jh.generate_unique_id(),
'symbol': symbol,
'exchange': self.name,
'timestamp': int(d[0]),
'open': float(d[1]),
'close': float(d[4]),
'high': float(d[2]),
'low': float(d[3]),
'volume': float(d[5])
} for d in data]
|
if response.status_code != 200:
|
flightSurety.js
|
var Test = require('../config/testConfig.js');
|
before('setup contract', async () => {
config = await Test.Config(accounts);
});
const fundingFee = web3.utils.toWei("10", "ether");
/****************************************************************************************/
/* Operations and Settings */
/****************************************************************************************/
it(`(multiparty) has correct initial isOperational() value`, async function () {
// Get operating status
let status = await config.flightSuretyData.isOperational.call();
assert.equal(status, true, "Incorrect initial operating status value");
});
it(`(multiparty) can block access to setOperatingStatus() for non-Contract Owner account`, async function () {
// Ensure that access is denied for non-Contract Owner account
let accessDenied = false;
try {
await config.flightSuretyData.setOperatingStatus(false, {from: config.testAddresses[2]});
} catch (e) {
accessDenied = true;
}
assert.equal(accessDenied, true, "Access not restricted to Contract Owner");
});
it(`(multiparty) can allow access to setOperatingStatus() for Contract Owner account`, async function () {
// Ensure that access is allowed for Contract Owner account
let accessDenied = false;
try {
await config.flightSuretyData.setOperatingStatus(false);
} catch (e) {
accessDenied = true;
}
      assert.equal(accessDenied, false, "Access blocked for Contract Owner");
});
it(`(multiparty) can block access to functions using requireIsOperational when operating status is false`, async function () {
let newAirline = accounts[2];
await config.flightSuretyData.setOperatingStatus(false);
let reverted = false;
try {
await config.flightSuretyData.isAirline.call(newAirline);
} catch (e) {
reverted = true;
}
assert.equal(reverted, true, "Access not blocked for requireIsOperational");
// Set it back for other tests to work
await config.flightSuretyData.setOperatingStatus(true);
});
it(`(multiparty) first airline is registered when contract is deployed`, async function () {
let isAirline = await config.flightSuretyData.isAirline.call(config.firstAirline);
assert.equal(isAirline, true, "First airline is not registered");
let isFunded = await config.flightSuretyData.isFundedAirline.call(config.firstAirline);
assert.equal(isFunded, false, "First airline is expected not to be funded");
});
it('(airline) cannot register an Airline using registerAirline() if it is not funded', async () => {
// ARRANGE
let newAirline = accounts[2];
// ACT
try {
await config.flightSuretyApp.registerAirline(newAirline, "Duo Airlines", {from: config.firstAirline});
} catch (e) {
}
let result = await config.flightSuretyData.isAirline.call(newAirline);
// ASSERT
assert.equal(result, false, "Airline should not be able to register another airline if it hasn't provided funding");
});
it('(airline) is funded after sending 10 ETH as funding fee', async () => {
const airline = config.firstAirline;
let isFunded = await config.flightSuretyData.isFundedAirline.call(airline);
assert.equal(isFunded, false, "Airline should not be funded initially");
try {
await config.flightSuretyApp.fund({value: fundingFee, from: airline});
} catch (e) {
}
isFunded = await config.flightSuretyData.isFundedAirline.call(airline);
assert.equal(isFunded, true, "Airline should be funded");
});
it('(airline) can register another airline using registerAirline() if it is funded', async () => {
// ARRANGE
const fundedAirline = config.firstAirline;
const newAirline = accounts[2];
const newAirline2 = accounts[3];
const newAirline3 = accounts[4];
// PREREQUISITES
const isFunded = await config.flightSuretyData.isFundedAirline.call(fundedAirline);
assert.equal(isFunded, true, "Airline should be funded");
// ACT
try {
await config.flightSuretyApp.registerAirline(newAirline, "Up In The Air", {from: fundedAirline});
await config.flightSuretyApp.registerAirline(newAirline2, "Sky Is The Limit", {from: fundedAirline});
await config.flightSuretyApp.registerAirline(newAirline3, "Flyer", {from: fundedAirline});
} catch (e) {
}
const registered1 = await config.flightSuretyData.isAirline.call(newAirline);
const registered2 = await config.flightSuretyData.isAirline.call(newAirline2);
const registered3 = await config.flightSuretyData.isAirline.call(newAirline3);
const result = registered1 && registered2 && registered3;
// ASSERT
assert.equal(result, true, "Airline should be able to register another airline if it has provided funding");
// FUND CREATED AIRLINES
await config.flightSuretyApp.fund({value: fundingFee, from: newAirline});
const isFunded2 = await config.flightSuretyData.isFundedAirline.call(newAirline);
assert.equal(isFunded2, true, "Airline 2 should be funded");
await config.flightSuretyApp.fund({value: fundingFee, from: newAirline2});
const isFunded3 = await config.flightSuretyData.isFundedAirline.call(newAirline2);
assert.equal(isFunded3, true, "Airline 3 should be funded");
await config.flightSuretyApp.fund({value: fundingFee, from: newAirline3});
const isFunded4 = await config.flightSuretyData.isFundedAirline.call(newAirline3);
assert.equal(isFunded4, true, "Airline 4 should be funded");
});
  it('(airline) 5th airline can only be registered when more than 50% of funded airlines vote for it', async () => {
// ARRANGE
const airline1 = accounts[1];
const airline2 = accounts[2];
const airline3 = accounts[3];
const newAirline = accounts[5];
// PREREQUISITES
const airlinesCount = await config.flightSuretyData.airlinesCount.call();
assert.equal(airlinesCount, 4, "4 airlines should be registered");
// 1st vote
try {
await config.flightSuretyApp.registerAirline(newAirline, "My Airline", {from: airline1});
} catch (e) {
}
let registered = await config.flightSuretyData.isAirline.call(newAirline);
assert.equal(registered, false, "New airline should not be registered until >50% of other airlines vote for it");
// 2nd vote
try {
await config.flightSuretyApp.registerAirline(newAirline, "My Airline", {from: airline2});
} catch (e) {
}
registered = await config.flightSuretyData.isAirline.call(newAirline);
assert.equal(registered, false, "New airline should not be registered until >50% of other airlines vote for it");
// 3rd vote
try {
await config.flightSuretyApp.registerAirline(newAirline, "My Airline", {from: airline3});
} catch (e) {
}
registered = await config.flightSuretyData.isAirline.call(newAirline);
assert.equal(registered, true, "New airline should be registered after >50% of other airlines vote for it");
});
it('(airline) airline that is not funded does not participate in the contract', async () => {
// ARRANGE
const airline1 = accounts[1];
const airline2 = accounts[2];
const airline3 = accounts[3];
const notFundedAirline = accounts[5];
const newAirline = accounts[6];
// PREREQUISITES
const airlinesCount = await config.flightSuretyData.airlinesCount.call();
assert.equal(airlinesCount, 5, "5 airlines should be registered");
// 1st vote
try {
await config.flightSuretyApp.registerAirline(newAirline, "Other Airline", {from: airline1});
} catch (e) {
}
let registered = await config.flightSuretyData.isAirline.call(newAirline);
assert.equal(registered, false, "New airline should not be registered until >50% of other airlines vote for it");
// 2nd vote
try {
await config.flightSuretyApp.registerAirline(newAirline, "Other Airline", {from: airline2});
} catch (e) {
}
registered = await config.flightSuretyData.isAirline.call(newAirline);
assert.equal(registered, false, "New airline should not be registered until >50% of other airlines vote for it");
// 3rd vote - not funded airline
let exceptionCaught = false;
try {
await config.flightSuretyApp.registerAirline(newAirline, "Other Airline", {from: notFundedAirline});
} catch (e) {
exceptionCaught = true;
}
registered = await config.flightSuretyData.isAirline.call(newAirline);
assert.equal(exceptionCaught, true, "Should raise exception when not funded airline votes");
assert.equal(registered, false, "Not funded airline should not effectively participate in voting");
// 4th vote
try {
await config.flightSuretyApp.registerAirline(newAirline, "Other Airline", {from: airline3});
} catch (e) {
}
registered = await config.flightSuretyData.isAirline.call(newAirline);
assert.equal(registered, true, "New airline should be registered after >50% of other airlines vote for it");
});
it('(airline) can register a flight', async () => {
const airline = config.firstAirline;
let flightsCount = await config.flightSuretyApp.flightsCount.call();
assert.equal(flightsCount, 0, "There should be no flights registered initially");
try {
await config.flightSuretyApp.registerFlight(airline, 'FL123', Date.now(), {from: airline});
} catch (e) {
}
flightsCount = await config.flightSuretyApp.flightsCount.call();
assert.equal(flightsCount, 1, "Registered flight should increase flights count");
});
it('(passenger) can buy insurance for existing flight', async () => {
const airline = accounts[2];
const passenger = accounts[7];
const flight = 'FL900';
const flightTimestamp = Date.now();
try {
await config.flightSuretyApp.registerFlight(airline, flight, flightTimestamp, {from: airline});
} catch (e) {
}
const premium = web3.utils.toWei("1", "ether");
let purchased = true;
try {
await config.flightSuretyApp.buy(airline, flight, flightTimestamp, {value: premium, from: passenger});
} catch (e) {
purchased = false;
}
      assert.equal(purchased, true, "An error occurred when buying insurance");
try {
await config.flightSuretyApp.buy(airline, flight, flightTimestamp, {value: premium, from: passenger});
} catch (e) {
purchased = false;
}
assert.equal(purchased, false, "Buying double insurance for the same flight should not be allowed");
});
it('(passenger) cannot buy insurance with premium > 1 ETH', async () => {
const airline = accounts[3];
const passenger = accounts[7];
const flight = 'FL901';
const flightTimestamp = Date.now();
try {
await config.flightSuretyApp.registerFlight(airline, flight, flightTimestamp, {from: airline});
} catch (e) {
}
const premium = web3.utils.toWei("1.01", "ether");
let purchased = true;
try {
await config.flightSuretyApp.buy(airline, flight, flightTimestamp, {value: premium, from: passenger});
} catch (e) {
purchased = false;
}
assert.equal(purchased, false, "Insurance should not be purchased");
});
it('(passenger) cannot buy insurance for unregistered flight', async () => {
const airline = accounts[4];
const passenger = accounts[7];
const flight = 'FL902';
const flightTimestamp = Date.now() + 1000;
const premium = web3.utils.toWei("1", "ether");
let purchased = true;
try {
await config.flightSuretyApp.buy(airline, flight, flightTimestamp, {value: premium, from: passenger});
} catch (e) {
purchased = false;
}
assert.equal(purchased, false, "Insurance should not be purchased");
});
});
|
contract('Flight Surety Tests', async (accounts) => {
var config;
|
union_record.go
|
// Code generated by github.com/actgardner/gogen-avro/v7. DO NOT EDIT.
/*
* SOURCE:
* evolution.avsc
*/
package avro
import (
"github.com/actgardner/gogen-avro/v7/compiler"
"github.com/actgardner/gogen-avro/v7/vm"
"github.com/actgardner/gogen-avro/v7/vm/types"
"io"
)
type UnionRecord struct {
A *UnionNullString `json:"a"`
Name string `json:"name"`
}
const UnionRecordAvroCRC64Fingerprint = "\xf1\xaa\xd1\x1b\x17fj\xae"
func NewUnionRecord() *UnionRecord {
return &UnionRecord{}
}
func DeserializeUnionRecord(r io.Reader) (*UnionRecord, error) {
t := NewUnionRecord()
deser, err := compiler.CompileSchemaBytes([]byte(t.AvroRecordSchema()), []byte(t.AvroRecordSchema()))
if err != nil {
return nil, err
}
err = vm.Eval(r, deser, t)
if err != nil {
return nil, err
}
return t, err
}
func DeserializeUnionRecordFromSchema(r io.Reader, schema string) (*UnionRecord, error) {
t := NewUnionRecord()
deser, err := compiler.CompileSchemaBytes([]byte(schema), []byte(t.AvroRecordSchema()))
if err != nil {
return nil, err
}
err = vm.Eval(r, deser, t)
if err != nil {
return nil, err
}
return t, err
}
func writeUnionRecord(r *UnionRecord, w io.Writer) error
|
func (r *UnionRecord) Serialize(w io.Writer) error {
return writeUnionRecord(r, w)
}
func (r *UnionRecord) AvroRecordSchema() string {
return "{\"fields\":[{\"name\":\"a\",\"type\":[\"null\",\"string\"]},{\"name\":\"name\",\"type\":\"string\"}],\"name\":\"UnionRecord\",\"type\":\"record\"}"
}
func (r *UnionRecord) SchemaName() string {
return "UnionRecord"
}
func (_ *UnionRecord) SetBoolean(v bool) { panic("Unsupported operation") }
func (_ *UnionRecord) SetInt(v int32) { panic("Unsupported operation") }
func (_ *UnionRecord) SetLong(v int64) { panic("Unsupported operation") }
func (_ *UnionRecord) SetFloat(v float32) { panic("Unsupported operation") }
func (_ *UnionRecord) SetDouble(v float64) { panic("Unsupported operation") }
func (_ *UnionRecord) SetBytes(v []byte) { panic("Unsupported operation") }
func (_ *UnionRecord) SetString(v string) { panic("Unsupported operation") }
func (_ *UnionRecord) SetUnionElem(v int64) { panic("Unsupported operation") }
func (r *UnionRecord) Get(i int) types.Field {
switch i {
case 0:
r.A = NewUnionNullString()
return r.A
case 1:
return &types.String{Target: &r.Name}
}
panic("Unknown field index")
}
func (r *UnionRecord) SetDefault(i int) {
switch i {
}
panic("Unknown field index")
}
func (r *UnionRecord) NullField(i int) {
switch i {
case 0:
r.A = nil
return
}
panic("Not a nullable field index")
}
func (_ *UnionRecord) AppendMap(key string) types.Field { panic("Unsupported operation") }
func (_ *UnionRecord) AppendArray() types.Field { panic("Unsupported operation") }
func (_ *UnionRecord) Finalize() {}
func (_ *UnionRecord) AvroCRC64Fingerprint() []byte {
return []byte(UnionRecordAvroCRC64Fingerprint)
}
|
{
var err error
err = writeUnionNullString(r.A, w)
if err != nil {
return err
}
err = vm.WriteString(r.Name, w)
if err != nil {
return err
}
return err
}
|
event_loop.rs
|
use std::io;
use calloop::{
generic::{Fd, Generic},
EventSource, InsertError, Interest, LoopHandle, Mode, PostAction, RegistrationToken,
TokenFactory,
};
use wayland_client::{EventQueue, ReadEventsGuard};
/// An adapter to insert a Wayland `EventQueue` into a calloop event loop
///
/// This is a struct that implements `calloop::EventSource`. It generates an
/// event whenever events need to be dispatched. At this point your calloop callback
/// will be given access to the `EventQueue` and you should call `.dispatch_pending()`
/// and forward its return value, allowing you to handle orphan events as you prefer.
///
/// If you don't use orphan events, the `quick_insert` method will directly
/// insert the source into a provided `LoopHandle` with an adapter which will panic
/// whenever an orphan event is encountered.
#[derive(Debug)]
pub struct
|
{
queue: EventQueue,
fd: Generic<Fd>,
read_guard: Option<ReadEventsGuard>,
}
impl WaylandSource {
/// Wrap an `EventQueue` as a `WaylandSource`.
pub fn new(queue: EventQueue) -> WaylandSource {
let fd = queue.display().get_connection_fd();
WaylandSource {
queue,
fd: Generic::from_fd(fd, Interest::READ, Mode::Level),
read_guard: None,
}
}
/// Insert this source into given event loop with an adapter that panics on orphan events
///
    /// The adapter will pass the event loop's global shared data as `dispatch_data` to all
/// callbacks.
pub fn quick_insert<Data: 'static>(
self,
handle: LoopHandle<Data>,
) -> Result<RegistrationToken, InsertError<WaylandSource>> {
handle.insert_source(self, |(), queue, ddata| {
queue.dispatch_pending(ddata, |event, object, _| {
panic!(
"[calloop] Encountered an orphan event: {}@{} : {}",
event.interface,
object.as_ref().id(),
event.name
);
})
})
}
/// Access the underlying event queue
///
/// This method can be used if you need to access the underlying `EventQueue` while this
/// `WaylandSource` is currently inserted in an event loop.
///
/// Note that you should be careful when interacting with it if you invoke methods that
/// interact with the wayland socket (such as `dispatch()` or `prepare_read()`). These may
    /// interfere with the proper waking up of this event source in the event loop.
pub fn queue(&mut self) -> &mut EventQueue {
&mut self.queue
}
}
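// Illustrative usage sketch (an assumption, not part of the original file; the
// state type `MyState`, the variables `event_queue`/`state`, and the exact
// calloop version are placeholders):
//
//   let mut event_loop = calloop::EventLoop::<MyState>::try_new()?;
//   WaylandSource::new(event_queue).quick_insert(event_loop.handle())?;
//   // The source wakes the loop whenever the compositor sends events.
//   event_loop.dispatch(None, &mut state)?;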
impl EventSource for WaylandSource {
type Event = ();
type Metadata = EventQueue;
type Ret = std::io::Result<u32>;
fn process_events<F>(
&mut self,
readiness: calloop::Readiness,
token: calloop::Token,
mut callback: F,
) -> std::io::Result<PostAction>
where
F: FnMut((), &mut EventQueue) -> std::io::Result<u32>,
{
let queue = &mut self.queue;
let read_guard = &mut self.read_guard;
self.fd.process_events(readiness, token, |_, _| {
// 1. read events from the socket if any are available
if let Some(guard) = read_guard.take() {
            // might be None if some other thread read events before us, concurrently
if let Err(e) = guard.read_events() {
if e.kind() != io::ErrorKind::WouldBlock {
return Err(e);
}
}
}
// 2. dispatch any pending event in the queue (that's callback's job)
loop {
match queue.prepare_read() {
Some(guard) => {
*read_guard = Some(guard);
break;
}
None => {
callback((), queue)?;
}
}
}
// 3. Once dispatching is finished, flush the responses to the compositor
if let Err(e) = queue.display().flush() {
if e.kind() != io::ErrorKind::WouldBlock {
// in case of error, forward it and fast-exit
return Err(e);
}
            // A WouldBlock error means the compositor could not accept all our messages
            // immediately; either it is busy or we are flooding it. This should rarely
            // happen, and if it does we do nothing and will flush again later.
}
Ok(PostAction::Continue)
})
}
fn register(
&mut self,
poll: &mut calloop::Poll,
token_factory: &mut TokenFactory,
) -> std::io::Result<()> {
self.fd.register(poll, token_factory)
}
fn reregister(
&mut self,
poll: &mut calloop::Poll,
token_factory: &mut TokenFactory,
) -> std::io::Result<()> {
self.fd.reregister(poll, token_factory)
}
fn unregister(&mut self, poll: &mut calloop::Poll) -> std::io::Result<()> {
self.fd.unregister(poll)
}
fn pre_run<F>(&mut self, mut callback: F) -> std::io::Result<()>
where
F: FnMut((), &mut EventQueue) -> std::io::Result<u32>,
{
debug_assert!(self.read_guard.is_none());
// flush the display before starting to poll
if let Err(e) = self.queue.display().flush() {
if e.kind() != io::ErrorKind::WouldBlock {
        // in case of error, don't prepare a read; if the error is persistent,
// it'll trigger in other wayland methods anyway
log::error!("Error trying to flush the wayland display: {}", e);
return Err(e);
}
}
loop {
match self.queue.prepare_read() {
Some(guard) => {
self.read_guard = Some(guard);
break;
}
None => {
callback((), &mut self.queue)?;
}
}
}
Ok(())
}
fn post_run<F>(&mut self, _: F) -> std::io::Result<()>
where
F: FnMut((), &mut EventQueue) -> std::io::Result<u32>,
{
// the destructor of ReadEventsGuard does the cleanup
self.read_guard = None;
Ok(())
}
}
|
WaylandSource
|
packet.go
|
)
type Envelope struct {
Sender *Peer
Receiver *Peer
Packet *eos.Packet `json:"envelope"`
}
func NewEnvelope(sender *Peer, receiver *Peer, packet *eos.Packet) *Envelope {
return &Envelope{
Sender: sender,
Receiver: receiver,
Packet: packet,
}
}
|
package p2p
import (
"github.com/vadim-di/eos-go"
|
|
controller.js
|
import { Movement } from 'wasm-snake-game'
const MOVEMENT_KEYS = {
|
[Movement.LEFT]: [65, 37]
}
const STOP_KEY = 32
export class Controller {
constructor(onStop = () => { }) {
    // MOVEMENT_KEYS maps each Movement variant (exported by the wasm module) to the
    // key codes that trigger it; on keydown, find the movement bound to the pressed key.
window.addEventListener('keydown', ({ which }) => {
this.movement = Object.keys(MOVEMENT_KEYS).find(key => MOVEMENT_KEYS[key].includes(which))
})
window.addEventListener('keyup', ({ which }) => {
this.movement = undefined
if (which === STOP_KEY) {
onStop()
}
})
}
}
|
[Movement.TOP]: [87, 38],
[Movement.RIGHT]: [68, 39],
[Movement.DOWN]: [83, 40],
|
ethereum_adapter.rs
|
use ethabi::Token;
use futures::future;
use futures::prelude::*;
use lazy_static::lazy_static;
use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::iter::FromIterator;
use std::sync::Arc;
use std::time::Instant;
use ethabi::ParamType;
use graph::{
blockchain::{block_stream::BlockWithTriggers, BlockPtr, IngestorError},
prelude::{
anyhow, async_trait, debug, error, ethabi,
futures03::{self, compat::Future01CompatExt, FutureExt, StreamExt, TryStreamExt},
hex, retry, stream, tiny_keccak, trace, warn,
web3::{
self,
types::{
Address, Block, BlockId, BlockNumber as Web3BlockNumber, Bytes, CallRequest,
FilterBuilder, Log, H256,
},
},
BlockNumber, ChainStore, CheapClone, DynTryFuture, Error, EthereumCallCache, Logger,
TimeoutError, TryFutureExt,
},
};
use graph::{
components::ethereum::*,
prelude::web3::types::{Trace, TraceFilter, TraceFilterBuilder, H160},
};
use web3::api::Web3;
use web3::transports::batch::Batch;
use web3::types::Filter;
use crate::chain::BlockFinality;
use crate::{
adapter::{
EthGetLogsFilter, EthereumAdapter as EthereumAdapterTrait, EthereumBlockFilter,
EthereumCallFilter, EthereumContractCall, EthereumContractCallError, EthereumLogFilter,
ProviderEthRpcMetrics, SubgraphEthRpcMetrics,
},
transport::Transport,
trigger::{EthereumBlockTriggerType, EthereumTrigger},
TriggerFilter,
};
#[derive(Clone)]
pub struct EthereumAdapter {
logger: Logger,
url_hostname: Arc<String>,
provider: String,
web3: Arc<Web3<Transport>>,
metrics: Arc<ProviderEthRpcMetrics>,
supports_eip_1898: bool,
}
lazy_static! {
static ref TRACE_STREAM_STEP_SIZE: BlockNumber = std::env::var("ETHEREUM_TRACE_STREAM_STEP_SIZE")
.unwrap_or("50".into())
.parse::<BlockNumber>()
.expect("invalid trace stream step size");
    /// Maximum range size for `eth.getLogs` requests that don't filter on
/// contract address, only event signature, and are therefore expensive.
///
/// According to Ethereum node operators, size 500 is reasonable here.
static ref MAX_EVENT_ONLY_RANGE: BlockNumber = std::env::var("GRAPH_ETHEREUM_MAX_EVENT_ONLY_RANGE")
.unwrap_or("500".into())
.parse::<BlockNumber>()
.expect("invalid number of parallel Ethereum block ranges to scan");
static ref BLOCK_BATCH_SIZE: usize = std::env::var("ETHEREUM_BLOCK_BATCH_SIZE")
.unwrap_or("10".into())
.parse::<usize>()
.expect("invalid ETHEREUM_BLOCK_BATCH_SIZE env var");
    /// This should not be so large that it causes requests to time out without us catching it, nor
    /// so small that it causes us to time out requests that would've succeeded. We've seen
/// successful `eth_getLogs` requests take over 120 seconds.
static ref JSON_RPC_TIMEOUT: u64 = std::env::var("GRAPH_ETHEREUM_JSON_RPC_TIMEOUT")
.unwrap_or("180".into())
.parse::<u64>()
.expect("invalid GRAPH_ETHEREUM_JSON_RPC_TIMEOUT env var");
/// This is used for requests that will not fail the subgraph if the limit is reached, but will
/// simply restart the syncing step, so it can be low. This limit guards against scenarios such
/// as requesting a block hash that has been reorged.
static ref REQUEST_RETRIES: usize = std::env::var("GRAPH_ETHEREUM_REQUEST_RETRIES")
.unwrap_or("10".into())
.parse::<usize>()
|
/// Gas limit for `eth_call`. The value of 25_000_000 is a protocol-wide parameter so this
/// should be changed only for debugging purposes and never on an indexer in the network. The
/// value of 25_000_000 was chosen because it is the Geth default
/// https://github.com/ethereum/go-ethereum/blob/54c0d573d75ab9baa239db3f071d6cb4d1ec6aad/eth/ethconfig/config.go#L86.
/// It is not safe to set something higher because Geth will silently override the gas limit
/// with the default. This means that we do not support indexing against a Geth node with
/// `RPCGasCap` set below 25 million.
// See also f0af4ab0-6b7c-4b68-9141-5b79346a5f61.
static ref ETH_CALL_GAS: u32 = std::env::var("GRAPH_ETH_CALL_GAS")
.map(|s| s.parse::<u32>().expect("invalid GRAPH_ETH_CALL_GAS env var"))
.unwrap_or(25_000_000);
}
impl CheapClone for EthereumAdapter {
fn cheap_clone(&self) -> Self {
Self {
logger: self.logger.clone(),
provider: self.provider.clone(),
url_hostname: self.url_hostname.cheap_clone(),
web3: self.web3.cheap_clone(),
metrics: self.metrics.cheap_clone(),
supports_eip_1898: self.supports_eip_1898,
}
}
}
impl EthereumAdapter {
pub async fn new(
logger: Logger,
provider: String,
url: &str,
transport: Transport,
provider_metrics: Arc<ProviderEthRpcMetrics>,
supports_eip_1898: bool,
) -> Self {
// Unwrap: The transport was constructed with this url, so it is valid and has a host.
let hostname = graph::url::Url::parse(url)
.unwrap()
.host_str()
.unwrap()
.to_string();
let web3 = Arc::new(Web3::new(transport));
        // Use the client version to check if it is Ganache. For compatibility with unit tests, we
        // are lenient with errors, defaulting to false.
let is_ganache = web3
.web3()
.client_version()
.compat()
.await
.map(|s| s.contains("TestRPC"))
.unwrap_or(false);
EthereumAdapter {
logger,
provider,
url_hostname: Arc::new(hostname),
web3,
metrics: provider_metrics,
supports_eip_1898: supports_eip_1898 && !is_ganache,
}
}
fn traces(
&self,
logger: &Logger,
subgraph_metrics: Arc<SubgraphEthRpcMetrics>,
from: BlockNumber,
to: BlockNumber,
addresses: Vec<H160>,
) -> impl Future<Item = Vec<Trace>, Error = Error> {
let eth = self.clone();
let logger = logger.to_owned();
retry("trace_filter RPC call", &logger)
.limit(*REQUEST_RETRIES)
.timeout_secs(*JSON_RPC_TIMEOUT)
.run(move || {
let trace_filter: TraceFilter = match addresses.len() {
0 => TraceFilterBuilder::default()
.from_block(from.into())
.to_block(to.into())
.build(),
_ => TraceFilterBuilder::default()
.from_block(from.into())
.to_block(to.into())
.to_address(addresses.clone())
.build(),
};
let logger_for_triggers = logger.clone();
let logger_for_error = logger.clone();
let start = Instant::now();
let subgraph_metrics = subgraph_metrics.clone();
let provider_metrics = eth.metrics.clone();
eth.web3
.trace()
.filter(trace_filter)
.map(move |traces| {
if traces.len() > 0 {
if to == from {
debug!(
logger_for_triggers,
"Received {} traces for block {}",
traces.len(),
to
);
} else {
debug!(
logger_for_triggers,
"Received {} traces for blocks [{}, {}]",
traces.len(),
from,
to
);
}
}
traces
})
.from_err()
.then(move |result| {
let elapsed = start.elapsed().as_secs_f64();
provider_metrics.observe_request(elapsed, "trace_filter");
subgraph_metrics.observe_request(elapsed, "trace_filter");
if result.is_err() {
provider_metrics.add_error("trace_filter");
subgraph_metrics.add_error("trace_filter");
debug!(
logger_for_error,
"Error querying traces error = {:?} from = {:?} to = {:?}",
result,
from,
to
);
}
result
})
})
.map_err(move |e| {
e.into_inner().unwrap_or_else(move || {
anyhow::anyhow!(
"Ethereum node took too long to respond to trace_filter \
(from block {}, to block {})",
from,
to
)
})
})
}
fn logs_with_sigs(
&self,
logger: &Logger,
subgraph_metrics: Arc<SubgraphEthRpcMetrics>,
from: BlockNumber,
to: BlockNumber,
filter: Arc<EthGetLogsFilter>,
too_many_logs_fingerprints: &'static [&'static str],
) -> impl Future<Item = Vec<Log>, Error = TimeoutError<web3::error::Error>> {
let eth_adapter = self.clone();
retry("eth_getLogs RPC call", &logger)
.when(move |res: &Result<_, web3::error::Error>| match res {
Ok(_) => false,
Err(e) => !too_many_logs_fingerprints
.iter()
.any(|f| e.to_string().contains(f)),
})
.limit(*REQUEST_RETRIES)
.timeout_secs(*JSON_RPC_TIMEOUT)
.run(move || {
let start = Instant::now();
let subgraph_metrics = subgraph_metrics.clone();
let provider_metrics = eth_adapter.metrics.clone();
// Create a log filter
let log_filter: Filter = FilterBuilder::default()
.from_block(from.into())
.to_block(to.into())
.address(filter.contracts.clone())
.topics(Some(filter.event_signatures.clone()), None, None, None)
.build();
// Request logs from client
eth_adapter.web3.eth().logs(log_filter).then(move |result| {
let elapsed = start.elapsed().as_secs_f64();
provider_metrics.observe_request(elapsed, "eth_getLogs");
subgraph_metrics.observe_request(elapsed, "eth_getLogs");
if result.is_err() {
provider_metrics.add_error("eth_getLogs");
subgraph_metrics.add_error("eth_getLogs");
}
result
})
})
}
fn trace_stream(
self,
logger: &Logger,
subgraph_metrics: Arc<SubgraphEthRpcMetrics>,
from: BlockNumber,
to: BlockNumber,
addresses: Vec<H160>,
) -> impl Stream<Item = Trace, Error = Error> + Send {
if from > to {
panic!(
"Can not produce a call stream on a backwards block range: from = {}, to = {}",
from, to,
);
}
let step_size = *TRACE_STREAM_STEP_SIZE;
let eth = self.clone();
let logger = logger.to_owned();
stream::unfold(from, move |start| {
if start > to {
return None;
}
let end = (start + step_size - 1).min(to);
let new_start = end + 1;
if start == end {
debug!(logger, "Requesting traces for block {}", start);
} else {
debug!(logger, "Requesting traces for blocks [{}, {}]", start, end);
}
Some(futures::future::ok((
eth.traces(
&logger,
subgraph_metrics.clone(),
start,
end,
addresses.clone(),
),
new_start,
)))
})
.buffered(*BLOCK_BATCH_SIZE)
.map(stream::iter_ok)
.flatten()
}
fn log_stream(
&self,
logger: Logger,
subgraph_metrics: Arc<SubgraphEthRpcMetrics>,
from: BlockNumber,
to: BlockNumber,
filter: EthGetLogsFilter,
) -> DynTryFuture<'static, Vec<Log>, Error> {
// Codes returned by Ethereum node providers if an eth_getLogs request is too heavy.
// The first one is for Infura when it hits the log limit, the rest for Alchemy timeouts.
const TOO_MANY_LOGS_FINGERPRINTS: &[&str] = &[
"ServerError(-32005)",
"503 Service Unavailable",
"ServerError(-32000)",
];
if from > to {
panic!(
"cannot produce a log stream on a backwards block range (from={}, to={})",
from, to
);
}
// Collect all event sigs
let eth = self.cheap_clone();
let filter = Arc::new(filter);
let step = match filter.contracts.is_empty() {
// `to - from + 1` blocks will be scanned.
false => to - from,
true => (to - from).min(*MAX_EVENT_ONLY_RANGE - 1),
};
// Typically this will loop only once and fetch the entire range in one request. But if the
        // node returns an error indicating that the request is too heavy to process, the range
        // will be broken down into smaller steps.
futures03::stream::try_unfold((from, step), move |(start, step)| {
let logger = logger.cheap_clone();
let filter = filter.cheap_clone();
let eth = eth.cheap_clone();
let subgraph_metrics = subgraph_metrics.cheap_clone();
async move {
if start > to {
return Ok(None);
}
let end = (start + step).min(to);
debug!(
logger,
"Requesting logs for blocks [{}, {}], {}", start, end, filter
);
let res = eth
.logs_with_sigs(
&logger,
subgraph_metrics.cheap_clone(),
start,
end,
filter.cheap_clone(),
TOO_MANY_LOGS_FINGERPRINTS,
)
.compat()
.await;
match res {
Err(e) => {
let string_err = e.to_string();
// If the step is already 0, the request is too heavy even for a single
// block. We hope this never happens, but if it does, make sure to error.
if TOO_MANY_LOGS_FINGERPRINTS
.iter()
.any(|f| string_err.contains(f))
&& step > 0
{
// The range size for a request is `step + 1`. So it's ok if the step
// goes down to 0, in that case we'll request one block at a time.
let new_step = step / 10;
debug!(logger, "Reducing block range size to scan for events";
"new_size" => new_step + 1);
Ok(Some((vec![], (start, new_step))))
} else {
warn!(logger, "Unexpected RPC error"; "error" => &string_err);
Err(anyhow!("{}", string_err))
}
}
Ok(logs) => Ok(Some((logs, (end + 1, step)))),
}
}
})
.try_concat()
.boxed()
}
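    // Worked example of the back-off above (illustrative): with an initial step of
    // 500, a "too many logs" error shrinks the step to 500 / 10 = 50, then 5, then 0.
    // Since each request covers `step + 1` blocks, a step of 0 still fetches one
    // block at a time instead of giving up.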
fn call(
&self,
logger: Logger,
contract_address: Address,
call_data: Bytes,
block_ptr: BlockPtr,
) -> impl Future<Item = Bytes, Error = EthereumContractCallError> + Send {
let web3 = self.web3.clone();
// Ganache does not support calls by block hash.
// See https://github.com/trufflesuite/ganache-cli/issues/745
let block_id = if !self.supports_eip_1898 {
BlockId::Number(block_ptr.number.into())
} else {
BlockId::Hash(block_ptr.hash_as_h256())
};
retry("eth_call RPC call", &logger)
.when(|result| match result {
Ok(_) | Err(EthereumContractCallError::Revert(_)) => false,
Err(_) => true,
})
.limit(10)
.timeout_secs(*JSON_RPC_TIMEOUT)
.run(move || {
let req = CallRequest {
from: None,
to: contract_address,
gas: Some(web3::types::U256::from(*ETH_CALL_GAS)),
gas_price: None,
value: None,
data: Some(call_data.clone()),
};
web3.eth().call(req, Some(block_id)).then(|result| {
// Try to check if the call was reverted. The JSON-RPC response for reverts is
// not standardized, so we have ad-hoc checks for each of Geth, Parity and
// Ganache.
// 0xfe is the "designated bad instruction" of the EVM, and Solidity uses it for
// asserts.
const PARITY_BAD_INSTRUCTION_FE: &str = "Bad instruction fe";
// 0xfd is REVERT, but on some contracts, and only on older blocks,
// this happens. Makes sense to consider it a revert as well.
const PARITY_BAD_INSTRUCTION_FD: &str = "Bad instruction fd";
const PARITY_BAD_JUMP_PREFIX: &str = "Bad jump";
const PARITY_STACK_LIMIT_PREFIX: &str = "Out of stack";
const GANACHE_VM_EXECUTION_ERROR: i64 = -32000;
const GANACHE_REVERT_MESSAGE: &str =
"VM Exception while processing transaction: revert";
const PARITY_VM_EXECUTION_ERROR: i64 = -32015;
const PARITY_REVERT_PREFIX: &str = "Reverted 0x";
// Deterministic Geth execution errors. We might need to expand this as
// subgraphs come across other errors. See
// https://github.com/ethereum/go-ethereum/blob/cd57d5cd38ef692de8fbedaa56598b4e9fbfbabc/core/vm/errors.go
const GETH_EXECUTION_ERRORS: &[&str] = &[
"execution reverted",
"invalid jump destination",
"invalid opcode",
// Ethereum says 1024 is the stack sizes limit, so this is deterministic.
"stack limit reached 1024",
// See f0af4ab0-6b7c-4b68-9141-5b79346a5f61 for why the gas limit is considered deterministic.
"out of gas",
];
let as_solidity_revert_with_reason = |bytes: &[u8]| {
let solidity_revert_function_selector =
&tiny_keccak::keccak256(b"Error(string)")[..4];
match bytes.len() >= 4 && &bytes[..4] == solidity_revert_function_selector {
false => None,
true => ethabi::decode(&[ParamType::String], &bytes[4..])
.ok()
.and_then(|tokens| tokens[0].clone().to_string()),
}
};
match result {
// A successful response.
Ok(bytes) => Ok(bytes),
// Check for Geth revert.
Err(web3::Error::Rpc(rpc_error))
if GETH_EXECUTION_ERRORS
.iter()
.any(|e| rpc_error.message.contains(e)) =>
{
Err(EthereumContractCallError::Revert(rpc_error.message))
}
// Check for Parity revert.
Err(web3::Error::Rpc(ref rpc_error))
if rpc_error.code.code() == PARITY_VM_EXECUTION_ERROR =>
{
match rpc_error.data.as_ref().and_then(|d| d.as_str()) {
Some(data)
if data.starts_with(PARITY_REVERT_PREFIX)
|| data.starts_with(PARITY_BAD_JUMP_PREFIX)
|| data.starts_with(PARITY_STACK_LIMIT_PREFIX)
|| data == PARITY_BAD_INSTRUCTION_FE
|| data == PARITY_BAD_INSTRUCTION_FD =>
{
let reason = if data == PARITY_BAD_INSTRUCTION_FE {
PARITY_BAD_INSTRUCTION_FE.to_owned()
} else {
let payload = data.trim_start_matches(PARITY_REVERT_PREFIX);
hex::decode(payload)
.ok()
.and_then(|payload| {
as_solidity_revert_with_reason(&payload)
})
.unwrap_or("no reason".to_owned())
};
Err(EthereumContractCallError::Revert(reason))
}
// The VM execution error was not identified as a revert.
_ => Err(EthereumContractCallError::Web3Error(web3::Error::Rpc(
rpc_error.clone(),
))),
}
}
// Check for Ganache revert.
Err(web3::Error::Rpc(ref rpc_error))
if rpc_error.code.code() == GANACHE_VM_EXECUTION_ERROR
&& rpc_error.message.starts_with(GANACHE_REVERT_MESSAGE) =>
{
Err(EthereumContractCallError::Revert(rpc_error.message.clone()))
}
// The error was not identified as a revert.
Err(err) => Err(EthereumContractCallError::Web3Error(err)),
}
})
})
.map_err(|e| e.into_inner().unwrap_or(EthereumContractCallError::Timeout))
}
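    // Note on the revert decoding above (the selector value is standard Solidity
    // behavior, stated here for reference): `revert("reason")` returns ABI-encoded
    // `Error(string)` data whose 4-byte selector is keccak256("Error(string)")[..4]
    // = 0x08c379a0; the closure in `call` strips that selector and decodes the
    // remaining bytes as a string.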
/// Request blocks by hash through JSON-RPC.
fn load_blocks_rpc(
&self,
logger: Logger,
ids: Vec<H256>,
) -> impl Stream<Item = LightEthereumBlock, Error = Error> + Send {
let web3 = self.web3.clone();
stream::iter_ok::<_, Error>(ids.into_iter().map(move |hash| {
let web3 = web3.clone();
retry(format!("load block {}", hash), &logger)
.limit(*REQUEST_RETRIES)
.timeout_secs(*JSON_RPC_TIMEOUT)
.run(move || {
web3.eth()
.block_with_txs(BlockId::Hash(hash))
.from_err::<Error>()
.and_then(move |block| {
block.ok_or_else(|| {
anyhow::anyhow!("Ethereum node did not find block {:?}", hash)
})
})
})
.from_err()
}))
.buffered(*BLOCK_BATCH_SIZE)
}
/// Request blocks ptrs for numbers through JSON-RPC.
///
    /// Reorg safety: If ids are numbers, they must be final blocks.
fn load_block_ptrs_rpc(
&self,
logger: Logger,
block_nums: Vec<BlockNumber>,
) -> impl Stream<Item = BlockPtr, Error = Error> + Send {
let web3 = self.web3.clone();
stream::iter_ok::<_, Error>(block_nums.into_iter().map(move |block_num| {
let web3 = web3.clone();
retry(format!("load block ptr {}", block_num), &logger)
.no_limit()
.timeout_secs(*JSON_RPC_TIMEOUT)
.run(move || {
web3.eth()
.block(BlockId::Number(Web3BlockNumber::Number(block_num.into())))
.from_err::<Error>()
.and_then(move |block| {
block.ok_or_else(|| {
anyhow!("Ethereum node did not find block {:?}", block_num)
})
})
})
.from_err()
}))
.buffered(*BLOCK_BATCH_SIZE)
.map(|b| b.into())
}
/// Check if `block_ptr` refers to a block that is on the main chain, according to the Ethereum
/// node.
///
/// Careful: don't use this function without considering race conditions.
/// Chain reorgs could happen at any time, and could affect the answer received.
/// Generally, it is only safe to use this function with blocks that have received enough
/// confirmations to guarantee no further reorgs, **and** where the Ethereum node is aware of
/// those confirmations.
/// If the Ethereum node is far behind in processing blocks, even old blocks can be subject to
/// reorgs.
pub(crate) async fn is_on_main_chain(
&self,
logger: &Logger,
chain_store: Arc<dyn ChainStore>,
block_ptr: BlockPtr,
) -> Result<bool, Error> {
let block_hash = self
.block_hash_by_block_number(&logger, chain_store, block_ptr.number, true)
.compat()
.await?;
block_hash
.ok_or_else(|| anyhow!("Ethereum node is missing block #{}", block_ptr.number))
.map(|block_hash| block_hash == block_ptr.hash_as_h256())
}
pub(crate) fn logs_in_block_range(
&self,
logger: &Logger,
subgraph_metrics: Arc<SubgraphEthRpcMetrics>,
from: BlockNumber,
to: BlockNumber,
log_filter: EthereumLogFilter,
) -> DynTryFuture<'static, Vec<Log>, Error> {
let eth: Self = self.cheap_clone();
let logger = logger.clone();
futures03::stream::iter(log_filter.eth_get_logs_filters().map(move |filter| {
eth.cheap_clone().log_stream(
logger.cheap_clone(),
subgraph_metrics.cheap_clone(),
from,
to,
filter,
)
}))
// Real limits on the number of parallel requests are imposed within the adapter.
.buffered(1000)
.try_concat()
.boxed()
}
pub(crate) fn calls_in_block_range<'a>(
&self,
logger: &Logger,
subgraph_metrics: Arc<SubgraphEthRpcMetrics>,
from: BlockNumber,
to: BlockNumber,
call_filter: &'a EthereumCallFilter,
) -> Box<dyn Stream<Item = EthereumCall, Error = Error> + Send + 'a> {
let eth = self.clone();
let addresses: Vec<H160> = call_filter
.contract_addresses_function_signatures
.iter()
.filter(|(_addr, (start_block, _fsigs))| start_block <= &to)
.map(|(addr, (_start_block, _fsigs))| *addr)
.collect::<HashSet<H160>>()
.into_iter()
.collect::<Vec<H160>>();
if addresses.is_empty() {
// The filter has no started data sources in the requested range, nothing to do.
// This prevents an expensive call to `trace_filter` with empty `addresses`.
return Box::new(stream::empty());
}
Box::new(
eth.trace_stream(&logger, subgraph_metrics, from, to, addresses)
.filter_map(|trace| EthereumCall::try_from_trace(&trace))
.filter(move |call| {
// `trace_filter` can only filter by calls `to` an address and
// a block range. Since subgraphs are subscribing to calls
// for a specific contract function an additional filter needs
// to be applied
call_filter.matches(&call)
}),
)
}
pub(crate) async fn calls_in_block(
&self,
logger: &Logger,
subgraph_metrics: Arc<SubgraphEthRpcMetrics>,
block_number: BlockNumber,
block_hash: H256,
) -> Result<Vec<EthereumCall>, Error> {
let eth = self.clone();
let addresses = Vec::new();
let traces = eth
.trace_stream(
&logger,
subgraph_metrics.clone(),
block_number,
block_number,
addresses,
)
.collect()
.compat()
.await?;
// `trace_stream` returns all of the traces for the block, and this
// includes a trace for the block reward which every block should have.
// If there are no traces something has gone wrong.
if traces.is_empty() {
return Err(anyhow!(
"Trace stream returned no traces for block: number = `{}`, hash = `{}`",
block_number,
block_hash,
));
}
// Since we can only pull traces by block number and we have
// all the traces for the block, we need to ensure that the
// block hash for the traces is equal to the desired block hash.
// Assume all traces are for the same block.
if traces.iter().nth(0).unwrap().block_hash != block_hash {
return Err(anyhow!(
"Trace stream returned traces for an unexpected block: \
number = `{}`, hash = `{}`",
block_number,
block_hash,
));
}
Ok(traces
.iter()
.filter_map(EthereumCall::try_from_trace)
.collect())
}
/// Reorg safety: `to` must be a final block.
pub(crate) fn block_range_to_ptrs(
&self,
logger: Logger,
from: BlockNumber,
to: BlockNumber,
) -> Box<dyn Future<Item = Vec<BlockPtr>, Error = Error> + Send> {
// Currently we can't go to the DB for this because there might be duplicate entries for
// the same block number.
debug!(&logger, "Requesting hashes for blocks [{}, {}]", from, to);
Box::new(
self.load_block_ptrs_rpc(logger, (from..=to).collect())
.collect(),
)
}
}
#[async_trait]
impl EthereumAdapterTrait for EthereumAdapter {
fn url_hostname(&self) -> &str {
&self.url_hostname
}
fn provider(&self) -> &str {
&self.provider
}
async fn net_identifiers(&self) -> Result<EthereumNetworkIdentifier, Error> {
let logger = self.logger.clone();
let web3 = self.web3.clone();
let net_version_future = retry("net_version RPC call", &logger)
.no_limit()
.timeout_secs(20)
.run(move || web3.net().version().from_err());
let web3 = self.web3.clone();
let gen_block_hash_future = retry("eth_getBlockByNumber(0, false) RPC call", &logger)
.no_limit()
.timeout_secs(30)
.run(move || {
web3.eth()
.block(BlockId::Number(Web3BlockNumber::Number(0.into())))
.from_err()
.and_then(|gen_block_opt| {
future::result(
gen_block_opt
.and_then(|gen_block| gen_block.hash)
.ok_or_else(|| {
anyhow!("Ethereum node could not find genesis block")
}),
)
})
});
net_version_future
.join(gen_block_hash_future)
.compat()
.await
.map(
|(net_version, genesis_block_hash)| EthereumNetworkIdentifier {
net_version,
genesis_block_hash,
},
)
.map_err(|e| {
e.into_inner().unwrap_or_else(|| {
anyhow!("Ethereum node took too long to read network identifiers")
})
})
}
fn latest_block_header(
&self,
logger: &Logger,
) -> Box<dyn Future<Item = web3::types::Block<H256>, Error = IngestorError> + Send> {
let web3 = self.web3.clone();
Box::new(
retry("eth_getBlockByNumber(latest) no txs RPC call", logger)
.no_limit()
.timeout_secs(*JSON_RPC_TIMEOUT)
.run(move || {
web3.eth()
.block(Web3BlockNumber::Latest.into())
.map_err(|e| anyhow!("could not get latest block from Ethereum: {}", e))
.from_err()
.and_then(|block_opt| {
block_opt.ok_or_else(|| {
anyhow!("no latest block returned from Ethereum").into()
})
})
})
.map_err(move |e| {
e.into_inner().unwrap_or_else(move || {
anyhow!("Ethereum node took too long to return latest block").into()
})
}),
)
}
fn latest_block(
&self,
logger: &Logger,
) -> Box<dyn Future<Item = LightEthereumBlock, Error = IngestorError> + Send + Unpin> {
let web3 = self.web3.clone();
Box::new(
retry("eth_getBlockByNumber(latest) with txs RPC call", logger)
.no_limit()
.timeout_secs(*JSON_RPC_TIMEOUT)
.run(move || {
web3.eth()
.block_with_txs(Web3BlockNumber::Latest.into())
.map_err(|e| anyhow!("could not get latest block from Ethereum: {}", e))
.from_err()
.and_then(|block_opt| {
block_opt.ok_or_else(|| {
anyhow!("no latest block returned from Ethereum").into()
})
})
})
.map_err(move |e| {
e.into_inner().unwrap_or_else(move || {
anyhow!("Ethereum node took too long to return latest block").into()
})
}),
)
}
fn load_block(
&self,
logger: &Logger,
block_hash: H256,
) -> Box<dyn Future<Item = LightEthereumBlock, Error = Error> + Send> {
Box::new(
self.block_by_hash(&logger, block_hash)
.and_then(move |block_opt| {
block_opt.ok_or_else(move || {
anyhow!(
"Ethereum node could not find block with hash {}",
block_hash
)
})
}),
)
}
fn block_by_hash(
&self,
logger: &Logger,
block_hash: H256,
) -> Box<dyn Future<Item = Option<LightEthereumBlock>, Error = Error> + Send> {
let web3 = self.web3.clone();
let logger = logger.clone();
Box::new(
retry("eth_getBlockByHash RPC call", &logger)
.limit(*REQUEST_RETRIES)
.timeout_secs(*JSON_RPC_TIMEOUT)
.run(move || {
web3.eth()
.block_with_txs(BlockId::Hash(block_hash))
.from_err()
})
.map_err(move |e| {
e.into_inner().unwrap_or_else(move || {
anyhow!("Ethereum node took too long to return block {}", block_hash)
})
}),
)
}
fn block_by_number(
&self,
logger: &Logger,
block_number: BlockNumber,
) -> Box<dyn Future<Item = Option<LightEthereumBlock>, Error = Error> + Send> {
let web3 = self.web3.clone();
let logger = logger.clone();
Box::new(
retry("eth_getBlockByNumber RPC call", &logger)
.no_limit()
.timeout_secs(*JSON_RPC_TIMEOUT)
.run(move || {
web3.eth()
.block_with_txs(BlockId::Number(block_number.into()))
.from_err()
})
.map_err(move |e| {
e.into_inner().unwrap_or_else(move || {
anyhow!(
"Ethereum node took too long to return block {}",
block_number
)
})
}),
)
}
fn load_full_block(
&self,
logger: &Logger,
block: LightEthereumBlock,
) -> Box<dyn Future<Item = EthereumBlock, Error = IngestorError> + Send> {
let logger = logger.clone();
let block_hash = block.hash.expect("block is missing block hash");
// The early return is necessary for correctness, otherwise we'll
// request an empty batch which is not valid in JSON-RPC.
if block.transactions.is_empty() {
trace!(logger, "Block {} contains no transactions", block_hash);
return Box::new(future::ok(EthereumBlock {
block: Arc::new(block),
transaction_receipts: Vec::new(),
}));
}
let web3 = self.web3.clone();
// Retry, but eventually give up.
// A receipt might be missing because the block was uncled, and the
// transaction never made it back into the main chain.
Box::new(
retry("batch eth_getTransactionReceipt RPC call", &logger)
.limit(16)
.no_logging()
.timeout_secs(*JSON_RPC_TIMEOUT)
.run(move || {
let block = block.clone();
let batching_web3 = Web3::new(Batch::new(web3.transport().clone()));
let receipt_futures = block
.transactions
.iter()
.map(|tx| {
let logger = logger.clone();
let tx_hash = tx.hash;
batching_web3
.eth()
.transaction_receipt(tx_hash)
.from_err()
.map_err(IngestorError::Unknown)
.and_then(move |receipt_opt| {
receipt_opt.ok_or_else(move || {
// No receipt was returned.
//
// This can be because the Ethereum node no longer
// considers this block to be part of the main chain,
// and so the transaction is no longer in the main
// chain. Nothing we can do from here except give up
// trying to ingest this block.
//
// This could also be because the receipt is simply not
// available yet. For that case, we should retry until
// it becomes available.
IngestorError::BlockUnavailable(block_hash)
})
})
.and_then(move |receipt| {
// Parity nodes seem to return receipts with no block hash
// when a transaction is no longer in the main chain, so
// treat that case the same as a receipt being absent
// entirely.
let receipt_block_hash =
receipt.block_hash.ok_or_else(|| {
IngestorError::BlockUnavailable(block_hash)
})?;
// Check if receipt is for the right block
if receipt_block_hash != block_hash {
trace!(
logger, "receipt block mismatch";
"receipt_block_hash" =>
receipt_block_hash.to_string(),
"block_hash" =>
block_hash.to_string(),
"tx_hash" => tx_hash.to_string(),
);
// If the receipt came from a different block, then the
// Ethereum node no longer considers this block to be
// in the main chain. Nothing we can do from here
// except give up trying to ingest this block.
// There is no way to get the transaction receipt from
// this block.
Err(IngestorError::BlockUnavailable(block_hash))
} else {
Ok(receipt)
}
})
})
.collect::<Vec<_>>();
batching_web3
.transport()
.submit_batch()
.from_err()
.map_err(IngestorError::Unknown)
.and_then(move |_| {
stream::futures_ordered(receipt_futures).collect().map(
move |transaction_receipts| EthereumBlock {
block: Arc::new(block),
transaction_receipts,
},
)
})
})
.map_err(move |e| {
e.into_inner().unwrap_or_else(move || {
anyhow!(
"Ethereum node took too long to return receipts for block {}",
block_hash
)
.into()
})
}),
)
}
fn block_pointer_from_number(
&self,
logger: &Logger,
chain_store: Arc<dyn ChainStore>,
block_number: BlockNumber,
) -> Box<dyn Future<Item = BlockPtr, Error = IngestorError> + Send> {
Box::new(
// When this method is called (from the subgraph registrar), we don't
// yet know whether the block with the given number is final, so it is
// safer to assume that it is not.
self.block_hash_by_block_number(logger, chain_store.clone(), block_number, false)
.and_then(move |block_hash_opt| {
block_hash_opt.ok_or_else(|| {
anyhow!(
"Ethereum node could not find start block hash by block number {}",
&block_number
)
})
})
.from_err()
.map(move |block_hash| BlockPtr::from((block_hash, block_number))),
)
}
fn block_hash_by_block_number(
&self,
logger: &Logger,
chain_store: Arc<dyn ChainStore>,
block_number: BlockNumber,
block_is_final: bool,
) -> Box<dyn Future<Item = Option<H256>, Error = Error> + Send> {
let web3 = self.web3.clone();
let mut hashes = match chain_store.block_hashes_by_block_number(block_number) {
Ok(hashes) => hashes,
Err(e) => return Box::new(future::result(Err(e))),
};
let num_hashes = hashes.len();
let logger1 = logger.clone();
let confirm_block_hash = move |hash: &Option<H256>| {
// If there was more than one hash, now that we know what the
// 'right' one is, get rid of all the others
if let Some(hash) = hash {
if block_is_final && num_hashes > 1 {
chain_store
.confirm_block_hash(block_number, hash)
.map(|_| ())
.unwrap_or_else(|e| {
warn!(
logger1,
"Failed to remove {} ommers for block number {} \
(hash `0x{:x}`): {}",
num_hashes - 1,
block_number,
hash,
e
);
});
}
} else {
warn!(
logger1,
"Failed to fetch block hash for block number";
"number" => block_number
);
}
};
if hashes.len() == 1 {
Box::new(future::result(Ok(hashes.pop())))
} else {
Box::new(
retry("eth_getBlockByNumber RPC call", &logger)
.no_limit()
.timeout_secs(*JSON_RPC_TIMEOUT)
.run(move || {
web3.eth()
.block(BlockId::Number(block_number.into()))
.from_err()
.map(|block_opt| block_opt.map(|block| block.hash).flatten())
})
.inspect(confirm_block_hash)
.map_err(move |e| {
e.into_inner().unwrap_or_else(move || {
anyhow!(
"Ethereum node took too long to return data for block #{}",
block_number
)
})
}),
)
}
}
fn uncles(
&self,
logger: &Logger,
block: &LightEthereumBlock,
) -> Box<dyn Future<Item = Vec<Option<Block<H256>>>, Error = Error> + Send> {
let block_hash = match block.hash {
Some(hash) => hash,
None => {
return Box::new(future::result(Err(anyhow!(
"could not get uncle for block '{}' because block has null hash",
block
.number
.map(|num| num.to_string())
.unwrap_or(String::from("null"))
))))
}
};
let n = block.uncles.len();
Box::new(
futures::stream::futures_ordered((0..n).map(move |index| {
let web3 = self.web3.clone();
retry("eth_getUncleByBlockHashAndIndex RPC call", &logger)
.no_limit()
.timeout_secs(60)
.run(move || {
web3.eth()
.uncle(block_hash.clone().into(), index.into())
.map_err(move |e| {
anyhow!(
"could not get uncle {} for block {:?} ({} uncles): {}",
index,
block_hash,
n,
e
)
})
})
.map_err(move |e| {
e.into_inner().unwrap_or_else(move || {
anyhow!("Ethereum node took too long to return uncle")
})
})
}))
.collect(),
)
}
fn contract_call(
&self,
logger: &Logger,
call: EthereumContractCall,
cache: Arc<dyn EthereumCallCache>,
) -> Box<dyn Future<Item = Vec<Token>, Error = EthereumContractCallError> + Send> {
// Emit custom error for type mismatches.
for (token, kind) in call
.args
.iter()
.zip(call.function.inputs.iter().map(|p| &p.kind))
{
if !token.type_check(kind) {
return Box::new(future::err(EthereumContractCallError::TypeError(
token.clone(),
kind.clone(),
)));
}
}
// Encode the call parameters according to the ABI
let call_data = match call.function.encode_input(&call.args) {
Ok(data) => data,
Err(e) => return Box::new(future::err(EthereumContractCallError::EncodingError(e))),
};
trace!(logger, "eth_call";
"address" => hex::encode(&call.address),
"data" => hex::encode(&call_data)
);
// Check if we have it cached; if not, do the call and cache the result.
Box::new(
match cache
.get_call(call.address, &call_data, call.block_ptr.clone())
.map_err(|e| error!(logger, "call cache get error"; "error" => e.to_string()))
.ok()
.flatten()
{
Some(result) => {
Box::new(future::ok(result)) as Box<dyn Future<Item = _, Error = _> + Send>
}
None => {
let cache = cache.clone();
let call = call.clone();
let logger = logger.clone();
Box::new(
self.call(
logger.clone(),
call.address,
Bytes(call_data.clone()),
call.block_ptr.clone(),
)
.map(move |result| {
// Don't block handler execution on writing to the cache.
let for_cache = result.0.clone();
let _ = graph::spawn_blocking_allow_panic(move || {
cache
.set_call(call.address, &call_data, call.block_ptr, &for_cache)
.map_err(|e| {
error!(logger, "call cache set error";
"error" => e.to_string())
})
});
result.0
}),
)
}
}
// Decode the return values according to the ABI
.and_then(move |output| {
if output.is_empty() {
// We got a `0x` response. For old Geth, this can mean a revert. It can also be
// that the contract actually returned an empty response. A view call is meant
// to return something, so we treat empty responses the same as reverts.
Err(EthereumContractCallError::Revert("empty response".into()))
} else {
// Decode failures are reverts. The reasoning is that if Solidity fails to
// decode an argument, that's a revert, so the same goes for the output.
call.function.decode_output(&output).map_err(|e| {
EthereumContractCallError::Revert(format!("failed to decode output: {}", e))
})
}
}),
)
}
/// Load Ethereum blocks in bulk, returning results as they come back as a Stream.
fn load_blocks(
&self,
logger: Logger,
chain_store: Arc<dyn ChainStore>,
block_hashes: HashSet<H256>,
) -> Box<dyn Stream<Item = LightEthereumBlock, Error = Error> + Send> {
// Search for the block in the store first then use json-rpc as a backup.
let mut blocks = chain_store
.blocks(block_hashes.iter().cloned().collect())
.map_err(|e| error!(&logger, "Error accessing block cache {}", e))
.unwrap_or_default();
let missing_blocks = Vec::from_iter(
block_hashes
.into_iter()
.filter(|hash| !blocks.iter().any(|b| b.hash == Some(*hash))),
);
// Return a stream that lazily loads batches of blocks.
debug!(logger, "Requesting {} block(s)", missing_blocks.len());
Box::new(
self.load_blocks_rpc(logger.clone(), missing_blocks.into_iter().collect())
.collect()
.map(move |new_blocks| {
if let Err(e) = chain_store.upsert_light_blocks(new_blocks.clone()) {
error!(logger, "Error writing to block cache {}", e);
}
blocks.extend(new_blocks);
blocks.sort_by_key(|block| block.number);
stream::iter_ok(blocks)
})
.flatten_stream(),
)
}
}
/// Returns blocks with triggers, corresponding to the specified range and filters.
/// If a block contains no triggers, there may be no corresponding item in the stream.
/// However the `to` block will always be present, even if triggers are empty.
///
/// Careful: don't use this function without considering race conditions.
/// Chain reorgs could happen at any time, and could affect the answer received.
/// Generally, it is only safe to use this function with blocks that have received enough
/// confirmations to guarantee no further reorgs, **and** where the Ethereum node is aware of
/// those confirmations.
/// If the Ethereum node is far behind in processing blocks, even old blocks can be subject to
/// reorgs.
/// It is recommended that `to` be far behind the block number of the latest block the Ethereum
/// node is aware of.
pub(crate) async fn blocks_with_triggers(
adapter: Arc<EthereumAdapter>,
logger: Logger,
chain_store: Arc<dyn ChainStore>,
subgraph_metrics: Arc<SubgraphEthRpcMetrics>,
from: BlockNumber,
to: BlockNumber,
filter: &TriggerFilter,
) -> Result<Vec<BlockWithTriggers<crate::Chain>>, Error> {
// Each trigger filter needs to be queried for the same block range
// and the blocks yielded need to be deduped. If any error occurs
// while searching for a trigger type, the entire operation fails.
let eth = adapter.clone();
let call_filter = EthereumCallFilter::from(filter.block.clone());
let mut trigger_futs: futures::stream::FuturesUnordered<
Box<dyn Future<Item = Vec<EthereumTrigger>, Error = Error> + Send>,
> = futures::stream::FuturesUnordered::new();
// Scan the block range from triggers to find relevant blocks
if !filter.log.is_empty() {
trigger_futs.push(Box::new(
eth.logs_in_block_range(
&logger,
subgraph_metrics.clone(),
from,
to,
filter.log.clone(),
)
.map_ok(|logs: Vec<Log>| {
logs.into_iter()
.map(Arc::new)
.map(EthereumTrigger::Log)
.collect()
})
.compat(),
))
}
if !filter.call.is_empty() {
trigger_futs.push(Box::new(
eth.calls_in_block_range(&logger, subgraph_metrics.clone(), from, to, &filter.call)
.map(Arc::new)
.map(EthereumTrigger::Call)
.collect(),
));
}
if filter.block.trigger_every_block {
trigger_futs.push(Box::new(
adapter
.block_range_to_ptrs(logger.clone(), from, to)
.map(move |ptrs| {
ptrs.into_iter()
.map(|ptr| EthereumTrigger::Block(ptr, EthereumBlockTriggerType::Every))
.collect()
}),
))
} else if !filter.block.contract_addresses.is_empty() {
// To determine which blocks include a call to addresses
// in the block filter, transform the block filter into
// a `call_filter` and run `calls_in_block_range`.
trigger_futs.push(Box::new(
eth.calls_in_block_range(&logger, subgraph_metrics.clone(), from, to, &call_filter)
.map(|call| {
EthereumTrigger::Block(
BlockPtr::from(&call),
EthereumBlockTriggerType::WithCallTo(call.to),
)
})
.collect(),
));
}
let logger1 = logger.cheap_clone();
let logger2 = logger.cheap_clone();
let eth_clone = eth.cheap_clone();
let (triggers, to_hash) = trigger_futs
.concat2()
.join(
adapter
.clone()
.block_hash_by_block_number(&logger, chain_store.clone(), to, true)
.then(move |to_hash| match to_hash {
Ok(n) => n.ok_or_else(|| {
warn!(logger2,
"Ethereum endpoint is behind";
"url" => eth_clone.url_hostname()
);
anyhow!("Block {} not found in the chain", to)
}),
Err(e) => Err(e),
}),
)
.compat()
.await?;
let mut block_hashes: HashSet<H256> =
triggers.iter().map(EthereumTrigger::block_hash).collect();
let mut triggers_by_block: HashMap<BlockNumber, Vec<EthereumTrigger>> =
triggers.into_iter().fold(HashMap::new(), |mut map, t| {
map.entry(t.block_number()).or_default().push(t);
map
});
debug!(logger, "Found {} relevant block(s)", block_hashes.len());
// Make sure `to` is included, even if empty.
block_hashes.insert(to_hash);
triggers_by_block.entry(to).or_insert(Vec::new());
let mut blocks = adapter
.load_blocks(logger1, chain_store, block_hashes)
.and_then(
move |block| match triggers_by_block.remove(&(block.number() as BlockNumber)) {
Some(triggers) => Ok(BlockWithTriggers::new(
BlockFinality::Final(Arc::new(block)),
triggers,
)),
None => Err(anyhow!(
"block {:?} not found in `triggers_by_block`",
block
)),
},
)
.collect()
.compat()
.await?;
blocks.sort_by_key(|block| block.ptr().number);
// Sanity check that the returned blocks are in the correct range.
// Unwrap: `blocks` always includes at least `to`.
let first = blocks.first().unwrap().ptr().number;
let last = blocks.last().unwrap().ptr().number;
if first < from {
return Err(anyhow!(
"block {} returned by the Ethereum node is before {}, the first block of the requested range",
first,
from,
));
}
if last > to {
return Err(anyhow!(
"block {} returned by the Ethereum node is after {}, the last block of the requested range",
last,
to,
));
}
Ok(blocks)
}
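// A minimal sketch of choosing a reorg-safe `to`, per the doc comment on `blocks_with_triggers`
// (hypothetical; `range_size`, `chain_head_number` and `REORG_THRESHOLD` are assumed inputs):
//
//     let to = std::cmp::min(from + range_size, chain_head_number - REORG_THRESHOLD);
//     let blocks = blocks_with_triggers(
//         adapter.clone(), logger.clone(), chain_store.clone(), metrics.clone(), from, to, &filter,
//     )
//     .await?;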
pub(crate) async fn get_calls(
adapter: &EthereumAdapter,
logger: Logger,
subgraph_metrics: Arc<SubgraphEthRpcMetrics>,
requires_traces: bool,
block: BlockFinality,
) -> Result<BlockFinality, Error> {
// For final blocks, or nonfinal blocks where we already checked
// (`calls.is_some()`), do nothing; if we haven't checked for calls, do
// that now
match block {
BlockFinality::Final(_)
| BlockFinality::NonFinal(EthereumBlockWithCalls {
ethereum_block: _,
calls: Some(_),
}) => Ok(block),
BlockFinality::NonFinal(EthereumBlockWithCalls {
ethereum_block,
calls: None,
}) => {
let calls = if !requires_traces || ethereum_block.transaction_receipts.is_empty() {
vec![]
} else {
adapter
.calls_in_block(
&logger,
subgraph_metrics.clone(),
BlockNumber::try_from(ethereum_block.block.number.unwrap().as_u64())
.unwrap(),
ethereum_block.block.hash.unwrap(),
)
.await?
};
Ok(BlockFinality::NonFinal(EthereumBlockWithCalls {
ethereum_block,
calls: Some(calls),
}))
}
}
}
pub(crate) fn parse_log_triggers(
log_filter: &EthereumLogFilter,
block: &EthereumBlock,
) -> Vec<EthereumTrigger> {
block
.transaction_receipts
.iter()
.flat_map(move |receipt| {
receipt
.logs
.iter()
.filter(move |log| log_filter.matches(log))
.map(move |log| EthereumTrigger::Log(Arc::new(log.clone())))
})
.collect()
}
pub(crate) fn parse_call_triggers(
call_filter: &EthereumCallFilter,
block: &EthereumBlockWithCalls,
) -> Vec<EthereumTrigger> {
match &block.calls {
Some(calls) => calls
.iter()
.filter(move |call| call_filter.matches(call))
.map(move |call| EthereumTrigger::Call(Arc::new(call.clone())))
.collect(),
None => vec![],
}
}
pub(crate) fn parse_block_triggers(
block_filter: EthereumBlockFilter,
block: &EthereumBlockWithCalls,
) -> Vec<EthereumTrigger> {
let block_ptr = BlockPtr::from(&block.ethereum_block);
let trigger_every_block = block_filter.trigger_every_block;
let call_filter = EthereumCallFilter::from(block_filter);
let block_ptr2 = block_ptr.cheap_clone();
let mut triggers = match &block.calls {
Some(calls) => calls
.iter()
.filter(move |call| call_filter.matches(call))
.map(move |call| {
EthereumTrigger::Block(
block_ptr2.clone(),
EthereumBlockTriggerType::WithCallTo(call.to),
)
})
.collect::<Vec<EthereumTrigger>>(),
None => vec![],
};
if trigger_every_block {
triggers.push(EthereumTrigger::Block(
block_ptr,
EthereumBlockTriggerType::Every,
));
}
triggers
}
|
.expect("invalid GRAPH_ETHEREUM_REQUEST_RETRIES env var");
|
movement.go
|
package simulation
import (
"math"
)
// Velocity represents the direction and speed of an entity
type Velocity struct {
direction float64
speed float64
}
// NextPos finds the next theoretical (non-rounded) position
func NextPos(x float64, y float64, vel Velocity) (nextX float64, nextY float64) {
|
// FloatPosToGridPos rounds a float position to a discrete position
func FloatPosToGridPos(x float64, y float64) (gridX int, gridY int) {
return int(math.Round(x)), int(math.Round(y))
}
// Radians converts a degree angle to a radian angle
func Radians(angle float64) (radianAngle float64) {
return angle * math.Pi / 180
}
// LinearVelocity calculates linear velocity from speed * direction
func LinearVelocity(vel Velocity) (xVel float64, yVel float64) {
xVel = math.Cos(Radians(vel.direction))
yVel = math.Sin(Radians(vel.direction))
return vel.speed * xVel, vel.speed * yVel
}
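// Worked example (illustrative only): for Velocity{direction: 90, speed: 2},
// xVel = 2*cos(π/2) ≈ 0 and yVel = 2*sin(π/2) = 2, so NextPos advances the
// entity roughly two units in +y per step.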
|
xVel, yVel := LinearVelocity(vel)
return x + xVel, y + yVel
}
|
class_member.rs
|
use classfile::member_info::MemberInfo;
use rtda::heap::access_flags::*;
#[derive(Debug)]
pub struct ClassMember {
pub access_flags: u16,
pub name: String,
pub descriptor: String,
}
impl ClassMember {
pub fn new(member_info: &MemberInfo) -> ClassMember {
let MemberInfo {
access_flags,
name,
descriptor,
..
} = member_info;
ClassMember {
access_flags: *access_flags,
name: name.to_string(),
|
}
pub fn is_static(&self) -> bool {
self.access_flags & ACC_STATIC != 0
}
pub fn is_final(&self) -> bool {
self.access_flags & ACC_FINAL != 0
}
}
|
descriptor: descriptor.to_string(),
}
|
constructor-self-receiver-04.rs
|
use ink_lang as ink;
#[ink::contract]
mod contract {
#[ink(storage)]
pub struct Contract {}
impl Contract {
#[ink(constructor)]
pub fn
|
(self) -> Self {
Self {}
}
#[ink(message)]
pub fn message(&self) {}
}
}
fn main() {}
|
constructor
|
axis.rs
|
use ilattice::glam::{IVec3, UVec3};
/// Either the X, Y, or Z axis.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[repr(u8)]
pub enum Axis {
X = 0,
Y = 1,
Z = 2,
}
impl Axis {
/// The index for a point's component on this axis.
#[inline]
pub fn index(&self) -> usize {
*self as usize
}
#[inline]
pub const fn get_unit_vector(&self) -> UVec3 {
match self {
Axis::X => UVec3::X,
Axis::Y => UVec3::Y,
Axis::Z => UVec3::Z,
}
}
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum AxisPermutation {
// Even permutations
Xyz,
Zxy,
Yzx,
|
}
impl AxisPermutation {
#[inline]
pub const fn even_with_normal_axis(axis: Axis) -> Self {
match axis {
Axis::X => AxisPermutation::Xyz,
Axis::Y => AxisPermutation::Yzx,
Axis::Z => AxisPermutation::Zxy,
}
}
#[inline]
pub const fn odd_with_normal_axis(axis: Axis) -> Self {
match axis {
Axis::X => AxisPermutation::Xzy,
Axis::Y => AxisPermutation::Yxz,
Axis::Z => AxisPermutation::Zyx,
}
}
#[inline]
pub const fn sign(&self) -> i32 {
match self {
AxisPermutation::Xyz => 1,
AxisPermutation::Zxy => 1,
AxisPermutation::Yzx => 1,
AxisPermutation::Zyx => -1,
AxisPermutation::Xzy => -1,
AxisPermutation::Yxz => -1,
}
}
#[inline]
pub const fn axes(&self) -> [Axis; 3] {
match self {
AxisPermutation::Xyz => [Axis::X, Axis::Y, Axis::Z],
AxisPermutation::Zxy => [Axis::Z, Axis::X, Axis::Y],
AxisPermutation::Yzx => [Axis::Y, Axis::Z, Axis::X],
AxisPermutation::Zyx => [Axis::Z, Axis::Y, Axis::X],
AxisPermutation::Xzy => [Axis::X, Axis::Z, Axis::Y],
AxisPermutation::Yxz => [Axis::Y, Axis::X, Axis::Z],
}
}
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[repr(u8)]
pub enum SignedAxis {
NegX = 0,
PosX = 1,
NegY = 2,
PosY = 3,
NegZ = 4,
PosZ = 5,
}
impl SignedAxis {
#[inline]
pub fn new(sign: i32, axis: Axis) -> Self {
assert!(sign != 0);
match (sign > 0, axis) {
(false, Axis::X) => Self::NegX,
(false, Axis::Y) => Self::NegY,
(false, Axis::Z) => Self::NegZ,
(true, Axis::X) => Self::PosX,
(true, Axis::Y) => Self::PosY,
(true, Axis::Z) => Self::PosZ,
}
}
#[inline]
pub fn unsigned_axis(&self) -> Axis {
match self {
Self::NegX => Axis::X,
Self::NegY => Axis::Y,
Self::NegZ => Axis::Z,
Self::PosX => Axis::X,
Self::PosY => Axis::Y,
Self::PosZ => Axis::Z,
}
}
#[inline]
pub fn signum(&self) -> i32 {
match self {
Self::NegX => -1,
Self::NegY => -1,
Self::NegZ => -1,
Self::PosX => 1,
Self::PosY => 1,
Self::PosZ => 1,
}
}
#[inline]
pub fn get_unit_vector(&self) -> IVec3 {
match self {
Self::NegX => -IVec3::X,
Self::NegY => -IVec3::Y,
Self::NegZ => -IVec3::Z,
Self::PosX => IVec3::X,
Self::PosY => IVec3::Y,
Self::PosZ => IVec3::Z,
}
}
#[inline]
pub fn from_vector(v: IVec3) -> Option<Self> {
match v.to_array() {
[x, 0, 0] => Some(SignedAxis::new(x, Axis::X)),
[0, y, 0] => Some(SignedAxis::new(y, Axis::Y)),
[0, 0, z] => Some(SignedAxis::new(z, Axis::Z)),
_ => None,
}
}
}
|
// Odd permutations
Zyx,
Xzy,
Yxz,
|
phases.py
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
IDAES Phase objects
Created on Tue Feb 18 10:54:52 2020
@author: alee
"""
from enum import Enum
from pyomo.environ import Set
from pyomo.common.config import ConfigBlock, ConfigValue
from idaes.core.base.process_base import declare_process_block_class, ProcessBlockData
# Enumerate recognised Phase types
class PhaseType(Enum):
undefined = 0
liquidPhase = 1
vaporPhase = 2
solidPhase = 3
aqueousPhase = 4
# TODO: Document EoS options and parameter_Data
@declare_process_block_class("Phase")
class PhaseData(ProcessBlockData):
CONFIG = ConfigBlock()
CONFIG.declare(
"component_list",
ConfigValue(
default=None,
domain=list,
description="List of components in phase",
doc="List of components which are present in phase. This is used "
"to construct the phase-component Set for the property package.",
),
)
CONFIG.declare(
"equation_of_state",
ConfigValue(
default=None,
description="Equation of state for phase",
doc="""A valid Python class with the necessary methods for
constructing the desired equation of state (or similar
model).""",
),
)
CONFIG.declare(
"equation_of_state_options",
ConfigValue(
default=None,
description="Options for equation of state",
doc="""A dict or ConfigBlock of options to be used when setting
up equation of state for phase.""",
),
)
CONFIG.declare(
"parameter_data",
ConfigValue(
default={},
domain=dict,
description="Dict containing initialization data for parameters",
),
)
CONFIG.declare(
"_phase_list_exists",
ConfigValue(
default=False,
doc="Internal config argument indicating whether phase_list "
"needs to be populated.",
),
)
CONFIG.declare(
"therm_cond_phase",
ConfigValue(description="Method to calculate thermal conductivity phase"),
)
CONFIG.declare(
"surf_tens_phase",
ConfigValue(description="Method to calculate surface tension of phase"),
)
CONFIG.declare(
"visc_d_phase",
ConfigValue(description="Method to calculate dynamic viscosity of phase"),
)
def build(self):
super(PhaseData, self).build()
# If the phase_list does not exist, add a reference to the new Phase
# The IF is mostly for backwards compatibility, to allow for old-style
# property packages where the phase_list already exists but we need to
# add new Phase objects
if not self.config._phase_list_exists:
self.__add_to_phase_list()
# For the base Phase class, determine phase type based on the phase object's name
# Derived classes will overload these and return the correct type
# This will handle backwards compatibility for old-style property packages
def is_liquid_phase(self):
if "Liq" in self.name:
return True
else:
return False
def is_solid_phase(self):
if "Sol" in self.name:
return True
else:
return False
def is_vapor_phase(self):
if "Vap" in self.name:
return True
else:
return False
def is_aqueous_phase(self):
# Returns bool indicating if this phase involves electrolytes
return False
def __add_to_phase_list(self):
"""
Method to add reference to new Phase in phase_list
"""
parent = self.parent_block()
try:
phase_list = getattr(parent, "phase_list")
phase_list.add(self.local_name)
except AttributeError:
# Parent does not have a phase_list yet, so create one
parent.phase_list = Set(initialize=[self.local_name], ordered=True)
@declare_process_block_class("LiquidPhase", block_class=Phase)
class LiquidPhaseData(PhaseData):
def is_liquid_phase(self):
return True
def is_solid_phase(self):
return False
def is_vapor_phase(self):
return False
@declare_process_block_class("SolidPhase", block_class=Phase)
class SolidPhaseData(PhaseData):
def is_liquid_phase(self):
return False
def is_solid_phase(self):
return True
def is_vapor_phase(self):
return False
@declare_process_block_class("VaporPhase", block_class=Phase)
|
def is_solid_phase(self):
return False
def is_vapor_phase(self):
return True
@declare_process_block_class("AqueousPhase", block_class=LiquidPhase)
class AqueousPhaseData(LiquidPhaseData):
# Special phase type for liquid phases involving electrolytes
# This is used to determine if we need to do the more complex component
# list determinations
def is_aqueous_phase(self):
return True
# List of all Phase types to use for validation
__all_phases__ = [Phase, LiquidPhase, SolidPhase, VaporPhase, AqueousPhase]
|
class VaporPhaseData(PhaseData):
def is_liquid_phase(self):
return False
|
icon.vector-js.min.js
|
(window.webpackJsonp=window.webpackJsonp||[]).push([[370],{4412:function(t,e,n){"use strict";n.r(e),n.d(e,"icon",(function(){return c}));n(12),n(2),n(4),n(8),n(3),n(10);var r=n(0),o=n.n(r);function i(){return(i=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var n=arguments[e];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(t[r]=n[r])}return t}).apply(this,arguments)}function
|
(t,e){if(null==t)return{};var n,r,o=function(t,e){if(null==t)return{};var n,r,o={},i=Object.keys(t);for(r=0;r<i.length;r++)n=i[r],e.indexOf(n)>=0||(o[n]=t[n]);return o}(t,e);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(t);for(r=0;r<i.length;r++)n=i[r],e.indexOf(n)>=0||Object.prototype.propertyIsEnumerable.call(t,n)&&(o[n]=t[n])}return o}var a=function(t){var e=t.title,n=t.titleId,r=l(t,["title","titleId"]);return o.a.createElement("svg",i({width:16,height:16,viewBox:"0 0 16 16",xmlns:"http://www.w3.org/2000/svg","aria-labelledby":n},r),e?o.a.createElement("title",{id:n},e):null,o.a.createElement("path",{d:"M12.5 11V5H11V3.5H5V5H3.5v6H5v1.5h6V11h1.5zm1 0H15v4h-4v-1.5H5V15H1v-4h1.5V5H1V1h4v1.5h6V1h4v4h-1.5v6zM4 4V2H2v2h2zm8 0h2V2h-2v2zM2 14h2v-2H2v2zm10 0h2v-2h-2v2z"}))},c=a;a.__docgenInfo={description:"",methods:[],displayName:"EuiIconVector"}}}]);
//# sourceMappingURL=icon.vector-js.min.js.map
|
l
|
term-details-routing.module.ts
|
import { NgModule } from '@angular/core';
import { Routes, RouterModule } from '@angular/router';
import { TermDetailsPage } from './term-details.page';
const routes: Routes = [
{
path: '',
component: TermDetailsPage
}
];
@NgModule({
imports: [RouterModule.forChild(routes)],
exports: [RouterModule],
})
export class
|
{}
|
TermDetailsPageRoutingModule
|
exif_tool.py
|
import subprocess
import json
import os
class ExifTool:
sentinel = "{ready}\n"
def __init__(self, executable="/usr/bin/exiftool"):
self.executable = executable
def __enter__(self):
self.process = subprocess.Popen(
[self.executable, "-stay_open", "True", "-@", "-"],
universal_newlines=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
return self
def
|
(self, exc_type, exc_value, traceback):
self.process.stdin.write("-stay_open\nFalse\n")
self.process.stdin.flush()
def execute(self, *args):
args = args + ("-execute\n",)
self.process.stdin.write(str.join("\n", args))
self.process.stdin.flush()
output = ""
fd = self.process.stdout.fileno()
while not output.endswith(self.sentinel):
output += os.read(fd, 4096).decode('utf-8')
return output[:-len(self.sentinel)]
def get_metadata(self, *filenames):
return json.loads(self.execute("-G", "-j", "-n", *filenames))
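# Example usage (sketch; "photo.jpg" is a hypothetical path and exiftool must be installed
# at the configured executable):
#   with ExifTool() as et:
#       metadata = et.get_metadata("photo.jpg")
#       print(metadata[0].get("File:FileName"))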
|
__exit__
|
models.py
|
"""SQL alchemy models for tweettweet"""
from flask_sqlalchemy import SQLAlchemy
DB = SQLAlchemy()
class Tweeter(DB.Model):
"""Twitter users that we pull and analyze tweets for"""
id = DB.Column(DB.BigInteger, primary_key=True)
handle = DB.Column(DB.String(15), nullable=False)
newest_tweet_id = DB.Column(DB.BigInteger)
def
|
(self):
return "<Tweeter {}>".format(self.handle)
class Tweet(DB.Model):
"""Tweets tweeted from the tweeters"""
id = DB.Column(DB.BigInteger, primary_key=True)
text = DB.Column(DB.Unicode(300))
embedding = DB.Column(DB.PickleType, nullable=False)
tweeter_id = DB.Column(DB.BigInteger, DB.ForeignKey('tweeter.id'), nullable=False)
tweeter = DB.relationship('Tweeter', backref=DB.backref('tweets', lazy=True))
def __repr__(self):
return "<Tweet {}>".format(self.text)
|
__repr__
|
basic_SPN_tests.py
|
import basic_SPN as cipher
pbox = {0:0, 1:4, 2:8, 3:12, 4:1, 5:5, 6:9, 7:13, 8:2, 9:6, 10:10, 11:14, 12:3, 13:7, 14:11, 15:15}
# test pbox functionality/symmetry
def
|
(statem: list, pbox: dict):
staten = [0]*len(pbox)
for tpi, tp in enumerate(statem):
staten[pbox[tpi]] = tp
#print (staten)
return staten
testpBoxm = ['a','b','c','d', 'e','f','g','h', 'i','j','k','l', 'm','n','o','p']
testpBoxn = testPBox(testpBoxm, pbox)
testpBoxo = testPBox(testpBoxn, pbox)
if testpBoxm != testpBoxo:
print('FAIL: pbox inverse failed')
else:
print('PASS: pbox inverse functional')
# test that encryption and decryption are symmetric operations
def testEncDecSymmetry(n):
k = cipher.keyGeneration()
cts = [cipher.encrypt(pt, k) for pt in range(0, n)]
failed = False
for pt, ct in enumerate(cts):
if pt != cipher.decrypt(ct, k):
failed = True
print('FAIL: cipher encrypt-decrypt failed for {:04x}:{:04x}:{:04x}'.format(pt, ct, cipher.decrypt(ct, k)))
if not failed:
print('PASS: cipher encrypt-decrypt symmetry')
testEncDecSymmetry(100)
|
testPBox
|
Markov Reversal.py
|
from numpy import *
from numpy.linalg import *
def reverse(prev_t, prev_pop, next_pop):
|
prev_t = matrix([[0.4, 0.2, 0.4],
[0.1, 0, 0.9],
[0,0,1]]).T
prev_pop = matrix([10,203,12.3]).T
next_pop = prev_t*prev_pop
print(next_pop)
print(sum(next_pop), sum(prev_pop))
print(inv(prev_t), "\n\n", inv(prev_t)*next_pop)
rev_t = reverse(prev_t, prev_pop, next_pop)
print(rev_t, "\n\n", sum(rev_t, 0))
print(rev_t*next_pop)
print(prev_pop)
|
length = len(prev_t)
top = prev_t*(multiply(prev_pop, eye(length)))
bot = multiply(next_pop, ones((length,length)))
return (top/bot).T
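# Reading of the reversal above (a note on the underlying identity, not extra functionality):
# with prev_t[i, j] = P(next = i | prev = j), Bayes' rule gives
#   P(prev = j | next = i) = prev_t[i, j] * prev_pop[j] / next_pop[i]
# `top` holds prev_t[i, j] * prev_pop[j], `bot` fills row i with next_pop[i], and the final
# transpose yields a matrix that maps next_pop back onto prev_pop (as the test prints confirm).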
|
file_field.js
|
ShareDrop.App.FileField = Ember.TextField.extend({
type: 'file',
classNames: ['invisible'],
click: function (event) {
|
},
change: function (event) {
var input = event.target,
files = input.files,
file = files[0];
this.sendAction('action', { file: file });
},
// Hackish way to reset file input when sender cancels file transfer,
// so that if the sender later wants to send the same file again,
// the 'change' event is triggered correctly.
fileDidChange: function () {
if (!this.get('file')) {
var field = this.$();
field.wrap('<form>').closest('form').get(0).reset();
field.unwrap();
}
}.observes('file')
});
|
event.stopPropagation();
|
metrics.rs
|
// Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
use prometheus::*;
use prometheus_static_metric::*;
use crate::storage::ErrorHeaderKind;
use prometheus::exponential_buckets;
make_auto_flush_static_metric! {
pub label_enum GrpcTypeKind {
invalid,
kv_get,
kv_scan,
kv_prewrite,
kv_pessimistic_lock,
kv_pessimistic_rollback,
kv_commit,
kv_cleanup,
kv_batch_get,
kv_batch_get_command,
kv_batch_rollback,
kv_txn_heart_beat,
kv_check_txn_status,
kv_check_secondary_locks,
kv_scan_lock,
kv_resolve_lock,
kv_gc,
kv_delete_range,
raw_get,
raw_batch_get,
raw_batch_get_command,
raw_scan,
raw_batch_scan,
raw_put,
raw_batch_put,
raw_delete,
raw_delete_range,
raw_batch_delete,
ver_get,
ver_batch_get,
ver_mut,
ver_batch_mut,
ver_scan,
ver_delete_range,
unsafe_destroy_range,
physical_scan_lock,
register_lock_observer,
check_lock_observer,
remove_lock_observer,
coprocessor,
coprocessor_stream,
mvcc_get_by_key,
mvcc_get_by_start_ts,
split_region,
read_index,
}
pub label_enum GcCommandKind {
gc,
unsafe_destroy_range,
physical_scan_lock,
validate_config,
}
pub label_enum SnapTask {
send,
recv,
}
pub label_enum ResolveStore {
resolving,
resolve,
failed,
success,
tombstone,
}
pub label_enum GcKeysCF {
default,
lock,
write,
}
pub label_enum GcKeysDetail {
total,
processed,
get,
next,
prev,
seek,
seek_for_prev,
over_seek_bound,
}
pub struct GcCommandCounterVec: LocalIntCounter {
"type" => GcCommandKind,
}
pub struct SnapTaskCounterVec: LocalIntCounter {
"type" => SnapTask,
}
pub struct GcTaskCounterVec: LocalIntCounter {
"task" => GcCommandKind,
}
pub struct GcTaskFailCounterVec: LocalIntCounter {
"task" => GcCommandKind,
}
pub struct ResolveStoreCounterVec: LocalIntCounter {
"type" => ResolveStore
}
pub struct GrpcMsgFailCounterVec: LocalIntCounter {
"type" => GrpcTypeKind,
}
pub struct GcKeysCounterVec: LocalIntCounter {
"cf" => GcKeysCF,
"tag" => GcKeysDetail,
}
pub struct GrpcMsgHistogramVec: LocalHistogram {
"type" => GrpcTypeKind,
}
}
make_static_metric! {
pub label_enum GlobalGrpcTypeKind {
kv_get,
}
pub label_enum BatchableRequestKind {
point_get,
prewrite,
commit,
}
pub struct GrpcMsgHistogramGlobal: Histogram {
"type" => GlobalGrpcTypeKind,
}
pub struct RequestBatchSizeHistogramVec: Histogram {
"type" => BatchableRequestKind,
}
pub struct RequestBatchRatioHistogramVec: Histogram {
"type" => BatchableRequestKind,
}
}
lazy_static! {
pub static ref GC_COMMAND_COUNTER_VEC: IntCounterVec = register_int_counter_vec!(
"gc_command_total",
"Total number of GC commands received.",
&["type"]
)
.unwrap();
pub static ref SNAP_TASK_COUNTER: IntCounterVec = register_int_counter_vec!(
"tikv_server_snapshot_task_total",
"Total number of snapshot task",
&["type"]
)
.unwrap();
pub static ref GC_GCTASK_COUNTER_VEC: IntCounterVec = register_int_counter_vec!(
"tikv_gcworker_gc_tasks_vec",
"Counter of gc tasks processed by gc_worker",
&["task"]
)
.unwrap();
pub static ref GC_GCTASK_FAIL_COUNTER_VEC: IntCounterVec = register_int_counter_vec!(
"tikv_gcworker_gc_task_fail_vec",
"Counter of gc tasks that is failed",
&["task"]
)
.unwrap();
pub static ref RESOLVE_STORE_COUNTER: IntCounterVec = register_int_counter_vec!(
"tikv_server_resolve_store_total",
"Total number of resolving store",
&["type"]
)
.unwrap();
pub static ref GRPC_MSG_FAIL_COUNTER_VEC: IntCounterVec = register_int_counter_vec!(
"tikv_grpc_msg_fail_total",
"Total number of handle grpc message failure",
&["type"]
)
.unwrap();
pub static ref GC_KEYS_COUNTER_VEC: IntCounterVec = register_int_counter_vec!(
"tikv_gcworker_gc_keys",
"Counter of keys affected during gc",
&["cf", "tag"]
)
.unwrap();
pub static ref GRPC_MSG_HISTOGRAM_VEC: HistogramVec = register_histogram_vec!(
"tikv_grpc_msg_duration_seconds",
"Bucketed histogram of grpc server messages",
&["type"],
exponential_buckets(0.0005, 2.0, 20).unwrap()
)
.unwrap();
}
lazy_static! {
pub static ref GRPC_MSG_HISTOGRAM_STATIC: GrpcMsgHistogramVec =
auto_flush_from!(GRPC_MSG_HISTOGRAM_VEC, GrpcMsgHistogramVec);
pub static ref GRPC_MSG_HISTOGRAM_GLOBAL: GrpcMsgHistogramGlobal =
GrpcMsgHistogramGlobal::from(&GRPC_MSG_HISTOGRAM_VEC);
pub static ref GC_COMMAND_COUNTER_VEC_STATIC: GcCommandCounterVec =
auto_flush_from!(GC_COMMAND_COUNTER_VEC, GcCommandCounterVec);
pub static ref SNAP_TASK_COUNTER_STATIC: SnapTaskCounterVec =
auto_flush_from!(SNAP_TASK_COUNTER, SnapTaskCounterVec);
pub static ref GC_GCTASK_COUNTER_STATIC: GcTaskCounterVec =
auto_flush_from!(GC_GCTASK_COUNTER_VEC, GcTaskCounterVec);
pub static ref GC_GCTASK_FAIL_COUNTER_STATIC: GcTaskFailCounterVec =
auto_flush_from!(GC_GCTASK_FAIL_COUNTER_VEC, GcTaskFailCounterVec);
pub static ref RESOLVE_STORE_COUNTER_STATIC: ResolveStoreCounterVec =
auto_flush_from!(RESOLVE_STORE_COUNTER, ResolveStoreCounterVec);
pub static ref GRPC_MSG_FAIL_COUNTER: GrpcMsgFailCounterVec =
auto_flush_from!(GRPC_MSG_FAIL_COUNTER_VEC, GrpcMsgFailCounterVec);
pub static ref GC_KEYS_COUNTER_STATIC: GcKeysCounterVec =
auto_flush_from!(GC_KEYS_COUNTER_VEC, GcKeysCounterVec);
}
lazy_static! {
pub static ref SEND_SNAP_HISTOGRAM: Histogram = register_histogram!(
"tikv_server_send_snapshot_duration_seconds",
"Bucketed histogram of server send snapshots duration",
exponential_buckets(0.05, 2.0, 20).unwrap()
)
.unwrap();
pub static ref GRPC_REQ_BATCH_COMMANDS_SIZE: Histogram = register_histogram!(
"tikv_server_grpc_req_batch_size",
"grpc batch size of gRPC requests",
exponential_buckets(1f64, 2f64, 10).unwrap()
)
.unwrap();
pub static ref GRPC_RESP_BATCH_COMMANDS_SIZE: Histogram = register_histogram!(
"tikv_server_grpc_resp_batch_size",
"grpc batch size of gRPC responses",
exponential_buckets(1f64, 2f64, 10).unwrap()
)
.unwrap();
pub static ref GC_EMPTY_RANGE_COUNTER: IntCounter = register_int_counter!(
"tikv_storage_gc_empty_range_total",
"Total number of empty range found by gc"
)
.unwrap();
pub static ref GC_SKIPPED_COUNTER: IntCounter = register_int_counter!(
"tikv_storage_gc_skipped_counter",
"Total number of gc command skipped owing to optimization"
)
.unwrap();
pub static ref GC_TASK_DURATION_HISTOGRAM_VEC: HistogramVec = register_histogram_vec!(
"tikv_gcworker_gc_task_duration_vec",
"Duration of gc tasks execution",
&["task"],
exponential_buckets(0.0005, 2.0, 20).unwrap()
)
.unwrap();
pub static ref GC_TOO_BUSY_COUNTER: IntCounter = register_int_counter!(
"tikv_gc_worker_too_busy",
"Counter of occurrence of gc_worker being too busy"
)
.unwrap();
pub static ref AUTO_GC_STATUS_GAUGE_VEC: IntGaugeVec = register_int_gauge_vec!(
"tikv_gcworker_autogc_status",
"State of the auto gc manager",
&["state"]
)
.unwrap();
pub static ref AUTO_GC_SAFE_POINT_GAUGE: IntGauge = register_int_gauge!(
"tikv_gcworker_autogc_safe_point",
"Safe point used for auto gc"
)
.unwrap();
pub static ref AUTO_GC_PROCESSED_REGIONS_GAUGE_VEC: IntGaugeVec = register_int_gauge_vec!(
"tikv_gcworker_autogc_processed_regions",
"Processed regions by auto gc",
&["type"]
)
.unwrap();
pub static ref RAFT_MESSAGE_RECV_COUNTER: IntCounter = register_int_counter!(
"tikv_server_raft_message_recv_total",
"Total number of raft messages received"
)
.unwrap();
pub static ref RAFT_MESSAGE_BATCH_SIZE: Histogram = register_histogram!(
"tikv_server_raft_message_batch_size",
"Raft messages batch size",
exponential_buckets(1f64, 2f64, 10).unwrap()
)
.unwrap();
pub static ref REPORT_FAILURE_MSG_COUNTER: IntCounterVec = register_int_counter_vec!(
"tikv_server_report_failure_msg_total",
"Total number of reporting failure messages",
&["type", "store_id"]
)
.unwrap();
pub static ref RAFT_MESSAGE_FLUSH_COUNTER: IntCounter = register_int_counter!(
"tikv_server_raft_message_flush_total",
"Total number of raft messages flushed immediately"
)
.unwrap();
pub static ref RAFT_MESSAGE_DELAY_FLUSH_COUNTER: IntCounter = register_int_counter!(
"tikv_server_raft_message_delay_flush_total",
"Total number of raft messages flushed delay"
)
.unwrap();
pub static ref CONFIG_ROCKSDB_GAUGE: GaugeVec = register_gauge_vec!(
"tikv_config_rocksdb",
"Config information of rocksdb",
&["cf", "name"]
)
.unwrap();
pub static ref REQUEST_BATCH_SIZE_HISTOGRAM_VEC: RequestBatchSizeHistogramVec =
register_static_histogram_vec!(
RequestBatchSizeHistogramVec,
"tikv_server_request_batch_size",
"Size of request batch input",
&["type"],
exponential_buckets(1f64, 5f64, 10).unwrap()
)
.unwrap();
pub static ref REQUEST_BATCH_RATIO_HISTOGRAM_VEC: RequestBatchRatioHistogramVec =
register_static_histogram_vec!(
RequestBatchRatioHistogramVec,
"tikv_server_request_batch_ratio",
"Ratio of request batch output to input",
&["type"],
exponential_buckets(1f64, 5f64, 10).unwrap()
)
.unwrap();
}
make_auto_flush_static_metric! {
pub label_enum RequestStatusKind {
all,
success,
err_timeout,
err_empty_request,
err_other,
err_io,
err_server,
err_invalid_resp,
err_invalid_req,
err_not_leader,
err_region_not_found,
err_key_not_in_region,
err_epoch_not_match,
err_server_is_busy,
err_stale_command,
err_store_not_match,
err_raft_entry_too_large,
}
pub label_enum RequestTypeKind {
write,
snapshot,
}
pub struct AsyncRequestsCounterVec: LocalIntCounter {
"type" => RequestTypeKind,
"status" => RequestStatusKind,
}
pub struct AsyncRequestsDurationVec: LocalHistogram {
"type" => RequestTypeKind,
}
}
impl From<ErrorHeaderKind> for RequestStatusKind {
fn from(kind: ErrorHeaderKind) -> Self
|
}
lazy_static! {
pub static ref ASYNC_REQUESTS_COUNTER: IntCounterVec = register_int_counter_vec!(
"tikv_storage_engine_async_request_total",
"Total number of engine asynchronous requests",
&["type", "status"]
)
.unwrap();
pub static ref ASYNC_REQUESTS_DURATIONS: HistogramVec = register_histogram_vec!(
"tikv_storage_engine_async_request_duration_seconds",
"Bucketed histogram of processing successful asynchronous requests.",
&["type"],
exponential_buckets(0.0005, 2.0, 20).unwrap()
)
.unwrap();
}
lazy_static! {
pub static ref ASYNC_REQUESTS_COUNTER_VEC: AsyncRequestsCounterVec =
auto_flush_from!(ASYNC_REQUESTS_COUNTER, AsyncRequestsCounterVec);
pub static ref ASYNC_REQUESTS_DURATIONS_VEC: AsyncRequestsDurationVec =
auto_flush_from!(ASYNC_REQUESTS_DURATIONS, AsyncRequestsDurationVec);
}
|
{
match kind {
ErrorHeaderKind::NotLeader => RequestStatusKind::err_not_leader,
ErrorHeaderKind::RegionNotFound => RequestStatusKind::err_region_not_found,
ErrorHeaderKind::KeyNotInRegion => RequestStatusKind::err_key_not_in_region,
ErrorHeaderKind::EpochNotMatch => RequestStatusKind::err_epoch_not_match,
ErrorHeaderKind::ServerIsBusy => RequestStatusKind::err_server_is_busy,
ErrorHeaderKind::StaleCommand => RequestStatusKind::err_stale_command,
ErrorHeaderKind::StoreNotMatch => RequestStatusKind::err_store_not_match,
ErrorHeaderKind::RaftEntryTooLarge => RequestStatusKind::err_raft_entry_too_large,
ErrorHeaderKind::Other => RequestStatusKind::err_other,
}
}
|
select.js
|
import {greatest, group, least} from "d3";
import {maybeZ, valueof} from "../mark.js";
import {basic} from "./basic.js";
export function selectFirst(options) {
return select(first, undefined, options);
}
export function selectLast(options) {
return select(last, undefined, options);
}
export function selectMinX(options = {}) {
const x = options.x;
if (x == null) throw new Error("missing channel: x");
return select(min, x, options);
}
export function
|
(options = {}) {
const y = options.y;
if (y == null) throw new Error("missing channel: y");
return select(min, y, options);
}
export function selectMaxX(options = {}) {
const x = options.x;
if (x == null) throw new Error("missing channel: x");
return select(max, x, options);
}
export function selectMaxY(options = {}) {
const y = options.y;
if (y == null) throw new Error("missing channel: y");
return select(max, y, options);
}
// TODO If the value (for some required channel) is undefined, scan forward?
function* first(I) {
yield I[0];
}
// TODO If the value (for some required channel) is undefined, scan backward?
function* last(I) {
yield I[I.length - 1];
}
function* min(I, X) {
yield least(I, i => X[i]);
}
function* max(I, X) {
yield greatest(I, i => X[i]);
}
function select(selectIndex, v, options) {
const z = maybeZ(options);
return basic(options, (data, facets) => {
const Z = valueof(data, z);
const V = valueof(data, v);
const selectFacets = [];
for (const facet of facets) {
const selectFacet = [];
for (const I of Z ? group(facet, i => Z[i]).values() : [facet]) {
for (const i of selectIndex(I, V)) {
selectFacet.push(i);
}
}
selectFacets.push(selectFacet);
}
return {data, facets: selectFacets};
});
}
|
selectMinY
|
context.go
|
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models
|
type Context struct{}
|
|
ddsketch.rs
|
//! DDSketch quantile sketch with relative-error guarantees.
//! DDSketch is a fast and fully-mergeable quantile sketch with relative-error guarantees.
//!
//! The main difference between this approach and previous art is that DDSketch employs a new method to
//! compute the error. Traditionally, the error rate of a sketch is evaluated by rank accuracy,
//! which can still produce a relatively large variance if the dataset has a long tail.
//!
//! DDSketch, by contrast, uses a relative error rate that works well on long-tailed datasets.
//!
//! The details of this algorithm can be found in https://arxiv.org/pdf/1908.10693
use std::{
any::Any,
cmp::Ordering,
mem,
ops::AddAssign,
sync::{Arc, RwLock},
};
use crate::{
metrics::{Descriptor, MetricsError, Number, NumberKind, Result},
sdk::export::metrics::{
Aggregator, Count, Distribution, Max, Min, MinMaxSumCount, Quantile, Sum,
},
};
const INITIAL_NUM_BINS: usize = 128;
const GROW_LEFT_BY: i64 = 128;
const DEFAULT_MAX_NUM_BINS: i64 = 2048;
const DEFAULT_ALPHA: f64 = 0.01;
const DEFAULT_MIN_BOUNDARY: f64 = 1.0e-9;
/// An aggregator to calculate quantile
pub fn ddsketch(config: &DDSketchConfig, kind: NumberKind) -> DDSKetchAggregator {
DDSKetchAggregator::new(config, kind)
}
/// DDSKetch quantile sketch algorithm
///
/// It can give q-quantiles that are α-accurate for any 0<=q<=1.
///
/// Here the accuracy is defined in terms of a relative-error rate. Thus, the error guarantee adapts to the scale of the output data. With a relative error guarantee, the histogram can be more accurate in areas of low data density, for example the long tail of response-time data.
///
/// For example, if the actual percentile is 1 second, and the relative-error guarantee
/// is 2%, then the returned value should be within the range of 0.98 to 1.02
/// seconds. But if the actual percentile is 1 millisecond, with the same relative-error
/// guarantee, the returned value should be within the range of 0.98 to 1.02 milliseconds.
///
/// In order to support both negative and positive inputs, DDSKetchAggregator has two DDSketch stores within itself, one for negative and one for positive inputs.
#[derive(Debug)]
pub struct DDSKetchAggregator {
inner: RwLock<Inner>,
}
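// A short worked reading of the α-accuracy claim above (a sketch, not additional behaviour):
// with γ = (1 + α) / (1 - α), any value v in the bucket (γ^(k-1), γ^k] is reported back as
// 2·γ^k / (1 + γ) (see `quantile` below). The worst cases are v = γ^(k-1) and v = γ^k, and in
// both |reported - v| / v = (γ - 1) / (γ + 1) = α, which is exactly the relative-error
// guarantee. For α = 2%, γ ≈ 1.0408, so a true value of 1 s is reported within [0.98 s, 1.02 s].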
impl DDSKetchAggregator {
/// Create a new DDSKetchAggregator that would yield a quantile with relative error rate less
/// than `alpha`
///
/// The input should have a granularity larger than `key_epsilon`
pub fn new(config: &DDSketchConfig, kind: NumberKind) -> DDSKetchAggregator {
DDSKetchAggregator {
inner: RwLock::new(Inner::new(config, kind)),
}
}
}
impl Default for DDSKetchAggregator {
fn default() -> Self {
DDSKetchAggregator::new(
&DDSketchConfig::new(DEFAULT_ALPHA, DEFAULT_MAX_NUM_BINS, DEFAULT_MIN_BOUNDARY),
NumberKind::F64,
)
}
}
impl Sum for DDSKetchAggregator {
fn sum(&self) -> Result<Number> {
self.inner
.read()
.map_err(From::from)
.map(|inner| inner.sum.clone())
}
}
impl Min for DDSKetchAggregator {
fn min(&self) -> Result<Number> {
self.inner
.read()
.map_err(From::from)
.map(|inner| inner.min_value.clone())
}
}
impl Max for DDSKetchAggregator {
fn max(&self) -> Result<Number> {
self.inner
.read()
.map_err(From::from)
.map(|inner| inner.max_value.clone())
}
}
impl Count for DDSKetchAggregator {
fn count(&self) -> Result<u64> {
self.inner
.read()
.map_err(From::from)
.map(|inner| inner.count())
}
}
impl MinMaxSumCount for DDSKetchAggregator {}
impl Distribution for DDSKetchAggregator {}
impl Quantile for DDSKetchAggregator {
fn quantile(&self, q: f64) -> Result<Number> {
if !(0.0..=1.0).contains(&q) {
return Err(MetricsError::InvalidQuantile);
}
self.inner.read().map_err(From::from).and_then(|inner| {
if inner.count() == 0 {
return Err(MetricsError::NoDataCollected);
}
if q == 0.0 {
return Ok(inner.min_value.clone());
}
if (q - 1.0).abs() < std::f64::EPSILON {
return Ok(inner.max_value.clone());
}
// determine whether the quantile will fall in positive or negative
let rank = (q * (inner.count() - 1) as f64 + 1.0).floor() as u64;
let mut key = if rank > inner.negative_store.count {
inner
.positive_store
.key_at_rank(rank - inner.negative_store.count)
} else {
inner.negative_store.key_at_rank(rank)
};
// Calculate the actual value based on the key of bins.
let quantile_val = match key.cmp(&0) {
Ordering::Less => {
key += inner.offset;
-2.0 * (inner.gamma_ln * (-key as f64)).exp() / (1.0 + inner.gamma)
}
Ordering::Greater => {
key -= inner.offset;
2.0 * (inner.gamma_ln * (key as f64)).exp() / (1.0 + inner.gamma)
}
Ordering::Equal => 0f64,
};
let mut quantile = match inner.kind {
NumberKind::F64 => Number::from(quantile_val),
NumberKind::U64 => Number::from(quantile_val as u64),
NumberKind::I64 => Number::from(quantile_val as i64),
};
// Make sure the result of quantile is within [min_value, max_value]
if quantile.partial_cmp(&inner.kind, &inner.min_value) == Some(Ordering::Less) {
quantile = inner.min_value.clone();
}
if quantile.partial_cmp(&inner.kind, &inner.max_value) == Some(Ordering::Greater) {
quantile = inner.max_value.clone();
}
Ok(quantile)
})
}
}
impl Aggregator for DDSKetchAggregator {
fn update(&self, number: &Number, descriptor: &Descriptor) -> Result<()> {
self.inner
.write()
.map_err(From::from)
.map(|mut inner| inner.add(number, descriptor.number_kind()))
}
fn synchronized_move(
&self,
destination: &Arc<(dyn Aggregator + Send + Sync)>,
descriptor: &Descriptor,
) -> Result<()> {
if let Some(other) = destination.as_any().downcast_ref::<Self>() {
other
.inner
.write()
.map_err(From::from)
.and_then(|mut other| {
self.inner.write().map_err(From::from).map(|mut inner| {
let kind = descriptor.number_kind();
other.max_value = mem::replace(&mut inner.max_value, kind.zero());
other.min_value = mem::replace(&mut inner.min_value, kind.zero());
other.key_epsilon = mem::take(&mut inner.key_epsilon);
other.offset = mem::take(&mut inner.offset);
other.gamma = mem::take(&mut inner.gamma);
other.gamma_ln = mem::take(&mut inner.gamma_ln);
other.positive_store = mem::take(&mut inner.positive_store);
other.negative_store = mem::take(&mut inner.negative_store);
other.sum = mem::replace(&mut inner.sum, kind.zero());
})
})
} else {
Err(MetricsError::InconsistentAggregator(format!(
"Expected {:?}, got: {:?}",
self, destination
)))
}
}
fn merge(
&self,
other: &(dyn Aggregator + Send + Sync),
_descriptor: &Descriptor,
) -> Result<()> {
if let Some(other) = other.as_any().downcast_ref::<DDSKetchAggregator>() {
self.inner.write()
.map_err(From::from)
.and_then(|mut inner| {
other.inner.read()
.map_err(From::from)
.and_then(|other| {
// assert that it can merge
if inner.positive_store.max_num_bins != other.positive_store.max_num_bins {
return Err(MetricsError::InconsistentAggregator(format!(
"When merging two DDSKetchAggregators, their max number of bins must be the same. Expect max number of bins to be {:?}, but get {:?}", inner.positive_store.max_num_bins, other.positive_store.max_num_bins
)));
}
if inner.negative_store.max_num_bins != other.negative_store.max_num_bins {
return Err(MetricsError::InconsistentAggregator(format!(
"When merging two DDSKetchAggregators, their max number of bins must be the same. Expect max number of bins to be {:?}, but get {:?}", inner.negative_store.max_num_bins, other.negative_store.max_num_bins
)));
}
if (inner.gamma - other.gamma).abs() > std::f64::EPSILON {
return Err(MetricsError::InconsistentAggregator(format!(
"When merging two DDSKetchAggregators, their gamma must be the same. Expect max number of bins to be {:?}, but get {:?}", inner.gamma, other.gamma
)));
}
if other.count() == 0 {
return Ok(());
}
if inner.count() == 0 {
inner.positive_store.merge(&other.positive_store);
inner.negative_store.merge(&other.negative_store);
inner.sum = other.sum.clone();
inner.min_value = other.min_value.clone();
inner.max_value = other.max_value.clone();
return Ok(());
}
inner.positive_store.merge(&other.positive_store);
inner.negative_store.merge(&other.negative_store);
inner.sum = match inner.kind {
NumberKind::F64 =>
Number::from(inner.sum.to_f64(&inner.kind) + other.sum.to_f64(&other.kind)),
NumberKind::U64 => Number::from(inner.sum.to_u64(&inner.kind) + other.sum.to_u64(&other.kind)),
NumberKind::I64 => Number::from(inner.sum.to_i64(&inner.kind) + other.sum.to_i64(&other.kind))
};
if inner.min_value.partial_cmp(&inner.kind, &other.min_value) == Some(Ordering::Greater) {
inner.min_value = other.min_value.clone();
};
if inner.max_value.partial_cmp(&inner.kind, &other.max_value) == Some(Ordering::Less) {
inner.max_value = other.max_value.clone();
}
Ok(())
})
})
} else {
Err(MetricsError::InconsistentAggregator(format!(
"Expected {:?}, got: {:?}",
self, other
)))
}
}
fn as_any(&self) -> &dyn Any {
self
}
}
/// DDSKetch Configuration.
#[derive(Debug)]
pub struct DDSketchConfig {
alpha: f64,
max_num_bins: i64,
key_epsilon: f64,
}
impl DDSketchConfig {
/// Create a new DDSKetch config
pub fn new(alpha: f64, max_num_bins: i64, key_epsilon: f64) -> Self {
DDSketchConfig {
alpha,
max_num_bins,
key_epsilon,
}
}
}
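// A minimal usage sketch (mirroring the test setup further below): build an
// aggregator from a config with relative accuracy alpha = 0.01, at most 1024
// bins per store, and key epsilon = 1e-9, then record f64 measurements with
// `update`.
//
//     let aggregator = DDSKetchAggregator::new(
//         &DDSketchConfig::new(0.01, 1024, 1.0e-9),
//         NumberKind::F64,
//     );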
/// DDSKetch implementation.
///
/// Note that Inner is not thread-safe. All operations should be protected by a lock or other
/// synchronization.
///
/// Inner will also convert all Number into actual primitive type and back.
///
/// According to the paper, DDSKetch only supports positive numbers. Inner supports
/// either all-positive or all-negative input, but cannot yield an accurate result when
/// the input contains both positive and negative numbers.
#[derive(Debug)]
struct Inner {
positive_store: Store,
negative_store: Store,
kind: NumberKind,
// sum of all value within store
sum: Number,
// γ = (1 + α)/(1 - α)
gamma: f64,
// ln(γ)
gamma_ln: f64,
    // The epsilon used when mapping a value to a bin key. Any value within
    // [-key_epsilon, key_epsilon] is mapped to bin key 0. Must be a positive number.
key_epsilon: f64,
    // offset ensures that keys for positive numbers larger than key_epsilon are
    // greater than or equal to 1, while keys for negative numbers are less than or equal to -1.
offset: i64,
    // minimum number in the store.
    min_value: Number,
    // maximum number in the store.
    max_value: Number,
}
impl Inner {
fn new(config: &DDSketchConfig, kind: NumberKind) -> Inner {
let gamma: f64 = 1.0 + 2.0 * config.alpha / (1.0 - config.alpha);
let mut inner = Inner {
positive_store: Store::new(config.max_num_bins / 2),
negative_store: Store::new(config.max_num_bins / 2),
min_value: kind.max(),
max_value: kind.min(),
sum: kind.zero(),
gamma,
gamma_ln: gamma.ln(),
key_epsilon: config.key_epsilon,
offset: 0,
kind,
};
// reset offset based on key_epsilon
inner.offset = -(inner.log_gamma(inner.key_epsilon)).ceil() as i64 + 1i64;
inner
}
fn add(&mut self, v: &Number, kind: &NumberKind) {
let key = self.key(v, kind);
match v.partial_cmp(kind, &Number::from(0.0)) {
Some(Ordering::Greater) | Some(Ordering::Equal) => {
self.positive_store.add(key);
}
Some(Ordering::Less) => {
self.negative_store.add(key);
}
_ => {
                // partial_cmp returned None; do nothing and return
return;
}
}
// update min and max
if self.min_value.partial_cmp(&self.kind, v) == Some(Ordering::Greater) {
self.min_value = v.clone();
}
if self.max_value.partial_cmp(&self.kind, v) == Some(Ordering::Less) {
self.max_value = v.clone();
}
match &self.kind {
NumberKind::I64 => {
self.sum = Number::from(self.sum.to_i64(&self.kind) + v.to_i64(kind));
}
NumberKind::U64 => {
self.sum = Number::from(self.sum.to_u64(&self.kind) + v.to_u64(kind));
}
NumberKind::F64 => {
self.sum = Number::from(self.sum.to_f64(&self.kind) + v.to_f64(kind));
}
}
}
fn key(&self, num: &Number, kind: &NumberKind) -> i64 {
if num.to_f64(kind) < -self.key_epsilon {
let positive_num = match kind {
NumberKind::F64 => Number::from(-num.to_f64(kind)),
NumberKind::U64 => Number::from(num.to_u64(kind)),
NumberKind::I64 => Number::from(-num.to_i64(kind)),
};
(-self.log_gamma(positive_num.to_f64(kind)).ceil()) as i64 - self.offset
} else if num.to_f64(kind) > self.key_epsilon {
self.log_gamma(num.to_f64(&kind)).ceil() as i64 + self.offset
} else {
0i64
}
}
    /// Compute log base gamma of `num`, used to derive the bucket key.
fn log_gamma(&self, num: f64) -> f64 {
num.ln() / self.gamma_ln
}
fn count(&self) -> u64 {
self.negative_store.count + self.positive_store.count
}
}
#[derive(Debug)]
struct Store {
bins: Vec<u64>,
count: u64,
min_key: i64,
max_key: i64,
// maximum number of bins Store can have.
    // In the worst case, the number of bins can grow as large as the number of elements inserted.
// max_num_bins helps control the number of bins.
max_num_bins: i64,
}
impl Default for Store {
fn default() -> Self {
Store {
bins: vec![0; INITIAL_NUM_BINS],
count: 0,
min_key: 0,
max_key: 0,
max_num_bins: DEFAULT_MAX_NUM_BINS,
}
}
}
/// DDSKetchInner stores the data
impl Store {
fn new(max_num_bins: i64) -> Store {
Store {
bins: vec![
0;
if max_num_bins as usize > INITIAL_NUM_BINS {
INITIAL_NUM_BINS
} else {
max_num_bins as usize
}
],
count: 0u64,
min_key: 0i64,
max_key: 0i64,
max_num_bins,
}
}
/// Add count based on key.
///
    /// If key is not in [min_key, max_key], the store expands to the left or right.
    ///
    /// The bins cover a sliding, contiguous range of keys, so a key is offset by
    /// `min_key` to obtain its actual bin index.
fn add(&mut self, key: i64) {
if self.count == 0 {
self.max_key = key;
self.min_key = key - self.bins.len() as i64 + 1
}
if key < self.min_key {
self.grow_left(key)
} else if key > self.max_key {
self.grow_right(key)
}
let idx = if key - self.min_key < 0 {
0
} else {
key - self.min_key
};
        // unwrap is safe here: grow_left/grow_right ensure idx is within the vector's bounds
let bin_count = self.bins.get_mut(idx as usize).unwrap();
*bin_count += 1;
self.count += 1;
}
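    // Worked example of the index math above (values assumed for illustration):
    // with min_key = -3, a key of 5 maps to bin index 5 - (-3) = 8, while any key
    // still below min_key after growing is clamped to index 0.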
fn grow_left(&mut self, key: i64) {
if self.min_key < key || self.bins.len() >= self.max_num_bins as usize {
return;
}
let min_key = if self.max_key - key >= self.max_num_bins {
self.max_key - self.max_num_bins + 1
} else {
let mut min_key = self.min_key;
while min_key > key {
min_key -= GROW_LEFT_BY;
}
min_key
};
        // The new vector has two parts: a leading region of zeros (the newly
        // expanded range) followed by a copy of the existing bins.
let expected_len = (self.max_key - min_key + 1) as usize;
let mut new_bins = vec![0u64; expected_len];
let old_bin_slice = &mut new_bins[(self.min_key - min_key) as usize..];
old_bin_slice.copy_from_slice(&self.bins);
self.bins = new_bins;
self.min_key = min_key;
}
fn grow_right(&mut self, key: i64) {
if self.max_key > key {
return;
}
if key - self.max_key >= self.max_num_bins {
            // If the new key is at least max_num_bins beyond the current max key,
            // collapse all existing counts into the first bin.
self.bins = vec![0; self.max_num_bins as usize];
self.max_key = key;
self.min_key = key - self.max_num_bins + 1;
self.bins.get_mut(0).unwrap().add_assign(self.count);
} else if key - self.min_key >= self.max_num_bins {
let min_key = key - self.max_num_bins + 1;
let upper_bound = if min_key < self.max_key + 1 {
min_key
} else {
self.max_key + 1
} - self.min_key;
let n = self.bins.iter().take(upper_bound as usize).sum::<u64>();
if self.bins.len() < self.max_num_bins as usize {
let mut new_bins = vec![0; self.max_num_bins as usize];
new_bins[0..self.bins.len() - (min_key - self.min_key) as usize]
.as_mut()
.copy_from_slice(&self.bins[(min_key - self.min_key) as usize..]);
self.bins = new_bins;
} else {
// bins length is equal to max number of bins
self.bins.drain(0..(min_key - self.min_key) as usize);
if self.max_num_bins > self.max_key - min_key + 1 {
self.bins.resize(
self.bins.len()
+ (self.max_num_bins - (self.max_key - min_key + 1)) as usize,
0,
)
}
}
self.max_key = key;
self.min_key = min_key;
self.bins.get_mut(0).unwrap().add_assign(n);
} else {
let mut new_bin = vec![0; (key - self.min_key + 1) as usize];
new_bin[0..self.bins.len()]
.as_mut()
.copy_from_slice(&self.bins);
self.bins = new_bin;
self.max_key = key;
}
}
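    // Worked example of the collapse branch above (values assumed for illustration):
    // with max_num_bins = 4, min_key = 0 and max_key = 3, adding key 10 gives
    // 10 - 3 >= 4, so all existing counts are moved into bin 0 and the range becomes
    // min_key = 7, max_key = 10.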
    /// Returns the key of the value at the given rank.
fn key_at_rank(&self, rank: u64) -> i64 {
self.bins
.iter()
.enumerate()
.scan(0, |state, (key, &count)| {
*state += count;
Some((key, *state))
})
.filter(|(_key, accumulated)| *accumulated >= rank)
.map(|(key, _)| key as i64 + self.min_key)
.next()
.unwrap_or(self.max_key)
}
/// Merge two stores
fn merge(&mut self, other: &Store) {
        // Nothing to merge in from an empty store.
        if other.count == 0 {
            return;
        }
        // If this store is empty, take over the other store's contents wholesale.
        if self.count == 0 {
            self.bins = other.bins.clone();
            self.min_key = other.min_key;
            self.max_key = other.max_key;
            self.count = other.count;
            return;
        }
if self.max_key > other.max_key {
if other.min_key < self.min_key {
self.grow_left(other.min_key);
}
let start = if other.min_key > self.min_key {
other.min_key
} else {
self.min_key
} as usize;
for i in start..other.max_key as usize {
self.bins[i - self.min_key as usize] = other.bins[i - other.min_key as usize];
}
let mut n = 0;
for i in other.min_key as usize..self.min_key as usize {
n += other.bins[i - other.min_key as usize]
}
self.bins[0] += n;
} else if other.min_key < self.min_key {
|
{
self.grow_right(other.max_key);
for i in other.min_key as usize..(other.max_key + 1) as usize {
self.bins[i - self.min_key as usize] += other.bins[i - other.min_key as usize];
}
}
self.count += other.count;
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::metrics::{Descriptor, InstrumentKind, Number, NumberKind};
use crate::sdk::export::metrics::{Aggregator, Count, Max, Min, Quantile, Sum};
use rand_distr::{Distribution, Exp, LogNormal, Normal};
use std::cmp::Ordering;
use std::sync::Arc;
const TEST_MAX_BINS: i64 = 1024;
const TEST_ALPHA: f64 = 0.01;
const TEST_KEY_EPSILON: f64 = 1.0e-9;
// Test utils
struct Dataset {
data: Vec<Number>,
kind: NumberKind,
}
impl Dataset {
fn from_f64_vec(data: Vec<f64>) -> Dataset {
Dataset {
data: data.into_iter().map(Number::from).collect::<Vec<Number>>(),
kind: NumberKind::F64,
}
}
fn from_u64_vec(data: Vec<u64>) -> Dataset {
Dataset {
data: data.into_iter().map(Number::from).collect::<Vec<Number>>(),
kind: NumberKind::U64,
}
}
fn from_i64_vec(data: Vec<i64>) -> Dataset {
Dataset {
data: data.into_iter().map(Number::from).collect::<Vec<Number>>(),
kind: NumberKind::I64,
}
}
        // Given a quantile, get the minimum possible value that does not exceed the error range.
        // data must contain at least one element and must be sorted.
        // q must be in range [0, 1].
fn lower_quantile(&self, q: f64, alpha: f64) -> f64 {
let rank = q * (self.data.len() - 1) as f64;
let number = self
.data
.get(rank.floor() as usize)
.expect("data should at least contains one element, quantile should be in [0,1]")
.clone()
.to_f64(&self.kind);
if number < 0.0 {
number * (1.0 + alpha)
} else {
number * (1.0 - alpha)
}
}
        // Given a quantile, get the maximum possible value that does not exceed the error range.
        // data must contain at least one element and must be sorted.
        // q must be in range [0, 1].
fn upper_quantile(&self, q: f64, alpha: f64) -> f64 {
let rank = q * (self.data.len() - 1) as f64;
let number = self
.data
.get(rank.ceil() as usize)
.expect("data should at least contains one element, quantile should be in [0,1]")
.clone()
.to_f64(&self.kind);
if number > 0.0 {
number * (1.0 + alpha)
} else {
number * (1.0 - alpha)
}
}
fn sum(&self) -> Number {
match self.kind {
NumberKind::F64 => {
Number::from(self.data.iter().map(|e| e.to_f64(&self.kind)).sum::<f64>())
}
NumberKind::U64 => {
Number::from(self.data.iter().map(|e| e.to_u64(&self.kind)).sum::<u64>())
}
NumberKind::I64 => {
Number::from(self.data.iter().map(|e| e.to_i64(&self.kind)).sum::<i64>())
}
}
}
}
fn generate_linear_dataset_f64(start: f64, step: f64, num: usize) -> Vec<f64> {
let mut vec = Vec::with_capacity(num);
for i in 0..num {
vec.push((start + i as f64 * step) as f64);
}
vec
}
fn generate_linear_dataset_u64(start: u64, step: u64, num: usize) -> Vec<u64> {
let mut vec = Vec::with_capacity(num);
for i in 0..num {
vec.push(start + i as u64 * step);
}
vec
}
fn generate_linear_dataset_i64(start: i64, step: i64, num: usize) -> Vec<i64> {
let mut vec = Vec::with_capacity(num);
for i in 0..num {
vec.push(start + i as i64 * step);
}
vec
}
/// generate a dataset with normal distribution. Return sorted dataset.
fn generate_normal_dataset(mean: f64, stddev: f64, num: usize) -> Vec<f64> {
let normal = Normal::new(mean, stddev).unwrap();
let mut data = Vec::with_capacity(num);
for _ in 0..num {
data.push(normal.sample(&mut rand::thread_rng()));
}
data.as_mut_slice()
.sort_by(|a, b| a.partial_cmp(b).unwrap());
data
}
/// generate a dataset with log normal distribution. Return sorted dataset.
fn generate_log_normal_dataset(mean: f64, stddev: f64, num: usize) -> Vec<f64> {
let normal = LogNormal::new(mean, stddev).unwrap();
let mut data = Vec::with_capacity(num);
for _ in 0..num {
data.push(normal.sample(&mut rand::thread_rng()));
}
data.as_mut_slice()
.sort_by(|a, b| a.partial_cmp(b).unwrap());
data
}
fn generate_exponential_dataset(rate: f64, num: usize) -> Vec<f64> {
let exponential = Exp::new(rate).unwrap();
let mut data = Vec::with_capacity(num);
for _ in 0..num {
data.push(exponential.sample(&mut rand::thread_rng()));
}
data.as_mut_slice()
.sort_by(|a, b| a.partial_cmp(b).unwrap());
data
}
fn test_quantiles() -> &'static [f64] {
&[0.0, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999, 1.0]
}
    /// Insert all elements of the dataset into the ddsketch and assert that each quantile
    /// result is within the error range. Note that the dataset must be sorted.
fn evaluate_sketch(dataset: Dataset) {
let kind = &dataset.kind;
let ddsketch = DDSKetchAggregator::new(
&DDSketchConfig::new(TEST_ALPHA, TEST_MAX_BINS, TEST_KEY_EPSILON),
kind.clone(),
);
let descriptor = Descriptor::new(
"test".to_string(),
"test",
None,
InstrumentKind::ValueRecorder,
kind.clone(),
);
for i in &dataset.data {
let _ = ddsketch.update(i, &descriptor);
}
for q in test_quantiles() {
let lower = dataset.lower_quantile(*q, TEST_ALPHA);
let upper = dataset.upper_quantile(*q, TEST_ALPHA);
let result = ddsketch
.quantile(*q)
.expect("Error when calculate quantile");
match kind {
NumberKind::F64 => {
let result_f64 = result.to_f64(kind);
assert!(result_f64 - lower >= 0.0);
assert!(upper - result_f64 >= 0.0);
}
NumberKind::U64 => {
let result_u64 = result.to_u64(kind);
assert!(result_u64 >= lower as u64);
assert!(upper as u64 >= result_u64);
}
NumberKind::I64 => {
let result_i64 = result.to_i64(kind);
assert!(result_i64 - lower as i64 >= 0);
assert!(upper as i64 - result_i64 >= 0);
}
}
}
assert_eq!(
ddsketch
.min()
.unwrap()
.partial_cmp(kind, dataset.data.get(0).unwrap()),
Some(Ordering::Equal)
);
assert_eq!(
ddsketch
.max()
.unwrap()
.partial_cmp(kind, dataset.data.last().unwrap()),
Some(Ordering::Equal)
);
assert_eq!(
ddsketch.sum().unwrap().partial_cmp(kind, &dataset.sum()),
Some(Ordering::Equal)
);
assert_eq!(ddsketch.count().unwrap(), dataset.data.len() as u64);
}
// Test basic operation of Store
    /// First set max_num_bins < number of keys, then test that the store collapses into the
    /// left-most bin instead of expanding beyond max_num_bins.
#[test]
fn test_insert_into_store() {
let mut store = Store::new(200);
for i in -100..1300 {
store.add(i)
}
assert_eq!(store.count, 1400);
assert_eq!(store.bins.len(), 200);
}
    /// Regression test: copy_from_slice must not panic when the left and right range sizes differ.
#[test]
fn test_grow_right() {
let mut store = Store::new(150);
for i in &[-100, -50, 150, -20, 10] {
store.add(*i)
}
assert_eq!(store.count, 5);
}
    /// Regression test: copy_from_slice must not panic when the left and right range sizes differ.
#[test]
fn test_grow_left() {
let mut store = Store::new(150);
for i in &[500, 150, 10] {
store.add(*i)
}
assert_eq!(store.count, 3);
}
    /// Before merging, store1 should hold 300 bins that look like [201,1,1,1,...],
    /// and store2 should hold 200 bins that look like [301,1,1,...].
/// After merge, store 1 should still hold 300 bins with following distribution
///
/// index [0,0] -> 201
///
/// index [1,99] -> 1
///
/// index [100, 100] -> 302
///
/// index [101, 299] -> 2
#[test]
fn test_merge_stores() {
let mut store1 = Store::new(300);
let mut store2 = Store::new(200);
for i in 500..1000 {
store1.add(i);
store2.add(i);
}
store1.merge(&store2);
assert_eq!(store1.bins.get(0), Some(&201));
assert_eq!(&store1.bins[1..100], vec![1u64; 99].as_slice());
assert_eq!(store1.bins[100], 302);
assert_eq!(&store1.bins[101..], vec![2u64; 199].as_slice());
assert_eq!(store1.count, 1000);
}
// Test ddsketch with different distribution
#[test]
fn test_linear_distribution() {
// test u64
let mut dataset = Dataset::from_u64_vec(generate_linear_dataset_u64(12, 3, 5000));
evaluate_sketch(dataset);
// test i64
dataset = Dataset::from_i64_vec(generate_linear_dataset_i64(-12, 3, 5000));
evaluate_sketch(dataset);
// test f64
dataset = Dataset::from_f64_vec(generate_linear_dataset_f64(-12.0, 3.0, 5000));
evaluate_sketch(dataset);
}
#[test]
fn test_normal_distribution() {
let mut dataset = Dataset::from_f64_vec(generate_normal_dataset(150.0, 1.2, 100));
evaluate_sketch(dataset);
dataset = Dataset::from_f64_vec(generate_normal_dataset(-30.0, 4.4, 100));
evaluate_sketch(dataset);
}
#[test]
fn test_log_normal_distribution() {
let dataset = Dataset::from_f64_vec(generate_log_normal_dataset(120.0, 0.5, 100));
evaluate_sketch(dataset);
}
#[test]
fn test_exponential_distribution() {
let dataset = Dataset::from_f64_vec(generate_exponential_dataset(2.0, 500));
evaluate_sketch(dataset);
}
// Test Aggregator operation of DDSketch
#[test]
fn test_synchronized_move() {
let dataset = Dataset::from_f64_vec(generate_normal_dataset(1.0, 3.5, 100));
let kind = &dataset.kind;
let ddsketch = DDSKetchAggregator::new(
&DDSketchConfig::new(TEST_ALPHA, TEST_MAX_BINS, TEST_KEY_EPSILON),
kind.clone(),
);
let descriptor = Descriptor::new(
"test".to_string(),
"test",
None,
InstrumentKind::ValueRecorder,
kind.clone(),
);
for i in &dataset.data {
let _ = ddsketch.update(i, &descriptor);
}
let mut expected = vec![];
for q in test_quantiles() {
expected.push(ddsketch.quantile(*q).unwrap().to_f64(&NumberKind::F64));
}
let mut expected_iter = expected.iter();
let expected_sum = ddsketch.sum().unwrap().to_f64(&NumberKind::F64);
let expected_count = ddsketch.count().unwrap();
let expected_min = ddsketch.min().unwrap().to_f64(&NumberKind::F64);
let expected_max = ddsketch.max().unwrap().to_f64(&NumberKind::F64);
let moved_ddsketch: Arc<(dyn Aggregator + Send + Sync)> =
Arc::new(DDSKetchAggregator::new(
&DDSketchConfig::new(TEST_ALPHA, TEST_MAX_BINS, TEST_KEY_EPSILON),
NumberKind::F64,
));
let _ = ddsketch
.synchronized_move(&moved_ddsketch, &descriptor)
.expect("Fail to sync move");
let moved_ddsketch = moved_ddsketch
.as_any()
.downcast_ref::<DDSKetchAggregator>()
.expect("Fail to cast dyn Aggregator down to DDSketchAggregator");
// assert sum, max, min and count
assert!(
(moved_ddsketch.max().unwrap().to_f64(&NumberKind::F64) - expected_max).abs()
< std::f64::EPSILON
);
assert!(
(moved_ddsketch.min().unwrap().to_f64(&NumberKind::F64) - expected_min).abs()
< std::f64::EPSILON
);
assert!(
(moved_ddsketch.sum().unwrap().to_f64(&NumberKind::F64) - expected_sum).abs()
< std::f64::EPSILON
);
assert_eq!(moved_ddsketch.count().unwrap(), expected_count);
// assert can generate same result
for q in test_quantiles() {
assert!(
(moved_ddsketch
.quantile(*q)
.unwrap()
.to_f64(&NumberKind::F64)
- expected_iter.next().unwrap())
.abs()
< std::f64::EPSILON
);
}
}
}
|
let mut tmp_bins = vec![0u64; other.bins.len()];
tmp_bins.as_mut_slice().copy_from_slice(&other.bins);
for i in self.min_key as usize..self.max_key as usize {
tmp_bins[i - other.min_key as usize] += self.bins[i - self.min_key as usize];
}
self.bins = tmp_bins;
self.max_key = other.max_key;
self.min_key = other.min_key;
} else
|
privacy_analysis.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
r"""
*Based on Google's TF Privacy:* https://github.com/tensorflow/privacy/blob/master/tensorflow_privacy/privacy/analysis/rdp_accountant.py.
*Here, we update this code to Python 3, and optimize dependencies.*
Functionality for computing Renyi Differential Privacy (RDP) of an additive
Sampled Gaussian Mechanism (SGM).
Example:
Suppose that we have run an SGM applied to a function with L2-sensitivity of 1.
Its parameters are given as a list of tuples
``[(q_1, sigma_1, steps_1), ..., (q_k, sigma_k, steps_k)],``
and we wish to compute epsilon for a given target delta.
The example code would be:
>>> max_order = 32
>>> orders = range(2, max_order + 1)
>>> rdp = np.zeros_like(orders, dtype=float)
>>> for q, sigma, steps in parameters:
>>> rdp += privacy_analysis.compute_rdp(q, sigma, steps, orders)
>>> epsilon, opt_order = privacy_analysis.get_privacy_spent(orders, rdp, delta)
"""
import math
from typing import List, Tuple, Union
import numpy as np
from scipy import special
########################
# LOG-SPACE ARITHMETIC #
########################
def _log_add(logx: float, logy: float) -> float:
r"""Adds two numbers in the log space.
Args:
logx: First term in log space.
logy: Second term in log space.
Returns:
Sum of numbers in log space.
"""
a, b = min(logx, logy), max(logx, logy)
if a == -np.inf: # adding 0
return b
# Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b)
return math.log1p(math.exp(a - b)) + b # log1p(x) = log(x + 1)
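# Example: _log_add(math.log(2.0), math.log(3.0)) equals math.log(5.0) up to
# floating point error, since exp(log 2) + exp(log 3) = 5.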
def _log_sub(logx: float, logy: float) -> float:
r"""Subtracts two numbers in the log space.
Args:
logx: First term in log space. Expected to be greater than the second term.
        logy: Second term in log space. Expected to be less than the first term.
Returns:
Difference of numbers in log space.
Raises:
ValueError
If the result is negative.
"""
if logx < logy:
raise ValueError("The result of subtraction must be non-negative.")
if logy == -np.inf: # subtracting 0
return logx
if logx == logy:
return -np.inf # 0 is represented as -np.inf in the log space.
try:
# Use exp(x) - exp(y) = (exp(x - y) - 1) * exp(y).
return math.log(math.expm1(logx - logy)) + logy # expm1(x) = exp(x) - 1
except OverflowError:
return logx
def _compute_log_a_for_int_alpha(q: float, sigma: float, alpha: int) -> float:
r"""Computes :math:`log(A_\alpha)` for integer ``alpha``.
Notes:
Note that
:math:`A_\alpha` is real valued function of ``alpha`` and ``q``,
and that 0 < ``q`` < 1.
Refer to Section 3.3 of https://arxiv.org/pdf/1908.10530.pdf for details.
Args:
q: Sampling rate of SGM.
sigma: The standard deviation of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
:math:`log(A_\alpha)` as defined in Section 3.3 of
https://arxiv.org/pdf/1908.10530.pdf.
"""
# Initialize with 0 in the log space.
log_a = -np.inf
for i in range(alpha + 1):
log_coef_i = (
math.log(special.binom(alpha, i))
+ i * math.log(q)
+ (alpha - i) * math.log(1 - q)
)
s = log_coef_i + (i * i - i) / (2 * (sigma ** 2))
log_a = _log_add(log_a, s)
return float(log_a)
def _compute_log_a_for_frac_alpha(q: float, sigma: float, alpha: float) -> float:
r"""Computes :math:`log(A_\alpha)` for fractional ``alpha``.
Notes:
Note that
:math:`A_\alpha` is real valued function of ``alpha`` and ``q``,
and that 0 < ``q`` < 1.
Refer to Section 3.3 of https://arxiv.org/pdf/1908.10530.pdf for details.
Args:
q: Sampling rate of SGM.
sigma: The standard deviation of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
:math:`log(A_\alpha)` as defined in Section 3.3 of
https://arxiv.org/pdf/1908.10530.pdf.
"""
# The two parts of A_alpha, integrals over (-inf,z0] and [z0, +inf), are
# initialized to 0 in the log space:
log_a0, log_a1 = -np.inf, -np.inf
i = 0
z0 = sigma ** 2 * math.log(1 / q - 1) + 0.5
while True: # do ... until loop
coef = special.binom(alpha, i)
log_coef = math.log(abs(coef))
j = alpha - i
log_t0 = log_coef + i * math.log(q) + j * math.log(1 - q)
log_t1 = log_coef + j * math.log(q) + i * math.log(1 - q)
log_e0 = math.log(0.5) + _log_erfc((i - z0) / (math.sqrt(2) * sigma))
log_e1 = math.log(0.5) + _log_erfc((z0 - j) / (math.sqrt(2) * sigma))
log_s0 = log_t0 + (i * i - i) / (2 * (sigma ** 2)) + log_e0
log_s1 = log_t1 + (j * j - j) / (2 * (sigma ** 2)) + log_e1
if coef > 0:
log_a0 = _log_add(log_a0, log_s0)
log_a1 = _log_add(log_a1, log_s1)
else:
log_a0 = _log_sub(log_a0, log_s0)
log_a1 = _log_sub(log_a1, log_s1)
i += 1
if max(log_s0, log_s1) < -30:
break
return _log_add(log_a0, log_a1)
def _compute_log_a(q: float, sigma: float, alpha: float) -> float:
r"""Computes :math:`log(A_\alpha)` for any positive finite ``alpha``.
Notes:
Note that
:math:`A_\alpha` is real valued function of ``alpha`` and ``q``,
and that 0 < ``q`` < 1.
Refer to Section 3.3 of https://arxiv.org/pdf/1908.10530.pdf
for details.
Args:
q: Sampling rate of SGM.
sigma: The standard deviation of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
:math:`log(A_\alpha)` as defined in the paper mentioned above.
"""
if float(alpha).is_integer():
return _compute_log_a_for_int_alpha(q, sigma, int(alpha))
else:
return _compute_log_a_for_frac_alpha(q, sigma, alpha)
def _log_erfc(x: float) -> float:
r"""Computes :math:`log(erfc(x))` with high accuracy for large ``x``.
Helper function used in computation of :math:`log(A_\alpha)`
for a fractional alpha.
Args:
x: The input to the function
Returns:
:math:`log(erfc(x))`
"""
return math.log(2) + special.log_ndtr(-x * 2 ** 0.5)
def _compute_rdp(q: float, sigma: float, alpha: float) -> float:
r"""Computes RDP of the Sampled Gaussian Mechanism at order ``alpha``.
Args:
q: Sampling rate of SGM.
sigma: The standard deviation of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
RDP at order ``alpha``; can be np.inf.
"""
if q == 0:
return 0
# no privacy
if sigma == 0:
return np.inf
if q == 1.0:
return alpha / (2 * sigma ** 2)
if np.isinf(alpha):
return np.inf
return _compute_log_a(q, sigma, alpha) / (alpha - 1)
def compute_rdp(
q: float, noise_multiplier: float, steps: int, orders: Union[List[float], float]
) -> Union[List[float], float]:
r"""Computes Renyi Differential Privacy (RDP) guarantees of the
Sampled Gaussian Mechanism (SGM) iterated ``steps`` times.
Args:
q: Sampling rate of SGM.
noise_multiplier: The ratio of the standard deviation of the
additive Gaussian noise to the L2-sensitivity of the function
            to which it is added. Note that this is the same as the standard
deviation of the additive Gaussian noise when the L2-sensitivity
of the function is 1.
steps: The number of iterations of the mechanism.
orders: An array (or a scalar) of RDP orders.
Returns:
The RDP guarantees at all orders; can be ``np.inf``.
"""
if isinstance(orders, float):
rdp = _compute_rdp(q, noise_multiplier, orders)
else:
rdp = np.array([_compute_rdp(q, noise_multiplier, order) for order in orders])
return rdp * steps
def get_privacy_spent(
orders: Union[List[float], float], rdp: Union[List[float], float], delta: float
) -> Tuple[float, float]:
r"""Computes epsilon given a list of Renyi Differential Privacy (RDP) values at
multiple RDP orders and target ``delta``.
    The computation of epsilon, i.e. the conversion from RDP to (eps, delta)-DP,
is based on the theorem presented in the following work:
Borja Balle et al. "Hypothesis testing interpretations and Renyi differential privacy."
International Conference on Artificial Intelligence and Statistics. PMLR, 2020.
    Particularly, Theorem 21 in the arXiv version https://arxiv.org/abs/1905.09982.
Args:
orders: An array (or a scalar) of orders (alphas).
rdp: A list (or a scalar) of RDP guarantees.
delta: The target delta.
Returns:
Pair of epsilon and optimal order alpha.
Raises:
ValueError
If the lengths of ``orders`` and ``rdp`` are not equal.
"""
orders_vec = np.atleast_1d(orders)
rdp_vec = np.atleast_1d(rdp)
if len(orders_vec) != len(rdp_vec):
raise ValueError(
f"Input lists must have the same length.\n"
f"\torders_vec = {orders_vec}\n"
f"\trdp_vec = {rdp_vec}\n"
)
|
eps = (
rdp_vec
- (np.log(delta) + np.log(orders_vec)) / (orders_vec - 1)
+ np.log((orders_vec - 1) / orders_vec)
)
# special case when there is no privacy
if np.isnan(eps).all():
return np.inf, np.nan
idx_opt = np.nanargmin(eps) # Ignore NaNs
return eps[idx_opt], orders_vec[idx_opt]
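if __name__ == "__main__":
    # Hedged usage sketch (parameter values are illustrative, not taken from the
    # original module): RDP of an SGM with sampling rate q = 0.01 and noise
    # multiplier 1.1, iterated 10_000 times, converted to (eps, delta)-DP at
    # delta = 1e-5.
    example_orders = [1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64))
    example_rdp = compute_rdp(
        q=0.01, noise_multiplier=1.1, steps=10_000, orders=example_orders
    )
    example_eps, example_opt_order = get_privacy_spent(
        example_orders, example_rdp, delta=1e-5
    )
    print(f"eps = {example_eps:.3f} at optimal order alpha = {example_opt_order}")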
| |
SearchRides.js
|
import React, { Component } from "react";
import "./searchRides.css";
import {
Container,
Row,
Col,
Form,
Button,
Card,
FormGroup,
Label,
Input
} from "reactstrap";
import moment from 'moment';
export default class
|
extends Component {
constructor(props) {
super(props);
this.state = {
origin: '',
destination: '',
passengers: 0,
posts: [],
yourBid: ""
};
this.textInput = React.createRef();
}
handleChange = event => {
event.preventDefault();
this.setState({
[event.target.name]: event.target.value
})
}
handleSubmit = async (event) => {
event.preventDefault();
let baseurl = "http://localhost:5000";
if (process.env.NODE_ENV === 'production') {
baseurl = "https://rideshare-app-nus.herokuapp.com";
}
const response = await fetch(`${baseurl}/api/rides?origin=${this.state.origin}&destination=${this.state.destination}&seats=${this.state.passengers}`, {
method: 'GET'
});
if (response.ok) {
const resp = await response.json();
resp.data.forEach(async (val, idx, arr) => {
arr[idx].ridestarttime = moment(val.ridestarttime).format("dddd, MMMM Do YYYY, h:mm a");
})
this.setState({
posts: resp.data
});
            if (this.state.posts.length === 0) {
alert("No rides found for that route!");
}
} else {
const error = await response.json();
alert(error.message);
}
}
fSubmitBid = async (e) => {
e.preventDefault();
const { history } = this.props;
let baseurl = "http://localhost:5000";
if (process.env.NODE_ENV === 'production') {
baseurl = "https://rideshare-app-nus.herokuapp.com";
}
const username = localStorage.getItem('myUsernameStorage');
        if (!username) {
            alert('You are not logged in');
            history.push('/login');
            return;
        }
const response = await fetch(`${baseurl}/api/bid`, {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
"username": username,
"bid": 10
})
})
if (response.ok) {
alert("New bid successful")
} else {
alert("Failed to bid")
}
}
render() {
return (
<div className="advertiseForm">
<Container className="mt-5">
<Row>
<Col xs={4}>
<Form className="formGroup searchRidesForm" onSubmit={this.handleSubmit}>
<h5 className="mb-3">Search for rides</h5>
<FormGroup row>
<Label sm={3}>From</Label>
<Col sm={9}>
<Input type="text" name="origin" value={this.state.origin} id="formStartLocation" placeholder="Enter Start Address" onChange={this.handleChange} />
</Col>
</FormGroup>
<FormGroup row>
<Label sm={3}>To</Label>
<Col sm={9}>
<Input type="text" name="destination" value={this.state.destination} id="formEndLocation" placeholder="Enter Destination Address" onChange={this.handleChange} />
</Col>
</FormGroup>
<FormGroup row>
<Label sm={3}>Passengers</Label>
<Col sm={9}>
<Input type="text" name="passengers" value={this.state.passengers} id="formNumPassenger" min="0" max="5" placeholder="Passengers" onChange={this.handleChange} />
</Col>
</FormGroup>
<div className="clearfix">
<Button outline color="success" className="float-right"> Submit</Button>{' '}
</div>
</Form>
</Col>
<Col xs="8">
<div >
{
this.state.posts.map(post =>
<div key={post.id} align="start" className="rides-list mb-3 line-separator">
<Container>
<Row>
<Col xs={3}>
<Row>
<h3 className="post-driver">{post.driver}</h3>
</Row>
<Row>
<small className="post-car">{post.car}</small>
{/* <small className="post-car">{this.state.car}</small> */}
</Row>
<Row>
<small className="post-capacity">{post.capacity + " seats available"} </small>
{/* <small className="post-capacity">{this.state.capacity + " seats available"} </small> */}
</Row>
</Col>
<Col xs={7}>
<Row>
<p className="pull-left"><b>Departure: </b></p>
<p className="pull-right">{post.origin}</p>
{/* <p className="pull-right">{this.state.origin}</p> */}
</Row>
<Row>
<p className="pull-left"><b>Arrival: </b></p>
<p className="pull-right">{post.destination}</p>
{/* <p className="pull-right">{this.props.destination}</p> */}
</Row>
<Row>
<p className="pull-left"><b>Departure Time: </b></p>
<p className="pull-right">{post.ridestarttime}</p>
{/* <p className="pull-right">{this.props.ridestarttime}</p> */}
</Row>
<Row>
<p className="pull-left"><b>Departure Time</b></p>
<p className="pull-right">
{" " + " : " + post.startTime}</p>
</Row>
</Col>
<Col xs={2}>
<Row>
<h5 className="post-price">{post.price}</h5>
{/* <h5 className="post-price">{this.props.price}</h5> */}
</Row>
<Row>
<Label>Enter Your Bid :</Label>
<input ref={this.textInput} type="number" name="yourBid" id="yourBid" step="0.1" className="submitBidForm" placeholder="Starting Bid ($)" />
<Button onClick={(e) => this.fSubmitBid(e)} outline color="success">Submit Bid</Button>{' '}
</Row>
</Col>
</Row>
</Container>
</div>
)
}
</div>
</Col>
</Row>
</Container>
</div >
)
}
}
|
SearchRides
|
baseline.py
|
import torch
from torch import nn
import torch.nn.functional as F
import sys
from .backbones.resnet import ResNet
sys.path.append('.')
EPSILON = 1e-12
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
        if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return y
class BAP(nn.Module):
def __init__(self, pool='GAP'):
super(BAP, self).__init__()
assert pool in ['GAP', 'GMP']
if pool == 'GAP':
self.pool = None
else:
self.pool = nn.AdaptiveMaxPool2d(1)
def forward(self, features, attentions, counterfactual=False):
B, C, H, W = features.size()
_, M, AH, AW = attentions.size()
# match size
if AH != H or AW != W:
attentions = F.upsample_bilinear(attentions, size=(H, W))
# feature_matrix: (B, M, C) -> (B, M * C)
if self.pool is None:
feature_matrix = (torch.einsum('imjk,injk->imn', (attentions, features)) / float(H * W)).view(B, -1)
else:
feature_matrix = []
for i in range(M):
AiF = self.pool(features * attentions[:, i:i + 1, ...]).view(B, -1)
feature_matrix.append(AiF)
feature_matrix = torch.cat(feature_matrix, dim=1)
# sign-sqrt
feature_matrix_raw = torch.sign(feature_matrix) * torch.sqrt(torch.abs(feature_matrix) + EPSILON)
# l2 normalization along dimension M and C
feature_matrix = F.normalize(feature_matrix_raw, dim=-1)
if counterfactual:
if self.training:
fake_att = torch.zeros_like(attentions).uniform_(0, 2)
else:
fake_att = torch.ones_like(attentions)
# mean_feature = features.mean(3).mean(2).view(B, 1, C)
# counterfactual_feature = mean_feature.expand(B, M, C).contiguous().view(B, -1)
counterfactual_feature = (torch.einsum('imjk,injk->imn', (fake_att, features)) / float(H * W)).view(B, -1)
counterfactual_feature = torch.sign(counterfactual_feature) * torch.sqrt(torch.abs(counterfactual_feature) + EPSILON)
counterfactual_feature = F.normalize(counterfactual_feature, dim=-1)
return feature_matrix, counterfactual_feature
else:
return feature_matrix
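# Shape sketch for BAP above (sizes assumed for illustration): in the default GAP
# path, with features of shape (B, C, H, W) = (8, 2048, 16, 16) and attentions of
# shape (B, M, H, W) = (8, 8, 16, 16), the einsum pools each of the M attention
# maps against the C feature channels into a (B, M, C) matrix, flattened to
# (B, M * C) = (8, 16384) before the sign-sqrt and L2 normalization steps.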
class MultiHeadAtt(nn.Module):
"""
Extend the channel attention into MultiHeadAtt.
It is modified from "Zhang H, Wu C, Zhang Z, et al. Resnest: Split-attention networks."
"""
def __init__(self, in_channels, channels,
radix=4, reduction_factor=4,
rectify=False, norm_layer=nn.BatchNorm2d):
super(MultiHeadAtt, self).__init__()
inter_channels = max(in_channels*radix//reduction_factor, 32)
self.radix = radix
self.channels = channels
self.relu = nn.ReLU(inplace=True)
self.fc1 = nn.Conv2d(channels, inter_channels, 1, groups=1)
self.bn1 = norm_layer(inter_channels)
self.fc2 = nn.Conv2d(inter_channels, channels*radix, 1, groups=1)
def forward(self, x):
batch, channel = x.shape[:2]
splited = torch.split(x, channel//self.radix, dim=1)
gap = sum(splited)
gap = F.adaptive_avg_pool2d(gap, 1)
gap = self.fc1(gap)
gap = self.bn1(gap)
gap = self.relu(gap)
atten = self.fc2(gap).view((batch, self.radix, self.channels))
atten = F.softmax(atten, dim=1).view(batch, -1, 1, 1)
atten = torch.split(atten, channel//self.radix, dim=1)
out= torch.cat([att*split for (att, split) in zip(atten, splited)],1)
return out.contiguous()
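# Shape sketch for MultiHeadAtt above (sizes assumed for illustration): built as
# MultiHeadAtt(256, 128, radix=2), an input of shape (B, 256, H, W) is split into
# 2 groups of 128 channels, a softmax attention of shape (B, 2, 128) is computed
# from the pooled group sum, and the re-weighted groups are concatenated back to
# (B, 256, H, W).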
class BN2d(nn.Module):
def __init__(self, planes):
super(BN2d, self).__init__()
self.bottleneck2 = nn.BatchNorm2d(planes)
self.bottleneck2.bias.requires_grad_(False) # no shift
self.bottleneck2.apply(weights_init_kaiming)
def forward(self, x):
return self.bottleneck2(x)
class Baseline(nn.Module):
in_planes = 2048
def __init__(self, num_classes, last_stride, model_path, using_cal):
super(Baseline, self).__init__()
self.using_cal = using_cal
self.base = ResNet(last_stride)
self.base.load_param(model_path)
self.radix = 2
self.base_1 = nn.Sequential(*list(self.base.children())[0:3])
self.BN1 = BN2d(64)
self.att1 = SELayer(64,8)
self.att_s1=MultiHeadAtt(64,int(64/self.radix),radix=self.radix)
self.base_2 = nn.Sequential(*list(self.base.children())[3:4])
self.BN2 = BN2d(256)
self.att2 = SELayer(256,32)
self.att_s2=MultiHeadAtt(256,int(256/self.radix),radix=self.radix)
self.base_3 = nn.Sequential(*list(self.base.children())[4:5])
self.BN3 = BN2d(512)
self.att3 = SELayer(512,64)
self.att_s3 = MultiHeadAtt(512,int(512/self.radix),radix=self.radix)
self.base_4 = nn.Sequential(*list(self.base.children())[5:6])
self.BN4 = BN2d(1024)
self.att4 = SELayer(1024,128)
self.att_s4=MultiHeadAtt(1024,int(1024/self.radix),radix=self.radix)
self.base_5 = nn.Sequential(*list(self.base.children())[6:])
self.BN5 = BN2d(2048)
self.att5 = SELayer(2048,256)
self.att_s5=MultiHeadAtt(2048,int(2048/self.radix),radix=self.radix)
self.M = 8
self.attentions = BasicConv2d(2048, self.M, kernel_size=1)
self.bap = BAP(pool='GAP')
self.gap = nn.AdaptiveAvgPool2d(1)
self.num_classes = num_classes
self.bottleneck = nn.BatchNorm1d(self.in_planes)
self.bottleneck.bias.requires_grad_(False) # no shift
self.bottleneck.apply(weights_init_kaiming)
self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)
self.classifier_bap = nn.Linear(self.in_planes*self.M, self.in_planes, bias=False)
self.classifier.apply(weights_init_classifier)
self.classifier_bap.apply(weights_init_classifier)
def forward(self, x):
############
|
x_1 = self.base_1(x)
x_1 = self.att_s1(x_1)
x_1 = self.BN1(x_1)
y_1 = self.att1(x_1)
x_att1=x_1*y_1.expand_as(x_1)
x_2 = self.base_2(x_att1)
x_2 = self.att_s2(x_2)
x_2 = self.BN2(x_2)
y_2 = self.att2(x_2)
x_att2=x_2*y_2.expand_as(x_2)
x_3 = self.base_3(x_att2)
x_3 = self.att_s3(x_3)
x_3 = self.BN3(x_3)
y_3 = self.att3(x_3)
x_att3=x_3*y_3.expand_as(x_3)
x_4 = self.base_4(x_att3)
x_4 = self.att_s4(x_4)
x_4 = self.BN4(x_4)
y_4 = self.att4(x_4)
x_att4=x_4*y_4.expand_as(x_4)
x_5 = self.base_5(x_att4)
x_5 = self.att_s5(x_5)
x_5 = self.BN5(x_5)
y_5 = self.att5(x_5)
x=x_5*y_5.expand_as(x_5)
############
        # x = self.base(x)  # replace the attention blocks above with this single call to use the plain base network
attention_maps = self.attentions(x)
global_feat,global_feat_hat = self.bap(x, attention_maps,counterfactual=True)
global_feat = global_feat.view(global_feat.shape[0], -1)
global_feat_hat = global_feat_hat.view(global_feat.shape[0], -1)
global_feat = self.classifier_bap(global_feat)
global_feat_hat = self.classifier_bap(global_feat_hat)
feat_hat = self.bottleneck(global_feat_hat)
feat = self.bottleneck(global_feat) # normalize for angular softmax
cls_score = self.classifier(feat)
cls_score_hat = self.classifier(feat_hat)
if self.training:
if self.using_cal:
return cls_score, cls_score-cls_score_hat, global_feat # global feature for triplet loss
else:
return cls_score, global_feat
else:
return cls_score
|
|
functions_a.js
|
var searchData=
|
['kickgroupusersasync_0',['KickGroupUsersAsync',['../class_nakama_1_1_client.html#ae0b1081795d5a8ce9b94ae3757effc79',1,'Nakama.Client.KickGroupUsersAsync()'],['../interface_nakama_1_1_i_client.html#a984fa00bdf9b43484e1c34d11bf4194e',1,'Nakama.IClient.KickGroupUsersAsync()']]]
];
|
[
|
dropdown-wrapper.component.spec.ts
|
import { ComponentFixture, TestBed } from '@angular/core/testing';
import { DropdownWrapperComponent } from './dropdown-wrapper.component';
describe('DropdownWrapperComponent', () => {
let component: DropdownWrapperComponent;
let fixture: ComponentFixture<DropdownWrapperComponent>;
beforeEach(async () => {
await TestBed.configureTestingModule({
declarations: [ DropdownWrapperComponent ]
})
.compileComponents();
});
beforeEach(() => {
fixture = TestBed.createComponent(DropdownWrapperComponent);
|
});
it('should create', () => {
expect(component).toBeTruthy();
});
});
|
component = fixture.componentInstance;
fixture.detectChanges();
|
f4249b4ba6fa_adding_feature_vectors.py
|
"""Adding feature vectors
Revision ID: f4249b4ba6fa
Revises: 863114f0c659
Create Date: 2020-11-24 14:43:08.789873
"""
import sqlalchemy as sa
from alembic import op
from mlrun.api.utils.db.sql_collation import SQLCollationUtil
# revision identifiers, used by Alembic.
revision = "f4249b4ba6fa"
down_revision = "863114f0c659"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"feature_vectors",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column(
"name",
sa.String(255, collation=SQLCollationUtil.collation()),
nullable=True,
),
sa.Column(
"project",
sa.String(255, collation=SQLCollationUtil.collation()),
nullable=True,
),
sa.Column("created", sa.TIMESTAMP(), nullable=True),
sa.Column("updated", sa.TIMESTAMP(), nullable=True),
sa.Column(
"state",
sa.String(255, collation=SQLCollationUtil.collation()),
nullable=True,
),
sa.Column(
"uid", sa.String(255, collation=SQLCollationUtil.collation()), nullable=True
),
sa.Column("object", sa.JSON(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name", "project", "uid", name="_feature_vectors_uc"),
)
op.create_table(
"feature_vectors_labels",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column(
"name",
sa.String(255, collation=SQLCollationUtil.collation()),
nullable=True,
),
sa.Column(
"value",
sa.String(255, collation=SQLCollationUtil.collation()),
nullable=True,
),
sa.Column("parent", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["parent"],
["feature_vectors.id"],
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name", "parent", name="_feature_vectors_labels_uc"),
)
op.create_table(
"feature_vectors_tags",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column(
"project",
sa.String(255, collation=SQLCollationUtil.collation()),
nullable=True,
),
sa.Column(
"name",
sa.String(255, collation=SQLCollationUtil.collation()),
nullable=True,
),
sa.Column("obj_id", sa.Integer(), nullable=True),
sa.Column(
"obj_name",
sa.String(255, collation=SQLCollationUtil.collation()),
nullable=True,
),
sa.ForeignKeyConstraint(
["obj_id"],
["feature_vectors.id"],
),
sa.ForeignKeyConstraint(
["obj_name"],
["feature_vectors.name"],
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint(
"project", "name", "obj_name", name="_feature_vectors_tags_uc"
),
)
# ### end Alembic commands ###
def
|
():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("feature_vectors_tags")
op.drop_table("feature_vectors_labels")
op.drop_table("feature_vectors")
# ### end Alembic commands ###
|
downgrade
|
mock_drt_unittest.py
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for MockDRT."""
import io
import optparse
import unittest
from webkitpy.common.system.system_host_mock import MockSystemHost
from webkitpy.layout_tests.port import mock_drt
from webkitpy.layout_tests.port import port_testcase
from webkitpy.layout_tests.port import test
from webkitpy.layout_tests.port.factory import PortFactory
class MockDRTPortTest(port_testcase.PortTestCase):
def make_port(self, host=None, options=optparse.Values({'configuration': 'Release'})):
host = host or MockSystemHost()
test.add_unit_tests_to_mock_filesystem(host.filesystem)
return mock_drt.MockDRTPort(host, port_name='mock-mac', options=options)
def test_port_name_in_constructor(self):
self.assertTrue(mock_drt.MockDRTPort(MockSystemHost(), port_name='mock-test'))
def test_check_sys_deps(self):
pass
def test_default_max_locked_shards(self):
pass
def test_diff_image(self):
pass
def test_diff_image_crashed(self):
pass
def test_uses_apache(self):
pass
def test_get_crash_log(self):
pass
def test_check_build(self):
pass
def test_virtual_test_suites(self):
|
def test_path_to_apache_config_file(self):
pass
class MockDRTTest(unittest.TestCase):
def input_line(self, port, test_name, pixel_tests, checksum=None):
url = port.create_driver(0).test_to_uri(test_name)
if url.startswith('file://'):
url = url[len('file://'):]
if pixel_tests:
url += "'--pixel-test"
if checksum:
url += "'" + checksum
return url + '\n'
def make_drt(self, options, args, host, stdin, stdout, stderr):
return mock_drt.MockDRT(options, args, host, stdin, stdout, stderr)
def make_input_output(self, port, test_name, pixel_tests,
expected_checksum, drt_output, drt_input=None, expected_text=None):
if pixel_tests:
if not expected_checksum:
expected_checksum = port.expected_checksum(test_name)
if not drt_input:
drt_input = self.input_line(port, test_name, pixel_tests, expected_checksum)
text_output = expected_text or port.expected_text(test_name) or ''
if not drt_output:
drt_output = self.expected_output(port, test_name, pixel_tests,
text_output, expected_checksum)
return (drt_input, drt_output)
def expected_output(self, port, test_name, pixel_tests, text_output, expected_checksum):
output = ['#READY\n', 'Content-Type: text/plain\n']
if text_output:
output.append(text_output)
output.append('#EOF\n')
if pixel_tests and expected_checksum:
output.extend(['\n',
'ActualHash: %s\n' % expected_checksum,
'ExpectedHash: %s\n' % expected_checksum])
output.append('#EOF\n')
return output
def assertTest(self, test_name, pixel_tests, expected_checksum=None, drt_output=None, host=None, expected_text=None):
port_name = 'test'
host = host or MockSystemHost()
test.add_unit_tests_to_mock_filesystem(host.filesystem)
port = PortFactory(host).get(port_name)
drt_input, drt_output = self.make_input_output(
port, test_name, pixel_tests, expected_checksum, drt_output, drt_input=None, expected_text=expected_text)
args = ['--run-layout-test', '--platform', port_name, '-']
stdin = io.BytesIO(drt_input)
stdout = io.BytesIO()
stderr = io.BytesIO()
options, args = mock_drt.parse_options(args)
drt = self.make_drt(options, args, host, stdin, stdout, stderr)
res = drt.run()
self.assertEqual(res, 0)
self.assertEqual(stdout.getvalue(), ''.join(drt_output))
self.assertEqual(stderr.getvalue(), '#EOF\n')
def test_main(self):
host = MockSystemHost()
test.add_unit_tests_to_mock_filesystem(host.filesystem)
stdin = io.BytesIO()
stdout = io.BytesIO()
stderr = io.BytesIO()
res = mock_drt.main(['--run-layout-test', '--platform', 'test', '-'],
host, stdin, stdout, stderr)
self.assertEqual(res, 0)
self.assertEqual(stdout.getvalue(), '#READY\n')
self.assertEqual(stderr.getvalue(), '')
self.assertEqual(host.filesystem.written_files, {})
def test_pixeltest_passes(self):
# This also tests that we handle HTTP: test URLs properly.
self.assertTest('http/tests/passes/text.html', True)
def test_pixeltest__fails(self):
self.assertTest('failures/expected/image_checksum.html',
pixel_tests=True,
expected_checksum='image_checksum-checksum',
drt_output=[
'#READY\n',
'Content-Type: text/plain\n',
'image_checksum-txt',
'#EOF\n',
'\n',
'ActualHash: image_checksum-checksum\n',
'ExpectedHash: image_checksum-checksum\n',
'#EOF\n',
])
def test_textonly(self):
self.assertTest('passes/image.html', False)
def test_checksum_in_png(self):
self.assertTest('passes/checksum_in_image.html', True)
def test_reftest_match(self):
self.assertTest('passes/reftest.html', True, expected_checksum='mock-checksum', expected_text='reference text\n')
def test_reftest_mismatch(self):
self.assertTest('passes/mismatch.html', True, expected_checksum='mock-checksum', expected_text='reference text\n')
def test_audio(self):
self.assertTest('passes/audio.html',
pixel_tests=True,
drt_output=[
'#READY\n',
'Content-Type: audio/wav\n',
'Content-Transfer-Encoding: base64\n',
'YXVkaW8td2F2',
'\n',
'#EOF\n',
'#EOF\n',
])
def test_virtual(self):
self.assertTest('virtual/passes/text.html', True)
|
pass
|
worker.go
|
package master
import (
"dister/protos"
"dister/worker"
"google.golang.org/grpc"
"time"
)
type Worker struct {
id string
cpu int32
mem int32
tasks []string
status protos.StateResponse_StatueType
conn *grpc.ClientConn
closeC chan struct{}
}
func
|
(id string, conn *grpc.ClientConn) *Worker {
return &Worker{
id: id,
cpu: 0,
mem: 0,
tasks: []string{},
status: protos.StateResponse_UnReady,
conn: conn,
closeC: make(chan struct{}),
}
}
func (w *Worker) Start() error {
	// Create the ticker once and stop it on exit so each loop iteration does not leak a ticker.
	tick := time.NewTicker(time.Second)
	defer tick.Stop()
	for {
select {
case <-tick.C:
state, err := worker.State(w.conn, &protos.StateRequest{})
if err != nil {
return err
}
w.tasks = state.Tasks
w.status = state.St
case <-w.closeC:
return nil
}
}
}
func (w *Worker) UnitTest(req *protos.TaskData) (*protos.TaskData, error) {
return worker.UnitTest(w.conn, req)
}
func (w *Worker) Prepare(req *protos.TaskData) (*protos.TaskProcessResponse, error) {
return worker.Prepare(w.conn, req)
}
func (w *Worker) Commit(req *protos.TaskCommitRequest) (*protos.TaskCommitResponse, error) {
return worker.Commit(w.conn, req)
}
func (w *Worker) Close() {
close(w.closeC)
w.conn.Close()
}
|
NewWorker
|
euler_test_base.py
|
import signal
import unittest
import time
from . import website as w
class EulerProblem(unittest.TestCase):
problem_id = None
def solver(self, input_val):
raise NotImplementedError()
simple_input = None
simple_output = None
real_input = None
def solve_real(self):
"""
Returns the solution of the Problem for the real input
"""
return self.solver(self.real_input)
def solve_simple(self):
"""
Returns the solution of the Problem for the simple input
"""
return self.solver(self.simple_input)
@classmethod
def setUpClass(cls):
if cls.solver is EulerProblem.solver:
raise unittest.SkipTest(
"Not running the tests for a not implemented problem")
def test_simple(self):
|
def test_real(self):
"""
Checks the real problem against the website
"""
website = w.Website()
real_output = self.solve_real()
self.assertTrue(w.check_solution(
website, self.problem_id, solution=real_output))
# Windows has no Alarm signal. Sorry pal.
use_signal = hasattr(signal, "SIGALRM")
def test_time(self):
"""
Checks that the real problem runs under a minute
"""
time_limit = 60
try:
if self.use_signal:
def handler(signum, frame): # pylint: disable=unused-argument
raise TimeoutError()
old_handler = signal.signal(signal.SIGALRM, handler)
signal.alarm(time_limit)
before = time.time()
self.solve_real()
after = time.time()
if after - before > time_limit:
raise TimeoutError()
except TimeoutError:
self.fail("Test failed to end in less than a minute.")
finally:
if self.use_signal:
signal.signal(signal.SIGALRM, old_handler)
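    # Hedged usage sketch (problem values assumed, following the attribute layout
    # above): a concrete problem subclasses EulerProblem and fills in the class
    # attributes plus solver(), e.g.
    #
    #     class Problem1(EulerProblem):
    #         problem_id = 1
    #         simple_input, simple_output = 10, 23
    #         real_input = 1000
    #
    #         def solver(self, input_val):
    #             return sum(i for i in range(input_val) if i % 3 == 0 or i % 5 == 0)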
|
"""
Checks the simple example
"""
self.assertEqual(self.solve_simple(), self.simple_output)
|
util.go
|
package util
|
)
func FormatDuration(d time.Duration) string {
d = d.Round(time.Second)
h := d / time.Hour
d -= h * time.Hour
m := d / time.Minute
d -= m * time.Minute
return fmt.Sprintf("%d:%02d:%02d", h, m, d/time.Second)
}
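// Example: FormatDuration(3*time.Hour + 5*time.Minute + 7*time.Second) returns
// "3:05:07"; sub-second precision is dropped by the initial Round(time.Second).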
|
import (
"fmt"
"time"
|
test_normalization.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import numpy as np
import numpy.testing as npt
import six
from caffe2.python import core, workspace
from ml.rl.caffe_utils import C2
from ml.rl.preprocessing import identify_types, normalization
from ml.rl.preprocessing.identify_types import BOXCOX, CONTINUOUS, ENUM
from ml.rl.preprocessing.normalization import (
NormalizationParameters,
sort_features_by_normalization,
)
from ml.rl.preprocessing.preprocessor_net import PreprocessorNet
from ml.rl.test.preprocessing_util import (
BOXCOX_FEATURE_ID,
ENUM_FEATURE_ID,
PROBABILITY_FEATURE_ID,
id_to_type,
read_data,
)
from ml.rl.test.utils import NumpyFeatureProcessor
from scipy import special
class TestNormalization(unittest.TestCase):
def _feature_type_override(self, feature_id):
"""
This should only be used to test CONTINUOUS_ACTION
"""
if id_to_type(feature_id) == identify_types.CONTINUOUS_ACTION:
return identify_types.CONTINUOUS_ACTION
return None
def test_prepare_normalization_and_normalize(self):
feature_value_map = read_data()
normalization_parameters = {}
for name, values in feature_value_map.items():
normalization_parameters[name] = normalization.identify_parameter(
values, 10, feature_type=self._feature_type_override(name)
)
for k, v in normalization_parameters.items():
if id_to_type(k) == CONTINUOUS:
self.assertEqual(v.feature_type, CONTINUOUS)
self.assertIs(v.boxcox_lambda, None)
self.assertIs(v.boxcox_shift, None)
elif id_to_type(k) == BOXCOX:
self.assertEqual(v.feature_type, BOXCOX)
self.assertIsNot(v.boxcox_lambda, None)
self.assertIsNot(v.boxcox_shift, None)
else:
assert v.feature_type == id_to_type(k)
sorted_features, _ = sort_features_by_normalization(normalization_parameters)
norm_net = core.Net("net")
C2.set_net(norm_net)
preprocessor = PreprocessorNet()
input_matrix = np.zeros([10000, len(sorted_features)], dtype=np.float32)
for i, feature in enumerate(sorted_features):
input_matrix[:, i] = feature_value_map[feature]
input_matrix_blob = "input_matrix_blob"
workspace.FeedBlob(input_matrix_blob, np.array([], dtype=np.float32))
output_blob, _ = preprocessor.normalize_dense_matrix(
input_matrix_blob, sorted_features, normalization_parameters, "", False
)
workspace.FeedBlob(input_matrix_blob, input_matrix)
workspace.RunNetOnce(norm_net)
normalized_feature_matrix = workspace.FetchBlob(output_blob)
normalized_features = {}
on_column = 0
for feature in sorted_features:
norm = normalization_parameters[feature]
if norm.feature_type == ENUM:
column_size = len(norm.possible_values)
else:
column_size = 1
normalized_features[feature] = normalized_feature_matrix[
:, on_column : (on_column + column_size)
]
on_column += column_size
self.assertTrue(
all(
[
np.isfinite(parameter.stddev) and np.isfinite(parameter.mean)
for parameter in normalization_parameters.values()
]
)
)
for k, v in six.iteritems(normalized_features):
self.assertTrue(np.all(np.isfinite(v)))
feature_type = normalization_parameters[k].feature_type
if feature_type == identify_types.PROBABILITY:
sigmoidv = special.expit(v)
self.assertTrue(
np.all(
np.logical_and(np.greater(sigmoidv, 0), np.less(sigmoidv, 1))
)
)
elif feature_type == identify_types.ENUM:
possible_values = normalization_parameters[k].possible_values
self.assertEqual(v.shape[0], len(feature_value_map[k]))
self.assertEqual(v.shape[1], len(possible_values))
possible_value_map = {}
for i, possible_value in enumerate(possible_values):
possible_value_map[possible_value] = i
for i, row in enumerate(v):
original_feature = feature_value_map[k][i]
self.assertEqual(
possible_value_map[original_feature], np.where(row == 1)[0][0]
)
elif feature_type == identify_types.QUANTILE:
for i, feature in enumerate(v[0]):
original_feature = feature_value_map[k][i]
expected = NumpyFeatureProcessor.value_to_quantile(
original_feature, normalization_parameters[k].quantiles
)
self.assertAlmostEqual(feature, expected, 2)
elif feature_type == identify_types.BINARY:
pass
elif (
feature_type == identify_types.CONTINUOUS
or feature_type == identify_types.BOXCOX
):
one_stddev = np.isclose(np.std(v, ddof=1), 1, atol=0.01)
zero_stddev = np.isclose(np.std(v, ddof=1), 0, atol=0.01)
zero_mean = np.isclose(np.mean(v), 0, atol=0.01)
self.assertTrue(
np.all(zero_mean),
"mean of feature {} is {}, not 0".format(k, np.mean(v)),
)
self.assertTrue(np.all(np.logical_or(one_stddev, zero_stddev)))
elif feature_type == identify_types.CONTINUOUS_ACTION:
less_than_max = v < 1
more_than_min = v > -1
self.assertTrue(
np.all(less_than_max),
"values are not less than 1: {}".format(v[less_than_max == False]),
)
self.assertTrue(
np.all(more_than_min),
"values are not more than -1: {}".format(v[more_than_min == False]),
)
else:
raise NotImplementedError()
def test_normalize_dense_matrix_enum(self):
normalization_parameters = {
1: NormalizationParameters(
identify_types.ENUM,
None,
None,
None,
None,
[12, 4, 2],
None,
None,
None,
),
2: NormalizationParameters(
identify_types.CONTINUOUS, None, 0, 0, 1, None, None, None, None
),
3: NormalizationParameters(
identify_types.ENUM, None, None, None, None, [15, 3], None, None, None
),
}
norm_net = core.Net("net")
C2.set_net(norm_net)
preprocessor = PreprocessorNet()
inputs = np.zeros([4, 3], dtype=np.float32)
feature_ids = [2, 1, 3] # Sorted according to feature type
inputs[:, feature_ids.index(1)] = [12, 4, 2, 2]
inputs[:, feature_ids.index(2)] = [1.0, 2.0, 3.0, 3.0]
inputs[:, feature_ids.index(3)] = [15, 3, 15, normalization.MISSING_VALUE]
input_blob = C2.NextBlob("input_blob")
workspace.FeedBlob(input_blob, np.array([0], dtype=np.float32))
normalized_output_blob, _ = preprocessor.normalize_dense_matrix(
input_blob, feature_ids, normalization_parameters, "", False
)
workspace.FeedBlob(input_blob, inputs)
workspace.RunNetOnce(norm_net)
normalized_feature_matrix = workspace.FetchBlob(normalized_output_blob)
np.testing.assert_allclose(
np.array(
[
[1.0, 1, 0, 0, 1, 0],
[2.0, 0, 1, 0, 0, 1],
[3.0, 0, 0, 1, 1, 0],
[3.0, 0, 0, 1, 0, 0], # Missing values should go to all 0
]
),
normalized_feature_matrix,
)
def
|
(self):
feature_value_map = read_data()
normalization_parameters = {}
for name, values in feature_value_map.items():
normalization_parameters[name] = normalization.identify_parameter(
values, feature_type=self._feature_type_override(name)
)
s = normalization.serialize(normalization_parameters)
read_parameters = normalization.deserialize(s)
        # Unfortunately, Thrift serialization seems to lose a bit of precision,
        # so comparing with `==` would be False.
self.assertEqual(read_parameters.keys(), normalization_parameters.keys())
for k in normalization_parameters:
self.assertEqual(
read_parameters[k].feature_type,
normalization_parameters[k].feature_type,
)
self.assertEqual(
read_parameters[k].possible_values,
normalization_parameters[k].possible_values,
)
for field in [
"boxcox_lambda",
"boxcox_shift",
"mean",
"stddev",
"quantiles",
"min_value",
"max_value",
]:
if getattr(normalization_parameters[k], field) is None:
self.assertEqual(
getattr(read_parameters[k], field),
getattr(normalization_parameters[k], field),
)
else:
npt.assert_allclose(
getattr(read_parameters[k], field),
getattr(normalization_parameters[k], field),
)
def test_preprocessing_network(self):
feature_value_map = read_data()
normalization_parameters = {}
for name, values in feature_value_map.items():
normalization_parameters[name] = normalization.identify_parameter(
values, feature_type=self._feature_type_override(name)
)
test_features = NumpyFeatureProcessor.preprocess(
feature_value_map, normalization_parameters
)
net = core.Net("PreprocessingTestNet")
C2.set_net(net)
preprocessor = PreprocessorNet()
name_preprocessed_blob_map = {}
for feature_name in feature_value_map:
workspace.FeedBlob(str(feature_name), np.array([0], dtype=np.int32))
preprocessed_blob, _ = preprocessor.preprocess_blob(
str(feature_name), [normalization_parameters[feature_name]]
)
name_preprocessed_blob_map[feature_name] = preprocessed_blob
workspace.CreateNet(net)
for feature_name, feature_value in six.iteritems(feature_value_map):
feature_value = np.expand_dims(feature_value, -1)
workspace.FeedBlob(str(feature_name), feature_value)
workspace.RunNetOnce(net)
for feature_name in feature_value_map:
normalized_features = workspace.FetchBlob(
name_preprocessed_blob_map[feature_name]
)
if feature_name != ENUM_FEATURE_ID:
normalized_features = np.squeeze(normalized_features, -1)
tolerance = 0.01
if feature_name == BOXCOX_FEATURE_ID:
# At the limit, boxcox has some numerical instability
tolerance = 0.5
non_matching = np.where(
np.logical_not(
np.isclose(
normalized_features,
test_features[feature_name],
rtol=tolerance,
atol=tolerance,
)
)
)
self.assertTrue(
np.all(
np.isclose(
normalized_features,
test_features[feature_name],
rtol=tolerance,
atol=tolerance,
)
),
"{} does not match: {} {}".format(
feature_name,
normalized_features[non_matching].tolist(),
test_features[feature_name][non_matching].tolist(),
),
)
def test_type_override(self):
# Take a feature that should be identified as probability
feature_value_map = read_data()
probability_values = feature_value_map[PROBABILITY_FEATURE_ID]
# And ask for a binary anyways
parameter = normalization.identify_parameter(
probability_values, feature_type=identify_types.BINARY
)
self.assertEqual(parameter.feature_type, "BINARY")
|
test_persistency
|
main.py
|
"""
Climate Change Project
"""
import plotly.graph_objects as go
from PIL import Image, ImageDraw, ImageFont
from computing_data import calc_high_actual_pd, \
calc_low_actual_pd, \
calc_median_actual_pd, \
make_high_rcp_list, make_low_rcp_list, \
make_median_rcp_list, rcp_to_slice, temp_to_rgb
from reading_data import read_actual_data, read_predicted_data, CITY_SET, MAP, CITY_TEMPS
def plot_temp_data(actual_temps_dict: dict, final_low_rcp_list: list, final_median_rcp_list: list,
final_high_rcp_list: list) -> None:
"""Plot a line and scatter graph of real and predicted temperatures
using plotly's line and scatter plots
"""
x = list(actual_temps_dict.keys())
actual_y = list(actual_temps_dict.values())
low_predicted_y = final_low_rcp_list
median_predicted_y = final_median_rcp_list
high_predicted_y = final_high_rcp_list
fig = go.Figure()
fig.add_trace(go.Scatter(x=x, y=low_predicted_y,
mode='lines+markers',
name='RCP 2.6 Predicted Temperature'))
fig.add_trace(go.Scatter(x=x, y=median_predicted_y,
mode='lines+markers',
name='RCP 4.5 Predicted Temperature'))
fig.add_trace(go.Scatter(x=x, y=high_predicted_y,
mode='lines+markers',
name='RCP 8.5 Predicted Temperature'))
fig.add_trace(go.Scatter(x=x, y=actual_y,
mode='lines+markers',
name='Actual Temperature'))
fig.update_layout(
title="Actual vs Predicted Temperature of " + city[3],
xaxis_title="Years",
yaxis_title="Temperature (Celsius)",
font=dict(
family="Courier New, monospace",
size=18)
)
fig.show()
def draw_table(actual_temps_dict: dict,
final_low_rcp_list: list,
final_median_rcp_list: list,
final_high_rcp_list: list,
low_rcp_percentage_difference: list,
median_rcp_percentage_difference: list,
high_rcp_percentage_difference: list) -> None:
|
def draw_map(rcp_type: str) -> None:
"""
Draws both maps for predicted and actual temperature of the cities in Canada
"""
map = Image.open(MAP)
width, height = map.size
new_map = Image.new('RGB', (width * 2, height + 80))
# fills the cities for the actual map
for city in CITY_SET:
temp = CITY_TEMPS[city][0]
ImageDraw.floodfill(map, city[2], temp_to_rgb(temp), thresh=50)
map2 = Image.open(MAP)
# fills the cities for the predicted map
for city in CITY_SET:
temp = CITY_TEMPS[city][rcp_to_slice(rcp_type)]
ImageDraw.floodfill(map2, city[2], temp_to_rgb(temp), thresh=50)
new_map.paste(map, (0, 80))
new_map.paste(map2, (width, 80))
# Writes the titles
title_font = ImageFont.truetype("arial.ttf", 50)
new_map_editable = ImageDraw.Draw(new_map)
new_map_editable.text((width // 3, 10),
'Actual Temperatures(' + year + ')', font=title_font)
new_map_editable.text((int(1.3 * width), 10),
'Predicted Temperatures(' + year + ')', font=title_font)
new_map.show()
def run(city: tuple, year: int, city_name: str) -> None:
"""
Runs the code for one city
"""
actual_temps_dict = read_actual_data(city[0])
predicted_temps_dict = read_predicted_data(city[1], actual_temps_dict)
if city[3].lower() == city_name.lower():
final_low_rcp_list = make_low_rcp_list(predicted_temps_dict)
low_rcp_percentage_difference = \
calc_low_actual_pd(actual_temps_dict, final_low_rcp_list)
final_median_rcp_list = make_median_rcp_list(predicted_temps_dict)
median_rcp_percentage_difference = \
calc_median_actual_pd(actual_temps_dict,
final_median_rcp_list)
final_high_rcp_list = make_high_rcp_list(predicted_temps_dict)
high_rcp_percentage_difference = \
calc_high_actual_pd(actual_temps_dict,
final_high_rcp_list)
plot_temp_data(actual_temps_dict, final_low_rcp_list,
final_median_rcp_list, final_high_rcp_list)
draw_table(actual_temps_dict, final_low_rcp_list, final_median_rcp_list,
final_high_rcp_list,
low_rcp_percentage_difference, median_rcp_percentage_difference,
high_rcp_percentage_difference)
temperatures = [actual_temps_dict[year], predicted_temps_dict[year]['RCP 2.6'],
predicted_temps_dict[year]['RCP 4.5'], predicted_temps_dict[year]['RCP 8.5']]
CITY_TEMPS[city] = temperatures
# this is the main part of the program that calls every function
if __name__ == '__main__':
year = input('Write the year for the map to display data from '
'(in range of 2003-2019 inclusive)')
if not 2003 <= int(year) <= 2019:
year = input('Try again. Write the number between 2003 and 2019 inclusive')
city_name = input(
        'Type the name of the city whose stats you want to display on the graph '
        '(TORONTO, QUEBEC, HALIFAX, WINNIPEG)')
if city_name.lower() not in ('toronto', 'halifax', 'quebec', 'winnipeg'):
city_name = input(
'Try again. Type Toronto or Quebec or Halifax or Winnipeg')
rcp_type = input(
'Write an RCP value for the map to display on the "predicted" side.'
'(write RCP 2.6 or RCP 4.5 or RCP 8.5)')
if rcp_type not in ('RCP 2.6', 'RCP 4.5', 'RCP 8.5'):
        rcp_type = input('Try again. Write RCP 2.6 or RCP 4.5 or RCP 8.5')
while True:
for city in CITY_SET:
run(city, int(year), city_name)
draw_map(rcp_type)
year = input('Write the year for the map to display data from '
'(in range of 2003-2019 inclusive). '
'Type 2 wrong answers to exit')
if not 2003 <= int(year) <= 2019:
year = input('Try again. Write the number between 2003 and 2019 inclusive. '
'Type a wrong answer to exit')
if not 2003 <= int(year) <= 2019:
break
city_name = input(
            'Type the name of the city whose stats you want to display on the graph '
            '(TORONTO, QUEBEC, HALIFAX, WINNIPEG). Type 2 wrong answers to exit.')
if city_name.lower() not in ('toronto', 'halifax', 'quebec', 'winnipeg'):
city_name = input(
'Try again. Type Toronto or Quebec or Halifax or Winnipeg. '
'Type a wrong answer to exit.')
if city_name.lower() not in ('toronto', 'halifax', 'quebec', 'winnipeg'):
break
rcp_type = input(
'Write an RCP value for the map to display on the "predicted" side.'
'(write RCP 2.6 or RCP 4.5 or RCP 8.5) Type 2 wrong answers to exit')
if rcp_type not in ('RCP 2.6', 'RCP 4.5', 'RCP 8.5'):
            rcp_type = input('Try again. Write RCP 2.6 or RCP 4.5 or RCP 8.5. '
                             'Type a wrong answer to exit.')
if rcp_type not in ('RCP 2.6', 'RCP 4.5', 'RCP 8.5'):
break
|
"""
    Draw a table using plotly's basic table
"""
fig = go.Figure(data=[go.Table(header=dict(values=['Actual Temperature', 'RCP 2.6',
'% Difference of RCP 2.6 and Actual Temp',
'RCP 4.5',
'% Difference of RCP 4.5 and Actual Temp',
'RCP 8.5',
'% Difference of RCP 8.5 and Actual Temp'],
line_color='darkslategray',
fill_color='lightskyblue'),
cells=dict(values=[list(actual_temps_dict.values()),
final_low_rcp_list,
low_rcp_percentage_difference,
final_median_rcp_list,
median_rcp_percentage_difference,
final_high_rcp_list,
high_rcp_percentage_difference]))])
fig.update_layout(
title="Actual vs Predicted Temperature of " + city[3]
)
fig.show()
|
purchase.service.ts
|
import { Injectable } from '@angular/core';
import { HttpClient, HttpHeaders } from '@angular/common/http';
@Injectable({
providedIn: 'root'
})
export class PurchaseService {
rootURL="http://natraj-mobile.somee.com/api/";
constructor(private http:HttpClient) { }
getPurchaseItems(){
return this.http.get(this.rootURL + "Purchases")
}
addPurchase(purchase){
|
}
deletePurchase(Id:number){
return this.http.delete(this.rootURL + "Purchases/"+Id);
}
getDateFilter(model:any){
return this.http.post(this.rootURL+ "PurchaseDateFilter",model);
}
}
|
return this.http.post(this.rootURL + "Purchases",purchase);
|
HelloRouteParameter.go
|
package main
import "github.com/gin-gonic/gin"
func HelloRouteParameter()
|
{
r := gin.Default()
	// /user/:id is a route-matching pattern (a wildcard). Here :id is a route parameter:
	// its value can be read with c.Param("id") and then used, e.g. printed in the response.
r.GET("/user/:id", func(context *gin.Context) {
id := context.Param("id")
context.String(200, "The user id is %s", id)
})
_ = r.Run(":8081")
}
|
|
index.ts
|
import * as cdk from '@aws-cdk/core';
import { readFileSync } from 'fs';
import { resolve } from 'path';
import { BackEnd } from './backend';
import { MedplumInfraConfig } from './config';
import { FrontEnd } from './frontend';
import { Storage } from './storage';
class MedplumStack extends cdk.Stack {
backEnd: BackEnd;
frontEnd: FrontEnd;
storage: Storage;
constructor(scope: cdk.App, config: MedplumInfraConfig) {
|
region: config.region,
account: config.accountNumber,
},
});
this.backEnd = new BackEnd(this, config);
this.frontEnd = new FrontEnd(this, config);
this.storage = new Storage(this, config);
}
}
export function main(context?: Record<string, string>): void {
const app = new cdk.App({ context });
const configFileName = app.node.tryGetContext('config');
if (!configFileName) {
console.log('Missing "config" context variable');
console.log('Usage: cdk deploy -c config=my-config.json');
return;
}
const config = JSON.parse(readFileSync(resolve(configFileName), 'utf-8')) as MedplumInfraConfig;
const stack = new MedplumStack(app, config);
console.log('Stack', stack.stackId);
console.log('BackEnd', stack.backEnd.node.id);
console.log('FrontEnd', stack.frontEnd.node.id);
console.log('Storage', stack.storage.node.id);
app.synth();
}
if (process.argv[1].endsWith('index.ts')) {
main();
}
|
super(scope, config.stackName, {
env: {
|
Interrupts.js
|
import React from 'react'
import {Trans} from '@lingui/react'
|
export default class SchInterrupts extends Interrupts {
suggestionContent = <Trans id="sch.interrupts.suggestion.content">If you can, try to preposition yourself so you don't have to move during mechanics as much as possible. Utilizing slidecasting will lower the need to use <ActionLink {...ACTIONS.SCH_RUIN_II}/> to instantly relocate or interrupt your current Broil III cast</Trans>
}
|
import {Interrupts} from 'parser/core/modules/Interrupts'
import ACTIONS from 'data/ACTIONS'
import {ActionLink} from 'components/ui/DbLink'
|
manage.py
|
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
|
if __name__ == '__main__':
main()
|
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'translate.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
log-total-deleted.ts
|
import styles from 'ansi-styles'
|
const totalDeleted = `${styles.bold.open}${styles.blueBright.open}Stale Branches Deleted${styles.blueBright.close}: [${styles.redBright.open}${outputDeletes}${styles.redBright.close}/${styles.yellowBright.open}${outputStales}${styles.yellowBright.close}]${styles.bold.close}`
return totalDeleted
}
|
export function logTotalDeleted(outputDeletes, outputStales): string {
|
audioSegmentation.py
|
from __future__ import print_function
import os
import csv
import glob
import scipy
import sklearn
import numpy as np
import hmmlearn.hmm
import sklearn.cluster
import pickle as cpickle
import matplotlib.pyplot as plt
from scipy.spatial import distance
import sklearn.discriminant_analysis
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioTrainTest as at
from pyAudioAnalysis import MidTermFeatures as mtf
from pyAudioAnalysis import ShortTermFeatures as stf
""" General utility functions """
def smooth_moving_avg(signal, window=11):
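    """
    Smooths a 1-D signal with a simple moving average of length `window`,
    mirror-padding the edges so that the output has the same length as the input.
    """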
window = int(window)
if signal.ndim != 1:
raise ValueError("")
if signal.size < window:
raise ValueError("Input vector needs to be bigger than window size.")
if window < 3:
return signal
s = np.r_[2 * signal[0] - signal[window - 1::-1],
signal, 2 * signal[-1] - signal[-1:-window:-1]]
w = np.ones(window, 'd')
y = np.convolve(w/w.sum(), s, mode='same')
return y[window:-window + 1]
def self_similarity_matrix(feature_vectors):
"""
This function computes the self-similarity matrix for a sequence
of feature vectors.
ARGUMENTS:
- feature_vectors: a np matrix (nDims x nVectors) whose i-th column
corresponds to the i-th feature vector
RETURNS:
- sim_matrix: the self-similarity matrix (nVectors x nVectors)
"""
norm_feature_vectors, mean, std = at.normalize_features([feature_vectors.T])
norm_feature_vectors = norm_feature_vectors[0].T
sim_matrix = 1.0 - distance.squareform(
distance.pdist(norm_feature_vectors.T, 'cosine'))
return sim_matrix
def labels_to_segments(labels, window):
"""
ARGUMENTS:
- labels: a sequence of class labels (per time window)
- window: window duration (in seconds)
RETURNS:
     - segments:   a sequence of segment limits: segs[i, 0] and segs[i, 1]
                   are the start and end points of segment i
- classes: a sequence of class flags: class[i] is the class ID of
the i-th segment
"""
if len(labels)==1:
segs = [0, window]
classes = labels
return segs, classes
num_segs = 0
index = 0
classes = []
segment_list = []
cur_label = labels[index]
while index < len(labels) - 1:
previous_value = cur_label
while True:
index += 1
compare_flag = labels[index]
if (compare_flag != cur_label) | (index == len(labels) - 1):
num_segs += 1
cur_label = labels[index]
segment_list.append((index * window))
classes.append(previous_value)
break
segments = np.zeros((len(segment_list), 2))
for i in range(len(segment_list)):
if i > 0:
segments[i, 0] = segment_list[i-1]
segments[i, 1] = segment_list[i]
return segments, classes
def segments_to_labels(start_times, end_times, labels, window):
"""
This function converts segment endpoints and respective segment
    labels to fixed-size class labels.
ARGUMENTS:
- start_times: segment start points (in seconds)
- end_times: segment endpoints (in seconds)
- labels: segment labels
- window: fix-sized window (in seconds)
RETURNS:
- flags: np array of class indices
- class_names: list of classnames (strings)
"""
flags = []
class_names = list(set(labels))
index = window / 2.0
while index < end_times[-1]:
for i in range(len(start_times)):
if start_times[i] < index <= end_times[i]:
break
flags.append(class_names.index(labels[i]))
index += window
return np.array(flags), class_names
def compute_metrics(confusion_matrix, class_names):
"""
This function computes the precision, recall and f1 measures,
given a confusion matrix
"""
f1 = []
recall = []
precision = []
n_classes = confusion_matrix.shape[0]
if len(class_names) != n_classes:
print("Error in computePreRec! Confusion matrix and class_names "
"list must be of the same size!")
else:
for i, c in enumerate(class_names):
|
np.sum(confusion_matrix[:, i]))
recall.append(confusion_matrix[i, i] /
np.sum(confusion_matrix[i, :]))
f1.append(2 * precision[-1] * recall[-1] /
(precision[-1] + recall[-1]))
return recall, precision, f1
def read_segmentation_gt(gt_file):
"""
This function reads a segmentation ground truth file,
following a simple CSV format with the following columns:
<segment start>,<segment end>,<class label>
ARGUMENTS:
- gt_file: the path of the CSV segment file
RETURNS:
- seg_start: a np array of segments' start positions
- seg_end: a np array of segments' ending positions
- seg_label: a list of respective class labels (strings)
"""
with open(gt_file, 'rt') as f_handle:
reader = csv.reader(f_handle, delimiter='\t')
start_times = []
end_times = []
labels = []
for row in reader:
if len(row) == 3:
start_times.append(float(row[0]))
end_times.append(float(row[1]))
labels.append((row[2]))
return np.array(start_times), np.array(end_times), labels
def plot_segmentation_results(flags_ind, flags_ind_gt, class_names, mt_step,
evaluate_only=False):
"""
This function plots statistics on the classification-segmentation results
produced either by the fix-sized supervised method or the HMM method.
It also computes the overall accuracy achieved by the respective method
if ground-truth is available.
"""
flags = [class_names[int(f)] for f in flags_ind]
segments, classes = labels_to_segments(flags, mt_step)
min_len = min(flags_ind.shape[0], flags_ind_gt.shape[0])
if min_len > 0:
accuracy = np.sum(flags_ind[0:min_len] ==
flags_ind_gt[0:min_len]) / float(min_len)
else:
accuracy = -1
if not evaluate_only:
duration = segments[-1, 1]
s_percentages = np.zeros((len(class_names), ))
percentages = np.zeros((len(class_names), ))
av_durations = np.zeros((len(class_names), ))
for i_seg in range(segments.shape[0]):
s_percentages[class_names.index(classes[i_seg])] += \
(segments[i_seg, 1]-segments[i_seg, 0])
for i in range(s_percentages.shape[0]):
percentages[i] = 100.0 * s_percentages[i] / duration
class_sum = sum(1 for c in classes if c == class_names[i])
if class_sum > 0:
av_durations[i] = s_percentages[i] / class_sum
else:
av_durations[i] = 0.0
for i in range(percentages.shape[0]):
print(class_names[i], percentages[i], av_durations[i])
font = {'size': 10}
plt.rc('font', **font)
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.set_yticks(np.array(range(len(class_names))))
ax1.axis((0, duration, -1, len(class_names)))
ax1.set_yticklabels(class_names)
ax1.plot(np.array(range(len(flags_ind))) * mt_step +
mt_step / 2.0, flags_ind)
if flags_ind_gt.shape[0] > 0:
ax1.plot(np.array(range(len(flags_ind_gt))) * mt_step +
mt_step / 2.0, flags_ind_gt + 0.05, '--r')
plt.xlabel("time (seconds)")
if accuracy >= 0:
plt.title('Accuracy = {0:.1f}%'.format(100.0 * accuracy))
ax2 = fig.add_subplot(223)
plt.title("Classes percentage durations")
ax2.axis((0, len(class_names) + 1, 0, 100))
ax2.set_xticks(np.array(range(len(class_names) + 1)))
ax2.set_xticklabels([" "] + class_names)
print(np.array(range(len(class_names))), percentages)
ax2.bar(np.array(range(len(class_names))) + 0.5, percentages)
ax3 = fig.add_subplot(224)
plt.title("Segment average duration per class")
ax3.axis((0, len(class_names)+1, 0, av_durations.max()))
ax3.set_xticks(np.array(range(len(class_names) + 1)))
ax3.set_xticklabels([" "] + class_names)
ax3.bar(np.array(range(len(class_names))) + 0.5, av_durations)
fig.tight_layout()
plt.show()
return accuracy
def evaluate_speaker_diarization(labels, labels_gt):
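    """
    Computes cluster purity and speaker purity of a predicted speaker-label
    sequence against a ground-truth label sequence, based on the contingency
    table between predicted clusters and reference speakers.
    """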
min_len = min(labels.shape[0], labels_gt.shape[0])
labels = labels[0:min_len]
labels_gt = labels_gt[0:min_len]
unique_flags = np.unique(labels)
unique_flags_gt = np.unique(labels_gt)
    # compute contingency table:
contigency_matrix = np.zeros((unique_flags.shape[0],
unique_flags_gt.shape[0]))
for i in range(min_len):
contigency_matrix[int(np.nonzero(unique_flags == labels[i])[0]),
int(np.nonzero(unique_flags_gt == labels_gt[i])[0])] += 1.0
columns, rows = contigency_matrix.shape
row_sum = np.sum(contigency_matrix, axis=0)
column_sum = np.sum(contigency_matrix, axis=1)
matrix_sum = np.sum(contigency_matrix)
purity_clust = np.zeros((columns, ))
purity_speak = np.zeros((rows, ))
# compute cluster purity:
for i in range(columns):
purity_clust[i] = np.max((contigency_matrix[i, :])) / (column_sum[i])
for j in range(rows):
purity_speak[j] = np.max((contigency_matrix[:, j])) / (row_sum[j])
purity_cluster_m = np.sum(purity_clust * column_sum) / matrix_sum
purity_speaker_m = np.sum(purity_speak * row_sum) / matrix_sum
return purity_cluster_m, purity_speaker_m
def train_hmm_compute_statistics(features, labels):
"""
This function computes the statistics used to train
an HMM joint segmentation-classification model
using a sequence of sequential features and respective labels
ARGUMENTS:
- features: a np matrix of feature vectors (numOfDimensions x n_wins)
- labels: a np array of class indices (n_wins x 1)
RETURNS:
- class_priors: matrix of prior class probabilities
(n_classes x 1)
- transmutation_matrix: transition matrix (n_classes x n_classes)
- means: means matrix (numOfDimensions x 1)
- cov: deviation matrix (numOfDimensions x 1)
"""
unique_labels = np.unique(labels)
n_comps = len(unique_labels)
n_feats = features.shape[0]
if features.shape[1] < labels.shape[0]:
print("trainHMM warning: number of short-term feature vectors "
"must be greater or equal to the labels length!")
labels = labels[0:features.shape[1]]
# compute prior probabilities:
class_priors = np.zeros((n_comps,))
for i, u_label in enumerate(unique_labels):
class_priors[i] = np.count_nonzero(labels == u_label)
# normalize prior probabilities
class_priors = class_priors / class_priors.sum()
# compute transition matrix:
transmutation_matrix = np.zeros((n_comps, n_comps))
for i in range(labels.shape[0]-1):
transmutation_matrix[int(labels[i]), int(labels[i + 1])] += 1
# normalize rows of transition matrix:
for i in range(n_comps):
transmutation_matrix[i, :] /= transmutation_matrix[i, :].sum()
means = np.zeros((n_comps, n_feats))
for i in range(n_comps):
means[i, :] = \
np.array(features[:,
np.nonzero(labels == unique_labels[i])[0]].mean(axis=1))
cov = np.zeros((n_comps, n_feats))
for i in range(n_comps):
"""
cov[i, :, :] = np.cov(features[:, np.nonzero(labels == u_labels[i])[0]])
"""
        # use the line above if HMMs with full gaussian distributions are to be used
cov[i, :] = np.std(features[:,
np.nonzero(labels == unique_labels[i])[0]],
axis=1)
return class_priors, transmutation_matrix, means, cov
def train_hmm_from_file(wav_file, gt_file, hmm_model_name, mid_window, mid_step):
"""
This function trains a HMM model for segmentation-classification
using a single annotated audio file
ARGUMENTS:
- wav_file: the path of the audio filename
- gt_file: the path of the ground truth filename
                         (a csv file of the form <segment start in seconds>,
                          <segment end in seconds>,<segment label> in each row)
- hmm_model_name: the name of the HMM model to be stored
- mt_win: mid-term window size
- mt_step: mid-term window step
RETURNS:
- hmm: an object to the resulting HMM
- class_names: a list of class_names
After training, hmm, class_names, along with the mt_win and mt_step
values are stored in the hmm_model_name file
"""
seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)
flags, class_names = segments_to_labels(seg_start, seg_end, seg_labs, mid_step)
sampling_rate, signal = audioBasicIO.read_audio_file(wav_file)
features, _, _ = \
mtf.mid_feature_extraction(signal, sampling_rate,
mid_window * sampling_rate,
mid_step * sampling_rate,
round(sampling_rate * 0.050),
round(sampling_rate * 0.050))
class_priors, transumation_matrix, means, cov = \
train_hmm_compute_statistics(features, flags)
hmm = hmmlearn.hmm.GaussianHMM(class_priors.shape[0], "diag")
hmm.covars_ = cov
hmm.means_ = means
hmm.startprob_ = class_priors
hmm.transmat_ = transumation_matrix
save_hmm(hmm_model_name, hmm, class_names, mid_window, mid_step)
return hmm, class_names
def train_hmm_from_directory(folder_path, hmm_model_name, mid_window, mid_step):
"""
    This function trains an HMM model for segmentation-classification using
    a directory where WAV files and .segments (ground-truth) files are stored
ARGUMENTS:
- folder_path: the path of the data diretory
- hmm_model_name: the name of the HMM model to be stored
- mt_win: mid-term window size
- mt_step: mid-term window step
RETURNS:
- hmm: an object to the resulting HMM
- class_names: a list of class_names
After training, hmm, class_names, along with the mt_win
and mt_step values are stored in the hmm_model_name file
"""
flags_all = np.array([])
class_names_all = []
for i, f in enumerate(glob.glob(folder_path + os.sep + '*.wav')):
# for each WAV file
wav_file = f
gt_file = f.replace('.wav', '.segments')
if os.path.isfile(gt_file):
seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)
flags, class_names = \
segments_to_labels(seg_start, seg_end, seg_labs, mid_step)
for c in class_names:
# update class names:
if c not in class_names_all:
class_names_all.append(c)
sampling_rate, signal = audioBasicIO.read_audio_file(wav_file)
feature_vector, _, _ = \
mtf.mid_feature_extraction(signal, sampling_rate,
mid_window * sampling_rate,
mid_step * sampling_rate,
round(sampling_rate * 0.050),
round(sampling_rate * 0.050))
flag_len = len(flags)
feat_cols = feature_vector.shape[1]
min_sm = min(feat_cols, flag_len)
feature_vector = feature_vector[:, 0:min_sm]
flags = flags[0:min_sm]
flags_new = []
# append features and labels
for j, fl in enumerate(flags):
flags_new.append(class_names_all.index(class_names_all[flags[j]]))
flags_all = np.append(flags_all, np.array(flags_new))
if i == 0:
f_all = feature_vector
else:
f_all = np.concatenate((f_all, feature_vector), axis=1)
# compute HMM statistics
class_priors, transmutation_matrix, means, cov = \
train_hmm_compute_statistics(f_all, flags_all)
# train the HMM
hmm = hmmlearn.hmm.GaussianHMM(class_priors.shape[0], "diag")
hmm.covars_ = cov
hmm.means_ = means
hmm.startprob_ = class_priors
hmm.transmat_ = transmutation_matrix
save_hmm(hmm_model_name, hmm, class_names_all, mid_window, mid_step)
return hmm, class_names_all
def save_hmm(hmm_model_name, model, classes, mid_window, mid_step):
"""Save HMM model"""
with open(hmm_model_name, "wb") as f_handle:
cpickle.dump(model, f_handle, protocol=cpickle.HIGHEST_PROTOCOL)
cpickle.dump(classes, f_handle, protocol=cpickle.HIGHEST_PROTOCOL)
cpickle.dump(mid_window, f_handle, protocol=cpickle.HIGHEST_PROTOCOL)
cpickle.dump(mid_step, f_handle, protocol=cpickle.HIGHEST_PROTOCOL)
def hmm_segmentation(audio_file, hmm_model_name, plot_results=False,
gt_file=""):
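    """
    Performs joint segmentation-classification of an audio file using a
    pre-trained HMM model (see train_hmm_from_file / train_hmm_from_directory).
    If a ground-truth file is provided, the predictions are also evaluated.
    RETURNS: labels, class_names, accuracy, confusion matrix
    """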
sampling_rate, signal = audioBasicIO.read_audio_file(audio_file)
with open(hmm_model_name, "rb") as f_handle:
hmm = cpickle.load(f_handle)
class_names = cpickle.load(f_handle)
mid_window = cpickle.load(f_handle)
mid_step = cpickle.load(f_handle)
features, _, _ = \
mtf.mid_feature_extraction(signal, sampling_rate,
mid_window * sampling_rate,
mid_step * sampling_rate,
round(sampling_rate * 0.050),
round(sampling_rate * 0.050))
# apply model
labels = hmm.predict(features.T)
labels_gt, class_names_gt, accuracy, cm = \
load_ground_truth(gt_file, labels, class_names, mid_step, plot_results)
return labels, class_names, accuracy, cm
def load_ground_truth_segments(gt_file, mt_step):
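    """
    Reads a segmentation ground-truth file and converts its segments to
    fixed-size class labels with step mt_step.
    RETURNS: np array of label indices and the list of class names
    """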
seg_start, seg_end, seg_labels = read_segmentation_gt(gt_file)
labels, class_names = segments_to_labels(seg_start, seg_end, seg_labels,
mt_step)
labels_temp = []
for index, label in enumerate(labels):
# "align" labels with GT
if class_names[labels[index]] in class_names:
labels_temp.append(class_names.index(class_names[
labels[index]]))
else:
labels_temp.append(-1)
labels = np.array(labels_temp)
return labels, class_names
def calculate_confusion_matrix(predictions, ground_truth, classes):
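    """
    Builds a confusion matrix (rows: ground truth, columns: predictions)
    from two label sequences, truncated to their common length.
    """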
cm = np.zeros((len(classes), len(classes)))
for index in range(min(predictions.shape[0], ground_truth.shape[0])):
cm[int(ground_truth[index]), int(predictions[index])] += 1
return cm
def mid_term_file_classification(input_file, model_name, model_type,
plot_results=False, gt_file=""):
"""
This function performs mid-term classification of an audio stream.
Towards this end, supervised knowledge is used,
i.e. a pre-trained classifier.
ARGUMENTS:
- input_file: path of the input WAV file
- model_name: name of the classification model
- model_type: svm or knn depending on the classifier type
- plot_results: True if results are to be plotted using
matplotlib along with a set of statistics
    RETURNS:
      - labels:       np array of predicted class indices (one per mid-term window)
      - class_names:  list of class names
      - accuracy:     overall accuracy (0 if no ground-truth file is provided)
      - cm:           confusion matrix (empty if no ground truth is available)
"""
labels = []
accuracy = 0.0
class_names = []
cm = np.array([])
if not os.path.isfile(model_name):
print("mtFileClassificationError: input model_type not found!")
return labels, class_names, accuracy, cm
# Load classifier:
if model_type == "knn":
classifier, mean, std, class_names, mt_win, mid_step, st_win, \
st_step, compute_beat = at.load_model_knn(model_name)
else:
classifier, mean, std, class_names, mt_win, mid_step, st_win, \
st_step, compute_beat = at.load_model(model_name)
if compute_beat:
print("Model " + model_name + " contains long-term music features "
"(beat etc) and cannot be used in "
"segmentation")
return labels, class_names, accuracy, cm
# load input file
sampling_rate, signal = audioBasicIO.read_audio_file(input_file)
# could not read file
if sampling_rate == 0:
return labels, class_names, accuracy, cm
# convert stereo (if) to mono
signal = audioBasicIO.stereo_to_mono(signal)
# mid-term feature extraction:
mt_feats, _, _ = \
mtf.mid_feature_extraction(signal, sampling_rate,
mt_win * sampling_rate,
mid_step * sampling_rate,
round(sampling_rate * st_win),
round(sampling_rate * st_step))
posterior_matrix = []
# for each feature vector (i.e. for each fix-sized segment):
for col_index in range(mt_feats.shape[1]):
# normalize current feature v
feature_vector = (mt_feats[:, col_index] - mean) / std
# classify vector:
label_predicted, posterior = \
at.classifier_wrapper(classifier, model_type, feature_vector)
labels.append(label_predicted)
# update probability matrix
posterior_matrix.append(np.max(posterior))
labels = np.array(labels)
# convert fix-sized flags to segments and classes
segs, classes = labels_to_segments(labels, mid_step)
segs[-1] = len(signal) / float(sampling_rate)
    # Load ground-truth:
labels_gt, class_names_gt, accuracy, cm = \
load_ground_truth(gt_file, labels, class_names, mid_step, plot_results)
return labels, class_names, accuracy, cm
def load_ground_truth(gt_file, labels, class_names, mid_step, plot_results):
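    """
    If gt_file exists, loads the ground-truth segments, maps the predicted
    labels onto the ground-truth class names (-1 for classes not present in
    the ground truth), computes the confusion matrix and the overall accuracy
    (plotting the results if plot_results is True).
    RETURNS: labels_gt, class_names, accuracy, confusion matrix
    """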
accuracy = 0
cm = np.array([])
labels_gt = np.array([])
if os.path.isfile(gt_file):
# load ground truth and class names
labels_gt, class_names_gt = load_ground_truth_segments(gt_file,
mid_step)
# map predicted labels to ground truth class names
# Note: if a predicted label does not belong to the ground truth
# classes --> -1
labels_new = []
for il, l in enumerate(labels):
if class_names[int(l)] in class_names_gt:
labels_new.append(class_names_gt.index(class_names[int(l)]))
else:
labels_new.append(-1)
labels_new = np.array(labels_new)
cm = calculate_confusion_matrix(labels_new, labels_gt, class_names_gt)
accuracy = plot_segmentation_results(labels_new, labels_gt,
class_names, mid_step, not plot_results)
if accuracy >= 0:
print("Overall Accuracy: {0:.2f}".format(accuracy))
return labels_gt, class_names, accuracy, cm
def evaluate_segmentation_classification_dir(dir_name, model_name, method_name):
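    """
    Evaluates segmentation-classification over every WAV file in dir_name
    (using the respective .segments ground-truth files) with either a
    supervised classifier or an HMM model, and prints aggregate accuracy,
    recall, precision and f1 statistics.
    """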
accuracies = []
class_names = []
cm_total = np.array([])
for index, wav_file in enumerate(glob.glob(dir_name + os.sep + '*.wav')):
print(wav_file)
gt_file = wav_file.replace('.wav', '.segments')
if method_name.lower() in ["svm", "svm_rbf", "knn", "randomforest",
"gradientboosting", "extratrees"]:
flags_ind, class_names, accuracy, cm_temp = \
mid_term_file_classification(wav_file, model_name, method_name,
False, gt_file)
else:
flags_ind, class_names, accuracy, cm_temp = \
hmm_segmentation(wav_file, model_name, False, gt_file)
if accuracy > 0:
if not index:
cm_total = np.copy(cm_temp)
else:
cm_total = cm_total + cm_temp
accuracies.append(accuracy)
print(cm_temp, class_names)
print(cm_total)
if len(cm_total.shape) > 1:
cm_total = cm_total / np.sum(cm_total)
rec, pre, f1 = compute_metrics(cm_total, class_names)
print(" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ")
print("Average Accuracy: {0:.1f}".
format(100.0*np.array(accuracies).mean()))
print("Average recall: {0:.1f}".format(100.0*np.array(rec).mean()))
print("Average precision: {0:.1f}".format(100.0*np.array(pre).mean()))
print("Average f1: {0:.1f}".format(100.0*np.array(f1).mean()))
print("Median Accuracy: {0:.1f}".
format(100.0*np.median(np.array(accuracies))))
print("Min Accuracy: {0:.1f}".format(100.0*np.array(accuracies).min()))
print("Max Accuracy: {0:.1f}".format(100.0*np.array(accuracies).max()))
else:
print("Confusion matrix was empty, accuracy for every file was 0")
def silence_removal(signal, sampling_rate, st_win, st_step, smooth_window=0.5,
weight=0.5, plot=False):
"""
Event Detection (silence removal)
ARGUMENTS:
- signal: the input audio signal
- sampling_rate: sampling freq
- st_win, st_step: window size and step in seconds
         - smooth_window:    (optional) smoothing window (in seconds)
         - weight:           (optional) weight factor (0 < weight < 1)
                              the higher, the more strict
         - plot:             (optional) True if results are to be plotted
RETURNS:
         - seg_limits:    list of segment limits in seconds (e.g. [[0.1, 0.9],
                          [1.4, 3.0]] means that the resulting segments
                          are (0.1 - 0.9) seconds and (1.4 - 3.0) seconds)
"""
if weight >= 1:
weight = 0.99
if weight <= 0:
weight = 0.01
# Step 1: feature extraction
signal = audioBasicIO.stereo_to_mono(signal)
st_feats, _ = stf.feature_extraction(signal, sampling_rate,
st_win * sampling_rate,
st_step * sampling_rate)
# Step 2: train binary svm classifier of low vs high energy frames
# keep only the energy short-term sequence (2nd feature)
st_energy = st_feats[1, :]
en = np.sort(st_energy)
# number of 10% of the total short-term windows
st_windows_fraction = int(len(en) / 10)
# compute "lower" 10% energy threshold
low_threshold = np.mean(en[0:st_windows_fraction]) + 1e-15
# compute "higher" 10% energy threshold
high_threshold = np.mean(en[-st_windows_fraction:-1]) + 1e-15
# get all features that correspond to low energy
low_energy = st_feats[:, np.where(st_energy <= low_threshold)[0]]
# get all features that correspond to high energy
high_energy = st_feats[:, np.where(st_energy >= high_threshold)[0]]
# form the binary classification task and ...
features = [low_energy.T, high_energy.T]
# normalize and train the respective svm probabilistic model
# (ONSET vs SILENCE)
features_norm, mean, std = at.normalize_features(features)
svm = at.train_svm(features_norm, 1.0)
# Step 3: compute onset probability based on the trained svm
prob_on_set = []
for index in range(st_feats.shape[1]):
# for each frame
cur_fv = (st_feats[:, index] - mean) / std
# get svm probability (that it belongs to the ONSET class)
prob_on_set.append(svm.predict_proba(cur_fv.reshape(1, -1))[0][1])
prob_on_set = np.array(prob_on_set)
# smooth probability:
prob_on_set = smooth_moving_avg(prob_on_set, smooth_window / st_step)
# Step 4A: detect onset frame indices:
prog_on_set_sort = np.sort(prob_on_set)
# find probability Threshold as a weighted average
# of top 10% and lower 10% of the values
nt = int(prog_on_set_sort.shape[0] / 10)
threshold = (np.mean((1 - weight) * prog_on_set_sort[0:nt]) +
weight * np.mean(prog_on_set_sort[-nt::]))
max_indices = np.where(prob_on_set > threshold)[0]
# get the indices of the frames that satisfy the thresholding
index = 0
seg_limits = []
time_clusters = []
# Step 4B: group frame indices to onset segments
while index < len(max_indices):
# for each of the detected onset indices
cur_cluster = [max_indices[index]]
if index == len(max_indices)-1:
break
while max_indices[index+1] - cur_cluster[-1] <= 2:
cur_cluster.append(max_indices[index+1])
index += 1
if index == len(max_indices)-1:
break
index += 1
time_clusters.append(cur_cluster)
seg_limits.append([cur_cluster[0] * st_step,
cur_cluster[-1] * st_step])
# Step 5: Post process: remove very small segments:
min_duration = 0.2
seg_limits_2 = []
for s_lim in seg_limits:
if s_lim[1] - s_lim[0] > min_duration:
seg_limits_2.append(s_lim)
seg_limits = seg_limits_2
if plot:
time_x = np.arange(0, signal.shape[0] / float(sampling_rate), 1.0 /
sampling_rate)
plt.subplot(2, 1, 1)
plt.plot(time_x, signal)
for s_lim in seg_limits:
plt.axvline(x=s_lim[0], color='red')
plt.axvline(x=s_lim[1], color='red')
plt.subplot(2, 1, 2)
plt.plot(np.arange(0, prob_on_set.shape[0] * st_step, st_step),
prob_on_set)
plt.title('Signal')
for s_lim in seg_limits:
plt.axvline(x=s_lim[0], color='red')
plt.axvline(x=s_lim[1], color='red')
plt.title('svm Probability')
plt.show()
return seg_limits
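# Example usage (a minimal sketch, assuming a local "speech.wav" file exists):
#   fs, x = audioBasicIO.read_audio_file("speech.wav")
#   segments = silence_removal(x, fs, 0.020, 0.020, smooth_window=1.0, weight=0.3)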
def speaker_diarization(filename, n_speakers, mid_window=2.0, mid_step=0.2,
short_window=0.05, lda_dim=35, plot_res=False):
"""
ARGUMENTS:
- filename: the name of the WAV file to be analyzed
- n_speakers the number of speakers (clusters) in
the recording (<=0 for unknown)
- mid_window (opt) mid-term window size
- mid_step (opt) mid-term window step
- short_window (opt) short-term window size
        - lda_dim       (opt)   LDA dimension (0 for no LDA)
        - plot_res      (opt)   0 for not plotting the results, 1 for plotting
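    RETURNS:
        - cls:          np array of per-window cluster (speaker) label indices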
"""
sampling_rate, signal = audioBasicIO.read_audio_file(filename)
signal = audioBasicIO.stereo_to_mono(signal)
duration = len(signal) / sampling_rate
base_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"data/models")
classifier_all, mean_all, std_all, class_names_all, _, _, _, _, _ = \
at.load_model_knn(os.path.join(base_dir, "knn_speaker_10"))
classifier_fm, mean_fm, std_fm, class_names_fm, _, _, _, _, _ = \
at.load_model_knn(os.path.join(base_dir, "knn_speaker_male_female"))
mid_feats, st_feats, _ = \
mtf.mid_feature_extraction(signal, sampling_rate,
mid_window * sampling_rate,
mid_step * sampling_rate,
round(sampling_rate * short_window),
round(sampling_rate * short_window * 0.5))
mid_term_features = np.zeros((mid_feats.shape[0] + len(class_names_all) +
len(class_names_fm), mid_feats.shape[1]))
for index in range(mid_feats.shape[1]):
feature_norm_all = (mid_feats[:, index] - mean_all) / std_all
feature_norm_fm = (mid_feats[:, index] - mean_fm) / std_fm
_, p1 = at.classifier_wrapper(classifier_all, "knn", feature_norm_all)
_, p2 = at.classifier_wrapper(classifier_fm, "knn", feature_norm_fm)
start = mid_feats.shape[0]
end = mid_feats.shape[0] + len(class_names_all)
mid_term_features[0:mid_feats.shape[0], index] = mid_feats[:, index]
mid_term_features[start:end, index] = p1 + 1e-4
mid_term_features[end::, index] = p2 + 1e-4
mid_feats = mid_term_features # TODO
feature_selected = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41,
42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]
mid_feats = mid_feats[feature_selected, :]
mid_feats_norm, mean, std = at.normalize_features([mid_feats.T])
mid_feats_norm = mid_feats_norm[0].T
n_wins = mid_feats.shape[1]
# remove outliers:
dist_all = np.sum(distance.squareform(distance.pdist(mid_feats_norm.T)),
axis=0)
m_dist_all = np.mean(dist_all)
i_non_outliers = np.nonzero(dist_all < 1.2 * m_dist_all)[0]
# TODO: Combine energy threshold for outlier removal:
# EnergyMin = np.min(mt_feats[1,:])
# EnergyMean = np.mean(mt_feats[1,:])
# Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0
# i_non_outliers = np.nonzero(mt_feats[1,:] > Thres)[0]
# print i_non_outliers
mt_feats_norm_or = mid_feats_norm
mid_feats_norm = mid_feats_norm[:, i_non_outliers]
# LDA dimensionality reduction:
if lda_dim > 0:
# extract mid-term features with minimum step:
window_ratio = int(round(mid_window / short_window))
step_ratio = int(round(short_window / short_window))
mt_feats_to_red = []
num_of_features = len(st_feats)
num_of_stats = 2
for index in range(num_of_stats * num_of_features):
mt_feats_to_red.append([])
# for each of the short-term features:
for index in range(num_of_features):
cur_pos = 0
feat_len = len(st_feats[index])
while cur_pos < feat_len:
n1 = cur_pos
n2 = cur_pos + window_ratio
if n2 > feat_len:
n2 = feat_len
short_features = st_feats[index][n1:n2]
mt_feats_to_red[index].append(np.mean(short_features))
mt_feats_to_red[index + num_of_features].\
append(np.std(short_features))
cur_pos += step_ratio
mt_feats_to_red = np.array(mt_feats_to_red)
mt_feats_to_red_2 = np.zeros((mt_feats_to_red.shape[0] +
len(class_names_all) +
len(class_names_fm),
mt_feats_to_red.shape[1]))
limit = mt_feats_to_red.shape[0] + len(class_names_all)
for index in range(mt_feats_to_red.shape[1]):
feature_norm_all = (mt_feats_to_red[:, index] - mean_all) / std_all
feature_norm_fm = (mt_feats_to_red[:, index] - mean_fm) / std_fm
_, p1 = at.classifier_wrapper(classifier_all, "knn",
feature_norm_all)
_, p2 = at.classifier_wrapper(classifier_fm, "knn", feature_norm_fm)
mt_feats_to_red_2[0:mt_feats_to_red.shape[0], index] = \
mt_feats_to_red[:, index]
mt_feats_to_red_2[mt_feats_to_red.shape[0]:limit, index] = p1 + 1e-4
mt_feats_to_red_2[limit::, index] = p2 + 1e-4
mt_feats_to_red = mt_feats_to_red_2
mt_feats_to_red = mt_feats_to_red[feature_selected, :]
mt_feats_to_red, mean, std = at.normalize_features([mt_feats_to_red.T])
mt_feats_to_red = mt_feats_to_red[0].T
labels = np.zeros((mt_feats_to_red.shape[1], ))
lda_step = 1.0
lda_step_ratio = lda_step / short_window
for index in range(labels.shape[0]):
labels[index] = int(index * short_window / lda_step_ratio)
clf = sklearn.discriminant_analysis.\
LinearDiscriminantAnalysis(n_components=lda_dim)
clf.fit(mt_feats_to_red.T, labels)
mid_feats_norm = (clf.transform(mid_feats_norm.T)).T
if n_speakers <= 0:
s_range = range(2, 10)
else:
s_range = [n_speakers]
cluster_labels = []
sil_all = []
cluster_centers = []
for speakers in s_range:
k_means = sklearn.cluster.KMeans(n_clusters=speakers)
k_means.fit(mid_feats_norm.T)
cls = k_means.labels_
means = k_means.cluster_centers_
cluster_labels.append(cls)
cluster_centers.append(means)
sil_1, sil_2 = [], []
for c in range(speakers):
# for each speaker (i.e. for each extracted cluster)
clust_per_cent = np.nonzero(cls == c)[0].shape[0] / float(len(cls))
if clust_per_cent < 0.020:
sil_1.append(0.0)
sil_2.append(0.0)
else:
# get subset of feature vectors
mt_feats_norm_temp = mid_feats_norm[:, cls == c]
# compute average distance between samples
# that belong to the cluster (a values)
dist = distance.pdist(mt_feats_norm_temp.T)
sil_1.append(np.mean(dist)*clust_per_cent)
sil_temp = []
for c2 in range(speakers):
# compute distances from samples of other clusters
if c2 != c:
clust_per_cent_2 = np.nonzero(cls == c2)[0].shape[0] /\
float(len(cls))
mid_features_temp = mid_feats_norm[:, cls == c2]
dist = distance.cdist(mt_feats_norm_temp.T,
mid_features_temp.T)
sil_temp.append(np.mean(dist)*(clust_per_cent
+ clust_per_cent_2)/2.0)
sil_temp = np.array(sil_temp)
# ... and keep the minimum value (i.e.
# the distance from the "nearest" cluster)
sil_2.append(min(sil_temp))
sil_1 = np.array(sil_1)
sil_2 = np.array(sil_2)
sil = []
for c in range(speakers):
# for each cluster (speaker) compute silhouette
sil.append((sil_2[c] - sil_1[c]) / (max(sil_2[c], sil_1[c]) + 1e-5))
        # keep the AVERAGE SILHOUETTE
sil_all.append(np.mean(sil))
imax = int(np.argmax(sil_all))
# optimal number of clusters
num_speakers = s_range[imax]
# generate the final set of cluster labels
# (important: need to retrieve the outlier windows:
# this is achieved by giving them the value of their
# nearest non-outlier window)
cls = np.zeros((n_wins,))
for index in range(n_wins):
j = np.argmin(np.abs(index-i_non_outliers))
cls[index] = cluster_labels[imax][j]
# Post-process method 1: hmm smoothing
for index in range(1):
# hmm training
start_prob, transmat, means, cov = \
train_hmm_compute_statistics(mt_feats_norm_or, cls)
hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag")
hmm.startprob_ = start_prob
hmm.transmat_ = transmat
hmm.means_ = means
hmm.covars_ = cov
cls = hmm.predict(mt_feats_norm_or.T)
# Post-process method 2: median filtering:
cls = scipy.signal.medfilt(cls, 13)
cls = scipy.signal.medfilt(cls, 11)
class_names = ["speaker{0:d}".format(c) for c in range(num_speakers)]
# load ground-truth if available
gt_file = filename.replace('.wav', '.segments')
# if groundtruth exists
if os.path.isfile(gt_file):
seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)
flags_gt, class_names_gt = segments_to_labels(seg_start, seg_end,
seg_labs, mid_step)
if plot_res:
fig = plt.figure()
if n_speakers > 0:
ax1 = fig.add_subplot(111)
else:
ax1 = fig.add_subplot(211)
ax1.set_yticks(np.array(range(len(class_names))))
ax1.axis((0, duration, -1, len(class_names)))
ax1.set_yticklabels(class_names)
ax1.plot(np.array(range(len(cls))) * mid_step + mid_step / 2.0, cls)
if os.path.isfile(gt_file):
if plot_res:
ax1.plot(np.array(range(len(flags_gt))) *
mid_step + mid_step / 2.0, flags_gt, 'r')
purity_cluster_m, purity_speaker_m = \
evaluate_speaker_diarization(cls, flags_gt)
print("{0:.1f}\t{1:.1f}".format(100 * purity_cluster_m,
100 * purity_speaker_m))
if plot_res:
plt.title("Cluster purity: {0:.1f}% - "
"Speaker purity: {1:.1f}%".format(100 * purity_cluster_m,
100 * purity_speaker_m))
if plot_res:
plt.xlabel("time (seconds)")
if n_speakers <= 0:
plt.subplot(212)
plt.plot(s_range, sil_all)
plt.xlabel("number of clusters")
            plt.ylabel("average clustering silhouette")
plt.show()
return cls
def speaker_diarization_evaluation(folder_name, lda_dimensions):
"""
This function prints the cluster purity and speaker purity for
each WAV file stored in a provided directory (.SEGMENT files
are needed as ground-truth)
ARGUMENTS:
- folder_name: the full path of the folder where the WAV and
segment (ground-truth) files are stored
- lda_dimensions: a list of LDA dimensions (0 for no LDA)
"""
types = ('*.wav', )
wav_files = []
for files in types:
wav_files.extend(glob.glob(os.path.join(folder_name, files)))
wav_files = sorted(wav_files)
# get number of unique speakers per file (from ground-truth)
num_speakers = []
for wav_file in wav_files:
gt_file = wav_file.replace('.wav', '.segments')
if os.path.isfile(gt_file):
_, _, seg_labs = read_segmentation_gt(gt_file)
num_speakers.append(len(list(set(seg_labs))))
else:
num_speakers.append(-1)
for dim in lda_dimensions:
print("LDA = {0:d}".format(dim))
for i, wav_file in enumerate(wav_files):
speaker_diarization(wav_file, num_speakers[i], 2.0, 0.2, 0.05, dim,
plot_res=False)
def music_thumbnailing(signal, sampling_rate, short_window=1.0, short_step=0.5,
thumb_size=10.0, limit_1=0, limit_2=1):
"""
This function detects instances of the most representative part of a
music recording, also called "music thumbnails".
    A technique similar to the one proposed in [1] is followed, except that a
    wider set of audio features is used instead of chroma features.
In particular the following steps are followed:
- Extract short-term audio features. Typical short-term window size: 1
second
- Compute the self-similarity matrix, i.e. all pairwise similarities
between feature vectors
    - Apply a diagonal mask as a moving average filter on the values of the
      self-similarity matrix.
The size of the mask is equal to the desirable thumbnail length.
- Find the position of the maximum value of the new (filtered)
self-similarity matrix. The audio segments that correspond to the
      diagonal around that position are the selected thumbnails
ARGUMENTS:
- signal: input signal
- sampling_rate: sampling frequency
- short_window: window size (in seconds)
- short_step: window step (in seconds)
     - thumb_size:    desired thumbnail size (in seconds)
RETURNS:
- A1: beginning of 1st thumbnail (in seconds)
- A2: ending of 1st thumbnail (in seconds)
- B1: beginning of 2nd thumbnail (in seconds)
- B2: ending of 2nd thumbnail (in seconds)
USAGE EXAMPLE:
       from pyAudioAnalysis import audioBasicIO
       fs, x = audioBasicIO.read_audio_file(input_file)
       [A1, A2, B1, B2, S] = music_thumbnailing(x, fs)
[1] Bartsch, M. A., & Wakefield, G. H. (2005). Audio thumbnailing
of popular music using chroma-based representations.
Multimedia, IEEE Transactions on, 7(1), 96-104.
"""
signal = audioBasicIO.stereo_to_mono(signal)
# feature extraction:
st_feats, _ = stf.feature_extraction(signal, sampling_rate,
sampling_rate * short_window,
sampling_rate * short_step)
# self-similarity matrix
sim_matrix = self_similarity_matrix(st_feats)
# moving filter:
m_filter = int(round(thumb_size / short_step))
diagonal = np.eye(m_filter, m_filter)
sim_matrix = scipy.signal.convolve2d(sim_matrix, diagonal, 'valid')
# post-processing (remove main diagonal elements)
min_sm = np.min(sim_matrix)
for i in range(sim_matrix.shape[0]):
for j in range(sim_matrix.shape[1]):
if abs(i-j) < 5.0 / short_step or i > j:
sim_matrix[i, j] = min_sm
# find max position:
sim_matrix[0:int(limit_1 * sim_matrix.shape[0]), :] = min_sm
sim_matrix[:, 0:int(limit_1 * sim_matrix.shape[0])] = min_sm
sim_matrix[int(limit_2 * sim_matrix.shape[0])::, :] = min_sm
sim_matrix[:, int(limit_2 * sim_matrix.shape[0])::] = min_sm
rows, cols = np.unravel_index(sim_matrix.argmax(), sim_matrix.shape)
i1 = rows
i2 = rows
j1 = cols
j2 = cols
while i2-i1 < m_filter:
if i1 <= 0 or j1 <= 0 or i2 >= sim_matrix.shape[0]-2 or \
j2 >= sim_matrix.shape[1]-2:
break
if sim_matrix[i1-1, j1-1] > sim_matrix[i2 + 1, j2 + 1]:
i1 -= 1
j1 -= 1
else:
i2 += 1
j2 += 1
return short_step * i1, short_step * i2, short_step * j1, short_step * j2, \
sim_matrix
|
precision.append(confusion_matrix[i, i] /
|
roll.rs
|
use std::convert::TryFrom;
use std::iter::FromIterator;
use super::checks::is_place;
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Hardway {
Four,
Six,
Eight,
Ten,
}
impl TryFrom<u8> for Hardway {
type Error = std::io::Error;
fn try_from(target: u8) -> Result<Hardway, Self::Error> {
match target {
4 => Ok(Hardway::Four),
6 => Ok(Hardway::Six),
8 => Ok(Hardway::Eight),
10 => Ok(Hardway::Ten),
other => Err(std::io::Error::new(
std::io::ErrorKind::Other,
format!("'{}' is not a hardway", other),
)),
}
}
}
impl From<&u8> for Hardway {
fn from(way: &u8) -> Hardway {
match way {
4 => Hardway::Four,
6 => Hardway::Six,
8 => Hardway::Eight,
10 => Hardway::Ten,
_ => unreachable!(),
}
}
}
impl From<&Hardway> for u8 {
fn from(way: &Hardway) -> u8 {
match way {
Hardway::Four => 4,
Hardway::Six => 6,
Hardway::Eight => 8,
Hardway::Ten => 10,
}
}
}
#[derive(Clone, PartialEq)]
pub struct Roll(u8, u8);
impl<U> FromIterator<U> for Roll
where
U: Into<u8>,
{
fn from_iter<T: IntoIterator<Item = U>>(target: T) -> Self {
let mut iter = target.into_iter();
let first = iter.next().map_or(1, |u| u.into());
let second = iter.next().map_or(1, |u| u.into());
Roll(first, second)
}
}
impl std::fmt::Debug for Roll {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "Roll({}, {} | {})", self.0, self.1, self.total())
}
}
impl From<&Roll> for (u8, u8) {
fn from(roll: &Roll) -> (u8, u8) {
(roll.0, roll.1)
}
}
impl Roll {
pub fn total(&self) -> u8 {
self.0 + self.1
}
pub fn left(&self) -> u8 {
self.0
}
pub fn right(&self) -> u8 {
self.1
}
pub fn easyway(&self) -> Option<Hardway> {
let total = self.total();
let hardway = self.hardway();
match (total, hardway) {
(4, None) => Some(Hardway::Four),
(6, None) => Some(Hardway::Six),
(8, None) => Some(Hardway::Eight),
(10, None) => Some(Hardway::Ten),
_ => None,
}
}
pub fn hardway(&self) -> Option<Hardway> {
match (self.0, self.1) {
(2, 2) => Some(Hardway::Four),
(3, 3) => Some(Hardway::Six),
(4, 4) => Some(Hardway::Eight),
(5, 5) => Some(Hardway::Ten),
_ => None,
}
}
pub fn result(&self, button: &Option<u8>) -> RollResult {
match (button, self.total()) {
(None, target) if is_place(target) => RollResult::Button(self.total()),
(Some(target), value) if value == *target => RollResult::Hit,
|
(None, 2) | (None, 12) | (None, 3) => RollResult::Craps,
(Some(_), _) => RollResult::Nothing,
(None, _) => RollResult::Nothing,
}
}
}
#[derive(Debug)]
pub enum RollResult {
Yo,
Hit,
Craps,
Button(u8),
Nothing,
}
impl RollResult {
pub fn button(&self, existing: Option<u8>) -> Option<u8> {
match self {
RollResult::Button(value) => Some(*value),
RollResult::Hit => None,
RollResult::Nothing => existing,
RollResult::Craps => None,
RollResult::Yo => existing,
}
}
}
#[cfg(test)]
mod test {
use super::{Hardway, Roll};
#[test]
fn easyway_four() {
let roll = vec![1u8, 3u8].into_iter().collect::<Roll>();
assert_eq!(roll.easyway(), Some(Hardway::Four));
let roll = vec![3u8, 1u8].into_iter().collect::<Roll>();
assert_eq!(roll.easyway(), Some(Hardway::Four));
let roll = vec![2u8, 2u8].into_iter().collect::<Roll>();
assert_eq!(roll.easyway(), None);
let roll = vec![1u8, 2u8].into_iter().collect::<Roll>();
assert_eq!(roll.easyway(), None);
}
#[test]
fn easyway_six() {
let roll = vec![1u8, 5u8].into_iter().collect::<Roll>();
assert_eq!(roll.easyway(), Some(Hardway::Six));
let roll = vec![2u8, 4u8].into_iter().collect::<Roll>();
assert_eq!(roll.easyway(), Some(Hardway::Six));
let roll = vec![4u8, 2u8].into_iter().collect::<Roll>();
assert_eq!(roll.easyway(), Some(Hardway::Six));
let roll = vec![2u8, 4u8].into_iter().collect::<Roll>();
assert_eq!(roll.easyway(), Some(Hardway::Six));
let roll = vec![3u8, 3u8].into_iter().collect::<Roll>();
assert_eq!(roll.easyway(), None);
}
#[test]
fn easyway_eight() {
let roll = vec![2u8, 6u8].into_iter().collect::<Roll>();
assert_eq!(roll.easyway(), Some(Hardway::Eight));
let roll = vec![3u8, 5u8].into_iter().collect::<Roll>();
assert_eq!(roll.easyway(), Some(Hardway::Eight));
let roll = vec![6u8, 2u8].into_iter().collect::<Roll>();
assert_eq!(roll.easyway(), Some(Hardway::Eight));
let roll = vec![5u8, 3u8].into_iter().collect::<Roll>();
assert_eq!(roll.easyway(), Some(Hardway::Eight));
let roll = vec![4u8, 4u8].into_iter().collect::<Roll>();
assert_eq!(roll.easyway(), None);
}
#[test]
fn easyway_ten() {
let roll = vec![4u8, 6u8].into_iter().collect::<Roll>();
assert_eq!(roll.easyway(), Some(Hardway::Ten));
let roll = vec![6u8, 4u8].into_iter().collect::<Roll>();
assert_eq!(roll.easyway(), Some(Hardway::Ten));
let roll = vec![5u8, 5u8].into_iter().collect::<Roll>();
assert_eq!(roll.easyway(), None);
}
#[test]
fn hardway_four() {
let roll = vec![1u8, 3u8].into_iter().collect::<Roll>();
assert_eq!(roll.hardway(), None);
let roll = vec![3u8, 1u8].into_iter().collect::<Roll>();
assert_eq!(roll.hardway(), None);
let roll = vec![2u8, 2u8].into_iter().collect::<Roll>();
assert_eq!(roll.hardway(), Some(Hardway::Four));
let roll = vec![1u8, 2u8].into_iter().collect::<Roll>();
assert_eq!(roll.hardway(), None);
}
#[test]
fn hardway_six() {
let roll = vec![1u8, 5u8].into_iter().collect::<Roll>();
assert_eq!(roll.hardway(), None);
let roll = vec![2u8, 4u8].into_iter().collect::<Roll>();
assert_eq!(roll.hardway(), None);
let roll = vec![4u8, 2u8].into_iter().collect::<Roll>();
assert_eq!(roll.hardway(), None);
let roll = vec![2u8, 4u8].into_iter().collect::<Roll>();
assert_eq!(roll.hardway(), None);
let roll = vec![3u8, 3u8].into_iter().collect::<Roll>();
assert_eq!(roll.hardway(), Some(Hardway::Six));
}
#[test]
fn hardway_eight() {
let roll = vec![2u8, 6u8].into_iter().collect::<Roll>();
assert_eq!(roll.hardway(), None);
let roll = vec![3u8, 5u8].into_iter().collect::<Roll>();
assert_eq!(roll.hardway(), None);
let roll = vec![6u8, 2u8].into_iter().collect::<Roll>();
assert_eq!(roll.hardway(), None);
let roll = vec![5u8, 3u8].into_iter().collect::<Roll>();
assert_eq!(roll.hardway(), None);
let roll = vec![4u8, 4u8].into_iter().collect::<Roll>();
assert_eq!(roll.hardway(), Some(Hardway::Eight));
}
#[test]
fn hardway_ten() {
let roll = vec![4u8, 6u8].into_iter().collect::<Roll>();
assert_eq!(roll.hardway(), None);
let roll = vec![6u8, 4u8].into_iter().collect::<Roll>();
assert_eq!(roll.hardway(), None);
let roll = vec![5u8, 5u8].into_iter().collect::<Roll>();
assert_eq!(roll.hardway(), Some(Hardway::Ten));
}
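    // Illustrative sketch, not from the original source: exercises the
    // pass-line `result` logic and the `button` bookkeeping for a point hit.
    // The chosen point value (6) is an arbitrary example.
    #[test]
    fn result_hit_clears_button() {
        use super::RollResult;
        let roll = vec![2u8, 4u8].into_iter().collect::<Roll>();
        assert!(matches!(roll.result(&Some(6)), RollResult::Hit));
        assert!(RollResult::Hit.button(Some(6)).is_none());
    }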
}
|
(Some(_), 7) => RollResult::Craps,
|
types.go
|
package Imaging
import (
"gitlab.markany.wm/external/goonvif/xsd"
"gitlab.markany.wm/external/goonvif/xsd/onvif"
)
type GetServiceCapabilities struct {
	XMLName string `xml:"http://www.onvif.org/ver20/imaging/wsdl GetServiceCapabilities" json:"-"`
}
type GetImagingSettings struct {
	XMLName string `xml:"http://www.onvif.org/ver20/imaging/wsdl GetImagingSettings" json:"-"`
VideoSourceToken onvif.ReferenceToken `xml:"http://www.onvif.org/ver20/imaging/wsdl VideoSourceToken"`
}
type SetImagingSettings struct {
	XMLName string `xml:"http://www.onvif.org/ver20/imaging/wsdl SetImagingSettings" json:"-"`
VideoSourceToken onvif.ReferenceToken `xml:"http://www.onvif.org/ver20/imaging/wsdl VideoSourceToken"`
ImagingSettings onvif.ImagingSettings20 `xml:"http://www.onvif.org/ver20/imaging/wsdl ImagingSettings"`
ForcePersistence xsd.Boolean `xml:"http://www.onvif.org/ver20/imaging/wsdl ForcePersistence"`
}
type GetOptions struct {
	XMLName string `xml:"http://www.onvif.org/ver20/imaging/wsdl GetOptions" json:"-"`
VideoSourceToken onvif.ReferenceToken `xml:"http://www.onvif.org/ver20/imaging/wsdl VideoSourceToken"`
}
type Move struct {
	XMLName string `xml:"http://www.onvif.org/ver20/imaging/wsdl Move" json:"-"`
VideoSourceToken onvif.ReferenceToken `xml:"http://www.onvif.org/ver20/imaging/wsdl VideoSourceToken"`
Focus onvif.FocusMove `xml:"http://www.onvif.org/ver20/imaging/wsdl Focus"`
}
type GetMoveOptions struct {
	XMLName string `xml:"http://www.onvif.org/ver20/imaging/wsdl GetMoveOptions" json:"-"`
VideoSourceToken onvif.ReferenceToken `xml:"http://www.onvif.org/ver20/imaging/wsdl VideoSourceToken"`
}
|
type Stop struct {
	XMLName string `xml:"http://www.onvif.org/ver20/imaging/wsdl Stop" json:"-"`
VideoSourceToken onvif.ReferenceToken `xml:"http://www.onvif.org/ver20/imaging/wsdl VideoSourceToken"`
}
type GetStatus struct {
	XMLName string `xml:"http://www.onvif.org/ver20/imaging/wsdl GetStatus" json:"-"`
VideoSourceToken onvif.ReferenceToken `xml:"http://www.onvif.org/ver20/imaging/wsdl VideoSourceToken"`
}
type GetPresets struct {
	XMLName string `xml:"http://www.onvif.org/ver20/imaging/wsdl GetPresets" json:"-"`
VideoSourceToken onvif.ReferenceToken `xml:"http://www.onvif.org/ver20/imaging/wsdl VideoSourceToken"`
}
type GetCurrentPreset struct {
	XMLName string `xml:"http://www.onvif.org/ver20/imaging/wsdl GetCurrentPreset" json:"-"`
VideoSourceToken onvif.ReferenceToken `xml:"http://www.onvif.org/ver20/imaging/wsdl VideoSourceToken"`
}
type SetCurrentPreset struct {
	XMLName string `xml:"http://www.onvif.org/ver20/imaging/wsdl SetCurrentPreset" json:"-"`
VideoSourceToken onvif.ReferenceToken `xml:"http://www.onvif.org/ver20/imaging/wsdl VideoSourceToken"`
PresetToken onvif.ReferenceToken `xml:"http://www.onvif.org/ver20/imaging/wsdl PresetToken"`
}
| |
test_ln_no_hard_link.py
|
# -*- coding: utf-8 -*-
import pytest
from dwim.rules.ln_no_hard_link import match, get_new_command
from tests.utils import Command
error = "hard link not allowed for directory"
@pytest.mark.parametrize('script, stderr', [
("ln barDir barLink", "ln: ‘barDir’: {}"),
("sudo ln a b", "ln: ‘a’: {}"),
("sudo ln -nbi a b", "ln: ‘a’: {}")])
def test_match(script, stderr):
command = Command(script, stderr=stderr.format(error))
assert match(command)
|
('', ''),
("ln a b", "... hard link"),
("sudo ln a b", "... hard link"),
("a b", error)])
def test_not_match(script, stderr):
command = Command(script, stderr=stderr)
assert not match(command)
@pytest.mark.parametrize('script, result', [
("ln barDir barLink", "ln -s barDir barLink"),
("sudo ln barDir barLink", "sudo ln -s barDir barLink"),
("sudo ln -nbi a b", "sudo ln -s -nbi a b"),
("ln -nbi a b && ls", "ln -s -nbi a b && ls"),
("ln a ln", "ln -s a ln"),
("sudo ln a ln", "sudo ln -s a ln")])
def test_get_new_command(script, result):
command = Command(script)
assert get_new_command(command) == result
|
@pytest.mark.parametrize('script, stderr', [
|
credits_table_test.rs
|
// Copyright 2020 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use common_base::tokio;
|
use crate::catalogs::ToReadDataSourcePlan;
use crate::datasources::database::system::CreditsTable;
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_credits_table() -> Result<()> {
let ctx = crate::tests::try_create_context()?;
let table: Arc<dyn Table> = Arc::new(CreditsTable::create(1));
let source_plan = table.read_plan(ctx.clone(), None)?;
let stream = table.read(ctx, &source_plan).await?;
let result = stream.try_collect::<Vec<_>>().await?;
let block = &result[0];
assert_eq!(block.num_columns(), 3);
Ok(())
}
|
use common_exception::Result;
use futures::TryStreamExt;
use crate::catalogs::Table;
|
deprecated.py
|
import functools
import inspect
import warnings
string_types = (type(b''), type(u''))
def warn_deprecation(text):
|
def deprecated(reason):
"""
This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
"""
if isinstance(reason, string_types):
# The @deprecated is used with a 'reason'.
#
# .. code-block:: python
#
# @deprecated("please, use another function")
# def old_function(x, y):
# pass
def decorator(func1):
if inspect.isclass(func1):
fmt1 = "Call to deprecated class {name} ({reason})."
else:
fmt1 = "Call to deprecated function {name} ({reason})."
@functools.wraps(func1)
def new_func1(*args, **kwargs):
warn_deprecation(
fmt1.format(name=func1.__name__, reason=reason),
)
return func1(*args, **kwargs)
return new_func1
return decorator
elif inspect.isclass(reason) or inspect.isfunction(reason):
# The @deprecated is used without any 'reason'.
#
# .. code-block:: python
#
# @deprecated
# def old_function(x, y):
# pass
func2 = reason
if inspect.isclass(func2):
fmt2 = "Call to deprecated class {name}."
else:
fmt2 = "Call to deprecated function {name}."
@functools.wraps(func2)
def new_func2(*args, **kwargs):
warn_deprecation(
fmt2.format(name=func2.__name__),
)
return func2(*args, **kwargs)
return new_func2
else:
raise TypeError(repr(type(reason)))
|
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
text,
category=DeprecationWarning,
stacklevel=2
)
warnings.simplefilter('default', DeprecationWarning)
|
vcs.rs
|
use std::fmt;
use std::path::PathBuf;
use failure::{bail, format_err, Error};
use if_chain::if_chain;
use lazy_static::lazy_static;
use log::{debug, info};
use regex::Regex;
use crate::api::{Ref, Repo};
#[derive(Copy, Clone)]
pub enum GitReference<'a> {
Commit(git2::Oid),
Symbolic(&'a str),
}
impl<'a> fmt::Display for GitReference<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
GitReference::Commit(ref c) => write!(f, "{}", c),
GitReference::Symbolic(ref s) => write!(f, "{}", s),
}
}
}
#[derive(Debug)]
pub struct CommitSpec {
pub repo: String,
pub path: Option<PathBuf>,
pub rev: String,
pub prev_rev: Option<String>,
}
impl fmt::Display for CommitSpec {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}@{}", &self.repo, &self.rev)
}
}
#[derive(Debug, PartialEq, Eq)]
struct VcsUrl {
pub provider: String,
pub id: String,
}
fn parse_rev_range(rng: &str) -> (Option<String>, String) {
if rng == "" {
return (None, "HEAD".into());
}
let mut iter = rng.rsplitn(2, "..");
let rev = iter.next().unwrap_or("HEAD");
(iter.next().map(str::to_owned), rev.to_string())
}
impl CommitSpec {
pub fn parse(s: &str) -> Result<CommitSpec, Error> {
lazy_static! {
static ref SPEC_RE: Regex = Regex::new(r"^([^@#]+)(?:#([^@]+))?(?:@(.+))?$").unwrap();
}
if let Some(caps) = SPEC_RE.captures(s) {
let (prev_rev, rev) = parse_rev_range(caps.get(3).map(|x| x.as_str()).unwrap_or(""));
Ok(CommitSpec {
repo: caps[1].to_string(),
path: caps.get(2).map(|x| PathBuf::from(x.as_str())),
rev,
prev_rev,
})
} else {
bail!("Could not parse commit spec '{}'", s)
}
}
pub fn reference(&self) -> GitReference<'_> {
if let Ok(oid) = git2::Oid::from_str(&self.rev) {
GitReference::Commit(oid)
} else {
GitReference::Symbolic(&self.rev)
}
}
pub fn prev_reference(&self) -> Option<GitReference<'_>> {
self.prev_rev.as_ref().map(|rev| {
if let Ok(oid) = git2::Oid::from_str(rev) {
GitReference::Commit(oid)
} else {
GitReference::Symbolic(rev)
}
})
}
}
fn strip_git_suffix(s: &str) -> &str {
s.trim_end_matches(".git")
}
impl VcsUrl {
pub fn parse(url: &str) -> VcsUrl {
lazy_static! {
static ref GIT_URL_RE: Regex =
Regex::new(r"^(?:ssh|https?)://(?:[^@]+@)?([^/]+)/(.+)$").unwrap();
static ref GIT_SSH_RE: Regex = Regex::new(r"^(?:[^@]+@)?([^/]+):(.+)$").unwrap();
}
if let Some(caps) = GIT_URL_RE.captures(url) {
return VcsUrl::from_git_parts(&caps[1], &caps[2]);
}
if let Some(caps) = GIT_SSH_RE.captures(url) {
return VcsUrl::from_git_parts(&caps[1], &caps[2]);
}
VcsUrl {
provider: "".into(),
id: url.into(),
}
}
fn from_git_parts(host: &str, path: &str) -> VcsUrl {
lazy_static! {
static ref VS_DOMAIN_RE: Regex = Regex::new(r"^([^.]+)\.visualstudio.com$").unwrap();
static ref VS_GIT_PATH_RE: Regex = Regex::new(r"^_git/(.+?)(?:\.git)?$").unwrap();
}
if let Some(caps) = VS_DOMAIN_RE.captures(host) {
let username = &caps[1];
if let Some(caps) = VS_GIT_PATH_RE.captures(path) {
return VcsUrl {
provider: host.into(),
id: format!("{}/{}", username, &caps[1]),
};
}
}
VcsUrl {
provider: host.into(),
id: strip_git_suffix(path).into(),
}
}
}
fn is_matching_url(a: &str, b: &str) -> bool {
VcsUrl::parse(a) == VcsUrl::parse(b)
}
fn find_reference_url(repo: &str, repos: &[Repo]) -> Result<String, Error> {
let mut found_non_git = false;
for configured_repo in repos {
if configured_repo.name != repo {
continue;
}
match configured_repo.provider.id.as_str() {
"git"
| "github"
| "bitbucket"
| "visualstudio"
| "integrations:github"
| "integrations:github_enterprise"
| "integrations:gitlab"
| "integrations:bitbucket"
| "integrations:vsts" => {
if let Some(ref url) = configured_repo.url {
debug!(" Got reference URL for repo {}: {}", repo, url);
return Ok(url.clone());
}
}
_ => {
debug!(" unknown repository {} skipped", configured_repo);
found_non_git = true;
}
}
}
if found_non_git {
bail!("For non git repositories explicit revisions are required");
} else {
bail!("Could not find matching repository for {}", repo);
}
}
fn find_matching_rev(
reference: GitReference<'_>,
spec: &CommitSpec,
repos: &[Repo],
disable_discovery: bool,
) -> Result<Option<String>, Error> {
macro_rules! log_match {
($ex:expr) => {{
let val = $ex;
info!(" -> found matching revision {}", val);
val
}};
}
info!("Resolving {} ({})", &reference, spec);
let r = match reference {
GitReference::Commit(commit) => {
return Ok(Some(log_match!(commit.to_string())));
}
GitReference::Symbolic(r) => r,
};
let (repo, discovery) = if let Some(ref path) = spec.path {
(git2::Repository::open(path)?, false)
} else {
(git2::Repository::open_from_env()?, !disable_discovery)
};
let reference_url = find_reference_url(&spec.repo, repos)?;
debug!(" Looking for reference URL {}", &reference_url);
// direct reference in root repository found. If we are in discovery
// mode we want to also check for matching URLs.
if_chain! {
if let Ok(remote) = repo.find_remote("origin");
if let Some(url) = remote.url();
then {
if !discovery || is_matching_url(url, &reference_url) {
debug!(" found match: {} == {}", url, &reference_url);
let head = repo.revparse_single(r)?;
return Ok(Some(log_match!(head.id().to_string())));
} else {
debug!(" not a match: {} != {}", url, &reference_url);
}
}
}
// in discovery mode we want to find that repo in associated submodules.
for submodule in repo.submodules()? {
if let Some(submodule_url) = submodule.url() {
debug!(" found submodule with URL {}", submodule_url);
if is_matching_url(submodule_url, &reference_url) {
debug!(
" found submodule match: {} == {}",
submodule_url, &reference_url
);
// heads on submodules is easier so let's start with that
// because that does not require the submodule to be
// checked out.
if r == "HEAD" {
if let Some(head_oid) = submodule.head_id() {
return Ok(Some(log_match!(head_oid.to_string())));
}
}
// otherwise we need to open the submodule which requires
// it to be checked out.
if let Ok(subrepo) = submodule.open() {
let head = subrepo.revparse_single(r)?;
return Ok(Some(log_match!(head.id().to_string())));
}
} else {
debug!(
" not a submodule match: {} != {}",
submodule_url, &reference_url
);
}
}
}
info!(" -> no matching revision found");
Ok(None)
}
|
repos: &[Repo],
disable_discovery: bool,
) -> Result<(Option<String>, String), Error> {
fn error(r: GitReference<'_>, repo: &str) -> Error {
format_err!(
"Could not find commit '{}' for '{}'. If you do not have local \
checkouts of the repositories in question referencing tags or \
other references will not work and you need to refer to \
revisions explicitly.",
r,
repo
)
}
let rev = if let Some(rev) =
find_matching_rev(spec.reference(), &spec, &repos[..], disable_discovery)?
{
rev
} else {
return Err(error(spec.reference(), &spec.repo));
};
let prev_rev = if let Some(rev) = spec.prev_reference() {
if let Some(rv) = find_matching_rev(rev, &spec, &repos[..], disable_discovery)? {
Some(rv)
} else {
return Err(error(rev, &spec.repo));
}
} else {
None
};
Ok((prev_rev, rev))
}
pub fn find_head() -> Result<String, Error> {
let repo = git2::Repository::open_from_env()?;
let head = repo.revparse_single("HEAD")?;
Ok(head.id().to_string())
}
/// Given commit specs and repos this returns a list of head commits
/// from it.
pub fn find_heads(specs: Option<Vec<CommitSpec>>, repos: &[Repo]) -> Result<Vec<Ref>, Error> {
let mut rv = vec![];
// if commit specs were explicitly provided find head commits with
// limited amounts of magic.
if let Some(specs) = specs {
for spec in &specs {
let (prev_rev, rev) = find_matching_revs(&spec, &repos[..], specs.len() == 1)?;
rv.push(Ref {
repo: spec.repo.clone(),
rev,
prev_rev,
});
}
// otherwise apply all the magic available
} else {
for repo in repos {
let spec = CommitSpec {
repo: repo.name.to_string(),
path: None,
rev: "HEAD".into(),
prev_rev: None,
};
if let Some(rev) = find_matching_rev(spec.reference(), &spec, &repos[..], false)? {
rv.push(Ref {
repo: repo.name.to_string(),
rev,
prev_rev: None,
});
}
}
}
Ok(rv)
}
#[test]
fn test_url_parsing() {
assert_eq!(
VcsUrl::parse("http://github.com/mitsuhiko/flask"),
VcsUrl {
provider: "github.com".into(),
id: "mitsuhiko/flask".into(),
}
);
assert_eq!(
VcsUrl::parse("[email protected]:mitsuhiko/flask.git"),
VcsUrl {
provider: "github.com".into(),
id: "mitsuhiko/flask".into(),
}
);
assert_eq!(
VcsUrl::parse("http://bitbucket.org/mitsuhiko/flask"),
VcsUrl {
provider: "bitbucket.org".into(),
id: "mitsuhiko/flask".into(),
}
);
assert_eq!(
VcsUrl::parse("[email protected]:mitsuhiko/flask.git"),
VcsUrl {
provider: "bitbucket.org".into(),
id: "mitsuhiko/flask".into(),
}
);
assert_eq!(
VcsUrl::parse("https://neilmanvar.visualstudio.com/_git/sentry-demo"),
VcsUrl {
provider: "neilmanvar.visualstudio.com".into(),
id: "neilmanvar/sentry-demo".into(),
}
);
assert_eq!(
VcsUrl::parse("https://github.myenterprise.com/mitsuhiko/flask.git"),
VcsUrl {
provider: "github.myenterprise.com".into(),
id: "mitsuhiko/flask".into(),
}
);
assert_eq!(
VcsUrl::parse("https://gitlab.example.com/gitlab-org/gitlab-ce"),
VcsUrl {
provider: "gitlab.example.com".into(),
id: "gitlab-org/gitlab-ce".into(),
}
);
assert_eq!(
VcsUrl::parse("[email protected]:gitlab-org/gitlab-ce.git"),
VcsUrl {
provider: "gitlab.example.com".into(),
id: "gitlab-org/gitlab-ce".into(),
}
);
assert_eq!(
VcsUrl::parse("https://gitlab.com/gitlab-org/gitlab-ce"),
VcsUrl {
provider: "gitlab.com".into(),
id: "gitlab-org/gitlab-ce".into(),
}
);
assert_eq!(
VcsUrl::parse("[email protected]:gitlab-org/gitlab-ce.git"),
VcsUrl {
provider: "gitlab.com".into(),
id: "gitlab-org/gitlab-ce".into(),
}
)
}
#[test]
fn test_url_normalization() {
assert!(!is_matching_url(
"http://github.mycompany.com/mitsuhiko/flask",
"[email protected]:mitsuhiko/flask.git"
));
assert!(!is_matching_url(
"[email protected]/mitsuhiko/flask",
"[email protected]:mitsuhiko/flask.git"
));
assert!(is_matching_url(
"http://github.com/mitsuhiko/flask",
"[email protected]:mitsuhiko/flask.git"
));
assert!(is_matching_url(
"https://gitlab.com/gitlab-org/gitlab-ce",
"[email protected]:gitlab-org/gitlab-ce.git"
));
assert!(is_matching_url(
"https://gitlab.example.com/gitlab-org/gitlab-ce",
"[email protected]:gitlab-org/gitlab-ce.git"
))
}
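// Illustrative sketch, not part of the original test suite: shows how a
// "repo@prev..rev" spec string is split by `CommitSpec::parse` and
// `parse_rev_range`. The repository name and revisions are made up.
#[test]
fn test_commit_spec_parse() {
    let spec = CommitSpec::parse("myrepo@abc123..def456").unwrap();
    assert_eq!(spec.repo, "myrepo");
    assert_eq!(spec.path, None);
    assert_eq!(spec.prev_rev, Some("abc123".to_string()));
    assert_eq!(spec.rev, "def456");
}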
|
fn find_matching_revs(
spec: &CommitSpec,
|
snapshot.go
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package manila
import (
"fmt"
"time"
"github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/cloud-provider-openstack/pkg/csi/manila/manilaclient"
clouderrors "k8s.io/cloud-provider-openstack/pkg/util/errors"
"k8s.io/klog/v2"
)
const (
snapshotCreating = "creating"
snapshotDeleting = "deleting"
snapshotError = "error"
snapshotErrorDeleting = "error_deleting"
snapshotAvailable = "available"
snapshotDescription = "snapshotted-by=manila.csi.openstack.org"
)
// getOrCreateSnapshot retrieves an existing snapshot with name=snapName, or creates a new one if it doesn't exist yet.
// Instead of waiting for the snapshot to become available (as getOrCreateShare does), CSI's ready_to_use flag is used to signal readiness
func getOrCreateSnapshot(snapName, sourceShareID string, manilaClient manilaclient.Interface) (*snapshots.Snapshot, error) {
var (
snapshot *snapshots.Snapshot
err error
)
// First, check if the snapshot already exists or needs to be created
if snapshot, err = manilaClient.GetSnapshotByName(snapName); err != nil {
if clouderrors.IsNotFound(err) {
// It doesn't exist, create it
opts := snapshots.CreateOpts{
ShareID: sourceShareID,
Name: snapName,
Description: snapshotDescription,
}
var createErr error
if snapshot, createErr = manilaClient.CreateSnapshot(opts); createErr != nil {
return nil, createErr
}
} else {
// Something else is wrong
return nil, fmt.Errorf("failed to probe for a snapshot named %s: %v", snapName, err)
}
} else {
klog.V(4).Infof("a snapshot named %s already exists", snapName)
}
return snapshot, nil
}
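// Illustrative usage sketch (client construction and identifiers assumed, not
// part of this file):
//
//	snap, err := getOrCreateSnapshot("snap-for-share-abc", shareID, manilaClient)
//	if err != nil {
//		return err
//	}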
func deleteSnapshot(snapID string, manilaClient manilaclient.Interface) error {
if err := manilaClient.DeleteSnapshot(snapID); err != nil {
if clouderrors.IsNotFound(err) {
klog.V(4).Infof("snapshot %s not found, assuming it to be already deleted", snapID)
} else {
return err
}
}
return nil
}
func tryDeleteSnapshot(snapshot *snapshots.Snapshot, manilaClient manilaclient.Interface) {
if snapshot == nil {
return
}
if err := deleteSnapshot(snapshot.ID, manilaClient); err != nil {
// TODO failure to delete a snapshot in an error state needs proper monitoring support
klog.Errorf("couldn't delete snapshot %s in a roll-back procedure: %v", snapshot.ID, err)
return
}
_, _, err := waitForSnapshotStatus(snapshot.ID, snapshotDeleting, "", true, manilaClient)
if err != nil && err != wait.ErrWaitTimeout {
klog.Errorf("couldn't retrieve snapshot %s in a roll-back procedure: %v", snapshot.ID, err)
}
}
func
|
(snapshotID, currentStatus, desiredStatus string, successOnNotFound bool, manilaClient manilaclient.Interface) (*snapshots.Snapshot, manilaError, error) {
var (
backoff = wait.Backoff{
Duration: time.Second * waitForAvailableShareTimeout,
Factor: 1.2,
Steps: waitForAvailableShareRetries,
}
snapshot *snapshots.Snapshot
manilaErrCode manilaError
err error
)
return snapshot, manilaErrCode, wait.ExponentialBackoff(backoff, func() (bool, error) {
snapshot, err = manilaClient.GetSnapshotByID(snapshotID)
if err != nil {
if clouderrors.IsNotFound(err) && successOnNotFound {
return true, nil
}
return false, err
}
var isAvailable bool
switch snapshot.Status {
case currentStatus:
isAvailable = false
case desiredStatus:
isAvailable = true
case shareError:
manilaErrMsg, err := lastResourceError(snapshotID, manilaClient)
if err != nil {
return false, fmt.Errorf("snapshot %s is in error state, error description could not be retrieved: %v", snapshotID, err)
}
manilaErrCode = manilaErrMsg.errCode
return false, fmt.Errorf("snapshot %s is in error state: %s", snapshotID, manilaErrMsg.message)
default:
return false, fmt.Errorf("snapshot %s is in an unexpected state: wanted either %s or %s, got %s", snapshotID, currentStatus, desiredStatus, snapshot.Status)
}
return isAvailable, nil
})
}
|
waitForSnapshotStatus
|
stream.go
|
package server
import (
"context"
"crypto/rand"
"errors"
"io"
"net"
"sync"
"time"
lru "github.com/hashicorp/golang-lru"
"github.com/ssdev-go/rpcx/log"
"github.com/ssdev-go/rpcx/share"
)
var ErrNotAccept = errors.New("server refused the connection")
// StreamHandler handles a streaming connection with client.
type StreamHandler func(conn net.Conn, args *share.StreamServiceArgs)
// StreamAcceptor determines whether a connection from a client is accepted.
// You can use it to validate clients and decide whether to accept or drop the connection.
type StreamAcceptor func(ctx context.Context, args *share.StreamServiceArgs) bool
type streamTokenInfo struct {
token []byte
args *share.StreamServiceArgs
}
// StreamService supports streaming between clients and the server.
// It registers a streaming service and listens on the given port.
// Clients invoke this service to get a token, then send the token and begin streaming.
type StreamService struct {
Addr string
AdvertiseAddr string
handler StreamHandler
acceptor StreamAcceptor
cachedTokens *lru.Cache
startOnce sync.Once
done chan struct{}
}
// NewStreamService creates a stream service.
func
|
(addr string, streamHandler StreamHandler, acceptor StreamAcceptor, waitNum int) *StreamService {
cachedTokens, _ := lru.New(waitNum)
	fi := &StreamService{
		Addr:         addr,
		handler:      streamHandler,
		acceptor:     acceptor,
		cachedTokens: cachedTokens,
		done:         make(chan struct{}),
	}
return fi
}
// EnableStreamService registers the stream service with this server.
func (s *Server) EnableStreamService(serviceName string, streamService *StreamService) {
if serviceName == "" {
serviceName = share.StreamServiceName
}
_ = streamService.Start()
_ = s.RegisterName(serviceName, streamService, "")
}
func (s *StreamService) Stream(ctx context.Context, args *share.StreamServiceArgs, reply *share.StreamServiceReply) error {
// clientConn := ctx.Value(server.RemoteConnContextKey).(net.Conn)
if s.acceptor != nil && !s.acceptor(ctx, args) {
return ErrNotAccept
}
token := make([]byte, 32)
_, err := rand.Read(token)
if err != nil {
return err
}
*reply = share.StreamServiceReply{
Token: token,
Addr: s.Addr,
}
if s.AdvertiseAddr != "" {
reply.Addr = s.AdvertiseAddr
}
s.cachedTokens.Add(string(token), &streamTokenInfo{token, args})
return nil
}
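// Illustrative flow sketch (client-side steps assumed, not part of this file):
//
//	1. the client calls the Stream RPC above and receives {Token, Addr}
//	2. the client dials Addr over TCP and writes the 32-byte token first
//	3. start() below looks the token up in cachedTokens and hands the
//	   connection to the registered StreamHandler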
func (s *StreamService) Start() error {
s.startOnce.Do(func() {
go s.start()
})
return nil
}
func (s *StreamService) start() error {
ln, err := net.Listen("tcp", s.Addr)
if err != nil {
return err
}
var tempDelay time.Duration
for {
select {
case <-s.done:
return nil
default:
conn, e := ln.Accept()
if e != nil {
if ne, ok := e.(net.Error); ok && ne.Temporary() {
if tempDelay == 0 {
tempDelay = 5 * time.Millisecond
} else {
tempDelay *= 2
}
if max := 1 * time.Second; tempDelay > max {
tempDelay = max
}
log.Errorf("filetransfer: accept error: %v; retrying in %v", e, tempDelay)
time.Sleep(tempDelay)
continue
}
return e
}
tempDelay = 0
if tc, ok := conn.(*net.TCPConn); ok {
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute)
tc.SetLinger(10)
}
token := make([]byte, 32)
_, err := io.ReadFull(conn, token)
if err != nil {
conn.Close()
log.Errorf("failed to read token from %s", conn.RemoteAddr().String())
continue
}
tokenStr := string(token)
info, ok := s.cachedTokens.Get(tokenStr)
if !ok {
conn.Close()
log.Errorf("failed to read token from %s", conn.RemoteAddr().String())
continue
}
s.cachedTokens.Remove(tokenStr)
switch ti := info.(type) {
case *streamTokenInfo:
if s.handler == nil {
conn.Close()
continue
}
go s.handler(conn, ti.args)
default:
conn.Close()
}
}
}
}
func (s *StreamService) Stop() error {
close(s.done)
return nil
}
|
NewStreamService
|
noop.rs
|
use crate::sys::Notification;
use anyhow::Error;
use futures::future;
use std::path::Path;
#[derive(Clone)]
pub struct System;
impl System {
pub async fn wait_for_shutdown(&self) {
future::pending().await
}
pub async fn wait_for_restart(&self)
|
pub fn clear(&self) {}
pub fn error(&self, _error: String) {}
pub fn notification(&self, _: Notification) {}
pub fn join(&self) -> Result<(), Error> {
Ok(())
}
pub fn is_installed(&self) -> Result<bool, Error> {
Ok(true)
}
pub fn install(&self) -> Result<(), Error> {
Ok(())
}
pub fn uninstall(&self) -> Result<(), Error> {
Ok(())
}
}
pub fn setup(_root: &Path, _log_file: &Path) -> Result<System, Error> {
Ok(System)
}
|
{
future::pending().await
}
|
directive.js
|
;(function(angular){
var app = angular.module('app');
app.directive('focus', function(){
return {
        restrict: 'EA',
        link: function(scope, element, attr){
element.on('click', function(){
element.parent().children().attr('class','');
element.addClass('active');
})
}
}
|
})
})(angular)
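// Illustrative usage sketch (markup assumed, not from the source):
//   <ul><li focus>Tab 1</li><li focus>Tab 2</li></ul>
// Clicking an item clears the class on its siblings and adds 'active' to itself.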
|
|
stream.py
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import concurrent.futures
import json
from abc import ABC, abstractmethod
from copy import deepcopy
from datetime import datetime
from functools import lru_cache
from operator import itemgetter
from traceback import format_exc
from typing import Any, Iterable, Iterator, List, Mapping, MutableMapping, Optional, Tuple, Union
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.models.airbyte_protocol import SyncMode
from airbyte_cdk.sources.streams import Stream
from wcmatch.glob import GLOBSTAR, SPLIT, globmatch
from .formats.csv_parser import CsvParser
from .formats.parquet_parser import ParquetParser
JSON_TYPES = ["string", "number", "integer", "object", "array", "boolean", "null"]
LOGGER = AirbyteLogger()
class
|
(Exception):
"""Client mis-configured"""
class FileStream(Stream, ABC):
@property
def fileformatparser_map(self):
"""Mapping where every key is equal 'filetype' and values are corresponding parser classes."""
return {
"csv": CsvParser,
"parquet": ParquetParser,
}
# TODO: make these user configurable in spec.json
ab_additional_col = "_ab_additional_properties"
ab_last_mod_col = "_ab_source_file_last_modified"
ab_file_name_col = "_ab_source_file_url"
airbyte_columns = [ab_additional_col, ab_last_mod_col, ab_file_name_col]
datetime_format_string = "%Y-%m-%dT%H:%M:%S%z"
def __init__(self, dataset: str, provider: dict, format: dict, path_pattern: str, schema: str = None):
"""
:param dataset: table name for this stream
:param provider: provider specific mapping as described in spec.json
:param format: file format specific mapping as described in spec.json
:param path_pattern: glob-style pattern for file-matching (https://facelessuser.github.io/wcmatch/glob/)
:param schema: JSON-syntax user provided schema, defaults to None
"""
self.dataset = dataset
self._path_pattern = path_pattern
self._provider = provider
self._format = format
self._schema = {}
if schema:
self._schema = self._parse_user_input_schema(schema)
self.master_schema = None
LOGGER.info(f"initialised stream with format: {format}")
@staticmethod
def _parse_user_input_schema(schema: str) -> Mapping[str, str]:
"""
If the user provided a schema, we run this method to convert to a python dict and verify it
This verifies:
- that the provided string is valid JSON
- that it is a key:value map with no nested values (objects or arrays)
- that all values in the map correspond to a JsonSchema datatype
If this passes, we are confident that the user-provided schema is valid and will work as expected with the rest of the code
:param schema: JSON-syntax user provided schema
:raises ConfigurationError: if any of the verification steps above fail
:return: the input schema (json string) as a python dict
"""
try:
py_schema = json.loads(schema)
except json.decoder.JSONDecodeError as err:
error_msg = f"Failed to parse schema {repr(err)}\n{schema}\n{format_exc()}"
raise ConfigurationError(error_msg) from err
# enforce all keys and values are of type string as required (i.e. no nesting)
if not all([isinstance(k, str) and isinstance(v, str) for k, v in py_schema.items()]):
raise ConfigurationError("Invalid schema provided, all column names and datatypes must be in string format")
# enforce all values (datatypes) are valid JsonSchema datatypes
if not all([datatype in JSON_TYPES for datatype in py_schema.values()]):
raise ConfigurationError(f"Invalid schema provided, datatypes must each be one of {JSON_TYPES}")
return py_schema
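    # Illustrative sketch (hypothetical column names, not from the source):
    #   _parse_user_input_schema('{"id": "integer", "name": "string"}')
    #       -> {"id": "integer", "name": "string"}
    #   _parse_user_input_schema('{"id": {"type": "integer"}}')
    #       -> raises ConfigurationError (nested values are not allowed)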
@property
def name(self) -> str:
return self.dataset
@property
def primary_key(self) -> Optional[Union[str, List[str], List[List[str]]]]:
return None
@property
def fileformatparser_class(self) -> type:
"""
:return: reference to the relevant fileformatparser class e.g. CsvParser
"""
filetype = self._format.get("filetype")
file_reader = self.fileformatparser_map.get(self._format.get("filetype"))
if not file_reader:
raise RuntimeError(
f"Detected mismatched file format '{filetype}'. Available values: '{list( self.fileformatparser_map.keys())}''."
)
return file_reader
@property
@abstractmethod
def storagefile_class(self) -> type:
"""
Override this to point to the relevant provider-specific StorageFile class e.g. S3File
:return: reference to relevant class
"""
@abstractmethod
    def filepath_iterator(self) -> Iterator[str]:
"""
Provider-specific method to iterate through bucket/container/etc. and yield each full filepath.
This should supply the 'url' to use in StorageFile(). This is possibly better described as blob or file path.
e.g. for AWS: f"s3://{aws_access_key_id}:{aws_secret_access_key}@{self.url}" <- self.url is what we want to yield here
:yield: url filepath to use in StorageFile()
"""
def pattern_matched_filepath_iterator(self, filepaths: Iterable[str]) -> Iterator[str]:
"""
        Iterates through iterable filepaths and yields only those filepaths that match the user-provided path pattern
:param filepaths: filepath_iterator(), this is a param rather than method reference in order to unit test this
:yield: url filepath to use in StorageFile(), if matching on user-provided path patterns
"""
for filepath in filepaths:
if globmatch(filepath, self._path_pattern, flags=GLOBSTAR | SPLIT):
yield filepath
@lru_cache(maxsize=None)
def get_time_ordered_filepaths(self) -> Iterable[Tuple[datetime, str]]:
"""
Iterates through pattern_matched_filepath_iterator(), acquiring last_modified property of each file to return in time ascending order.
Uses concurrent.futures to thread this asynchronously in order to improve performance when there are many files (network I/O)
Caches results after first run of method to avoid repeating network calls as this is used more than once
:return: list in time-ascending order
"""
def get_storagefile_with_lastmod(filepath: str) -> Tuple[datetime, str]:
fc = self.storagefile_class(filepath, self._provider)
return (fc.last_modified, filepath)
storagefiles = []
# use concurrent future threads to parallelise grabbing last_modified from all the files
# TODO: don't hardcode max_workers like this
with concurrent.futures.ThreadPoolExecutor(max_workers=64) as executor:
filepath_gen = self.pattern_matched_filepath_iterator(self.filepath_iterator())
futures = [executor.submit(get_storagefile_with_lastmod, fp) for fp in filepath_gen]
for future in concurrent.futures.as_completed(futures):
# this will failfast on any errors
storagefiles.append(future.result())
# The array storagefiles contain tuples of (last_modified, filepath), so sort by last_modified
return sorted(storagefiles, key=itemgetter(0))
def _get_schema_map(self) -> Mapping[str, Any]:
if self._schema != {}:
return_schema = deepcopy(self._schema)
else: # we have no provided schema or schema state from a previous incremental run
return_schema = self._get_master_schema()
return_schema[self.ab_additional_col] = "object"
return_schema[self.ab_last_mod_col] = "string"
return_schema[self.ab_file_name_col] = "string"
return return_schema
def get_json_schema(self) -> Mapping[str, Any]:
"""
:return: the JSON schema representing this stream.
"""
# note: making every non-airbyte column nullable for compatibility
# TODO: ensure this behaviour still makes sense as we add new file formats
properties = {}
for column, typ in self._get_schema_map().items():
properties[column] = {"type": ["null", typ]} if column not in self.airbyte_columns else {"type": typ}
properties[self.ab_last_mod_col]["format"] = "date-time"
return {"type": "object", "properties": properties}
def _get_master_schema(self, min_datetime: datetime = None) -> Mapping[str, Any]:
"""
In order to auto-infer a schema across many files and/or allow for additional properties (columns),
we need to determine the superset of schemas across all relevant files.
This method iterates through get_time_ordered_filepaths() obtaining the inferred schema (process implemented per file format),
to build up this superset schema (master_schema).
This runs datatype checks to Warn or Error if we find incompatible schemas (e.g. same column is 'date' in one file but 'float' in another).
This caches the master_schema after first run in order to avoid repeated compute and network calls to infer schema on all files.
:param min_datetime: if passed, will only use files with last_modified >= this to determine master schema
:raises RuntimeError: if we find datatype mismatches between files or between a file and schema state (provided or from previous inc. batch)
:return: A dict of the JSON schema representing this stream.
"""
# TODO: could implement a (user-beware) 'lazy' mode that skips schema checking to improve performance
# TODO: could utilise min_datetime to add a start_date parameter in spec for user
if self.master_schema is None:
master_schema = deepcopy(self._schema)
file_reader = self.fileformatparser_class(self._format)
for last_mod, filepath in self.get_time_ordered_filepaths():
# skip this file if it's earlier than min_datetime
if (min_datetime is not None) and (last_mod < min_datetime):
continue
storagefile = self.storagefile_class(filepath, self._provider)
with storagefile.open(file_reader.is_binary) as f:
this_schema = file_reader.get_inferred_schema(f)
if this_schema == master_schema:
continue # exact schema match so go to next file
# creates a superset of columns retaining order of master_schema with any additional columns added to end
column_superset = list(master_schema.keys()) + [c for c in this_schema.keys() if c not in master_schema.keys()]
# this compares datatype of every column that the two schemas have in common
for col in column_superset:
if (col in master_schema.keys()) and (col in this_schema.keys()) and (master_schema[col] != this_schema[col]):
# if this column exists in a provided schema or schema state, we'll WARN here rather than throw an error
# this is to allow more leniency as we may be able to coerce this datatype mismatch on read according to provided schema state
# if not, then the read will error anyway
if col in self._schema.keys():
LOGGER.warn(
f"Detected mismatched datatype on column '{col}', in file '{storagefile.url}'. "
+ f"Should be '{master_schema[col]}', but found '{this_schema[col]}'. "
+ f"Airbyte will attempt to coerce this to {master_schema[col]} on read."
)
# else we're inferring the schema (or at least this column) from scratch and therefore throw an error on mismatching datatypes
else:
raise RuntimeError(
f"Detected mismatched datatype on column '{col}', in file '{storagefile.url}'. "
+ f"Should be '{master_schema[col]}', but found '{this_schema[col]}'."
)
# missing columns in this_schema doesn't affect our master_schema so we don't check for it here
# add to master_schema any columns from this_schema that aren't already present
for col, datatype in this_schema.items():
if col not in master_schema.keys():
master_schema[col] = datatype
LOGGER.info(f"determined master schema: {master_schema}")
self.master_schema = master_schema
return self.master_schema
def stream_slices(
self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
"""
This builds full-refresh stream_slices regardless of sync_mode param.
For full refresh, 1 file == 1 stream_slice.
The structure of a stream slice is [ {file}, ... ].
In incremental mode, a stream slice may have more than one file so we mirror that format here.
Incremental stream_slices are implemented in the IncrementalFileStream child class.
"""
# TODO: this could be optimised via concurrent reads, however we'd lose chronology and need to deal with knock-ons of that
# we could do this concurrently both full and incremental by running batches in parallel
# and then incrementing the cursor per each complete batch
for last_mod, filepath in self.get_time_ordered_filepaths():
storagefile = self.storagefile_class(filepath, self._provider)
yield [{"unique_url": storagefile.url, "last_modified": last_mod, "storagefile": storagefile}]
def _match_target_schema(self, record: Mapping[str, Any], target_columns: List) -> Mapping[str, Any]:
"""
This method handles missing or additional fields in each record, according to the provided target_columns.
All missing fields are added, with a value of None (null)
All additional fields are packed into the _ab_additional_properties object column
We start off with a check to see if we're already lined up to target in order to avoid unnecessary iterations (useful if many columns)
:param record: json-like representation of a data row {column:value}
:param target_columns: list of column names to mutate this record into (obtained via self._get_schema_map().keys() as of now)
:return: mutated record with columns lining up to target_columns
"""
compare_columns = [c for c in target_columns if c not in [self.ab_last_mod_col, self.ab_file_name_col]]
# check if we're already matching to avoid unnecessary iteration
if set(list(record.keys()) + [self.ab_additional_col]) == set(compare_columns):
record[self.ab_additional_col] = {}
return record
# missing columns
for c in [col for col in compare_columns if col != self.ab_additional_col]:
if c not in record.keys():
record[c] = None
# additional columns
record[self.ab_additional_col] = {c: deepcopy(record[c]) for c in record.keys() if c not in compare_columns}
for c in record[self.ab_additional_col].keys():
del record[c]
return record
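    # Illustrative sketch (hypothetical columns, not from the source):
    #   record = {"id": 1, "extra": "x"}
    #   target_columns = ["id", "name", "_ab_additional_properties",
    #                     "_ab_source_file_last_modified", "_ab_source_file_url"]
    #   -> {"id": 1, "name": None, "_ab_additional_properties": {"extra": "x"}}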
def _add_extra_fields_from_map(self, record: Mapping[str, Any], extra_map: Mapping[str, Any]) -> Mapping[str, Any]:
"""
Simple method to take a mapping of columns:values and add them to the provided record
:param record: json-like representation of a data row {column:value}
:param extra_map: map of additional columns and values to add
:return: mutated record with additional fields
"""
for key, value in extra_map.items():
record[key] = value
return record
def _read_from_slice(
self,
file_reader,
stream_slice: Mapping[str, Any],
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
"""
Uses provider-relevant StorageFile to open file and then iterates through stream_records() using format-relevant AbstractFileParser.
Records are mutated on the fly using _match_target_schema() and _add_extra_fields_from_map() to achieve desired final schema.
Since this is called per stream_slice, this method works for both full_refresh and incremental.
"""
# TODO: read all files in a stream_slice concurrently
for file_info in stream_slice:
with file_info["storagefile"].open(file_reader.is_binary) as f:
# TODO: make this more efficient than mutating every record one-by-one as they stream
for record in file_reader.stream_records(f):
schema_matched_record = self._match_target_schema(record, list(self._get_schema_map().keys()))
complete_record = self._add_extra_fields_from_map(
schema_matched_record,
{
self.ab_last_mod_col: datetime.strftime(file_info["last_modified"], self.datetime_format_string),
self.ab_file_name_col: file_info["unique_url"],
},
)
yield complete_record
LOGGER.info("finished reading a stream slice")
# Always return an empty generator just in case no records were ever yielded
yield from []
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
"""
The heavy lifting sits in _read_from_slice() which is full refresh / incremental agnostic
"""
stream_slice = stream_slice if stream_slice is not None else []
file_reader = self.fileformatparser_class(self._format, self._get_master_schema())
yield from self._read_from_slice(file_reader, stream_slice)
class IncrementalFileStream(FileStream, ABC):
# TODO: ideally want to checkpoint after every file or stream slice rather than N records
state_checkpoint_interval = None
@property
def cursor_field(self) -> str:
"""
:return: The name of the cursor field.
"""
return self.ab_last_mod_col
def _get_datetime_from_stream_state(self, stream_state: Mapping[str, Any] = None) -> datetime:
"""if no state, we default to 1970-01-01 in order to pick up all files present."""
if stream_state is not None and self.cursor_field in stream_state.keys():
return datetime.strptime(stream_state[self.cursor_field], self.datetime_format_string)
else:
return datetime.strptime("1970-01-01T00:00:00+0000", self.datetime_format_string)
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
"""
Inspects the latest record extracted from the data source and the current state object and return an updated state object.
In the case where current_stream_state is null, we default to 1970-01-01 in order to pick up all files present.
We also save the schema into the state here so that we can use it on future incremental batches, allowing for additional/missing columns.
:param current_stream_state: The stream's current state object
:param latest_record: The latest record extracted from the stream
:return: An updated state object
"""
state_dict = {}
current_parsed_datetime = self._get_datetime_from_stream_state(current_stream_state)
latest_record_datetime = datetime.strptime(
latest_record.get(self.cursor_field, "1970-01-01T00:00:00+0000"), self.datetime_format_string
)
state_dict[self.cursor_field] = datetime.strftime(max(current_parsed_datetime, latest_record_datetime), self.datetime_format_string)
state_dict["schema"] = self._get_schema_map()
return state_dict
def stream_slices(
self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
"""
Builds either full_refresh or incremental stream_slices based on sync_mode.
An incremental stream_slice is a group of all files with the exact same last_modified timestamp.
This ensures we only update the cursor state to a given timestamp after ALL files with that timestamp have been successfully read.
Slight nuance: as we iterate through get_time_ordered_filepaths(),
we yield the stream_slice containing file(s) up to and EXcluding the file on the current iteration.
The stream_slice is then cleared (if we yielded it) and this iteration's file appended to the (next) stream_slice
"""
if sync_mode == SyncMode.full_refresh:
yield from super().stream_slices(sync_mode=sync_mode, cursor_field=cursor_field, stream_state=stream_state)
else:
# if necessary and present, let's update this object's schema attribute to the schema stored in state
# TODO: ideally we could do this on __init__ but I'm not sure that's possible without breaking from cdk style implementation
if self._schema == {} and stream_state is not None and "schema" in stream_state.keys():
self._schema = stream_state["schema"]
# logic here is to bundle all files with exact same last modified timestamp together in each slice
prev_file_last_mod = None # init variable to hold previous iterations last modified
stream_slice = []
for last_mod, filepath in self.get_time_ordered_filepaths():
# skip this file if last_mod is earlier than our cursor value from state
if (
stream_state is not None
and self.cursor_field in stream_state.keys()
and last_mod <= self._get_datetime_from_stream_state(stream_state)
):
continue
storagefile = self.storagefile_class(filepath, self._provider)
# check if this storagefile belongs in the next slice, if so yield the current slice before this file
if (prev_file_last_mod is not None) and (last_mod != prev_file_last_mod):
yield stream_slice
stream_slice.clear()
# now we either have an empty stream_slice or a stream_slice that this file shares a last modified with, so append it
stream_slice.append({"unique_url": storagefile.url, "last_modified": last_mod, "storagefile": storagefile})
# update our prev_file_last_mod to the current one for next iteration
prev_file_last_mod = last_mod
# now yield the final stream_slice. This is required because our loop only yields the slice previous to its current iteration.
if len(stream_slice) > 0:
yield stream_slice
# in case we have no files
yield from [None]
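    # Illustrative sketch of incremental slicing (hypothetical timestamps, not
    # from the source): with files ordered as (t1, a), (t1, b), (t2, c) the
    # loop above yields [a, b] first and [c] second, so the cursor only
    # advances once every file sharing a last_modified timestamp has been read.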
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
"""
The heavy lifting sits in _read_from_slice() which is full refresh / incremental agnostic.
We override this for incremental so we can pass our minimum datetime from state into _get_master_schema().
This means we only parse the schema of new files on incremental runs rather than all files in the bucket.
"""
if sync_mode == SyncMode.full_refresh:
yield from super().read_records(sync_mode, cursor_field, stream_slice, stream_state)
else:
stream_slice = stream_slice if stream_slice is not None else []
file_reader = self.fileformatparser_class(
self._format, self._get_master_schema(self._get_datetime_from_stream_state(stream_state))
)
yield from self._read_from_slice(file_reader, stream_slice)
|
ConfigurationError
|
factory.go
|
/*
Copyright 2020 The hostpath provisioner operator Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
reflect "reflect"
sync "sync"
time "time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
versioned "kubevirt.io/hostpath-provisioner-operator/pkg/client/clientset/versioned"
hostpathprovisioner "kubevirt.io/hostpath-provisioner-operator/pkg/client/informers/externalversions/hostpathprovisioner"
internalinterfaces "kubevirt.io/hostpath-provisioner-operator/pkg/client/informers/externalversions/internalinterfaces"
)
// SharedInformerOption defines the functional option type for SharedInformerFactory.
type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
type sharedInformerFactory struct {
client versioned.Interface
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
lock sync.Mutex
defaultResync time.Duration
customResync map[reflect.Type]time.Duration
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
// This allows Start() to be called multiple times safely.
startedInformers map[reflect.Type]bool
}
// WithCustomResyncConfig sets a custom resync period for the specified informer types.
func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
for k, v := range resyncConfig {
factory.customResync[reflect.TypeOf(k)] = v
}
return factory
}
}
// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.tweakListOptions = tweakListOptions
return factory
}
}
// WithNamespace limits the SharedInformerFactory to the specified namespace.
func WithNamespace(namespace string) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.namespace = namespace
return factory
}
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync)
}
// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
func
|
(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
}
// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
factory := &sharedInformerFactory{
client: client,
namespace: v1.NamespaceAll,
defaultResync: defaultResync,
informers: make(map[reflect.Type]cache.SharedIndexInformer),
startedInformers: make(map[reflect.Type]bool),
customResync: make(map[reflect.Type]time.Duration),
}
// Apply all options
for _, opt := range options {
factory = opt(factory)
}
return factory
}
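// Illustrative usage sketch (client construction and stop channel assumed, not
// part of this file):
//
//	factory := NewSharedInformerFactoryWithOptions(client, 30*time.Second, WithNamespace("example-ns"))
//	factory.Start(stopCh)
//	factory.WaitForCacheSync(stopCh)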
// Start initializes all requested informers.
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
f.lock.Lock()
defer f.lock.Unlock()
for informerType, informer := range f.informers {
if !f.startedInformers[informerType] {
go informer.Run(stopCh)
f.startedInformers[informerType] = true
}
}
}
// WaitForCacheSync waits until the caches of all started informers have synced.
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
informers := func() map[reflect.Type]cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informers := map[reflect.Type]cache.SharedIndexInformer{}
for informerType, informer := range f.informers {
if f.startedInformers[informerType] {
informers[informerType] = informer
}
}
return informers
}()
res := map[reflect.Type]bool{}
for informType, informer := range informers {
res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
}
return res
}
// InformerFor returns the SharedIndexInformer for obj using an internal
// client.
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(obj)
informer, exists := f.informers[informerType]
if exists {
return informer
}
resyncPeriod, exists := f.customResync[informerType]
if !exists {
resyncPeriod = f.defaultResync
}
informer = newFunc(f.client, resyncPeriod)
f.informers[informerType] = informer
return informer
}
// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
type SharedInformerFactory interface {
internalinterfaces.SharedInformerFactory
ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
Hostpathprovisioner() hostpathprovisioner.Interface
}
func (f *sharedInformerFactory) Hostpathprovisioner() hostpathprovisioner.Interface {
return hostpathprovisioner.New(f, f.namespace, f.tweakListOptions)
}
|
NewFilteredSharedInformerFactory
|
urls.py
|
from django.conf import settings
from django.conf.urls import patterns, url
# from django.contrib import admin
urlpatterns = patterns('',
url(r'^$', 'app.views.home', name='home'),
url(r'^graph/$', 'app.views.graph', name='graph'),
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
# url(r'^admin/', include(admin.site.urls)),
|
)
|
|
admin.py
|
from django.contrib import admin
from profiles_api import models
|
admin.site.register(models.UserProfile)
# Register your models here.
| |
event.rs
|
//! Traits and types describing timely dataflow events.
//!
//! The `Event` type describes the information an operator can observe about a timely dataflow
//! stream. There are two types of events, (i) the receipt of data and (ii) reports of progress
//! of timestamps.
/// Data and progress events of the captured stream.
#[derive(Debug, Clone, Abomonation, Hash, Ord, PartialOrd, Eq, PartialEq)]
pub enum EventCore<T, D> {
/// Progress received via `push_external_progress`.
Progress(Vec<(T, i64)>),
/// Messages received via the data stream.
Messages(T, D),
}
/// Data and progress events of the captured stream, specialized to vector-based containers.
pub type Event<T, D> = EventCore<T, Vec<D>>;
/// Iterates over contained `EventCore<T, D>`.
///
/// The `EventIterator` trait describes types that can iterate over references to events,
/// and which can be used to replay a stream into a new timely dataflow computation.
///
/// This method is not simply an iterator because of the lifetime in the result.
pub trait EventIteratorCore<T, D> {
/// Iterates over references to `EventCore<T, D>` elements.
fn next(&mut self) -> Option<&EventCore<T, D>>;
}
/// An [EventIteratorCore] specialized to vector-based containers.
// TODO: use trait aliases once stable.
pub trait EventIterator<T, D>: EventIteratorCore<T, Vec<D>> {
/// Iterates over references to `Event<T, D>` elements.
fn next(&mut self) -> Option<&Event<T, D>>;
}
impl<T, D, E: EventIteratorCore<T, Vec<D>>> EventIterator<T, D> for E {
fn next(&mut self) -> Option<&Event<T, D>> {
<Self as EventIteratorCore<_, _>>::next(self)
}
}
/// Receives `EventCore<T, D>` events.
pub trait EventPusherCore<T, D> {
/// Provides a new `Event<T, D>` to the pusher.
fn push(&mut self, event: EventCore<T, D>);
}
/// An [EventPusherCore] specialized to vector-based containers.
// TODO: use trait aliases once stable.
pub trait EventPusher<T, D>: EventPusherCore<T, Vec<D>> {}
impl<T, D, E: EventPusherCore<T, Vec<D>>> EventPusher<T, D> for E {}
// implementation for the linked list behind a `Handle`.
impl<T, D> EventPusherCore<T, D> for ::std::sync::mpsc::Sender<EventCore<T, D>> {
fn push(&mut self, event: EventCore<T, D>) {
// NOTE: An Err(x) result just means "data not accepted" most likely
// because the receiver is gone. No need to panic.
let _ = self.send(event);
}
}
/// A linked-list event pusher and iterator.
pub mod link {
use std::rc::Rc;
use std::cell::RefCell;
use super::{EventCore, EventPusherCore, EventIteratorCore};
/// A linked list of EventCore<T, D>.
pub struct EventLinkCore<T, D> {
/// An event, if one exists.
///
        /// An event might not exist, either because we want to insert a `None` and have the output
        /// iterator pause, or because the very first linked list element has no event when constructed.
pub event: Option<EventCore<T, D>>,
/// The next event, if it exists.
pub next: RefCell<Option<Rc<EventLinkCore<T, D>>>>,
}
    /// An [EventLinkCore] specialized to vector-based containers.
pub type EventLink<T, D> = EventLinkCore<T, Vec<D>>;
impl<T, D> EventLinkCore<T, D> {
/// Allocates a new `EventLink`.
pub fn new() -> EventLinkCore<T, D> {
EventLinkCore { event: None, next: RefCell::new(None) }
}
}
// implementation for the linked list behind a `Handle`.
impl<T, D> EventPusherCore<T, D> for Rc<EventLinkCore<T, D>> {
fn
|
(&mut self, event: EventCore<T, D>) {
*self.next.borrow_mut() = Some(Rc::new(EventLinkCore { event: Some(event), next: RefCell::new(None) }));
let next = self.next.borrow().as_ref().unwrap().clone();
*self = next;
}
}
impl<T, D> EventIteratorCore<T, D> for Rc<EventLinkCore<T, D>> {
fn next(&mut self) -> Option<&EventCore<T, D>> {
let is_some = self.next.borrow().is_some();
if is_some {
let next = self.next.borrow().as_ref().unwrap().clone();
*self = next;
self.event.as_ref()
}
else {
None
}
}
}
// Drop implementation to prevent stack overflow through naive drop impl.
impl<T, D> Drop for EventLinkCore<T, D> {
fn drop(&mut self) {
while let Some(link) = self.next.replace(None) {
if let Ok(head) = Rc::try_unwrap(link) {
*self = head;
}
}
}
}
impl<T, D> Default for EventLinkCore<T, D> {
fn default() -> Self {
Self::new()
}
}
#[test]
fn avoid_stack_overflow_in_drop() {
let mut event1 = Rc::new(EventLinkCore::<(),()>::new());
let _event2 = event1.clone();
for _ in 0 .. 1_000_000 {
event1.push(EventCore::Progress(vec![]));
}
}
}
/// A binary event pusher and iterator.
pub mod binary {
use std::io::Write;
use abomonation::Abomonation;
use super::{EventCore, EventPusherCore, EventIteratorCore};
/// A wrapper for `W: Write` implementing `EventPusherCore<T, D>`.
pub struct EventWriterCore<T, D, W: ::std::io::Write> {
stream: W,
phant: ::std::marker::PhantomData<(T,D)>,
}
/// [EventWriterCore] specialized to vector-based containers.
pub type EventWriter<T, D, W> = EventWriterCore<T, Vec<D>, W>;
impl<T, D, W: ::std::io::Write> EventWriterCore<T, D, W> {
/// Allocates a new `EventWriter` wrapping a supplied writer.
pub fn new(w: W) -> Self {
Self {
stream: w,
phant: ::std::marker::PhantomData,
}
}
}
impl<T: Abomonation, D: Abomonation, W: ::std::io::Write> EventPusherCore<T, D> for EventWriterCore<T, D, W> {
fn push(&mut self, event: EventCore<T, D>) {
            // TODO: `push` has no mechanism to report errors, so we panic via `expect`.
unsafe { ::abomonation::encode(&event, &mut self.stream).expect("Event abomonation/write failed"); }
}
}
    /// A wrapper for `R: Read` implementing `EventIterator<T, D>`.
pub struct EventReaderCore<T, D, R: ::std::io::Read> {
reader: R,
bytes: Vec<u8>,
buff1: Vec<u8>,
buff2: Vec<u8>,
consumed: usize,
valid: usize,
phant: ::std::marker::PhantomData<(T,D)>,
}
/// [EventReaderCore] specialized to vector-based containers.
pub type EventReader<T, D, R> = EventReaderCore<T, Vec<D>, R>;
impl<T, D, R: ::std::io::Read> EventReaderCore<T, D, R> {
/// Allocates a new `EventReader` wrapping a supplied reader.
pub fn new(r: R) -> Self {
Self {
reader: r,
bytes: vec![0u8; 1 << 20],
buff1: vec![],
buff2: vec![],
consumed: 0,
valid: 0,
phant: ::std::marker::PhantomData,
}
}
}
impl<T: Abomonation, D: Abomonation, R: ::std::io::Read> EventIteratorCore<T, D> for EventReaderCore<T, D, R> {
fn next(&mut self) -> Option<&EventCore<T, D>> {
// if we can decode something, we should just return it! :D
if unsafe { ::abomonation::decode::<EventCore<T,D>>(&mut self.buff1[self.consumed..]) }.is_some() {
let (item, rest) = unsafe { ::abomonation::decode::<EventCore<T,D>>(&mut self.buff1[self.consumed..]) }.unwrap();
self.consumed = self.valid - rest.len();
return Some(item);
}
// if we exhaust data we should shift back (if any shifting to do)
if self.consumed > 0 {
self.buff2.clear();
self.buff2.write_all(&self.buff1[self.consumed..]).unwrap();
::std::mem::swap(&mut self.buff1, &mut self.buff2);
self.valid = self.buff1.len();
self.consumed = 0;
}
if let Ok(len) = self.reader.read(&mut self.bytes[..]) {
self.buff1.write_all(&self.bytes[..len]).unwrap();
self.valid = self.buff1.len();
}
None
}
}
}
|
push
|
easy-button.js
|
(function(){
// This is for grouping buttons into a bar
// takes an array of `L.easyButton`s and
// then the usual `.addTo(map)`
L.Control.EasyBar = L.Control.extend({
options: {
position: 'topleft', // part of leaflet's defaults
id: null, // an id to tag the Bar with
leafletClasses: true // use leaflet classes?
},
initialize: function(buttons, options){
if(options){
L.Util.setOptions( this, options );
}
this._buildContainer();
this._buttons = [];
for(var i = 0; i < buttons.length; i++){
buttons[i]._bar = this;
buttons[i]._container = buttons[i].button;
this._buttons.push(buttons[i]);
this.container.appendChild(buttons[i].button);
}
},
_buildContainer: function(){
this._container = this.container = L.DomUtil.create('div', '');
this.options.leafletClasses && L.DomUtil.addClass(this.container, 'leaflet-bar easy-button-container leaflet-control');
this.options.id && (this.container.id = this.options.id);
},
enable: function(){
L.DomUtil.addClass(this.container, 'enabled');
L.DomUtil.removeClass(this.container, 'disabled');
this.container.setAttribute('aria-hidden', 'false');
return this;
},
disable: function(){
L.DomUtil.addClass(this.container, 'disabled');
L.DomUtil.removeClass(this.container, 'enabled');
this.container.setAttribute('aria-hidden', 'true');
return this;
},
onAdd: function () {
return this.container;
},
addTo: function (map) {
this._map = map;
for(var i = 0; i < this._buttons.length; i++){
this._buttons[i]._map = map;
}
var container = this._container = this.onAdd(map),
pos = this.getPosition(),
corner = map._controlCorners[pos];
L.DomUtil.addClass(container, 'leaflet-control');
if (pos.indexOf('bottom') !== -1) {
corner.insertBefore(container, corner.firstChild);
} else {
corner.appendChild(container);
}
return this;
}
});
L.easyBar = function(){
var args = [L.Control.EasyBar];
for(var i = 0; i < arguments.length; i++){
args.push( arguments[i] );
}
return new (Function.prototype.bind.apply(L.Control.EasyBar, args));
};
// L.EasyButton is the actual buttons
// can be called without being grouped into a bar
L.Control.EasyButton = L.Control.extend({
options: {
position: 'topleft', // part of leaflet's defaults
id: null, // an id to tag the button with
type: 'replace', // [(replace|animate)]
// replace swaps out elements
// animate changes classes with all elements inserted
states: [], // state names look like this
// {
// stateName: 'untracked',
// onClick: function(){ handle_nav_manually(); };
// title: 'click to make inactive',
// icon: 'fa-circle', // wrapped with <a>
// }
leafletClasses: true, // use leaflet styles for the button
tagName: 'button',
},
initialize: function(icon, onClick, title, id){
// clear the states manually
this.options.states = [];
// add id to options
if(id != null){
this.options.id = id;
}
// storage between state functions
this.storage = {};
// is the last item an object?
if( typeof arguments[arguments.length-1] === 'object' ){
// if so, it should be the options
L.Util.setOptions( this, arguments[arguments.length-1] );
}
// if there aren't any states in options
// use the early params
if( this.options.states.length === 0 &&
typeof icon === 'string' &&
typeof onClick === 'function'){
// turn the options object into a state
this.options.states.push({
icon: icon,
onClick: onClick,
title: typeof title === 'string' ? title : ''
});
}
// curate and move user's states into
// the _states for internal use
this._states = [];
for(var i = 0; i < this.options.states.length; i++){
this._states.push( new State(this.options.states[i], this) );
}
this._buildButton();
this._activateState(this._states[0]);
},
_buildButton: function(){
this.button = L.DomUtil.create(this.options.tagName, '');
// the next three if statements should be collapsed into the options
// when it's time for breaking changes.
      if (this.options.tagName === 'button') {
this.button.type = 'button';
}
if (this.options.id ){
this.button.id = this.options.id;
}
if (this.options.leafletClasses){
L.DomUtil.addClass(this.button, 'easy-button-button leaflet-bar-part leaflet-interactive');
}
// don't let double clicks and mousedown get to the map
L.DomEvent.addListener(this.button, 'dblclick', L.DomEvent.stop);
L.DomEvent.addListener(this.button, 'mousedown', L.DomEvent.stop);
// take care of normal clicks
L.DomEvent.addListener(this.button,'click', function(e){
L.DomEvent.stop(e);
this._currentState.onClick(this, this._map ? this._map : null );
this._map.getContainer().focus();
}, this);
// prep the contents of the control
if(this.options.type == 'replace'){
this.button.appendChild(this._currentState.icon);
} else {
for(var i=0;i<this._states.length;i++){
this.button.appendChild(this._states[i].icon);
}
}
},
_currentState: {
// placeholder content
stateName: 'unnamed',
icon: (function(){ return document.createElement('span'); })()
},
_states: null, // populated on init
state: function(newState){
// activate by name
if(typeof newState == 'string'){
this._activateStateNamed(newState);
// activate by index
} else if (typeof newState == 'number'){
this._activateState(this._states[newState]);
}
return this;
},
_activateStateNamed: function(stateName){
for(var i = 0; i < this._states.length; i++){
if( this._states[i].stateName == stateName ){
this._activateState( this._states[i] );
}
}
},
_activateState: function(newState){
if( newState === this._currentState ){
// don't touch the dom if it'll just be the same after
return;
} else {
// swap out elements... if you're into that kind of thing
if( this.options.type == 'replace' ){
this.button.appendChild(newState.icon);
this.button.removeChild(this._currentState.icon);
}
if( newState.title ){
this.button.title = newState.title;
} else {
this.button.removeAttribute('title');
}
// update classes for animations
for(var i=0;i<this._states.length;i++){
L.DomUtil.removeClass(this._states[i].icon, this._currentState.stateName + '-active');
L.DomUtil.addClass(this._states[i].icon, newState.stateName + '-active');
}
// update classes for animations
L.DomUtil.removeClass(this.button, this._currentState.stateName + '-active');
L.DomUtil.addClass(this.button, newState.stateName + '-active');
// update the record
this._currentState = newState;
|
enable: function(){
L.DomUtil.addClass(this.button, 'enabled');
L.DomUtil.removeClass(this.button, 'disabled');
this.button.setAttribute('aria-hidden', 'false');
return this;
},
disable: function(){
L.DomUtil.addClass(this.button, 'disabled');
L.DomUtil.removeClass(this.button, 'enabled');
this.button.setAttribute('aria-hidden', 'true');
return this;
},
removeFrom: function (map) {
this._container.parentNode.removeChild(this._container);
this._map = null;
return this;
},
onAdd: function(){
var containerObj = L.easyBar([this], {
position: this.options.position,
leafletClasses: this.options.leafletClasses
});
this._container = containerObj.container;
return this._container;
}
});
L.easyButton = function(/* args will pass automatically */){
var args = Array.prototype.concat.apply([L.Control.EasyButton],arguments);
return new (Function.prototype.bind.apply(L.Control.EasyButton, args));
};
/*************************
*
* util functions
*
*************************/
// constructor for states so only curated
// states end up getting called
function State(template, easyButton){
this.title = template.title;
this.stateName = template.stateName ? template.stateName : 'unnamed-state';
// build the wrapper
this.icon = L.DomUtil.create('span', '');
L.DomUtil.addClass(this.icon, 'button-state state-' + this.stateName.replace(/(^\s*|\s*$)/g,''));
this.icon.innerHTML = buildIcon(template.icon);
this.onClick = L.Util.bind(template.onClick?template.onClick:function(){}, easyButton);
}
function buildIcon(ambiguousIconString) {
var tmpIcon;
// does this look like html? (i.e. not a class)
if( ambiguousIconString.match(/[&;=<>"']/) ){
// if so, the user should have put in html
// so move forward as such
tmpIcon = ambiguousIconString;
// then it wasn't html, so
// it's a class list, figure out what kind
} else {
ambiguousIconString = ambiguousIconString.replace(/(^\s*|\s*$)/g,'');
tmpIcon = L.DomUtil.create('span', '');
if( ambiguousIconString.indexOf('fa-') === 0 ){
L.DomUtil.addClass(tmpIcon, 'fa ' + ambiguousIconString)
} else if ( ambiguousIconString.indexOf('glyphicon-') === 0 ) {
L.DomUtil.addClass(tmpIcon, 'glyphicon ' + ambiguousIconString)
} else {
L.DomUtil.addClass(tmpIcon, /*rollwithit*/ ambiguousIconString)
}
// make this a string so that it's easy to set innerHTML below
tmpIcon = tmpIcon.outerHTML;
}
return tmpIcon;
}
})();
|
}
},
|
dev_settings.py
|
"""
Django settings for IdentityAccessManager project.
"""
from os import path
PROJECT_ROOT = path.dirname(path.abspath(path.dirname(__file__)))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
HOST = {
'PROTOCOL': "http",
'IP': "192.168.1.2", #"192.168.1.68",
'PORT': 8000,
'PATH': ''
}
ALLOWED_HOSTS = (
'*',
)
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
OPENAM_HOST = "http://192.168.1.3/openam/"
OPENAM = {
'PROTOCOL': "http",
'IP': "192.168.1.3",
'PORT': 80
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'openam',
'USER': 'root',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '3306',
}
}
LOGIN_URL = '/login'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
|
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
#MEDIA_ROOT = ''
MEDIA_ROOT = path.join(PROJECT_ROOT, 'media').replace('\\', '/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
#MEDIA_URL = ''
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = path.join(PROJECT_ROOT, 'static').replace('\\', '/')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'n(bd1f1c%e8=_xad02x5qtfn%wgwpi492e$8_erx+d)!tpeoim'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [path.join(PROJECT_ROOT, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.static',
'app.context_processors.iam_processor',
'django.contrib.messages.context_processors.messages',
"django.template.context_processors.request",
"django.template.context_processors.media",
],
# "debug": DEBUG,
},
},
]
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
# django debug tools
'debug_toolbar.middleware.DebugToolbarMiddleware',
'app.middleware.token_validation.TokenValidationMiddleware',
)
ROOT_URLCONF = 'IdentityAccessManager.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'IdentityAccessManager.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or
# "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
'restapi',
'rest_framework',
'rest_framework_swagger',
'corsheaders',
'debug_toolbar',
'analytical',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format': "[%(asctime)s] - [%(name)s:%(lineno)s] - [%(levelname)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': str(PROJECT_ROOT) + "/logs/error.log",
'maxBytes': 2024 * 2024,
'backupCount': 5,
'formatter': 'standard',
},
'apifile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': str(PROJECT_ROOT) + "/logs/api_error.log",
'maxBytes': 2024 * 2024,
'backupCount': 5,
'formatter': 'standard',
},
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'django': {
'handlers': ['console'],
'propagate': True,
'level': 'WARN',
},
'django.db.backends': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'app': {
'handlers': ['logfile'],
'level': 'INFO',
},
'restapi': {
'handlers': ['apifile'],
'level': 'INFO',
},
}
}
# Specify the default test runner.
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Django rest framework
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'DEFAULT_PERMISSION_CLASSES': [
#'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
'rest_framework.permissions.AllowAny',
],
#'DEFAULT_FILTER_BACKENDS': (
# 'rest_framework.filters.DjangoFilterBackend',
#),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
# Settings for swagger
SWAGGER_SETTINGS = {
'exclude_namespaces': ['private_api'],
'api_version': '1.0',
'api_path': '/',
'base_path': str(HOST['IP']) + ":" + str(HOST['PORT']) + str(HOST['PATH']) + '/docs',
'enabled_methods': [
'get',
'post',
'put',
'patch',
'delete',
'options'
],
'api_key': '',
'is_authenticated': False,
'is_superuser': False,
'unauthenticated_user': 'django.contrib.auth.models.AnonymousUser',
#'permission_denied_handler': None,
#'resource_access_handler': None,
'info': {
'contact': '[email protected]',
'description': 'This project has received funding from the European Union\'s Seventh Programme for research, technological development and demonstration under grant agreement, No 610510. Find below the documentation of REST web services that OpenAM plugin provides.',
'license': 'Apache 2.0',
'licenseUrl': 'http://www.apache.org/licenses/LICENSE-2.0.html',
'title': 'Prosperity4All project: documentation of Identity & Access Manager web services',
},
'doc_expansion': 'none',
}
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_METHODS = (
'GET',
'POST',
'PUT',
'PATCH',
'DELETE',
'OPTIONS'
)
# Email account for notifications
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'xxxxxxxxxxxxx'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Redis
REDIS_IP = '127.0.0.1'
REDIS_PORT = 6379
REDIS_MAX_CON = 50 # Maximum connections in a redis pool
REDIS_TIMEOUT = 5 # Connection timeout in seconds
# Celery settings
# REDIS_CELERY_DATABASE = 0 # Database number that celery uses as a broker
BROKER_URL = 'redis://localhost:6379'
CELERY_RESULT_BACKEND = 'redis://localhost:6379'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'UTC'
INTERNAL_IPS = ('192.168.1.2',)
# Google analytics
GOOGLE_ANALYTICS_PROPERTY_ID = 'UA-xxxxxxxxxxxx'
ANALYTICAL_INTERNAL_IPS = ['192.168.1.2']
ANALYTICAL_AUTO_IDENTIFY = False
GOOGLE_ANALYTICS_DISPLAY_ADVERTISING = True
|
# to load the internationalization machinery.
|
test_endians.py
|
# -*- coding: utf-8 -*-
"""
"""
import sys
import ctypes
import pytest
from rockhopper._ragged_array import slug
pytestmark = pytest.mark.order(1)
def log_range(start, stop, base):
while start < stop:
# These sequences give a quick way to test the full range of an
# integer type.
yield start
start *= base
def test_log_range():
assert list(log_range(1, 10, 3)) == [1, 3, 9]
@pytest.mark.parametrize("int_base", range(4))
def test_endian_swap(int_base):
"""Test the family of :c:`swap_endian_xx()` functions."""
bytes = (1 << int_base)
bits = bytes * 8
swap = getattr(slug.dll, f"swap_endian_{bits}")
for i in log_range(1, 1 << bytes, 3):
assert swap(i).to_bytes(bytes, "big") == i.to_bytes(bytes, "little")
def test_is_big_endian():
"""Test :c:`is_big_endian()` matched :attr:`sys.byteorder`."""
assert slug.dll.is_big_endian() == (sys.byteorder == "big")
def f_ptr(f):
"""Get the raw memory address of a :mod:`ctypes` function pointer."""
return ctypes.cast(f, ctypes.c_void_p).value
@pytest.mark.parametrize("int_base", range(4))
@pytest.mark.parametrize("byteorder", ["little", "big"])
def test_int_write(int_base, byteorder):
"""
Test the family of :c:`write_xx()` and :c:`write_swap_xx()` integer
writing functions and the selector :c:`choose_int_write()`.
"""
|
# The real return type of `choose_int_write()` is `IntWrite` which is a
# typedef (which cslug doesn't support) to a function pointer (which
# cslug also doesn't support). We only need to test which function it
    # returns, so a raw void pointer is sufficient.
slug.dll.choose_int_write.restype = ctypes.c_void_p
# Get the writer we expect to get.
name = f"write_{bits}" if native else f"write_swap_{bits}"
write = getattr(slug.dll, name)
# Check it matches the output of choose_int_write()`.
assert slug.dll.choose_int_write(int_base,
byteorder == "big") == f_ptr(write)
# Try writing an integer with it.
x = 0x1122334455667788 & ((1 << bits) - 1)
out = ctypes.create_string_buffer(bytes)
write(x, out)
assert list(out[:]) == list(x.to_bytes(bytes, byteorder))
read = getattr(slug.dll, name.replace("write", "read"))
assert read(out) == x
|
bytes = 1 << int_base
bits = 8 * bytes
native = sys.byteorder == byteorder
|
test_signature.py
|
#!/usr/bin/env python
# Copyright 2017 H2O.ai; Apache License Version 2.0; -*- encoding: utf-8 -*-
import pytest
import time
from tests import typed, py3only, TTypeError
# Stub
def foo():
return False
def test_func_0args0kws():
@typed()
def foo():
return True
assert foo()
with pytest.raises(TTypeError) as e:
foo(1)
assert str(e.value) == "`foo()` doesn't take any arguments"
with pytest.raises(TTypeError) as e:
foo(1, 2, 3)
assert str(e.value) == "`foo()` doesn't take any arguments"
with pytest.raises(TTypeError) as e:
foo(w=1)
assert str(e.value) == "`foo()` got an unexpected keyword argument `w`"
with pytest.raises(TTypeError) as e:
foo(1, q=2)
assert str(e.value) == "`foo()` doesn't take any arguments"
def test_method_noargs():
class Foo(object):
@typed()
def bar(self):
return True
foo = Foo()
assert foo.bar()
with pytest.raises(TTypeError) as e:
foo.bar(1)
assert str(e.value) == "`bar()` doesn't take any arguments"
with pytest.raises(TTypeError) as e:
foo.bar(ww=1)
assert str(e.value) == "`bar()` got an unexpected keyword argument `ww`"
def test_func_1arg0kws():
@typed(x=int)
def foo(x):
return True
assert foo(1)
assert foo(x=15)
with pytest.raises(TTypeError) as e:
foo()
assert str(e.value) == "`foo()` missing 1 required positional argument `x`"
with pytest.raises(TTypeError) as e:
foo(1, 2, 3)
assert str(e.value) == "`foo()` takes 1 positional argument but 3 were " \
"given"
with pytest.raises(TTypeError) as e:
foo("bar")
assert str(e.value) == ("Parameter `x` of type `int` received value 'bar' "
"of type str")
with pytest.raises(TTypeError) as e:
foo(1, x=2)
assert str(e.value) == "`foo()` got multiple values for argument `x`"
def test_func_3args0kws():
@typed(x=int, y=float, z=str)
def foo(x, y, z):
return "%d %.3f %s" % (x, y, z)
assert foo(1, 2, "bar")
assert foo(z="reverse", x=7, y=0.001)
with pytest.raises(TTypeError) as e:
foo()
assert str(e.value) == "`foo()` missing 3 required positional arguments: "\
"`x`, `y` and `z`"
with pytest.raises(TTypeError) as e:
foo(y=0)
assert str(e.value) == "`foo()` missing 2 required positional arguments: "\
"`x` and `z`"
with pytest.raises(TTypeError) as e:
foo(x=[3], y=4, z="")
assert str(e.value) == ("Parameter `x` of type `int` received value [3] "
"of type list")
@py3only
def test_func_1varg1kw():
exec("@typed(nums=int, force=bool)\n"
"def foo(*nums, force=False):\n"
" return True\n", locals(), globals())
assert foo()
assert foo(1, 2, 5)
assert foo(force=False)
assert foo(-1, 7, force=True)
with pytest.raises(TTypeError) as e:
foo(1, 10, 100, True)
assert str(e.value) == "Vararg parameter of type `int` received " \
"value True"
def test_func_varargs():
@typed(args=int)
def
|
(*args):
return sum(args)
assert foo() == 0
assert foo(1) == 1
assert foo(1, 2, 3, 4, 5) == 15
with pytest.raises(TTypeError) as e:
foo(1, 3, "bar")
assert str(e.value) == ("Vararg parameter of type `int` received "
"value 'bar' of type str")
def test_func_varkws():
@typed(kws=float)
def foo(**kws):
return sum(kws.values())
assert foo() == 0
assert foo(x=1) == 1
assert foo(x=1, y=3.3, z=0.7) == 5
with pytest.raises(TTypeError) as e:
foo(x=1, xx=3, xxx="bar")
assert str(e.value) == ("Parameter `xxx` of type `float` received value "
"'bar' of type str")
with pytest.raises(TTypeError) as e:
foo(1, 2, x=10)
assert str(e.value) == "`foo()` accepts only keyword arguments"
def test_return_value():
@typed(_return=float)
def foo():
return time.time()
@typed(_return=int)
def bar(x):
return x
assert foo() > 0
assert bar(1) == 1
with pytest.raises(TTypeError) as e:
bar("test")
assert str(e.value) == "Incorrect return type in `bar()`: " \
"expected int got str"
def test_bad_declaration():
with pytest.raises(RuntimeError) as e:
@typed(z=int)
def foo1():
pass
assert str(e.value) == "Invalid function argument(s): z"
with pytest.raises(RuntimeError) as e:
class A(object):
@typed(self=int)
def __init__(self):
pass
assert str(e.value) == "`self` parameter must not be typed"
with pytest.raises(RuntimeError) as e:
@typed(z={str: str, int: int})
def foo2(z):
pass
assert "Keys in the dict literal must be string constants" in str(e.value)
def test_wrapped_function():
import functools
def decorator(f):
@functools.wraps(f)
def decorated(*args):
print("Running f(%r)" % args)
return f(*args)
if not hasattr(decorated, "__wrapped__"):
decorated.__wrapped__ = f
return decorated
@typed(x=int)
@decorator
@decorator
def foo(x):
return True
assert foo(5)
with pytest.raises(TTypeError) as e:
foo("bar")
assert str(e.value) == ("Parameter `x` of type `int` received value 'bar' "
"of type str")
@py3only
def test_function_with_signature():
# We have to use exec() here, otherwise it would be a syntax error on
# Python-2
exec("@typed()\n"
"def foo(x: int = None) -> bool:\n"
" return True\n", locals(), globals())
assert foo() # noqa
assert foo(1) # noqa
with pytest.raises(TTypeError) as e:
foo("oo")
assert str(e.value) == ("Parameter `x` of type `int` received value 'oo' "
"of type str")
with pytest.raises(RuntimeError) as e:
exec("@typed(x=str)\n"
"def foo(x: int = None):\n"
" return True\n", locals(), globals())
assert str(e.value) == ("Parameter `x` should not have its type specified "
"both in @typed() and in the annotations")
def test_defaults():
@typed(x=int)
def foo(x=None):
return True
assert foo()
assert foo(5)
assert foo(None)
assert foo(x=10)
assert foo(x=None)
with pytest.raises(TTypeError) as e:
foo(x="")
assert str(e.value) == ("Parameter `x` of type `int` received value '' "
"of type str")
|
foo
|
gdc.py
|
#!/usr/bin/python3
import sys
import json
import os
import getopt
import ipaddress
import uuid
ip = ''
function = ''
gdc = None
description = ''
listofips = None
if len(sys.argv) <= 4:
print("Error - Format should be - gdc.py -g <Generic_Data_Center_Name> -j <Json_File> -f <AddGDC/DelGDC/AddIP/DelIP> -i <ip> -d <GDC_Description>")
print("")
print("This simple tool will update a JSON file with ip addresses (v4 and v6) used with the Generic Data Center Objects as described in SK167210 for R81.")
print("Examples:")
print("Add a new IP address to a Generic Data Center to an existing JSON file")
print("gdc.py -g GDC_LIST1 -j gdc.json -f AddIP -i 10.2.0.1")
print("")
print("Add a new IP addresses to a Generic Data Center to an existing JSON file from a list of ip's")
print("gdc.py -g GDC_LIST1 -j gdc.json -f AddIP -l listofip_address.txt")
print("")
print("Delete an IP address to a Generic Data Center to an existing JSON file")
print("gdc.py -g GDC_LIST1 -j gdc.json -f DelIP -i 10.2.0.1")
print("")
print("Add a new Generic Data Center to an existing JSON file. IP address must be included.")
print("gdc.py -g GDC_LIST_New -j gdc.json -f AddGDC -d GDC_LIST_NEW_Description -i 10.2.0.1")
print("")
print("Delete a Generic Data Center in an existing JSON file. ")
print("gdc.py -g GDC_LIST_New -j gdc.json -f DelGDC")
print("")
exit(1)
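# Illustrative sketch (not part of the original script): based on how AddGDC and
# AddIP manipulate obj['objects'] below, the JSON file is assumed to look roughly like:
#
# {
#     "objects": [
#         {
#             "description": "GDC_LIST1_Description",
#             "id": "<uuid4 string>",
#             "name": "GDC_LIST1",
#             "ranges": ["10.2.0.1", "10.2.0.0/24", "10.2.0.1-10.2.0.9"]
#         }
#     ]
# }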
try:
    opts, args = getopt.getopt(sys.argv[1:], "g:j:f:i:d:l:h", ['gdc=', 'json=', 'function=', 'ip=', 'desc=', 'listofips=', 'help'])
except getopt.GetoptError:
print('Error - Format should be - gdc.py -g <Generic_Data_Center_Name> -j <Json_File> -f <AddGDC/DelGDC/AddIP/DelIP> -i <ip> -l <list_of_ip_in_File> -d <GDC_Description>')
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
print('Format should be - gdc.py -g <Generic_Data_Center_Name> -j <Json_File> -f <AddGDC/DelGDC/AddIP/DelIP> -i <ip> -l <list_of_ip_in_File> -d <GDC_Description>')
sys.exit()
elif opt in ("-g", "--gdc"):
gdc = arg
elif opt in ("-f", "--function"):
function = arg
elif opt in ("-j", "--json"):
jsonfile = arg
elif opt in ('-i', '--ip'):
ip = arg
elif opt in ('-d', '--desc'):
desc = arg
elif opt in ('-l', '--listofips'):
listofips = arg
### Functions
# Function to Remove Duplicates - Used to make sure IP's are unique
def remove_dupe_dicts(l):
list_of_strings = [
json.dumps(d, sort_keys=True)
for d in l
]
list_of_strings = set(list_of_strings)
return [
json.loads(s)
for s in list_of_strings
]
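# Illustrative example (not in the original): because the entries are round-tripped
# through a set of JSON strings, duplicates are dropped but order is not preserved,
# e.g. remove_dupe_dicts(["10.2.0.1", "10.2.0.1", "10.2.0.2"]) -> ["10.2.0.1", "10.2.0.2"]
# in some order.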
# Function to Check for name in json
def gdc_exist(gdc,jsondata):
match = False
for dc in jsondata:
if dc["name"] == gdc:
match = True
return match
# Function to check if JSON file exists
def fileexists(fn):
try:
open(fn,"r")
except IOError:
print('File: %s - specified does not appear to exist' % fn)
sys.exit()
# Function to check for valid ip address
|
range = checkip.split("-")
# Check if range ip 1 is less than 2
ip = (ipaddress.ip_address(range[0]) < ipaddress.ip_address(range[1]))
if ip == True:
return
else:
print('address/netmask is invalid: %s' % checkip)
print('If adding a new Generic Data Center Object an IP has to be defined!')
sys.exit()
try:
ip = ipaddress.ip_address(checkip)
except ValueError:
try:
ip = ipaddress.ip_network(checkip)
except ValueError:
print('address/netmask is invalid: %s' % checkip)
print('If adding a new Generic Data Center Object an IP has to be defined!')
sys.exit()
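# Illustrative behaviour (not in the original): check_ip("10.2.0.1"),
# check_ip("10.2.0.0/24") and check_ip("10.2.0.1-10.2.0.9") all return quietly,
# while an unparseable value such as check_ip("10.2.0.999") prints an error and exits.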
#### Verify that GDC was passed from CLI ####
if not gdc:
print("Generic Data Center was not passed as a flag to the command. Include -g <Data_Center_Name>")
sys.exit()
#### Add IP to Data Center ####
if function == "AddIP":
filecheck = fileexists(jsonfile)
obj = json.load(open(jsonfile))
# Check and see if the name of the Data Center exists
match = gdc_exist(gdc,obj['objects'])
if match == False:
print('Data Center Object : %s was not found in file : %s' % (gdc,jsonfile))
print('No updates were made')
sys.exit()
# Check to see if this is a list of ips from a file
if not listofips:
# Add an IP to the list
check_ip(ip)
for item in obj['objects']:
if item["name"] == gdc:
item['ranges'].append(ip)
item['ranges'] = remove_dupe_dicts(item['ranges'])
else:
# Read list of ip addresses from file and extend
filecheck = fileexists(listofips)
iplist = {}
with open(listofips) as f:
iplist = f.read().splitlines()
for checkip in iplist:
check_ip(checkip)
for item in obj['objects']:
if item["name"] == gdc:
item['ranges'].extend(iplist)
item['ranges'] = remove_dupe_dicts(item['ranges'])
# Output the updated file with pretty JSON
open(jsonfile, "w").write(
json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
)
#### Remove IP from Data Center ####
if function == "DelIP":
filecheck = fileexists(jsonfile)
obj = json.load(open(jsonfile))
# Check and see if the name of the Data Center exists
match = gdc_exist(gdc,obj['objects'])
if match == False:
print('Data Center Object : %s was not found in file : %s' % (gdc,jsonfile))
print('No updates were made')
sys.exit()
item = obj['objects']
if not listofips:
check_ip(ip)
for item in obj['objects']:
if item["name"] == gdc:
for a in item['ranges'][:]:
if (a == ip):
item['ranges'].remove(a)
else:
# Read list of ip addresses from file and extend
filecheck = fileexists(listofips)
iplist = {}
with open(listofips) as f:
iplist = f.read().splitlines()
for checkip in iplist:
check_ip(checkip)
for item in obj['objects']:
if item["name"] == gdc:
for t in iplist:
try:
item['ranges'].remove(t)
except:
print('IP address %s is not in the file %s.' % (t, listofips))
item['ranges'] = remove_dupe_dicts(item['ranges'])
# Output the updated file with pretty JSON
open(jsonfile, "w").write(
json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
)
#### Add Data Center ####
if function == "AddGDC":
filecheck = fileexists(jsonfile)
obj = json.load(open(jsonfile))
item = obj['objects']
uuid = uuid.uuid4()
# Make sure Description is set
try:
desc
except NameError:
print("Description was not provided as a paramater, please use -d to add the description while adding a new Data Center")
sys.exit()
# Check and see if the name of the Data Center already exists
match = gdc_exist(gdc,obj['objects'])
if match == True:
print('Data Center Object : %s already exists in file : %s' % (gdc,jsonfile))
print('No updates were made')
sys.exit()
# Add GDC data to JSON
item = obj['objects']
add = {"description": desc,
"id": str(uuid),
"name": gdc,
"ranges": []}
item.append(add)
# Check to see if this is a list of ips from a file
if not listofips:
# Add an IP to the list
check_ip(ip)
for item in obj['objects']:
if item["name"] == gdc:
item['ranges'].append(ip)
item['ranges'] = remove_dupe_dicts(item['ranges'])
else:
# Read list of ip addresses from file and extend
filecheck = fileexists(listofips)
iplist = {}
with open(listofips) as f:
iplist = f.read().splitlines()
for checkip in iplist:
check_ip(checkip)
for item in obj['objects']:
if item["name"] == gdc:
item['ranges'].extend(iplist)
item['ranges'] = remove_dupe_dicts(item['ranges'])
# Output the updated file with pretty JSON
open(jsonfile, "w").write(
json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
)
#### Delete Data Center ####
if function == "DelGDC":
filecheck = fileexists(jsonfile)
obj = json.load(open(jsonfile))
# Check if Data Center exists before deletion
match = gdc_exist(gdc,obj['objects'])
if match == False:
print('Data Center Object : %s does not exist in file : %s' % (gdc,jsonfile))
print('No updates were made')
sys.exit()
for i in range(len(obj['objects'])):
if obj['objects'][i]['name'] == gdc:
obj['objects'].pop(i)
break
open(jsonfile, "w").write(
json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
)
|
def check_ip(checkip):
# Check if range is provided by a dash in the ip address
isrange = ("-" in checkip)
if isrange == True:
|
extract-types.js
|
import fs from 'fs';
import ts from 'typescript';
import prettier from 'prettier';
/** @typedef {{ name: string, comment: string, snippet: string }} Extracted */
/** @type {Array<{ name: string, comment: string, exports: Extracted[], types: Extracted[] }>} */
const modules = [];
/**
* @param {string} code
* @param {ts.NodeArray<ts.Statement>} statements
*/
function
|
(code, statements) {
/** @type {Extracted[]} */
const exports = [];
/** @type {Extracted[]} */
const types = [];
for (const statement of statements) {
if (
ts.isClassDeclaration(statement) ||
ts.isInterfaceDeclaration(statement) ||
ts.isTypeAliasDeclaration(statement) ||
ts.isModuleDeclaration(statement) ||
ts.isVariableStatement(statement) ||
ts.isFunctionDeclaration(statement)
) {
const name_node = ts.isVariableStatement(statement)
? statement.declarationList.declarations[0]
: statement;
// @ts-ignore no idea why it's complaining here
const name = name_node.name?.escapedText;
let start = statement.pos;
let comment = '';
// @ts-ignore i think typescript is bad at typescript
if (statement.jsDoc) {
// @ts-ignore
comment = statement.jsDoc[0].comment;
// @ts-ignore
start = statement.jsDoc[0].end;
}
const i = code.indexOf('export', start);
start = i + 6;
const snippet = prettier.format(code.slice(start, statement.end).trim(), {
parser: 'typescript',
printWidth: 60,
useTabs: true,
singleQuote: true,
trailingComma: 'none'
});
const collection =
ts.isVariableStatement(statement) || ts.isFunctionDeclaration(statement) ? exports : types;
collection.push({ name, comment, snippet });
} else {
// console.log(statement.kind);
}
}
types.sort((a, b) => (a.name < b.name ? -1 : 1));
exports.sort((a, b) => (a.name < b.name ? -1 : 1));
return { types, exports };
}
{
const code = fs.readFileSync('types/index.d.ts', 'utf-8');
const node = ts.createSourceFile('index.d.ts', code, ts.ScriptTarget.Latest);
modules.push({
name: '@sveltejs/kit',
comment: 'The following types can be imported from `@sveltejs/kit`:',
...get_types(code, node.statements)
});
}
{
const code = fs.readFileSync('types/private.d.ts', 'utf-8');
const node = ts.createSourceFile('private.d.ts', code, ts.ScriptTarget.Latest);
modules.push({
name: 'Additional types',
comment:
'The following are referenced by the public types documented above, but cannot be imported directly:',
...get_types(code, node.statements)
});
}
modules.push({
name: '$lib',
comment:
'This is a simple alias to `src/lib`, or whatever directory is specified as [`config.kit.files.lib`](/docs/configuration#files). It allows you to access common components and utility modules without `../../../../` nonsense.',
exports: [],
types: []
});
{
const code = fs.readFileSync('types/ambient.d.ts', 'utf-8');
const node = ts.createSourceFile('ambient.d.ts', code, ts.ScriptTarget.Latest);
for (const statement of node.statements) {
if (ts.isModuleDeclaration(statement)) {
// @ts-ignore
const name = statement.name.text || statement.name.escapedText;
// @ts-ignore
const comment = statement.jsDoc?.[0].comment ?? '';
modules.push({
name,
comment,
// @ts-ignore
...get_types(code, statement.body.statements)
});
}
}
}
modules.sort((a, b) => (a.name < b.name ? -1 : 1));
fs.writeFileSync(
'../../documentation/types.js',
`
/* This file is generated by running \`node scripts/extract-types.js\`
in the packages/kit directory — do not edit it */
export const modules = ${JSON.stringify(modules, null, ' ')};
`.trim()
);
|
get_types
|
integration_test.go
|
// +build integration
package test
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestLicense(t *testing.T) {
assert.True(t, true)
|
}
|
|
config.rs
|
pub mod config {
use clap::{Arg, App};
#[derive(Clone, Debug)]
pub struct Config {
pub matrix_server: String,
pub matrix_password: String,
pub lnbits_url: String,
pub lnbits_x_api_key: String,
pub database_url: String,
pub debug_level: String,
pub donate_user: String,
pub btc_donation_address: String
}
impl Config {
pub fn new(matrix_server: &str,
matrix_password: &str,
lnbits_url: &str,
lnbits_x_api_key: &str,
database_url: &str,
debug_level: &str,
donate_user: &str,
btc_donation_address: &str) -> Config {
Config {
matrix_server: matrix_server.to_string(),
matrix_password: matrix_password.to_string(),
lnbits_url: lnbits_url.to_string(),
lnbits_x_api_key: lnbits_x_api_key.to_string(),
database_url: database_url.to_string(),
debug_level: debug_level.to_string(),
donate_user: donate_user.to_string(),
btc_donation_address: btc_donation_address.to_string()
}
}
}
pub fn
|
() -> Config {
let args = wild::args_os();
let args = argfile::expand_args_from(
args,
argfile::parse_fromfile,
argfile::PREFIX,
).unwrap();
let matches = App::new("LN-Matrix-Bot")
.version("0.1.0")
.author("AE")
.about("LN-Matrix-Bot")
.arg(Arg::with_name("matrix-server")
.long("matrix-server")
.takes_value(true)
.required(true)
.help("Server"))
.arg(Arg::with_name("matrix-password")
.long("matrix-password")
.takes_value(true)
.required(true)
.help("Bot password"))
.arg(Arg::with_name("lnbits-url")
.long("lnbits-url")
.takes_value(true)
.required(true)
.help("lnbits url"))
.arg(Arg::with_name("lnbits-x-api-key")
.long("lnbits-x-api-key")
.takes_value(true)
.required(true)
.help("lnbits x api key"))
.arg(Arg::with_name("database-url")
.long("database-url")
.takes_value(true)
.required(true)
.help("database url"))
.arg(Arg::with_name("debug-level")
.long("debug-level")
.takes_value(true)
.default_value("Info")
.required(false)
.help("debugging level"))
.arg(Arg::with_name("donate-user")
.long("donate-user")
.takes_value(true)
.required(true)
.help("The user receiving any donations"))
.arg(Arg::with_name("btc-donation-address")
.long("btc-donation-address")
.takes_value(true)
.default_value("bc1q72dzh04fwxx780w05twtmn5fxzegpawdn5zg3g")
.required(false)
.help("The BTC address to display for donations"))
.get_matches_from(args);
let matrix_server = matches.value_of("matrix-server").unwrap();
let matrix_password = matches.value_of("matrix-password").unwrap();
let lnbits_url = matches.value_of("lnbits-url").unwrap();
let lnbits_x_api_key = matches.value_of("lnbits-x-api-key").unwrap();
let database_url = matches.value_of("database-url").unwrap();
let debug_level = matches.value_of("debug-level").unwrap();
let donate_user = matches.value_of("donate-user").unwrap();
let btc_donation_address = matches.value_of("btc-donation-address").unwrap();
Config::new(matrix_server,
matrix_password,
lnbits_url,
lnbits_x_api_key,
database_url,
debug_level,
donate_user,
btc_donation_address)
}
}
|
config_from_cmd
|
unsized5.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test `Sized?` types not allowed in fields (except the last one).
struct S1<X: ?Sized> {
f1: X, //~ ERROR `core::kinds::Sized` is not implemented
f2: int,
}
struct S2<X: ?Sized> {
f: int,
g: X, //~ ERROR `core::kinds::Sized` is not implemented
h: int,
}
struct S3 {
f: str, //~ ERROR `core::kinds::Sized` is not implemented
g: [uint]
}
struct S4 {
f: str, //~ ERROR `core::kinds::Sized` is not implemented
g: uint
}
enum E<X: ?Sized> {
V1(X, int), //~ERROR `core::kinds::Sized` is not implemented
}
enum F<X: ?Sized> {
V2{f1: X, f: int}, //~ERROR `core::kinds::Sized` is not implemented
}
pub fn
|
() {
}
|
main
|
blockActions.js
|
"use strict";
|
});
exports.renderBlockActions = renderBlockActions;
var _ui = require("@sanity/ui");
var _react = _interopRequireWildcard(require("react"));
var _icons = require("@sanity/icons");
var _portableTextEditor = require("@sanity/portable-text-editor");
function _getRequireWildcardCache(nodeInterop) { if (typeof WeakMap !== "function") return null; var cacheBabelInterop = new WeakMap(); var cacheNodeInterop = new WeakMap(); return (_getRequireWildcardCache = function _getRequireWildcardCache(nodeInterop) { return nodeInterop ? cacheNodeInterop : cacheBabelInterop; })(nodeInterop); }
function _interopRequireWildcard(obj, nodeInterop) { if (!nodeInterop && obj && obj.__esModule) { return obj; } if (obj === null || typeof obj !== "object" && typeof obj !== "function") { return { default: obj }; } var cache = _getRequireWildcardCache(nodeInterop); if (cache && cache.has(obj)) { return cache.get(obj); } var newObj = {}; var hasPropertyDescriptor = Object.defineProperty && Object.getOwnPropertyDescriptor; for (var key in obj) { if (key !== "default" && Object.prototype.hasOwnProperty.call(obj, key)) { var desc = hasPropertyDescriptor ? Object.getOwnPropertyDescriptor(obj, key) : null; if (desc && (desc.get || desc.set)) { Object.defineProperty(newObj, key, desc); } else { newObj[key] = obj[key]; } } } newObj.default = obj; if (cache) { cache.set(obj, newObj); } return newObj; }
function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); enumerableOnly && (symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; })), keys.push.apply(keys, symbols); } return keys; }
function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = null != arguments[i] ? arguments[i] : {}; i % 2 ? ownKeys(Object(source), !0).forEach(function (key) { _defineProperty(target, key, source[key]); }) : Object.getOwnPropertyDescriptors ? Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)) : ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } return target; }
function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }
var BlockActions = /*#__PURE__*/(0, _react.memo)(function BlockActions(props) {
var block = props.block,
insert = props.insert;
var handleDuplicate = (0, _react.useCallback)(() => {
var dupBlock = _objectSpread(_objectSpread({}, block), {}, {
_key: (0, _portableTextEditor.keyGenerator)()
});
if (dupBlock.children) {
dupBlock.children = dupBlock.children.map(c => _objectSpread(_objectSpread({}, c), {}, {
_key: (0, _portableTextEditor.keyGenerator)()
}));
}
insert(dupBlock);
}, [block, insert]);
return /*#__PURE__*/_react.default.createElement(_ui.Tooltip, {
content: /*#__PURE__*/_react.default.createElement(_ui.Box, {
padding: 2
}, /*#__PURE__*/_react.default.createElement(_ui.Text, {
size: 1
}, "Duplicate")),
placement: "right",
portal: "default"
}, /*#__PURE__*/_react.default.createElement(_ui.Button, {
"aria-label": "Duplicate",
fontSize: 1,
icon: _icons.CopyIcon,
onClick: handleDuplicate,
padding: 2,
mode: "bleed"
}));
});
function renderBlockActions(_ref) {
var block = _ref.block,
insert = _ref.insert;
return /*#__PURE__*/_react.default.createElement(BlockActions, {
block: block,
insert: insert
});
}
|
Object.defineProperty(exports, "__esModule", {
value: true
|
deps.ts
|
export * as path from "https://deno.land/[email protected]/path/mod.ts";
export {
|
export type {
compileOptions,
PreprocessorGroup,
} from "https://deno.land/x/[email protected]/compiler/types.ts";
|
compile,
preprocess,
} from "https://deno.land/x/[email protected]/compiler/core.js";
|
header.go
|
package headerHolder
import (
"bytes"
"crypto/cipher"
"encoding/base64"
"fmt"
"github.com/lunixbochs/struc"
"github.com/secure-io/siv-go"
"github.com/xiaokangwang/VLite/proto"
"golang.org/x/crypto/sha3"
"io"
"io/ioutil"
)
func NewHttpHeaderHolderProcessor(password string) *HttpHeaderHolderProcessor
|
func NewHttpHeaderHolderProcessor2(password string, salt string) *HttpHeaderHolderProcessor {
hp := &HttpHeaderHolderProcessor{password: password, salt: salt}
hp.prepare()
return hp
}
type HttpHeaderHolderProcessor struct {
password string
salt string
}
func (pc *HttpHeaderHolderProcessor) prepare() cipher.AEAD {
hasher := sha3.NewCShake128(nil, []byte(pc.salt))
hasher.Write([]byte(pc.password))
keyin := make([]byte, 64)
io.ReadFull(hasher, keyin[:])
aeadBlock, err2 := siv.NewCMAC(keyin)
if err2 != nil {
fmt.Println(err2.Error())
}
return aeadBlock
}
func (pc *HttpHeaderHolderProcessor) Open(input string) *proto.HttpHeaderHolder {
inputb := bytes.NewBufferString(input)
decodeReader := base64.NewDecoder(base64.URLEncoding, inputb)
cont, err := ioutil.ReadAll(decodeReader)
if err != nil {
fmt.Println(err)
return nil
}
opened, erro := pc.prepare().Open(nil, nil, cont, nil)
if erro != nil {
		fmt.Println(erro)
return nil
}
openedReader := bytes.NewReader(opened)
HeaderHolder := &proto.HttpHeaderHolder{}
err = struc.Unpack(openedReader, HeaderHolder)
if err != nil {
fmt.Println(err)
return nil
}
return HeaderHolder
}
func (pc *HttpHeaderHolderProcessor) Seal(src proto.HttpHeaderHolder) string {
var err error
buf := bytes.NewBuffer(nil)
err = struc.Pack(buf, &src)
if err != nil {
fmt.Println(err)
return ""
}
sealed := pc.prepare().Seal(nil, nil, buf.Bytes(), nil)
obuf := bytes.NewBuffer(nil)
	encoder := base64.NewEncoder(base64.URLEncoding, obuf)
	_, err = encoder.Write(sealed)
if err != nil {
fmt.Println(err)
return ""
}
	err = encoder.Close()
if err != nil {
fmt.Println(err)
return ""
}
return obuf.String()
}
|
{
hp := &HttpHeaderHolderProcessor{password: password, salt: "HTTPHeaderSecret"}
hp.prepare()
return hp
}
|
index.tsx
|
/**
*
* WorkoutHistory
*
*/
import * as React from 'react';
import { useSelector, useDispatch } from 'react-redux';
import styled from 'styled-components/macro';
import uniqid from 'uniqid';
import { useHistory } from 'react-router-dom';
import { useInjectReducer, useInjectSaga } from 'utils/redux-injectors';
import { reducer, sliceKey, actions } from './slice';
import { selectWorkoutHistory } from './selectors';
import { workoutHistorySaga } from './saga';
import { selectExercises } from '../Exercises/selectors';
import { ButtonChip } from '../../components/ButtonChip';
import { Workout } from '../AddWorkout/types';
import { ExerciseInWorkout } from '../EditExerciseInWorkout/types';
interface Props {}
export function
|
(props: Props) {
useInjectReducer({ key: sliceKey, reducer: reducer });
useInjectSaga({ key: sliceKey, saga: workoutHistorySaga });
const workoutHistory = useSelector(selectWorkoutHistory);
const allExercises = useSelector(selectExercises);
const dispatch = useDispatch();
const history = useHistory();
React.useEffect(() => {
dispatch(actions.fetchWorkoutsAction());
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const clickEdit = (workout: Workout): void => {
dispatch(actions.editWorkoutAction(workout));
history.push('/dashboard/workoutEditor');
};
const exerciseItems = (x: Array<ExerciseInWorkout>) =>
x.length < 1 ? (
<div>none</div>
) : (
x.map(exercise => {
const exerciseDetails = allExercises.find(y => exercise.id === y._id);
if (exerciseDetails) {
return <li key={exercise.instanceId}>{exerciseDetails.name}</li>;
} else {
return <li key={uniqid()}>Exercise Deleted</li>;
}
})
);
const workoutEntries = workoutHistory.map(x => (
<WorkoutEntries key={x._id}>
<div>Date: {x.date}</div>
<div>Name: {x.name}</div>
<div>Exercises:</div>
<ol>{exerciseItems(x.exercises)}</ol>
<ButtonDiv>
<ButtonChip text="Edit" clickHandler={() => clickEdit(x)} />
</ButtonDiv>
</WorkoutEntries>
));
return <>{workoutEntries}</>;
}
const WorkoutEntries = styled.div`
width: 250px;
display: flex;
flex-direction: column;
align-items: flex-start;
border: 1px solid var(--light-500);
border-radius: 5px;
margin: 5px;
padding: 5px;
text-align: left;
`;
const ButtonDiv = styled.div`
width: 100%;
display: flex;
flex-direction: row;
align-items: center;
margin: 10px 0px;
`;
|
WorkoutHistory
|
alignments.py
|
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from sam_alignment_reconstructor.pairwise import pairwise_alignment
from sam_alignment_reconstructor.pairwise import cigar_split
from restui.lib.external import ensembl_sequence
from restui.lib.external import ensembl_protein
def
|
(mapping):
"""
Fetch the pairwise sequence alignment for a given mapping
between a mapped sequence in Ensembl and UniProt.
Parameters
----------
mapping
Returns
-------
dict
mapping_id : int
alignments : list
List of the alignments and the matching strings
"""
pairwise_alignments = []
enst = mapping.transcript.enst_id
uniprot_id = mapping.uniprot.uniprot_acc
for alignment in mapping.alignments.all():
if alignment.alignment_run.score1_type == 'identity':
pairwise_alignments.append(
_fetch_alignment(alignment, enst, uniprot_id)
)
# Break out of the loop, we're done
break
elif (
alignment.alignment_run.score1_type == 'perfect_match' and
alignment.score1 == 1
):
pairwise_alignments.append(
_fetch_alignment(alignment, enst, uniprot_id)
)
return {
'mapping_id': mapping.mapping_id,
'alignments': pairwise_alignments
}
def _fetch_alignment(alignment, enst, uniprot_id):
"""
Parameters
----------
alignment
enst : str
uniprot_id : str
Returns
-------
pw_alignment : dict
Alignment object
"""
ens_release = alignment.alignment_run.ensembl_release
ensp = ensembl_protein(enst, ens_release)
seq = ensembl_sequence(ensp, ens_release)
ensembl_seq = seq
uniprot_seq = seq
match_str = '|' * len(seq)
alignment_type = 'perfect_match'
if alignment.alignment_run.score1_type == 'identity':
cigarplus = alignment.pairwise.cigarplus
mdz = alignment.pairwise.mdz
if mdz.startswith('MD:Z:'):
mdz = mdz[len('MD:Z:'):]
uniprot_seq, match_str, ensembl_seq = pairwise_alignment(seq, cigarplus, mdz)
alignment_type = 'identity'
pw_alignment = {
'uniprot_alignment': ensembl_seq,
'ensembl_alignment': uniprot_seq,
'match_str': match_str,
'alignment_id': alignment.alignment_id,
'ensembl_release': ens_release,
'ensembl_id': ensp,
'uniprot_id': uniprot_id,
'alignment_type': alignment_type
}
return pw_alignment
def calculate_difference(cigar):
"""
Calculate the difference between 2 sequences based on the cigar string
Parameters
----------
cigar : str
Returns
-------
diff_count : int
"""
diff_count = 0
for c, op in cigar_split(cigar):
if op in ('I', 'D', 'X'):
diff_count += c
return diff_count
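calculate_difference above simply sums the lengths of insertion (I), deletion (D) and mismatch (X) operations in a CIGAR string. As a self-contained illustration of that counting logic, a hypothetical Go sketch (not part of this Python module) could look like this:
package main

import (
	"fmt"
	"strconv"
	"unicode"
)

// calculateDifference sums the lengths of I, D and X operations in a CIGAR string.
func calculateDifference(cigar string) int {
	diff, num := 0, ""
	for _, r := range cigar {
		if unicode.IsDigit(r) {
			num += string(r)
			continue
		}
		n, _ := strconv.Atoi(num) // empty or malformed counts contribute 0
		if r == 'I' || r == 'D' || r == 'X' {
			diff += n
		}
		num = ""
	}
	return diff
}

func main() {
	fmt.Println(calculateDifference("10M2I5M1D3X")) // prints 6 (2 + 1 + 3)
}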
|
fetch_pairwise
|
mod.rs
|
mod boolean;
#[cfg(feature = "dtype-categorical")]
mod categorical;
#[cfg(any(
feature = "dtype-datetime",
feature = "dtype-date",
feature = "dtype-time"
))]
mod dates_time;
mod floats;
mod list;
#[cfg(feature = "object")]
mod object;
mod utf8;
#[cfg(feature = "object")]
use std::any::Any;
use super::private;
use super::IntoSeries;
use super::SeriesTrait;
use crate::chunked_array::comparison::*;
#[cfg(feature = "rolling_window")]
use crate::chunked_array::ops::rolling_window::RollingOptions;
use crate::chunked_array::{
ops::{
aggregate::{ChunkAggSeries, VarAggSeries},
compare_inner::{IntoPartialEqInner, IntoPartialOrdInner, PartialEqInner, PartialOrdInner},
explode::ExplodeByOffsets,
},
AsSinglePtr, ChunkIdIter,
};
use crate::fmt::FmtList;
#[cfg(feature = "pivot")]
use crate::frame::groupby::pivot::*;
use crate::frame::groupby::*;
use crate::frame::hash_join::{HashJoin, ZipOuterJoinColumn};
use crate::prelude::*;
#[cfg(feature = "checked_arithmetic")]
use crate::series::arithmetic::checked::NumOpsDispatchChecked;
use ahash::RandomState;
use arrow::array::ArrayRef;
use std::borrow::Cow;
use std::ops::Deref;
use std::ops::{BitAnd, BitOr, BitXor};
// Utility wrapper struct
pub(crate) struct SeriesWrap<T>(pub T);
impl<T> From<ChunkedArray<T>> for SeriesWrap<ChunkedArray<T>> {
fn from(ca: ChunkedArray<T>) -> Self {
SeriesWrap(ca)
}
}
impl<T> Deref for SeriesWrap<ChunkedArray<T>> {
type Target = ChunkedArray<T>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
macro_rules! impl_dyn_series {
($ca: ident) => {
impl IntoSeries for $ca {
fn into_series(self) -> Series {
Series(Arc::new(SeriesWrap(self)))
}
}
impl private::PrivateSeries for SeriesWrap<$ca> {
fn _field(&self) -> Cow<Field> {
Cow::Borrowed(self.0.ref_field())
}
fn _dtype(&self) -> &DataType {
self.0.ref_field().data_type()
}
fn explode_by_offsets(&self, offsets: &[i64]) -> Series {
self.0.explode_by_offsets(offsets)
}
#[cfg(feature = "rolling_window")]
fn _rolling_sum(&self, options: RollingOptions) -> Result<Series> {
self.0.rolling_sum(options)
}
#[cfg(feature = "rolling_window")]
fn _rolling_min(&self, options: RollingOptions) -> Result<Series> {
self.0.rolling_min(options)
}
#[cfg(feature = "rolling_window")]
fn _rolling_max(&self, options: RollingOptions) -> Result<Series> {
self.0.rolling_max(options)
}
#[cfg(feature = "rolling_window")]
fn _rolling_std(&self, options: RollingOptions) -> Result<Series> {
let s = self.cast(&DataType::Float64).unwrap();
|
}
#[cfg(feature = "rolling_window")]
fn _rolling_mean(&self, options: RollingOptions) -> Result<Series> {
let s = self.cast(&DataType::Float64).unwrap();
s.f64().unwrap().rolling_mean(options)
}
#[cfg(feature = "rolling_window")]
fn _rolling_var(&self, options: RollingOptions) -> Result<Series> {
let s = self.cast(&DataType::Float64).unwrap();
s.f64().unwrap().rolling_var(options)
}
#[cfg(feature = "cum_agg")]
fn _cummax(&self, reverse: bool) -> Series {
self.0.cummax(reverse).into_series()
}
#[cfg(feature = "cum_agg")]
fn _cummin(&self, reverse: bool) -> Series {
self.0.cummin(reverse).into_series()
}
#[cfg(feature = "cum_agg")]
fn _cumsum(&self, reverse: bool) -> Series {
self.0.cumsum(reverse).into_series()
}
#[cfg(feature = "cum_agg")]
fn _cumprod(&self, reverse: bool) -> Series {
self.0.cumprod(reverse).into_series()
}
#[cfg(feature = "asof_join")]
fn join_asof(&self, other: &Series) -> Result<Vec<Option<u32>>> {
self.0.join_asof(other)
}
fn set_sorted(&mut self, reverse: bool) {
self.0.set_sorted(reverse)
}
unsafe fn equal_element(
&self,
idx_self: usize,
idx_other: usize,
other: &Series,
) -> bool {
self.0.equal_element(idx_self, idx_other, other)
}
#[cfg(feature = "zip_with")]
fn zip_with_same_type(&self, mask: &BooleanChunked, other: &Series) -> Result<Series> {
ChunkZip::zip_with(&self.0, mask, other.as_ref().as_ref())
.map(|ca| ca.into_series())
}
fn into_partial_eq_inner<'a>(&'a self) -> Box<dyn PartialEqInner + 'a> {
(&self.0).into_partial_eq_inner()
}
fn into_partial_ord_inner<'a>(&'a self) -> Box<dyn PartialOrdInner + 'a> {
(&self.0).into_partial_ord_inner()
}
fn vec_hash(&self, random_state: RandomState) -> AlignedVec<u64> {
self.0.vec_hash(random_state)
}
fn vec_hash_combine(&self, build_hasher: RandomState, hashes: &mut [u64]) {
self.0.vec_hash_combine(build_hasher, hashes)
}
fn agg_mean(&self, groups: &[(u32, Vec<u32>)]) -> Option<Series> {
self.0.agg_mean(groups)
}
fn agg_min(&self, groups: &[(u32, Vec<u32>)]) -> Option<Series> {
self.0.agg_min(groups)
}
fn agg_max(&self, groups: &[(u32, Vec<u32>)]) -> Option<Series> {
self.0.agg_max(groups)
}
fn agg_sum(&self, groups: &[(u32, Vec<u32>)]) -> Option<Series> {
self.0.agg_sum(groups)
}
fn agg_first(&self, groups: &[(u32, Vec<u32>)]) -> Series {
self.0.agg_first(groups)
}
fn agg_last(&self, groups: &[(u32, Vec<u32>)]) -> Series {
self.0.agg_last(groups)
}
fn agg_std(&self, groups: &[(u32, Vec<u32>)]) -> Option<Series> {
self.0.agg_std(groups)
}
fn agg_var(&self, groups: &[(u32, Vec<u32>)]) -> Option<Series> {
self.0.agg_var(groups)
}
fn agg_n_unique(&self, groups: &[(u32, Vec<u32>)]) -> Option<UInt32Chunked> {
self.0.agg_n_unique(groups)
}
fn agg_list(&self, groups: &[(u32, Vec<u32>)]) -> Option<Series> {
self.0.agg_list(groups)
}
fn agg_quantile(&self, groups: &[(u32, Vec<u32>)], quantile: f64) -> Option<Series> {
self.0.agg_quantile(groups, quantile)
}
fn agg_median(&self, groups: &[(u32, Vec<u32>)]) -> Option<Series> {
self.0.agg_median(groups)
}
#[cfg(feature = "lazy")]
fn agg_valid_count(&self, groups: &[(u32, Vec<u32>)]) -> Option<Series> {
self.0.agg_valid_count(groups)
}
#[cfg(feature = "pivot")]
fn pivot<'a>(
&self,
pivot_series: &'a Series,
keys: Vec<Series>,
groups: &[(u32, Vec<u32>)],
agg_type: PivotAgg,
) -> Result<DataFrame> {
self.0.pivot(pivot_series, keys, groups, agg_type)
}
#[cfg(feature = "pivot")]
fn pivot_count<'a>(
&self,
pivot_series: &'a Series,
keys: Vec<Series>,
groups: &[(u32, Vec<u32>)],
) -> Result<DataFrame> {
self.0.pivot_count(pivot_series, keys, groups)
}
fn hash_join_inner(&self, other: &Series) -> Vec<(u32, u32)> {
HashJoin::hash_join_inner(&self.0, other.as_ref().as_ref())
}
fn hash_join_left(&self, other: &Series) -> Vec<(u32, Option<u32>)> {
HashJoin::hash_join_left(&self.0, other.as_ref().as_ref())
}
fn hash_join_outer(&self, other: &Series) -> Vec<(Option<u32>, Option<u32>)> {
HashJoin::hash_join_outer(&self.0, other.as_ref().as_ref())
}
fn zip_outer_join_column(
&self,
right_column: &Series,
opt_join_tuples: &[(Option<u32>, Option<u32>)],
) -> Series {
ZipOuterJoinColumn::zip_outer_join_column(&self.0, right_column, opt_join_tuples)
}
fn subtract(&self, rhs: &Series) -> Result<Series> {
NumOpsDispatch::subtract(&self.0, rhs)
}
fn add_to(&self, rhs: &Series) -> Result<Series> {
NumOpsDispatch::add_to(&self.0, rhs)
}
fn multiply(&self, rhs: &Series) -> Result<Series> {
NumOpsDispatch::multiply(&self.0, rhs)
}
fn divide(&self, rhs: &Series) -> Result<Series> {
NumOpsDispatch::divide(&self.0, rhs)
}
fn remainder(&self, rhs: &Series) -> Result<Series> {
NumOpsDispatch::remainder(&self.0, rhs)
}
fn group_tuples(&self, multithreaded: bool) -> GroupTuples {
IntoGroupTuples::group_tuples(&self.0, multithreaded)
}
#[cfg(feature = "sort_multiple")]
fn argsort_multiple(&self, by: &[Series], reverse: &[bool]) -> Result<UInt32Chunked> {
self.0.argsort_multiple(by, reverse)
}
fn str_value(&self, index: usize) -> Cow<str> {
// get AnyValue
Cow::Owned(format!("{}", self.get(index)))
}
}
impl SeriesTrait for SeriesWrap<$ca> {
#[cfg(feature = "rolling_window")]
fn rolling_apply(
&self,
_window_size: usize,
_f: &dyn Fn(&Series) -> Series,
) -> Result<Series> {
ChunkRollApply::rolling_apply(&self.0, _window_size, _f).map(|ca| ca.into_series())
}
#[cfg(feature = "interpolate")]
fn interpolate(&self) -> Series {
self.0.interpolate().into_series()
}
fn bitand(&self, other: &Series) -> Result<Series> {
let other = if other.len() == 1 {
Cow::Owned(other.cast(self.dtype())?)
} else {
Cow::Borrowed(other)
};
let other = self.0.unpack_series_matching_type(&other)?;
Ok(self.0.bitand(&other).into_series())
}
fn bitor(&self, other: &Series) -> Result<Series> {
let other = if other.len() == 1 {
Cow::Owned(other.cast(self.dtype())?)
} else {
Cow::Borrowed(other)
};
let other = self.0.unpack_series_matching_type(&other)?;
Ok(self.0.bitor(&other).into_series())
}
fn bitxor(&self, other: &Series) -> Result<Series> {
let other = if other.len() == 1 {
Cow::Owned(other.cast(self.dtype())?)
} else {
Cow::Borrowed(other)
};
let other = self.0.unpack_series_matching_type(&other)?;
Ok(self.0.bitxor(&other).into_series())
}
fn rename(&mut self, name: &str) {
self.0.rename(name);
}
fn chunk_lengths(&self) -> ChunkIdIter {
self.0.chunk_id()
}
fn name(&self) -> &str {
self.0.name()
}
fn chunks(&self) -> &Vec<ArrayRef> {
self.0.chunks()
}
fn shrink_to_fit(&mut self) {
self.0.shrink_to_fit()
}
fn i8(&self) -> Result<&Int8Chunked> {
if matches!(self.0.dtype(), DataType::Int8) {
unsafe { Ok(&*(self as *const dyn SeriesTrait as *const Int8Chunked)) }
} else {
Err(PolarsError::DataTypeMisMatch(
format!(
"cannot unpack Series: {:?} of type {:?} into i8",
self.name(),
self.dtype(),
)
.into(),
))
}
}
fn i16(&self) -> Result<&Int16Chunked> {
if matches!(self.0.dtype(), DataType::Int16) {
unsafe { Ok(&*(self as *const dyn SeriesTrait as *const Int16Chunked)) }
} else {
Err(PolarsError::DataTypeMisMatch(
format!(
"cannot unpack Series: {:?} of type {:?} into i16",
self.name(),
self.dtype(),
)
.into(),
))
}
}
fn i32(&self) -> Result<&Int32Chunked> {
if matches!(self.0.dtype(), DataType::Int32) {
unsafe { Ok(&*(self as *const dyn SeriesTrait as *const Int32Chunked)) }
} else {
Err(PolarsError::DataTypeMisMatch(
format!(
"cannot unpack Series: {:?} of type {:?} into i32",
self.name(),
self.dtype(),
)
.into(),
))
}
}
fn i64(&self) -> Result<&Int64Chunked> {
if matches!(self.0.dtype(), DataType::Int64) {
unsafe { Ok(&*(self as *const dyn SeriesTrait as *const Int64Chunked)) }
} else {
Err(PolarsError::DataTypeMisMatch(
format!(
"cannot unpack Series: {:?} of type {:?} into i64",
self.name(),
self.dtype(),
)
.into(),
))
}
}
fn f32(&self) -> Result<&Float32Chunked> {
if matches!(self.0.dtype(), DataType::Float32) {
unsafe { Ok(&*(self as *const dyn SeriesTrait as *const Float32Chunked)) }
} else {
Err(PolarsError::DataTypeMisMatch(
format!(
"cannot unpack Series: {:?} of type {:?} into f32",
self.name(),
self.dtype(),
)
.into(),
))
}
}
fn f64(&self) -> Result<&Float64Chunked> {
if matches!(self.0.dtype(), DataType::Float64) {
unsafe { Ok(&*(self as *const dyn SeriesTrait as *const Float64Chunked)) }
} else {
Err(PolarsError::DataTypeMisMatch(
format!(
"cannot unpack Series: {:?} of type {:?} into f64",
self.name(),
self.dtype(),
)
.into(),
))
}
}
fn u8(&self) -> Result<&UInt8Chunked> {
if matches!(self.0.dtype(), DataType::UInt8) {
unsafe { Ok(&*(self as *const dyn SeriesTrait as *const UInt8Chunked)) }
} else {
Err(PolarsError::DataTypeMisMatch(
format!(
"cannot unpack Series: {:?} of type {:?} into u8",
self.name(),
self.dtype(),
)
.into(),
))
}
}
fn u16(&self) -> Result<&UInt16Chunked> {
if matches!(self.0.dtype(), DataType::UInt16) {
unsafe { Ok(&*(self as *const dyn SeriesTrait as *const UInt16Chunked)) }
} else {
Err(PolarsError::DataTypeMisMatch(
format!(
"cannot unpack Series: {:?} of type {:?} into u16",
self.name(),
self.dtype(),
)
.into(),
))
}
}
fn u32(&self) -> Result<&UInt32Chunked> {
if matches!(self.0.dtype(), DataType::UInt32) {
unsafe { Ok(&*(self as *const dyn SeriesTrait as *const UInt32Chunked)) }
} else {
Err(PolarsError::DataTypeMisMatch(
format!(
"cannot unpack Series: {:?} of type {:?} into u32",
self.name(),
self.dtype(),
)
.into(),
))
}
}
fn u64(&self) -> Result<&UInt64Chunked> {
if matches!(self.0.dtype(), DataType::UInt64) {
unsafe { Ok(&*(self as *const dyn SeriesTrait as *const UInt64Chunked)) }
} else {
Err(PolarsError::DataTypeMisMatch(
format!(
"cannot unpack Series: {:?} of type {:?} into u64",
self.name(),
self.dtype(),
)
.into(),
))
}
}
fn append_array(&mut self, other: ArrayRef) -> Result<()> {
self.0.append_array(other)
}
fn slice(&self, offset: i64, length: usize) -> Series {
self.0.slice(offset, length).into_series()
}
fn append(&mut self, other: &Series) -> Result<()> {
if self.0.dtype() == other.dtype() {
// todo! add object
self.0.append(other.as_ref().as_ref());
Ok(())
} else {
Err(PolarsError::DataTypeMisMatch(
"cannot append Series; data types don't match".into(),
))
}
}
fn filter(&self, filter: &BooleanChunked) -> Result<Series> {
ChunkFilter::filter(&self.0, filter).map(|ca| ca.into_series())
}
fn mean(&self) -> Option<f64> {
self.0.mean()
}
fn median(&self) -> Option<f64> {
self.0.median()
}
fn take(&self, indices: &UInt32Chunked) -> Result<Series> {
let indices = if indices.chunks.len() > 1 {
Cow::Owned(indices.rechunk())
} else {
Cow::Borrowed(indices)
};
Ok(ChunkTake::take(&self.0, (&*indices).into())?.into_series())
}
fn take_iter(&self, iter: &mut dyn TakeIterator) -> Result<Series> {
Ok(ChunkTake::take(&self.0, iter.into())?.into_series())
}
fn take_every(&self, n: usize) -> Series {
self.0.take_every(n).into_series()
}
unsafe fn take_iter_unchecked(&self, iter: &mut dyn TakeIterator) -> Series {
ChunkTake::take_unchecked(&self.0, iter.into()).into_series()
}
unsafe fn take_unchecked(&self, idx: &UInt32Chunked) -> Result<Series> {
let idx = if idx.chunks.len() > 1 {
Cow::Owned(idx.rechunk())
} else {
Cow::Borrowed(idx)
};
Ok(ChunkTake::take_unchecked(&self.0, (&*idx).into()).into_series())
}
unsafe fn take_opt_iter_unchecked(&self, iter: &mut dyn TakeIteratorNulls) -> Series {
ChunkTake::take_unchecked(&self.0, iter.into()).into_series()
}
#[cfg(feature = "take_opt_iter")]
fn take_opt_iter(&self, iter: &mut dyn TakeIteratorNulls) -> Result<Series> {
Ok(ChunkTake::take(&self.0, iter.into())?.into_series())
}
fn len(&self) -> usize {
self.0.len()
}
fn rechunk(&self) -> Series {
ChunkOps::rechunk(&self.0).into_series()
}
fn head(&self, length: Option<usize>) -> Series {
self.0.head(length).into_series()
}
fn tail(&self, length: Option<usize>) -> Series {
self.0.tail(length).into_series()
}
fn expand_at_index(&self, index: usize, length: usize) -> Series {
ChunkExpandAtIndex::expand_at_index(&self.0, index, length).into_series()
}
fn cast(&self, data_type: &DataType) -> Result<Series> {
self.0.cast(data_type)
}
fn to_dummies(&self) -> Result<DataFrame> {
ToDummies::to_dummies(&self.0)
}
fn value_counts(&self) -> Result<DataFrame> {
ChunkUnique::value_counts(&self.0)
}
fn get(&self, index: usize) -> AnyValue {
self.0.get_any_value(index)
}
#[inline]
unsafe fn get_unchecked(&self, index: usize) -> AnyValue {
self.0.get_any_value_unchecked(index)
}
fn sort_in_place(&mut self, reverse: bool) {
ChunkSort::sort_in_place(&mut self.0, reverse);
}
fn sort(&self, reverse: bool) -> Series {
ChunkSort::sort(&self.0, reverse).into_series()
}
fn argsort(&self, reverse: bool) -> UInt32Chunked {
ChunkSort::argsort(&self.0, reverse)
}
fn null_count(&self) -> usize {
self.0.null_count()
}
fn unique(&self) -> Result<Series> {
ChunkUnique::unique(&self.0).map(|ca| ca.into_series())
}
fn n_unique(&self) -> Result<usize> {
ChunkUnique::n_unique(&self.0)
}
fn arg_unique(&self) -> Result<UInt32Chunked> {
ChunkUnique::arg_unique(&self.0)
}
fn arg_min(&self) -> Option<usize> {
ArgAgg::arg_min(&self.0)
}
fn arg_max(&self) -> Option<usize> {
ArgAgg::arg_max(&self.0)
}
fn is_null(&self) -> BooleanChunked {
self.0.is_null()
}
fn is_not_null(&self) -> BooleanChunked {
self.0.is_not_null()
}
fn is_unique(&self) -> Result<BooleanChunked> {
ChunkUnique::is_unique(&self.0)
}
fn is_duplicated(&self) -> Result<BooleanChunked> {
ChunkUnique::is_duplicated(&self.0)
}
fn reverse(&self) -> Series {
ChunkReverse::reverse(&self.0).into_series()
}
fn as_single_ptr(&mut self) -> Result<usize> {
self.0.as_single_ptr()
}
fn shift(&self, periods: i64) -> Series {
ChunkShift::shift(&self.0, periods).into_series()
}
fn fill_null(&self, strategy: FillNullStrategy) -> Result<Series> {
ChunkFillNull::fill_null(&self.0, strategy).map(|ca| ca.into_series())
}
fn sum_as_series(&self) -> Series {
ChunkAggSeries::sum_as_series(&self.0)
}
fn max_as_series(&self) -> Series {
ChunkAggSeries::max_as_series(&self.0)
}
fn min_as_series(&self) -> Series {
ChunkAggSeries::min_as_series(&self.0)
}
fn mean_as_series(&self) -> Series {
ChunkAggSeries::mean_as_series(&self.0)
}
fn median_as_series(&self) -> Series {
ChunkAggSeries::median_as_series(&self.0)
}
fn var_as_series(&self) -> Series {
VarAggSeries::var_as_series(&self.0)
}
fn std_as_series(&self) -> Series {
VarAggSeries::std_as_series(&self.0)
}
fn quantile_as_series(&self, quantile: f64) -> Result<Series> {
ChunkAggSeries::quantile_as_series(&self.0, quantile)
}
fn fmt_list(&self) -> String {
FmtList::fmt_list(&self.0)
}
fn clone_inner(&self) -> Arc<dyn SeriesTrait> {
Arc::new(SeriesWrap(Clone::clone(&self.0)))
}
#[cfg(feature = "random")]
#[cfg_attr(docsrs, doc(cfg(feature = "random")))]
fn sample_n(&self, n: usize, with_replacement: bool) -> Result<Series> {
self.0
.sample_n(n, with_replacement)
.map(|ca| ca.into_series())
}
#[cfg(feature = "random")]
#[cfg_attr(docsrs, doc(cfg(feature = "random")))]
fn sample_frac(&self, frac: f64, with_replacement: bool) -> Result<Series> {
self.0
.sample_frac(frac, with_replacement)
.map(|ca| ca.into_series())
}
fn pow(&self, exponent: f64) -> Result<Series> {
let f_err = || {
Err(PolarsError::InvalidOperation(
format!("power operation not supported on dtype {:?}", self.dtype()).into(),
))
};
match self.dtype() {
DataType::Utf8 | DataType::List(_) | DataType::Boolean => f_err(),
DataType::Float32 => Ok(self.0.pow_f32(exponent as f32).into_series()),
_ => Ok(self.0.pow_f64(exponent).into_series()),
}
}
fn peak_max(&self) -> BooleanChunked {
self.0.peak_max()
}
fn peak_min(&self) -> BooleanChunked {
self.0.peak_min()
}
#[cfg(feature = "is_in")]
fn is_in(&self, other: &Series) -> Result<BooleanChunked> {
IsIn::is_in(&self.0, other)
}
#[cfg(feature = "repeat_by")]
fn repeat_by(&self, by: &UInt32Chunked) -> ListChunked {
RepeatBy::repeat_by(&self.0, by)
}
#[cfg(feature = "checked_arithmetic")]
fn checked_div(&self, rhs: &Series) -> Result<Series> {
self.0.checked_div(rhs)
}
#[cfg(feature = "is_first")]
fn is_first(&self) -> Result<BooleanChunked> {
self.0.is_first()
}
#[cfg(feature = "object")]
fn as_any(&self) -> &dyn Any {
&self.0
}
#[cfg(feature = "mode")]
fn mode(&self) -> Result<Series> {
Ok(self.0.mode()?.into_series())
}
#[cfg(feature = "concat_str")]
fn str_concat(&self, delimiter: &str) -> Utf8Chunked {
self.0.str_concat(delimiter)
}
}
};
}
#[cfg(feature = "dtype-u8")]
impl_dyn_series!(UInt8Chunked);
#[cfg(feature = "dtype-u16")]
impl_dyn_series!(UInt16Chunked);
impl_dyn_series!(UInt32Chunked);
impl_dyn_series!(UInt64Chunked);
#[cfg(feature = "dtype-i8")]
impl_dyn_series!(Int8Chunked);
#[cfg(feature = "dtype-i16")]
impl_dyn_series!(Int16Chunked);
impl_dyn_series!(Int32Chunked);
impl_dyn_series!(Int64Chunked);
macro_rules! impl_dyn_series_numeric {
($ca: ident) => {
impl private::PrivateSeriesNumeric for SeriesWrap<$ca> {
fn bit_repr_is_large(&self) -> bool {
$ca::bit_repr_is_large()
}
fn bit_repr_large(&self) -> UInt64Chunked {
self.0.bit_repr_large()
}
fn bit_repr_small(&self) -> UInt32Chunked {
self.0.bit_repr_small()
}
}
};
}
impl_dyn_series_numeric!(Float32Chunked);
impl_dyn_series_numeric!(Float64Chunked);
#[cfg(feature = "dtype-u8")]
impl_dyn_series_numeric!(UInt8Chunked);
#[cfg(feature = "dtype-u16")]
impl_dyn_series_numeric!(UInt16Chunked);
impl_dyn_series_numeric!(UInt32Chunked);
impl_dyn_series_numeric!(UInt64Chunked);
#[cfg(feature = "dtype-i8")]
impl_dyn_series_numeric!(Int8Chunked);
#[cfg(feature = "dtype-i16")]
impl_dyn_series_numeric!(Int16Chunked);
impl_dyn_series_numeric!(Int32Chunked);
impl_dyn_series_numeric!(Int64Chunked);
impl private::PrivateSeriesNumeric for SeriesWrap<Utf8Chunked> {}
impl private::PrivateSeriesNumeric for SeriesWrap<ListChunked> {}
impl private::PrivateSeriesNumeric for SeriesWrap<BooleanChunked> {}
|
s.f64().unwrap().rolling_std(options)
|
part1.py
|
import pygame, sys
from pygame.locals import *
import random
#### GAME SETUP ######
pygame.init()
FPS = 60
FramePerSec = pygame.time.Clock()
# Defining game constants
RED = (255, 0, 0)
WHITE = (255, 255, 255)
SCREEN_WIDTH = 400
SCREEN_HEIGHT = 600
GAME_NAME = "Dodge The Enemy"
SCORE = 0
# Creating the main surface
DISPLAYSURF = pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT))
DISPLAYSURF.fill(WHITE)
pygame.display.set_caption(GAME_NAME)
# Create class interfaces
class Enemy(pygame.sprite.Sprite):
def __init__(self):
super().__init__()
self.image = pygame.image.load("images/alien.png")
self.surf = pygame.Surface((100, 100))
self.rect = self.surf.get_rect(center = (random.randint(40, (SCREEN_WIDTH-40)),0))
def move(self):
self.rect.move_ip(0,5)
if self.rect.bottom > SCREEN_HEIGHT:
self.rect.top = 0
self.rect.center = (random.randint(40, (SCREEN_WIDTH-40)), 0)
def draw(self, surface):
surface.blit(self.image, self.rect)
class Player(pygame.sprite.Sprite):
def __
|
elf):
super().__init__() # initializing Sprite
self.image = pygame.image.load("images/rocket.png")
self.surf = pygame.Surface((100, 100))
self.rect = self.surf.get_rect(center = (250, 500))
def update(self):
pressed_keys = pygame.key.get_pressed()
if self.rect.left > 0:
if pressed_keys[K_LEFT]:
self.rect.move_ip(-5, 0)
if self.rect.right < SCREEN_WIDTH:
if pressed_keys[K_RIGHT]:
self.rect.move_ip(5, 0)
def draw(self, surface):
surface.blit(self.image, self.rect)
### GAME STARTUP #######
P1 = Player()
E1 = Enemy()
while True:
list_events = pygame.event.get()
for event in list_events:
if event.type == QUIT:
pygame.quit()
sys.exit()
# update game state
P1.update()
E1.move()
# update graphics
DISPLAYSURF.fill(WHITE)
P1.draw(DISPLAYSURF)
E1.draw(DISPLAYSURF)
pygame.display.update()
FramePerSec.tick(FPS)
|
init__(s
|
test_color.py
|
"""Tests Home Assistant color util methods."""
import unittest
import homeassistant.util.color as color_util
|
class TestColorUtil(unittest.TestCase):
"""Test color util methods."""
# pylint: disable=invalid-name
def test_color_RGB_to_xy(self):
"""Test color_RGB_to_xy."""
self.assertEqual((0, 0), color_util.color_RGB_to_xy(0, 0, 0))
self.assertEqual((0.3127159072215825, 0.3290014805066623),
color_util.color_RGB_to_xy(255, 255, 255))
self.assertEqual((0.15001662234042554, 0.060006648936170214),
color_util.color_RGB_to_xy(0, 0, 255))
self.assertEqual((0.3, 0.6), color_util.color_RGB_to_xy(0, 255, 0))
self.assertEqual((0.6400744994567747, 0.3299705106316933),
color_util.color_RGB_to_xy(255, 0, 0))
def test_color_xy_brightness_to_RGB(self):
"""Test color_RGB_to_xy."""
self.assertEqual((0, 0, 0),
color_util.color_xy_brightness_to_RGB(1, 1, 0))
self.assertEqual((255, 235, 214),
color_util.color_xy_brightness_to_RGB(.35, .35, 255))
self.assertEqual((255, 0, 45),
color_util.color_xy_brightness_to_RGB(1, 0, 255))
self.assertEqual((0, 255, 0),
color_util.color_xy_brightness_to_RGB(0, 1, 255))
self.assertEqual((0, 83, 255),
color_util.color_xy_brightness_to_RGB(0, 0, 255))
def test_rgb_hex_to_rgb_list(self):
"""Test rgb_hex_to_rgb_list."""
self.assertEqual([255, 255, 255],
color_util.rgb_hex_to_rgb_list('ffffff'))
self.assertEqual([0, 0, 0],
color_util.rgb_hex_to_rgb_list('000000'))
self.assertEqual([255, 255, 255, 255],
color_util.rgb_hex_to_rgb_list('ffffffff'))
self.assertEqual([0, 0, 0, 0],
color_util.rgb_hex_to_rgb_list('00000000'))
self.assertEqual([51, 153, 255],
color_util.rgb_hex_to_rgb_list('3399ff'))
self.assertEqual([51, 153, 255, 0],
color_util.rgb_hex_to_rgb_list('3399ff00'))
| |
service.go
|
// *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package refactorspaces
import (
"context"
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Definition of AWS::RefactorSpaces::Service Resource Type
type Service struct {
pulumi.CustomResourceState
ApplicationIdentifier pulumi.StringOutput `pulumi:"applicationIdentifier"`
Arn pulumi.StringOutput `pulumi:"arn"`
Description pulumi.StringPtrOutput `pulumi:"description"`
EndpointType ServiceEndpointTypePtrOutput `pulumi:"endpointType"`
EnvironmentIdentifier pulumi.StringOutput `pulumi:"environmentIdentifier"`
LambdaEndpoint ServiceLambdaEndpointInputPtrOutput `pulumi:"lambdaEndpoint"`
|
// Metadata that you can assign to help organize the frameworks that you create. Each tag is a key-value pair.
Tags ServiceTagArrayOutput `pulumi:"tags"`
UrlEndpoint ServiceUrlEndpointInputPtrOutput `pulumi:"urlEndpoint"`
VpcId pulumi.StringPtrOutput `pulumi:"vpcId"`
}
// NewService registers a new resource with the given unique name, arguments, and options.
func NewService(ctx *pulumi.Context,
name string, args *ServiceArgs, opts ...pulumi.ResourceOption) (*Service, error) {
if args == nil {
return nil, errors.New("missing one or more required arguments")
}
if args.ApplicationIdentifier == nil {
return nil, errors.New("invalid value for required argument 'ApplicationIdentifier'")
}
if args.EnvironmentIdentifier == nil {
return nil, errors.New("invalid value for required argument 'EnvironmentIdentifier'")
}
var resource Service
err := ctx.RegisterResource("aws-native:refactorspaces:Service", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// GetService gets an existing Service resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetService(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *ServiceState, opts ...pulumi.ResourceOption) (*Service, error) {
var resource Service
err := ctx.ReadResource("aws-native:refactorspaces:Service", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering Service resources.
type serviceState struct {
}
type ServiceState struct {
}
func (ServiceState) ElementType() reflect.Type {
return reflect.TypeOf((*serviceState)(nil)).Elem()
}
type serviceArgs struct {
ApplicationIdentifier string `pulumi:"applicationIdentifier"`
Description *string `pulumi:"description"`
EndpointType *ServiceEndpointType `pulumi:"endpointType"`
EnvironmentIdentifier string `pulumi:"environmentIdentifier"`
LambdaEndpoint *ServiceLambdaEndpointInput `pulumi:"lambdaEndpoint"`
Name *string `pulumi:"name"`
// Metadata that you can assign to help organize the frameworks that you create. Each tag is a key-value pair.
Tags []ServiceTag `pulumi:"tags"`
UrlEndpoint *ServiceUrlEndpointInput `pulumi:"urlEndpoint"`
VpcId *string `pulumi:"vpcId"`
}
// The set of arguments for constructing a Service resource.
type ServiceArgs struct {
ApplicationIdentifier pulumi.StringInput
Description pulumi.StringPtrInput
EndpointType ServiceEndpointTypePtrInput
EnvironmentIdentifier pulumi.StringInput
LambdaEndpoint ServiceLambdaEndpointInputPtrInput
Name pulumi.StringPtrInput
// Metadata that you can assign to help organize the frameworks that you create. Each tag is a key-value pair.
Tags ServiceTagArrayInput
UrlEndpoint ServiceUrlEndpointInputPtrInput
VpcId pulumi.StringPtrInput
}
func (ServiceArgs) ElementType() reflect.Type {
return reflect.TypeOf((*serviceArgs)(nil)).Elem()
}
type ServiceInput interface {
pulumi.Input
ToServiceOutput() ServiceOutput
ToServiceOutputWithContext(ctx context.Context) ServiceOutput
}
func (*Service) ElementType() reflect.Type {
return reflect.TypeOf((*Service)(nil))
}
func (i *Service) ToServiceOutput() ServiceOutput {
return i.ToServiceOutputWithContext(context.Background())
}
func (i *Service) ToServiceOutputWithContext(ctx context.Context) ServiceOutput {
return pulumi.ToOutputWithContext(ctx, i).(ServiceOutput)
}
type ServiceOutput struct{ *pulumi.OutputState }
func (ServiceOutput) ElementType() reflect.Type {
return reflect.TypeOf((*Service)(nil))
}
func (o ServiceOutput) ToServiceOutput() ServiceOutput {
return o
}
func (o ServiceOutput) ToServiceOutputWithContext(ctx context.Context) ServiceOutput {
return o
}
func init() {
pulumi.RegisterInputType(reflect.TypeOf((*ServiceInput)(nil)).Elem(), &Service{})
pulumi.RegisterOutputType(ServiceOutput{})
}
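A hedged sketch of how NewService might be called from a Pulumi Go program. The SDK import path, the resource name and the identifier values are assumptions; only the required ApplicationIdentifier and EnvironmentIdentifier arguments come from the definitions above.
package main

import (
	"github.com/pulumi/pulumi-aws-native/sdk/go/aws/refactorspaces" // assumed import path
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Identifier values below are placeholders, not real AWS identifiers.
		_, err := refactorspaces.NewService(ctx, "example-service", &refactorspaces.ServiceArgs{
			ApplicationIdentifier: pulumi.String("app-0123456789abcdef"),
			EnvironmentIdentifier: pulumi.String("env-0123456789abcdef"),
		})
		return err
	})
}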
|
Name pulumi.StringPtrOutput `pulumi:"name"`
ServiceIdentifier pulumi.StringOutput `pulumi:"serviceIdentifier"`
|
Exemplo 1.py
|
#1 - Function that returns a variable:
"""
Write an exponentiation function
"""
def potência(base, exp):
pot = ba
|
tência(2,3)
print(a)
|
se**exp
return pot
a = po
|