yesno.go
package astval
import (
"bytes"
"fmt"
)
const (
yes = "yes"
no = "no"
)
// YesNo is a boolean value that will be marshaled as "yes" or "no".
type YesNo bool
// String returns the boolean as the string "yes" or "no".
func (b YesNo) String() string {
if b {
return yes
}
return no
}
// MarshalText marshals the boolean as "yes" or "no".
func (b YesNo) MarshalText() ([]byte, error) {
if b {
return []byte(yes), nil
}
return []byte(no), nil
}
// UnmarshalText parses "yes" or "no" as a boolean value.
func (b *YesNo) UnmarshalText(text []byte) error {
switch {
case bytes.Equal(text, []byte(yes)):
*b = YesNo(true)
return nil
case bytes.Equal(text, []byte(no)):
*b = YesNo(false)
return nil
default:
return fmt.Errorf("cannot marshal \"%s\" as yes/no value", string(text))
}
}
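// Example (a sketch, not part of this package): because YesNo implements
// encoding.TextMarshaler, encoding/json renders it as a JSON string:
//
//   type Config struct {
//       Enabled astval.YesNo `json:"enabled"`
//   }
//   b, _ := json.Marshal(Config{Enabled: true}) // {"enabled":"yes"}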
routes.ts
export const USERS = '/users';
export const USER = '/users/:id';
exponent.rs
// Adapted from https://github.com/Alexhuszagh/rust-lexical.
use crate::lexical::exponent::*;
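// Inferred from the assertions below (a reading of the tests, not of the
// implementation): with a nonzero integer digit count, scientific_exponent
// saturates exponent + digits - 1; with zero integer digits it saturates
// exponent - leading_fraction_zeros - 1.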
#[test]
fn scientific_exponent_test() {
// 0 digits in the integer
assert_eq!(scientific_exponent(0, 0, 5), -6);
assert_eq!(scientific_exponent(10, 0, 5), 4);
assert_eq!(scientific_exponent(-10, 0, 5), -16);
// >0 digits in the integer
assert_eq!(scientific_exponent(0, 1, 5), 0);
assert_eq!(scientific_exponent(0, 2, 5), 1);
assert_eq!(scientific_exponent(0, 2, 20), 1);
assert_eq!(scientific_exponent(10, 2, 20), 11);
assert_eq!(scientific_exponent(-10, 2, 20), -9);
// Underflow
assert_eq!(
scientific_exponent(i32::min_value(), 0, 0),
i32::min_value()
);
assert_eq!(
scientific_exponent(i32::min_value(), 0, 5),
i32::min_value()
);
// Overflow
assert_eq!(
scientific_exponent(i32::max_value(), 0, 0),
i32::max_value() - 1
);
assert_eq!(
scientific_exponent(i32::max_value(), 5, 0),
i32::max_value()
);
}
#[test]
fn mantissa_exponent_test() {
assert_eq!(mantissa_exponent(10, 5, 0), 5);
assert_eq!(mantissa_exponent(0, 5, 0), -5);
assert_eq!(
mantissa_exponent(i32::max_value(), 5, 0),
i32::max_value() - 5
);
assert_eq!(mantissa_exponent(i32::max_value(), 0, 5), i32::max_value());
assert_eq!(mantissa_exponent(i32::min_value(), 5, 0), i32::min_value());
assert_eq!(
mantissa_exponent(i32::min_value(), 0, 5),
i32::min_value() + 5
);
}
main.py
#!/usr/bin/env python3
import asyncio
import logging
import uuid
from bleak import BleakScanner, BleakClient
# Enable debug output
# logging.basicConfig(level=logging.DEBUG)
DEVICE_NAME = "m5-stack"
SERVICE_UUID = uuid.UUID("4fafc201-1fb5-459e-8fcc-c5c9c331914b")
CHAR_UUID = uuid.UUID("beb5483e-36e1-4688-b7f5-ea07361b26a8")
async def run(loop):
print("Searching devices...")
devices = await BleakScanner.discover()
device = list(filter(lambda d: d.name == DEVICE_NAME, devices))
if len(device) == 0:
raise RuntimeError(f"Failed to find a device named '{DEVICE_NAME}'")
address = device[0].address
print(f"Connecting to the device... (address: {address})")
async with BleakClient(address, loop=loop) as client:
print("Message from the device...")
value = await client.read_gatt_char(CHAR_UUID)
print(value.decode())
print("Sending message to the device...")
message = bytearray(b"hi!")
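# Passing True as the third argument requests a write-with-response, so the
# coroutine completes only once the peripheral acknowledges the write.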
await client.write_gatt_char(CHAR_UUID, message, True)
def callback(sender, data):
print(f"Received: {data}")
print("Subscribing to characteristic changes...")
await client.start_notify(CHAR_UUID, callback)
print("Waiting 60 seconds to receive data from the device...")
await asyncio.sleep(60)
loop = asyncio.get_event_loop()
loop.run_until_complete(run(loop))
main.go
package main
import (
log "github.com/sirupsen/logrus"
"github.com/yutopp/go-rtmp"
)
func main() {
client, err := rtmp.Dial("rtmp", "localhost:1935", &rtmp.ConnConfig{
Logger: log.StandardLogger(),
})
if err != nil {
log.Panicf("Failed: %+v", err)
}
defer client.Close()
log.Infof("Client created")
if err := client.Connect(); err != nil {
log.Infof("Failed to connect: Err=%+v", err)
}
log.Infof("connected")
stream, err := client.CreateStream()
if err != nil {
log.Infof("Failed to create stream: Err=%+v", err)
}
defer stream.Close()
log.Infof("stream created")
}
client.py
#!/usr/bin/env python3
import os
import socket
from collections import namedtuple
from enum import Enum, unique
HeadTerm = namedtuple('HeadTerm', ['index', 'value'])
@unique
class Header(Enum):
FILE_NAME = HeadTerm(index=0, value=0x80)
FILE_SIZE = HeadTerm(index=0, value=0x40)
FILE_CONTEXT = HeadTerm(index=0, value=0x20)
def headEncoder(header: Header) -> bytes:
HEAD_SIZE = 8
messageHead = bytearray(HEAD_SIZE)
messageHead[header.value.index] = header.value.value
return bytes(messageHead)
def main():
TCP_IP = 'iao.life'
TCP_PORT = 11030
BUFFER_SIZE = 1024
HEAD_SIZE = 8
FILE_DIR = '/mnt/d/iao13/Pictures/Cache2.jpg'
f = open(FILE_DIR, 'rb')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
filename = bytes(FILE_DIR.split('/')[-1], 'utf-8')
for i in range(0, len(filename), BUFFER_SIZE - HEAD_SIZE):
message = filename[i:i + BUFFER_SIZE - HEAD_SIZE]
if not message:
break
message = headEncoder(Header.FILE_NAME) + message
if len(message) < BUFFER_SIZE:
message = message + bytes(BUFFER_SIZE - len(message))
s.send(message)
filesize = os.path.getsize(FILE_DIR)
message = headEncoder(Header.FILE_SIZE) + filesize.to_bytes(BUFFER_SIZE - HEAD_SIZE, byteorder='big')
s.send(message)
while 1:
message = f.read(BUFFER_SIZE - HEAD_SIZE)
if not message:
break
message = headEncoder(Header.FILE_CONTEXT) + message
if len(message) < BUFFER_SIZE:
message = message + bytes(BUFFER_SIZE - len(message))
s.send(message)
s.close()
f.close()
if __name__ == '__main__':
main()
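# Receiver-side sketch (hypothetical; the server is not part of this file):
# every frame is exactly BUFFER_SIZE bytes, an 8-byte head plus zero-padded
# payload, with head[0] carrying the Header flag:
#
#   head, payload = frame[:8], frame[8:]
#   if head[0] == Header.FILE_SIZE.value.value:
#       size = int.from_bytes(payload, byteorder='big')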
test_apkaye.py
import os
import json
import pytest
import shutil
# Getting absolute paths, names and regexes
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(TEST_DIR)
SERVICE_CONFIG_NAME = "service_manifest.yml"
SERVICE_CONFIG_PATH = os.path.join(ROOT_DIR, SERVICE_CONFIG_NAME)
TEMP_SERVICE_CONFIG_PATH = os.path.join("/tmp", SERVICE_CONFIG_NAME)
# Samples that we will be sending to the service
sample1 = dict(
sid=1,
metadata={},
service_name='apkaye',
service_config={},
fileinfo=dict(
magic='ASCII text, with no line terminators',
md5='1f09ecbd362fa0dfff88d4788e6f5df0',
mime='text/plain',
sha1='a649bf201cde05724e48f2d397a615b201be34fb',
sha256='dadc624d4454e10293dbd1b701b9ee9f99ef83b4cd07b695111d37eb95abcff8',
size=19,
type='unknown',
),
filename='dadc624d4454e10293dbd1b701b9ee9f99ef83b4cd07b695111d37eb95abcff8',
min_classification='TLP:WHITE',
max_files=501, # TODO: get the actual value
ttl=3600,
)
@pytest.fixture
def class_instance():
temp_service_config_path = os.path.join("/tmp", SERVICE_CONFIG_NAME)
try:
# Placing the service_manifest.yml in the tmp directory
shutil.copyfile(SERVICE_CONFIG_PATH, temp_service_config_path)
from apkaye.apkaye import APKaye
yield APKaye()
finally:
# Delete the service_manifest.yml
os.remove(temp_service_config_path)
class TestAPKaye:
@classmethod
def setup_class(cls):
# Placing the samples in the tmp directory
samples_path = os.path.join(TEST_DIR, "samples")
for sample in os.listdir(samples_path):
sample_path = os.path.join(samples_path, sample)
shutil.copyfile(sample_path, os.path.join("/tmp", sample))
@classmethod
def teardown_class(cls):
# Cleaning up the tmp directory
samples_path = os.path.join(TEST_DIR, "samples")
for sample in os.listdir(samples_path):
temp_sample_path = os.path.join("/tmp", sample)
os.remove(temp_sample_path)
@staticmethod
def test_init(class_instance):
assert class_instance.apktool == "/opt/al_support/apktool.jar"
assert class_instance.dex2jar == "/opt/al_support/dex2jar-2.0/d2j-dex2jar.sh"
assert class_instance.aapt == "/opt/al_support/aapt2/aapt2"
@staticmethod
def test_start():
# TODO: somehow check if error was logged in service.log
# service.start()
pass
@staticmethod
def test_get_tool_version(class_instance):
assert class_instance.get_tool_version() == "APKTOOL: 2.4.0 - D2J: 2.0 - AAPT2: 3.5.1-5435860"
@staticmethod
@pytest.mark.parametrize("sample", [
sample1
])
def test_execute(sample, class_instance):
# Imports required to execute the sample
from assemblyline_v4_service.common.task import Task
from assemblyline.odm.messages.task import Task as ServiceTask
from assemblyline_v4_service.common.request import ServiceRequest
# Creating the required objects for execution
service_task = ServiceTask(sample1)
task = Task(service_task)
class_instance._task = task
service_request = ServiceRequest(task)
# Actually executing the sample
task.service_config = {"resubmit_apk_as_jar": False}
class_instance.execute(service_request)
# Get the result of execute() from the test method
test_result = task.get_service_result()
# Get the assumed "correct" result of the sample
correct_result_path = os.path.join(TEST_DIR, "results", task.file_name + ".json")
with open(correct_result_path, "r") as f:
correct_result = json.loads(f.read())
# Assert that the appropriate sections of the dict are equal
# Avoiding date in the response
test_result_response = test_result.pop("response")
correct_result_response = correct_result.pop("response")
assert test_result == correct_result
# Comparing everything in the response except for the date
test_result_response["milestones"].pop("service_completed")
correct_result_response["milestones"].pop("service_completed")
assert test_result_response == correct_result_response
@staticmethod
@pytest.mark.parametrize("apktool_out_dir,result", [
("", None)
])
def test_validate_certs(apktool_out_dir, result, class_instance):
class_instance.validate_certs(apktool_out_dir=apktool_out_dir, result=result)
pass
@staticmethod
@pytest.mark.parametrize("apktool_out_dir,result", [
("", None)
])
def test_find_scripts_and_exes(apktool_out_dir, result, class_instance):
class_instance.find_scripts_and_exes(apktool_out_dir=apktool_out_dir, result=result)
pass
@staticmethod
@pytest.mark.parametrize("apktool_out_dir,result", [
("", None)
])
def test_find_network_indicators(apktool_out_dir, result, class_instance):
class_instance.find_network_indicators(apktool_out_dir=apktool_out_dir, result=result)
pass
@staticmethod
@pytest.mark.parametrize("apktool_out_dir,result", [
("", None)
])
def test_analyse_apktool_output(apktool_out_dir, result, class_instance):
class_instance.analyse_apktool_output(apktool_out_dir=apktool_out_dir, result=result)
pass
@staticmethod
@pytest.mark.parametrize("apk,target_dir,work_dir,result", [
("", "", "", None)
])
def test_run_apktool(apk, target_dir, work_dir, result, class_instance):
class_instance.run_apktool(apk=apk, target_dir=target_dir, work_dir=work_dir, result=result)
pass
@staticmethod
@pytest.mark.parametrize("apk,target", [
("", "")
])
def test_get_dex(apk, target, class_instance):
class_instance.get_dex(apk=apk, target=target)
pass
@staticmethod
@pytest.mark.parametrize("apk_file,target,result,val", [
("", "", None, None)
])
def test_resubmit_dex2jar_output(apk_file, target, result, val, class_instance):
class_instance.resubmit_dex2jar_output(apk_file=apk_file, target=target, result=result, request=val)
pass
@staticmethod
@pytest.mark.parametrize("args", [
[]
])
def test_run_appt(args, class_instance):
class_instance.run_appt(args=args)
pass
@staticmethod
@pytest.mark.parametrize("apk_file,result", [
("", None)
])
def test_run_badging_analysis(apk_file, result, class_instance):
class_instance.run_badging_analysis(apk_file=apk_file, result=result)
pass
@staticmethod
@pytest.mark.parametrize("apk_file,result", [
("", None)
])
def test_run_strings_analysis(apk_file, result, class_instance):
class_instance.run_strings_analysis(apk_file=apk_file, result=result)
pass
arbitrary_value.rs
use std::fmt::Formatter;
// trying to keep values at 3 bytes
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[derive(Clone, Debug, PartialEq)]
pub enum AttributeValue<'a> {
Text(&'a str),
Float32(f32),
Float64(f64),
Int32(i32),
Int64(i64),
Uint32(u32),
Uint64(u64),
Bool(bool),
Vec3Float(f32, f32, f32),
Vec3Int(i32, i32, i32),
Vec3Uint(u32, u32, u32),
Vec4Float(f32, f32, f32, f32),
Vec4Int(i32, i32, i32, i32),
Vec4Uint(u32, u32, u32, u32),
Bytes(&'a [u8]),
Any(ArbitraryAttributeValue<'a>),
}
impl<'a> AttributeValue<'a> {
pub fn is_truthy(&self) -> bool {
match self {
AttributeValue::Text(t) => *t == "true",
AttributeValue::Bool(t) => *t,
_ => false,
}
}
pub fn is_falsy(&self) -> bool {
match self {
AttributeValue::Text(t) => *t == "false",
AttributeValue::Bool(t) => !(*t),
_ => false,
}
}
}
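// Note that truthiness is deliberately narrow: only Text("true") and
// Bool(true) are truthy, and only Text("false") and Bool(false) are falsy;
// numeric and vector variants are neither.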
impl<'a> std::fmt::Display for AttributeValue<'a> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
AttributeValue::Text(a) => write!(f, "{}", a),
AttributeValue::Float32(a) => write!(f, "{}", a),
AttributeValue::Float64(a) => write!(f, "{}", a),
AttributeValue::Int32(a) => write!(f, "{}", a),
AttributeValue::Int64(a) => write!(f, "{}", a),
AttributeValue::Uint32(a) => write!(f, "{}", a),
AttributeValue::Uint64(a) => write!(f, "{}", a),
AttributeValue::Bool(a) => write!(f, "{}", a),
AttributeValue::Vec3Float(_, _, _) => todo!(),
AttributeValue::Vec3Int(_, _, _) => todo!(),
AttributeValue::Vec3Uint(_, _, _) => todo!(),
AttributeValue::Vec4Float(_, _, _, _) => todo!(),
AttributeValue::Vec4Int(_, _, _, _) => todo!(),
AttributeValue::Vec4Uint(_, _, _, _) => todo!(),
AttributeValue::Bytes(_) => todo!(),
AttributeValue::Any(_) => todo!(),
}
}
}
#[derive(Clone, Copy)]
pub struct ArbitraryAttributeValue<'a> {
pub value: &'a dyn std::any::Any,
pub cmp: fn(&'a dyn std::any::Any, &'a dyn std::any::Any) -> bool,
}
impl PartialEq for ArbitraryAttributeValue<'_> {
fn eq(&self, other: &Self) -> bool {
(self.cmp)(self.value, other.value)
}
}
impl std::fmt::Debug for ArbitraryAttributeValue<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("ArbitraryAttributeValue").finish()
}
}
#[cfg(feature = "serialize")]
impl<'a> serde::Serialize for ArbitraryAttributeValue<'a> {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
panic!("ArbitraryAttributeValue should not be serialized")
}
}
#[cfg(feature = "serialize")]
impl<'de, 'a> serde::Deserialize<'de> for &'a ArbitraryAttributeValue<'a> {
fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
panic!("ArbitraryAttributeValue is not deserializable!")
}
}
#[cfg(feature = "serialize")]
impl<'de, 'a> serde::Deserialize<'de> for ArbitraryAttributeValue<'a> {
fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
panic!("ArbitraryAttributeValue is not deserializable!")
}
}
impl<'a> AttributeValue<'a> {
pub fn as_text(&self) -> Option<&'a str> {
match self {
AttributeValue::Text(s) => Some(s),
_ => None,
}
}
pub fn as_float32(&self) -> Option<f32> {
match self {
AttributeValue::Float32(f) => Some(*f),
_ => None,
}
}
pub fn as_float64(&self) -> Option<f64> {
match self {
AttributeValue::Float64(f) => Some(*f),
_ => None,
}
}
pub fn as_int32(&self) -> Option<i32> {
match self {
AttributeValue::Int32(i) => Some(*i),
_ => None,
}
}
pub fn as_int64(&self) -> Option<i64> {
match self {
AttributeValue::Int64(i) => Some(*i),
_ => None,
}
}
pub fn as_uint32(&self) -> Option<u32> {
match self {
AttributeValue::Uint32(i) => Some(*i),
_ => None,
}
}
pub fn as_uint64(&self) -> Option<u64> {
match self {
AttributeValue::Uint64(i) => Some(*i),
_ => None,
}
}
pub fn as_bool(&self) -> Option<bool> {
match self {
AttributeValue::Bool(b) => Some(*b),
_ => None,
}
}
pub fn as_vec3_float(&self) -> Option<(f32, f32, f32)> {
match self {
AttributeValue::Vec3Float(x, y, z) => Some((*x, *y, *z)),
_ => None,
}
}
pub fn as_vec3_int(&self) -> Option<(i32, i32, i32)> {
match self {
AttributeValue::Vec3Int(x, y, z) => Some((*x, *y, *z)),
_ => None,
}
}
pub fn as_vec3_uint(&self) -> Option<(u32, u32, u32)> {
match self {
AttributeValue::Vec3Uint(x, y, z) => Some((*x, *y, *z)),
_ => None,
}
}
pub fn as_vec4_float(&self) -> Option<(f32, f32, f32, f32)> {
match self {
AttributeValue::Vec4Float(x, y, z, w) => Some((*x, *y, *z, *w)),
_ => None,
}
}
pub fn as_vec4_int(&self) -> Option<(i32, i32, i32, i32)> {
match self {
AttributeValue::Vec4Int(x, y, z, w) => Some((*x, *y, *z, *w)),
_ => None,
}
}
pub fn as_vec4_uint(&self) -> Option<(u32, u32, u32, u32)> {
match self {
AttributeValue::Vec4Uint(x, y, z, w) => Some((*x, *y, *z, *w)),
_ => None,
}
}
pub fn as_bytes(&self) -> Option<&[u8]> {
match self {
AttributeValue::Bytes(b) => Some(b),
_ => None,
}
}
pub fn as_any(&self) -> Option<&'a ArbitraryAttributeValue> {
match self {
AttributeValue::Any(a) => Some(a),
_ => None,
}
}
}
// #[test]
// fn test_attribute_value_size() {
// assert_eq!(std::mem::size_of::<AttributeValue<'_>>(), 24);
// }
api_op_DescribeDBClusterParameters.go
// Code generated by smithy-go-codegen DO NOT EDIT.
package rds
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/rds/types"
"github.com/awslabs/smithy-go/middleware"
smithyhttp "github.com/awslabs/smithy-go/transport/http"
)
// Returns the detailed parameter list for a particular DB cluster parameter group.
// For more information on Amazon Aurora, see What Is Amazon Aurora?
// (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)
// in the Amazon Aurora User Guide. This action only applies to Aurora DB clusters.
func (c *Client) DescribeDBClusterParameters(ctx context.Context, params *DescribeDBClusterParametersInput, optFns ...func(*Options)) (*DescribeDBClusterParametersOutput, error) {
if params == nil {
params = &DescribeDBClusterParametersInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DescribeDBClusterParameters", params, optFns, addOperationDescribeDBClusterParametersMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DescribeDBClusterParametersOutput)
out.ResultMetadata = metadata
return out, nil
}
//
type DescribeDBClusterParametersInput struct {
// The name of a specific DB cluster parameter group to return parameter details
// for. Constraints:
//
// * If supplied, must match the name of an existing
// DBClusterParameterGroup.
//
// This member is required.
DBClusterParameterGroupName *string
// This parameter isn't currently supported.
Filters []*types.Filter
// An optional pagination token provided by a previous DescribeDBClusterParameters
// request. If this parameter is specified, the response includes only records
// beyond the marker, up to the value specified by MaxRecords.
Marker *string
// The maximum number of records to include in the response. If more records exist
// than the specified MaxRecords value, a pagination token called a marker is
// included in the response so you can retrieve the remaining results. Default: 100
// Constraints: Minimum 20, maximum 100.
MaxRecords *int32
// A value that indicates to return only parameters for a specific source.
// Parameter sources can be engine, service, or customer.
Source *string
}
// Provides details about a DB cluster parameter group including the parameters in
// the DB cluster parameter group.
type DescribeDBClusterParametersOutput struct {
// An optional pagination token provided by a previous DescribeDBClusterParameters
// request. If this parameter is specified, the response includes only records
// beyond the marker, up to the value specified by MaxRecords .
Marker *string
// Provides a list of parameters for the DB cluster parameter group.
Parameters []*types.Parameter
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
}
func addOperationDescribeDBClusterParametersMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsquery_serializeOpDescribeDBClusterParameters{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsquery_deserializeOpDescribeDBClusterParameters{}, middleware.After)
if err != nil {
return err
}
awsmiddleware.AddRequestInvocationIDMiddleware(stack)
smithyhttp.AddContentLengthMiddleware(stack)
addResolveEndpointMiddleware(stack, options)
v4.AddComputePayloadSHA256Middleware(stack)
addRetryMiddlewares(stack, options)
addHTTPSignerV4Middleware(stack, options)
awsmiddleware.AddAttemptClockSkewMiddleware(stack)
addClientUserAgent(stack)
smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
smithyhttp.AddCloseResponseBodyMiddleware(stack)
addOpDescribeDBClusterParametersValidationMiddleware(stack)
stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeDBClusterParameters(options.Region), middleware.Before)
addRequestIDRetrieverMiddleware(stack)
addResponseErrorMiddleware(stack)
return nil
}
func newServiceMetadataMiddleware_opDescribeDBClusterParameters(region string) awsmiddleware.RegisterServiceMetadata {
return awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "rds",
OperationName: "DescribeDBClusterParameters",
}
}
content_browser.rs
use std::{
path::{Path, PathBuf},
process::Command,
};
use sabi_graphics::{Texture, TextureId};
use sabi_messenger::{get_events_from_string, Message, MessageBox, MessengerRw};
use sabi_profiler::debug_log;
use sabi_resources::{Data, Resource, SerializableResource, SharedData, SharedDataRc};
use sabi_serialize::deserialize;
use sabi_ui::{
implement_widget_data, CentralPanel, CollapsingHeader, DialogEvent, DialogOp, ScrollArea,
SidePanel, TextEdit, TextureId as eguiTextureId, TopBottomPanel, UIWidget, Ui, Widget, Window,
};
struct File {
path: PathBuf,
}
struct Dir {
path: PathBuf,
subdirs: Vec<Dir>,
files: Vec<File>,
}
#[allow(dead_code)]
struct ContentBrowserData {
shared_data: SharedDataRc,
global_dispatcher: MessageBox,
title: String,
folder: PathBuf,
selected_folder: PathBuf,
selected_file: String,
is_editable: bool,
operation: DialogOp,
icon_file_texture_id: TextureId,
dir: Dir,
extension: String,
}
implement_widget_data!(ContentBrowserData);
pub struct ContentBrowser {
ui_page: Resource<UIWidget>,
file_icon: Resource<Texture>,
}
impl ContentBrowser {
pub fn new(
shared_data: &SharedDataRc,
global_messenger: &MessengerRw,
operation: DialogOp,
path: &Path,
extension: String,
) -> Self {
let file_icon = Texture::request_load(
shared_data,
global_messenger,
PathBuf::from("./icons/file.png").as_path(),
None,
);
let mut selected_folder = Data::data_folder();
let mut selected_file = String::new();
if path.to_path_buf().is_file() {
if let Some(folder) = path.parent() {
selected_folder = folder.to_path_buf();
}
if let Some(filename) = path.file_name() {
selected_file = filename.to_str().unwrap().to_string();
}
}
let mut dir = Dir {
path: selected_folder.clone(),
subdirs: Vec::new(),
files: Vec::new(),
};
Self::fill_dir(&mut dir, selected_folder.as_path());
let data = ContentBrowserData {
shared_data: shared_data.clone(),
title: match operation {
DialogOp::Open => "Open".to_string(),
DialogOp::Save => "Save".to_string(),
DialogOp::New => "New".to_string(),
},
folder: selected_folder.clone(),
selected_folder,
selected_file,
is_editable: !matches!(operation, DialogOp::Open),
operation,
global_dispatcher: global_messenger.read().unwrap().get_dispatcher().clone(),
icon_file_texture_id: *file_icon.id(),
dir,
extension,
};
let ui_page = Self::create(shared_data, data);
Self { ui_page, file_icon }
}
fn fill_dir(dir: &mut Dir, root: &Path) {
if let Ok(directory) = std::fs::read_dir(root) {
directory.for_each(|entry| {
if let Ok(dir_entry) = entry {
let path = dir_entry.path();
if path.is_file() {
dir.files.push(File { path });
} else if path.is_dir() {
let mut subdir = Dir {
path: dir_entry.path(),
subdirs: Vec::new(),
files: Vec::new(),
};
Self::fill_dir(&mut subdir, path.as_path());
dir.subdirs.push(subdir);
}
}
});
}
}
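// Walks the cached Dir tree towards `path`, returning the files of the
// matching subdirectory, or the current directory's files as a fallback.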
fn get_files<'a>(dir: &'a Dir, path: &Path) -> &'a Vec<File> {
if dir.path.as_path() != path {
for d in dir.subdirs.iter() {
if d.path.as_path() == path {
return &d.files;
} else if path.starts_with(&d.path) {
return Self::get_files(d, path);
}
}
}
&dir.files
}
fn populate_with_folders_tree(
ui: &mut Ui,
directory: &Dir,
selected_folder: &mut PathBuf,
selected_file: &mut String,
) {
sabi_profiler::scoped_profile!("populate_with_folders_tree");
let selected = selected_folder == &directory.path;
if directory.subdirs.is_empty() {
if ui
.selectable_label(
selected,
directory.path.file_stem().unwrap().to_str().unwrap(),
)
.clicked()
{
*selected_folder = directory.path.to_path_buf();
*selected_file = String::new();
}
} else {
let collapsing =
CollapsingHeader::new(directory.path.file_stem().unwrap().to_str().unwrap())
.selectable(true)
.default_open(selected)
.selected(selected);
let header_response = collapsing
.show(ui, |ui| {
for subdir in directory.subdirs.iter() {
Self::populate_with_folders_tree(
ui,
subdir,
selected_folder,
selected_file,
);
}
})
.header_response;
if header_response.clicked() {
*selected_folder = directory.path.to_path_buf();
*selected_file = String::new();
}
}
}
fn populate_with_files(
ui: &mut Ui,
files: &[File],
selected_file: &mut String,
selected_extension: &str,
texture_index: u64,
) {
sabi_profiler::scoped_profile!("populate_with_files");
ui.vertical(|ui| {
for file in files.iter() {
let filename = file.path.file_name().unwrap().to_str().unwrap().to_string();
let extension = file.path.extension().unwrap().to_str().unwrap().to_string();
if extension == selected_extension {
let selected = selected_file == &filename;
ui.horizontal(|ui| {
ui.image(eguiTextureId::User(texture_index as _), [16., 16.]);
if ui.selectable_label(selected, filename.clone()).clicked() {
*selected_file = filename;
}
});
}
}
});
}
fn create(shared_data: &SharedDataRc, data: ContentBrowserData) -> Resource<UIWidget> {
let left_panel_min_width = 100.;
let left_panel_max_width = left_panel_min_width * 4.;
let button_size = 50.;
UIWidget::register(shared_data, data, move |ui_data, ui_context| {
if let Some(data) = ui_data.as_any_mut().downcast_mut::<ContentBrowserData>() {
let mut open = true;
let mut rect = ui_context.available_rect();
rect.min.x += rect.size().x * 0.05;
rect.min.y += rect.size().y * 0.05;
rect.max.x -= rect.size().x * 0.05;
rect.max.y -= rect.size().y * 0.1;
Window::new(data.title.clone())
.vscroll(false)
.title_bar(true)
.collapsible(false)
.resizable(true)
.open(&mut open)
.default_rect(rect)
.show(ui_context, |ui| {
let _ = &data;
sabi_profiler::scoped_profile!("Window");
SidePanel::left("Folders")
.resizable(true)
.width_range(left_panel_min_width..=left_panel_max_width)
.show_inside(ui, |ui| {
sabi_profiler::scoped_profile!("SidePanel");
ScrollArea::vertical().show(ui, |ui| {
Self::populate_with_folders_tree(
ui,
&data.dir,
&mut data.selected_folder,
&mut data.selected_file,
);
})
});
TopBottomPanel::bottom("bottom_panel")
.resizable(false)
.min_height(0.0)
.show_inside(ui, |ui| {
let _ = &data;
sabi_profiler::scoped_profile!("BottomPanel");
ui.horizontal(|ui| {
let _ = &data;
ui.label("Filename: ");
TextEdit::singleline(&mut data.selected_file)
.hint_text("File name here")
.interactive(data.is_editable)
.frame(data.is_editable)
.desired_width(ui.available_width() - 2. * button_size)
.ui(ui);
if ui.button("Ok").clicked() {
let path = data.selected_folder.clone();
let path = path.join(data.selected_file.clone());
data.global_dispatcher
.write()
.unwrap()
.send(
DialogEvent::Confirmed(data.operation, path)
.as_boxed(),
)
.ok();
}
if ui.button("Cancel").clicked() {
data.global_dispatcher
.write()
.unwrap()
.send(DialogEvent::Canceled(data.operation).as_boxed())
.ok();
}
});
});
CentralPanel::default().show_inside(ui, |ui| {
sabi_profiler::scoped_profile!("CentralPanel");
let rect = ui.max_rect();
ScrollArea::vertical()
.max_height(rect.height())
.show(ui, |ui| {
if data.selected_folder.is_dir() {
if let Some(texture_index) =
SharedData::get_index_of_resource::<Texture>(
&data.shared_data,
&data.icon_file_texture_id,
)
{
let path = data.selected_folder.as_path().to_path_buf();
let files = Self::get_files(&data.dir, path.as_path());
Self::populate_with_files(
ui,
files,
&mut data.selected_file,
data.extension.as_str(),
texture_index as _,
);
}
}
});
});
});
if !open {
data.global_dispatcher
.write()
.unwrap()
.send(DialogEvent::Canceled(data.operation).as_boxed())
.ok();
}
}
})
}
fn process_command_result(command: &mut Command, dispatcher: MessageBox) {
let result = command.output();
match result {
Ok(output) => {
let string = String::from_utf8(output.stdout).unwrap();
debug_log(string.as_str());
for e in get_events_from_string(string) {
if let Ok(event) = deserialize::<DialogEvent>(&e) {
dispatcher.write().unwrap().send(event.as_boxed()).ok();
}
}
}
Err(_) => {
debug_log("Failed to execute process");
}
}
}
}
auth.service.ts
import { Injectable, Inject } from '@nestjs/common';
import { ILogin } from '@interfaces';
import { ClientProxy } from '@nestjs/microservices';
@Injectable()
export class AuthService {
constructor(
@Inject('AUTH_SERVICE') private readonly clientService: ClientProxy) {}
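// Each method sends a { cmd } message pattern over the injected transport
// and resolves with the microservice's reply.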
login(payload: ILogin) {
const pattern = { cmd: 'login'};
return this.clientService.send<any>(pattern, payload).toPromise();
}
verify(token: string) {
const pattern = { cmd: 'verify' };
return this.clientService.send<any>(pattern, token).toPromise();
}
logout(token: string) {
const pattern = { cmd: 'logout' };
return this.clientService.send<any>(pattern, token).toPromise();
}
googleLogin(payload: any) {
const pattern = { cmd: 'gLogin' };
return this.clientService.send<any>(pattern, payload).toPromise();
}
}
Selector.js
/**
* Selector.js
*
* Copyright, Moxiecode Systems AB
* Released under LGPL License.
*
* License: http://www.tinymce.com/license
* Contributing: http://www.tinymce.com/contributing
*/
/**
* Selector engine, enables you to select controls by using CSS like expressions.
* We currently only support basic CSS expressions to reduce the size of the core
* and the ones we support should be enough for most cases.
*
* @example
* Supported expressions:
* element
* element#name
* element.class
* element[attr]
* element[attr*=value]
* element[attr~=value]
* element[attr!=value]
* element[attr^=value]
* element[attr$=value]
* element:<state>
* element:not(<expression>)
* element:first
* element:last
* element:odd
* element:even
* element element
* element > element
*
* @class tinymce.ui.Selector
*/
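// A usage sketch (hypothetical control names) of the class defined below:
//
//   var selector = new Selector('toolbar > button.primary:first');
//   selector.match(someControl);  // true/false
//   selector.find(panel);         // tinymce.ui.Collection of matches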
define("tinymce/ui/Selector", [
"tinymce/util/Class",
"tinymce/util/Tools"
], function(Class, Tools) {
"use strict";
/**
* Produces an array with a unique set of objects. It will not compare the values
* but the references of the objects.
*
* @private
* @method unique
* @param {Array} array Array to make into an array with unique items.
* @return {Array} Array with unique items.
*/
function unique(array) {
var uniqueItems = [], i = array.length, item;
while (i--) {
item = array[i];
if (!item.__checked) {
uniqueItems.push(item);
item.__checked = 1;
}
}
i = uniqueItems.length;
while (i--) {
delete uniqueItems[i].__checked;
}
return uniqueItems;
}
var expression = /^([\w\\*]+)?(?:#([\w\\]+))?(?:\.([\w\\\.]+))?(?:\[\@?([\w\\]+)([\^\$\*!~]?=)([\w\\]+)\])?(?:\:(.+))?/i;
/*jshint maxlen:255 */
var chunker = /((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,
whiteSpace = /^\s*|\s*$/g,
Collection;
var Selector = Class.extend({
/**
* Constructs a new Selector instance.
*
* @constructor
* @method init
* @param {String} selector CSS like selector expression.
*/
init: function(selector) {
var match = this.match;
function compileNameFilter(name) {
if (name) {
name = name.toLowerCase();
return function(item) {
return name === '*' || item.type === name;
};
}
}
function compileIdFilter(id) {
if (id) {
return function(item) {
return item._name === id;
};
}
}
function compileClassesFilter(classes) {
if (classes) {
classes = classes.split('.');
return function(item) {
var i = classes.length;
while (i--) {
if (!item.hasClass(classes[i])) {
return false;
}
}
return true;
};
}
}
function compileAttrFilter(name, cmp, check) {
if (name) {
return function(item) {
var value = item[name] ? item[name]() : '';
return !cmp ? !!check :
cmp === "=" ? value === check :
cmp === "*=" ? value.indexOf(check) >= 0 :
cmp === "~=" ? (" " + value + " ").indexOf(" " + check + " ") >= 0 :
cmp === "!=" ? value != check :
cmp === "^=" ? value.indexOf(check) === 0 :
cmp === "$=" ? value.substr(value.length - check.length) === check :
false;
};
}
}
function compilePsuedoFilter(name) {
var notSelectors;
if (name) {
name = /(?:not\((.+)\))|(.+)/i.exec(name);
if (!name[1]) {
name = name[2];
return function(item, index, length) {
return name === 'first' ? index === 0 :
name === 'last' ? index === length - 1 :
name === 'even' ? index % 2 === 0 :
name === 'odd' ? index % 2 === 1 :
item[name] ? item[name]() :
false;
};
} else {
// Compile not expression
notSelectors = parseChunks(name[1], []);
return function(item) {
return !match(item, notSelectors);
};
}
}
}
function compile(selector, filters, direct) {
var parts;
function add(filter) {
if (filter) {
filters.push(filter);
}
}
// Parse expression into parts
parts = expression.exec(selector.replace(whiteSpace, ''));
add(compileNameFilter(parts[1]));
add(compileIdFilter(parts[2]));
add(compileClassesFilter(parts[3]));
add(compileAttrFilter(parts[4], parts[5], parts[6]));
add(compilePsuedoFilter(parts[7]));
// Mark the filter with psuedo for performance
filters.psuedo = !!parts[7];
filters.direct = direct;
return filters;
}
// Parser logic based on Sizzle by John Resig
function parseChunks(selector, selectors) {
var parts = [], extra, matches, i;
do {
chunker.exec("");
matches = chunker.exec(selector);
if (matches) {
selector = matches[3];
parts.push(matches[1]);
if (matches[2]) {
extra = matches[3];
break;
}
}
} while (matches);
if (extra) {
parseChunks(extra, selectors);
}
selector = [];
for (i = 0; i < parts.length; i++) {
if (parts[i] != '>') {
selector.push(compile(parts[i], [], parts[i - 1] === '>'));
}
}
selectors.push(selector);
return selectors;
}
this._selectors = parseChunks(selector, []);
},
/**
* Returns true/false if the selector matches the specified control.
*
* @method match
* @param {tinymce.ui.Control} control Control to match against the selector.
* @param {Array} selectors Optional array of selectors, mostly used internally.
* @return {Boolean} true/false state if the control matches or not.
*/
match: function(control, selectors) {
var i, l, si, sl, selector, fi, fl, filters, index, length, siblings, count, item;
selectors = selectors || this._selectors;
for (i = 0, l = selectors.length; i < l; i++) {
selector = selectors[i];
sl = selector.length;
item = control;
count = 0;
for (si = sl - 1; si >= 0; si--) {
filters = selector[si];
while (item) {
// Find the index and length since a psuedo filter like :first needs it
if (filters.psuedo) {
siblings = item.parent().items();
index = Tools.inArray(item, siblings);
length = siblings.length;
}
for (fi = 0, fl = filters.length; fi < fl; fi++) {
if (!filters[fi](item, index, length)) {
fi = fl + 1;
break;
}
}
if (fi === fl) {
count++;
break;
} else {
// If it didn't match the rightmost expression then
// break since there's no point looking at the parents
if (si === sl - 1) {
break;
}
}
item = item.parent();
}
}
// If we found all selectors then return true otherwise continue looking
if (count === sl) {
return true;
}
}
return false;
},
/**
* Returns a tinymce.ui.Collection with matches of the specified selector inside the specified container.
*
* @method find
* @param {tinymce.ui.Control} container Container to look for items in.
* @return {tinymce.ui.Collection} Collection with matched elements.
*/
find: function(container) {
var matches = [], i, l, selectors = this._selectors;
function collect(items, selector, index) {
var i, l, fi, fl, item, filters = selector[index];
for (i = 0, l = items.length; i < l; i++) {
item = items[i];
// Run each filter against the item
for (fi = 0, fl = filters.length; fi < fl; fi++) {
if (!filters[fi](item, i, l)) {
fi = fl + 1;
break;
}
}
// All filters matched the item
if (fi === fl) {
// Matched item is on the last expression like: panel toolbar [button]
if (index == selector.length - 1) {
matches.push(item);
} else {
// Collect next expression type
if (item.items) {
collect(item.items(), selector, index + 1);
}
}
} else if (filters.direct) {
return;
}
// Collect child items
if (item.items) {
collect(item.items(), selector, index);
}
}
}
if (container.items) {
for (i = 0, l = selectors.length; i < l; i++) {
collect(container.items(), selectors[i], 0);
}
// Unique the matches if needed
if (l > 1) {
matches = unique(matches);
}
}
// Fix for circular reference
if (!Collection) {
// TODO: Fix me!
Collection = Selector.Collection;
}
return new Collection(matches);
}
});
return Selector;
});
|
output.js
function _classCallCheck(instance, Constructor) {
if (!(instance instanceof Constructor)) {
throw new TypeError("Cannot call a class as a function");
}
}
// @Filename: /ns.ts
var ns;
(function(ns1) {
var Class = function Class() {
"use strict";
_classCallCheck(this, Class);
};
ns1.Class = Class;
ns1.Value = "";
var nested1;
(function(nested) {
var NestedClass = function NestedClass() {
"use strict";
_classCallCheck(this, NestedClass);
};
nested.NestedClass = NestedClass;
})(nested1 = ns1.nested || (ns1.nested = {
}));
})(ns || (ns = {
}));
ns.Class; // Error
ns.Value; // Error
var c;
var t = "";
var n = {
a: ''
};
export { };
im_context.rs
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files.git)
// DO NOT EDIT
use crate::InputHints;
use crate::InputPurpose;
use crate::Widget;
use glib::object::Cast;
use glib::object::IsA;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use glib::StaticType;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem;
use std::mem::transmute;
use std::ptr;
glib::wrapper! {
pub struct IMContext(Object<ffi::GtkIMContext, ffi::GtkIMContextClass>);
match fn {
get_type => || ffi::gtk_im_context_get_type(),
}
}
pub const NONE_IM_CONTEXT: Option<&IMContext> = None;
pub trait IMContextExt: 'static {
#[doc(alias = "gtk_im_context_delete_surrounding")]
fn delete_surrounding(&self, offset: i32, n_chars: i32) -> bool;
#[doc(alias = "gtk_im_context_filter_key")]
fn filter_key(
&self,
press: bool,
surface: &gdk::Surface,
device: &gdk::Device,
time: u32,
keycode: u32,
state: gdk::ModifierType,
group: i32,
) -> bool;
#[doc(alias = "gtk_im_context_focus_in")]
fn focus_in(&self);
#[doc(alias = "gtk_im_context_focus_out")]
fn focus_out(&self);
#[doc(alias = "gtk_im_context_get_preedit_string")]
fn get_preedit_string(&self) -> (glib::GString, pango::AttrList, i32);
#[cfg_attr(feature = "v4_2", deprecated)]
#[doc(alias = "gtk_im_context_get_surrounding")]
fn get_surrounding(&self) -> Option<(glib::GString, i32)>;
#[cfg(any(feature = "v4_2", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v4_2")))]
#[doc(alias = "gtk_im_context_get_surrounding_with_selection")]
fn get_surrounding_with_selection(&self) -> Option<(glib::GString, i32, i32)>;
#[doc(alias = "gtk_im_context_reset")]
fn reset(&self);
#[doc(alias = "gtk_im_context_set_client_widget")]
fn set_client_widget<P: IsA<Widget>>(&self, widget: Option<&P>);
#[doc(alias = "gtk_im_context_set_cursor_location")]
fn set_cursor_location(&self, area: &gdk::Rectangle);
#[cfg_attr(feature = "v4_2", deprecated)]
#[doc(alias = "gtk_im_context_set_surrounding")]
fn set_surrounding(&self, text: &str, cursor_index: i32);
#[cfg(any(feature = "v4_2", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v4_2")))]
#[doc(alias = "gtk_im_context_set_surrounding_with_selection")]
fn set_surrounding_with_selection(&self, text: &str, cursor_index: i32, anchor_index: i32);
#[doc(alias = "gtk_im_context_set_use_preedit")]
fn set_use_preedit(&self, use_preedit: bool);
fn get_property_input_hints(&self) -> InputHints;
fn set_property_input_hints(&self, input_hints: InputHints);
fn get_property_input_purpose(&self) -> InputPurpose;
fn set_property_input_purpose(&self, input_purpose: InputPurpose);
fn connect_commit<F: Fn(&Self, &str) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_delete_surrounding<F: Fn(&Self, i32, i32) -> bool + 'static>(
&self,
f: F,
) -> SignalHandlerId;
fn connect_preedit_changed<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_preedit_end<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_preedit_start<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_retrieve_surrounding<F: Fn(&Self) -> bool + 'static>(&self, f: F)
-> SignalHandlerId;
fn connect_property_input_hints_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_input_purpose_notify<F: Fn(&Self) + 'static>(
&self,
f: F,
) -> SignalHandlerId;
}
impl<O: IsA<IMContext>> IMContextExt for O {
fn delete_surrounding(&self, offset: i32, n_chars: i32) -> bool {
unsafe {
from_glib(ffi::gtk_im_context_delete_surrounding(
self.as_ref().to_glib_none().0,
offset,
n_chars,
))
}
}
fn filter_key(
&self,
press: bool,
surface: &gdk::Surface,
device: &gdk::Device,
time: u32,
keycode: u32,
state: gdk::ModifierType,
group: i32,
) -> bool {
unsafe {
from_glib(ffi::gtk_im_context_filter_key(
self.as_ref().to_glib_none().0,
press.to_glib(),
surface.to_glib_none().0,
device.to_glib_none().0,
time,
keycode,
state.to_glib(),
group,
))
}
}
fn focus_in(&self) {
unsafe {
ffi::gtk_im_context_focus_in(self.as_ref().to_glib_none().0);
}
}
fn focus_out(&self) {
unsafe {
ffi::gtk_im_context_focus_out(self.as_ref().to_glib_none().0);
}
}
fn get_preedit_string(&self) -> (glib::GString, pango::AttrList, i32) {
unsafe {
let mut str = ptr::null_mut();
let mut attrs = ptr::null_mut();
let mut cursor_pos = mem::MaybeUninit::uninit();
ffi::gtk_im_context_get_preedit_string(
self.as_ref().to_glib_none().0,
&mut str,
&mut attrs,
cursor_pos.as_mut_ptr(),
);
let cursor_pos = cursor_pos.assume_init();
(from_glib_full(str), from_glib_full(attrs), cursor_pos)
}
}
fn get_surrounding(&self) -> Option<(glib::GString, i32)> {
unsafe {
let mut text = ptr::null_mut();
let mut cursor_index = mem::MaybeUninit::uninit();
let ret = from_glib(ffi::gtk_im_context_get_surrounding(
self.as_ref().to_glib_none().0,
&mut text,
cursor_index.as_mut_ptr(),
));
let cursor_index = cursor_index.assume_init();
if ret {
Some((from_glib_full(text), cursor_index))
} else {
None
}
}
}
#[cfg(any(feature = "v4_2", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v4_2")))]
fn get_surrounding_with_selection(&self) -> Option<(glib::GString, i32, i32)> {
unsafe {
let mut text = ptr::null_mut();
let mut cursor_index = mem::MaybeUninit::uninit();
let mut anchor_index = mem::MaybeUninit::uninit();
let ret = from_glib(ffi::gtk_im_context_get_surrounding_with_selection(
self.as_ref().to_glib_none().0,
&mut text,
cursor_index.as_mut_ptr(),
anchor_index.as_mut_ptr(),
));
let cursor_index = cursor_index.assume_init();
let anchor_index = anchor_index.assume_init();
if ret {
Some((from_glib_full(text), cursor_index, anchor_index))
} else {
None
}
}
}
fn reset(&self) {
unsafe {
ffi::gtk_im_context_reset(self.as_ref().to_glib_none().0);
}
}
fn set_client_widget<P: IsA<Widget>>(&self, widget: Option<&P>) {
unsafe {
ffi::gtk_im_context_set_client_widget(
self.as_ref().to_glib_none().0,
widget.map(|p| p.as_ref()).to_glib_none().0,
);
}
}
fn set_cursor_location(&self, area: &gdk::Rectangle) {
unsafe {
ffi::gtk_im_context_set_cursor_location(
self.as_ref().to_glib_none().0,
area.to_glib_none().0,
);
}
}
fn set_surrounding(&self, text: &str, cursor_index: i32) {
let len = text.len() as i32;
unsafe {
ffi::gtk_im_context_set_surrounding(
self.as_ref().to_glib_none().0,
text.to_glib_none().0,
len,
cursor_index,
);
}
}
#[cfg(any(feature = "v4_2", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v4_2")))]
fn set_surrounding_with_selection(&self, text: &str, cursor_index: i32, anchor_index: i32) {
let len = text.len() as i32;
unsafe {
ffi::gtk_im_context_set_surrounding_with_selection(
self.as_ref().to_glib_none().0,
text.to_glib_none().0,
len,
cursor_index,
anchor_index,
);
}
}
fn set_use_preedit(&self, use_preedit: bool) {
unsafe {
ffi::gtk_im_context_set_use_preedit(
self.as_ref().to_glib_none().0,
use_preedit.to_glib(),
);
}
}
fn get_property_input_hints(&self) -> InputHints {
unsafe {
let mut value = glib::Value::from_type(<InputHints as StaticType>::static_type());
glib::gobject_ffi::g_object_get_property(
self.to_glib_none().0 as *mut glib::gobject_ffi::GObject,
b"input-hints\0".as_ptr() as *const _,
value.to_glib_none_mut().0,
);
value
.get()
.expect("Return Value for property `input-hints` getter")
.unwrap()
}
}
fn set_property_input_hints(&self, input_hints: InputHints) {
unsafe {
glib::gobject_ffi::g_object_set_property(
self.to_glib_none().0 as *mut glib::gobject_ffi::GObject,
b"input-hints\0".as_ptr() as *const _,
glib::Value::from(&input_hints).to_glib_none().0,
);
}
}
fn get_property_input_purpose(&self) -> InputPurpose {
unsafe {
let mut value = glib::Value::from_type(<InputPurpose as StaticType>::static_type());
glib::gobject_ffi::g_object_get_property(
self.to_glib_none().0 as *mut glib::gobject_ffi::GObject,
b"input-purpose\0".as_ptr() as *const _,
value.to_glib_none_mut().0,
);
value
.get()
.expect("Return Value for property `input-purpose` getter")
.unwrap()
}
}
fn set_property_input_purpose(&self, input_purpose: InputPurpose) {
unsafe {
glib::gobject_ffi::g_object_set_property(
self.to_glib_none().0 as *mut glib::gobject_ffi::GObject,
b"input-purpose\0".as_ptr() as *const _,
glib::Value::from(&input_purpose).to_glib_none().0,
);
}
}
fn connect_commit<F: Fn(&Self, &str) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn commit_trampoline<P, F: Fn(&P, &str) + 'static>(
this: *mut ffi::GtkIMContext,
str: *mut libc::c_char,
f: glib::ffi::gpointer,
) where
P: IsA<IMContext>,
{
let f: &F = &*(f as *const F);
f(
&IMContext::from_glib_borrow(this).unsafe_cast_ref(),
&glib::GString::from_glib_borrow(str),
)
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"commit\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
commit_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_delete_surrounding<F: Fn(&Self, i32, i32) -> bool + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn delete_surrounding_trampoline<
P,
F: Fn(&P, i32, i32) -> bool + 'static,
>(
this: *mut ffi::GtkIMContext,
offset: libc::c_int,
n_chars: libc::c_int,
f: glib::ffi::gpointer,
) -> glib::ffi::gboolean
where
P: IsA<IMContext>,
{
let f: &F = &*(f as *const F);
f(
&IMContext::from_glib_borrow(this).unsafe_cast_ref(),
offset,
n_chars,
)
.to_glib()
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"delete-surrounding\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
delete_surrounding_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_preedit_changed<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn preedit_changed_trampoline<P, F: Fn(&P) + 'static>(
this: *mut ffi::GtkIMContext,
f: glib::ffi::gpointer,
) where
P: IsA<IMContext>,
{
let f: &F = &*(f as *const F);
f(&IMContext::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"preedit-changed\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
preedit_changed_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_preedit_end<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn preedit_end_trampoline<P, F: Fn(&P) + 'static>(
this: *mut ffi::GtkIMContext,
f: glib::ffi::gpointer,
) where
P: IsA<IMContext>,
{
let f: &F = &*(f as *const F);
f(&IMContext::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"preedit-end\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
preedit_end_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_preedit_start<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn preedit_start_trampoline<P, F: Fn(&P) + 'static>(
this: *mut ffi::GtkIMContext,
f: glib::ffi::gpointer,
) where
P: IsA<IMContext>,
{
let f: &F = &*(f as *const F);
f(&IMContext::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"preedit-start\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
preedit_start_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_retrieve_surrounding<F: Fn(&Self) -> bool + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn retrieve_surrounding_trampoline<P, F: Fn(&P) -> bool + 'static>(
this: *mut ffi::GtkIMContext,
f: glib::ffi::gpointer,
) -> glib::ffi::gboolean
where
P: IsA<IMContext>,
{
let f: &F = &*(f as *const F);
f(&IMContext::from_glib_borrow(this).unsafe_cast_ref()).to_glib()
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"retrieve-surrounding\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
retrieve_surrounding_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_property_input_hints_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_input_hints_trampoline<P, F: Fn(&P) + 'static>(
this: *mut ffi::GtkIMContext,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) where
P: IsA<IMContext>,
{
let f: &F = &*(f as *const F);
f(&IMContext::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::input-hints\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_input_hints_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_property_input_purpose_notify<F: Fn(&Self) + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn notify_input_purpose_trampoline<P, F: Fn(&P) + 'static>(
this: *mut ffi::GtkIMContext,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) where
P: IsA<IMContext>,
{
let f: &F = &*(f as *const F);
f(&IMContext::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::input-purpose\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_input_purpose_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
}
impl fmt::Display for IMContext {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("IMContext")
}
}
networkproxy.go
package networkproxy
import (
"bytes"
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"strings"
"syscall"
"time"
sf "github.com/estuary/flow/go/network-proxy/sshforwarding"
)
const ProgramName = "network-proxy-service"
func SupportedNetworkProxyTypes() []string {
return []string{"sshForwarding"}
}
type NetworkProxyConfig struct {
ProxyType string `json:"proxyType"`
SshForwardingConfig sf.SshForwardingConfig `json:"sshForwarding"`
}
// GetFieldDocString implements the jsonschema.customSchemaGetFieldDocString interface.
func (NetworkProxyConfig) GetFieldDocString(fieldName string) string {
switch fieldName {
case "ProxyType":
return fmt.Sprintf("The type of the network proxy. Supported types are: ( %s )", strings.Join(SupportedNetworkProxyTypes(), ", "))
case "SshForwardingConfig":
return "Config for proxy of type sshForwarding"
default:
return ""
}
}
func (npc *NetworkProxyConfig) Validate() error {
if npc == nil {
return nil
}
var supported = false
for _, t := range SupportedNetworkProxyTypes() {
if t == npc.ProxyType {
supported = true
break
}
}
if !supported {
return fmt.Errorf("Unsupported proxy type: %s. Valid values are: ( %s ).", npc.ProxyType, strings.Join(SupportedNetworkProxyTypes(), ", "))
}
switch npc.ProxyType {
case "sshForwarding":
return npc.SshForwardingConfig.Validate()
default:
panic(fmt.Sprintf("Implementation of validating %s is not ready.", npc.ProxyType))
}
}
func (npc *NetworkProxyConfig) MarshalJSON() ([]byte, error) {
var m = make(map[string]interface{})
switch npc.ProxyType {
case "sshForwarding":
m[npc.ProxyType] = npc.SshForwardingConfig
default:
panic(fmt.Sprintf("Implementation of MarshalJSON for %s is missing.", npc.ProxyType))
}
return json.Marshal(m)
}
const defaultTimeoutSecs = 5
func (npc *NetworkProxyConfig) Start() error {
return npc.startInternal(defaultTimeoutSecs, os.Stderr)
}
func (npc *NetworkProxyConfig) startInternal(timeoutSecs uint16, stderr io.Writer) error {
if npc == nil {
// NetworkProxyConfig is not set.
return nil
}
var cmd = exec.Command(ProgramName)
cmd.SysProcAttr = &syscall.SysProcAttr{Pdeathsig: syscall.SIGTERM}
var readyCh = make(chan error)
cmd.Stdout = &readyWriter{delegate: os.Stdout, ch: readyCh}
cmd.Stderr = stderr
if err := npc.sendInput(cmd); err != nil {
return fmt.Errorf("sending input to service: %w", err)
} else if err := cmd.Start(); err != nil {
return fmt.Errorf("starting ssh forwarding service: %w", err)
}
select {
case err := <-readyCh:
if err != nil {
return fmt.Errorf(
"network proxy service error: %w",
err,
)
}
return nil
case <-time.After(time.Duration(timeoutSecs) * time.Second):
if cmd.Process != nil {
cmd.Process.Signal(syscall.SIGTERM)
}
return fmt.Errorf("network proxy service failed to be ready after waiting for long enough")
}
}
func (npc *NetworkProxyConfig) sendInput(cmd *exec.Cmd) error {
stdin, err := cmd.StdinPipe()
if err != nil {
return fmt.Errorf("getting stdin pipe: %w", err)
}
input, err := json.Marshal(npc)
if err != nil {
return fmt.Errorf("marshal input: %w", err)
}
go func() {
if _, err := stdin.Write(input); err != nil {
panic("Failed to send input to network-proxy-service binary.")
}
stdin.Close()
}()
return nil
}
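// readyWriter intercepts the subprocess's first stdout write: a leading
// "READY\n" signals success (the prefix is stripped), anything else reports
// an error on ch, which unblocks the select in startInternal exactly once.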
type readyWriter struct {
delegate io.Writer
ch chan error
}
func (w *readyWriter) Write(p []byte) (int, error) {
if w.ch == nil {
return w.delegate.Write(p) // Common case.
}
defer func() {
close(w.ch)
w.ch = nil
}()
if bytes.HasPrefix(p, []byte("READY\n")) {
var n, err = w.delegate.Write(p[6:])
n += 6
return n, err
} else {
w.ch <- fmt.Errorf("did not read READY from subprocess")
return w.delegate.Write(p)
}
}
index.js
import assert from 'assert'
export default function (state) {
const { document, selection } = state
const texts = document.getTexts()
const first = texts.first()
const range = selection.merge({
anchorKey: first.key,
anchorOffset: first.length - 1,
focusKey: first.key,
focusOffset: first.length
})
const next = state
.transform()
.select(range)
.toggleMark('bold')
.apply()
assert.deepEqual(next.selection.toJS(), range.toJS())
return next
}
gpt.py
"""
Integration of the pytorch_transformers openai and gpt2 modules.
Note that these objects are only to be used to load
pretrained models. The pytorch-transformers library
wasn't designed to train these models from scratch.
"""
import pytorch_transformers as pt
from flambe.nlp.transformers.utils import TransformerTextField, TransformerEmbedder
class GPTTextField(TransformerTextField):
"""Integrate the pytorch_transformers OpenAIGPTTokenizer.
Currently available aliases:
. `openai-gpt`
"""
_cls = pt.OpenAIGPTTokenizer
class GPTEmbedder(TransformerEmbedder):
"""Integrate the pytorch_transformers OpenAIGPTmodel.
Currently available aliases:
. `openai-gpt`
"""
_cls = pt.OpenAIGPTModel
class GPT2TextField(TransformerTextField):
"""Integrate the pytorch_transformers GPT2Tokenizer.
Currently available aliases:
. `gpt2`
. `gpt2-medium`
. `gpt2-large`
"""
_cls = pt.GPT2Tokenizer
class GPT2Embedder(TransformerEmbedder):
"""Integrate the pytorch_transformers GPT2Model.
Currently available aliases:
. `gpt2`
. `gpt2-medium`
. `gpt2-large`
"""
_cls = pt.GPT2Model
zero_order_properties.py
###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
"""
This module contains the general purpose property package for zero-order
unit models. Zero-order models do not track temperature and pressure, or any
form of energy flow.
"""
from idaes.core import (EnergyBalanceType,
MaterialBalanceType,
MaterialFlowBasis,
PhysicalParameterBlock,
StateBlock,
StateBlockData,
declare_process_block_class)
from idaes.core.components import Solvent, Solute
from idaes.core.phases import LiquidPhase
from idaes.core.util.misc import add_object_reference
from idaes.core.util.initialization import fix_state_vars, revert_state_vars
import idaes.logger as idaeslog
import idaes.core.util.scaling as iscale
from idaes.core.util.exceptions import ConfigurationError
from pyomo.environ import (Expression,
Param,
PositiveReals,
units as pyunits,
Var)
from pyomo.common.config import ConfigValue
# Some more information about this module
__author__ = "Andrew Lee"
# Set up logger
_log = idaeslog.getLogger(__name__)
@declare_process_block_class("WaterParameterBlock")
class WaterParameterBlockData(PhysicalParameterBlock):
"""
Property Parameter Block Class
Defines component and phase lists, along with base units and constant
parameters.
"""
CONFIG = PhysicalParameterBlock.CONFIG()
CONFIG.declare('database', ConfigValue(
description='An instance of a WaterTAP Database to use for parameters.'
))
CONFIG.declare('water_source', ConfigValue(
description=
'Water source to use when looking up parameters from database.'))
CONFIG.declare("solute_list", ConfigValue(
domain=list,
description="List of solute species of interest. If None, will use "
"all species defined in the water_source provided."))
def build(self):
'''
Callable method for Block construction.
'''
super().build()
self._state_block_class = WaterStateBlock
self.Liq = LiquidPhase()
self.H2O = Solvent()
# Get component set from database if provided
comp_set = None
if self.config.database is not None:
comp_set = self.config.database.get_solute_set(
self.config.water_source)
# Check definition of solute list
solute_list = self.config.solute_list
if solute_list is None:
# No user-provided solute list, look up list from database
if comp_set is None:
# No solute list in database and none provided.
raise ConfigurationError(
f"{self.name} no solute_list or database was defined. "
f"Users must provide at least one of these arguments.")
else:
solute_list = comp_set
elif self.config.database is not None:
# User provided custom list and database - check that all
# components are supported
for j in solute_list:
if j not in comp_set:
_log.info(f"{self.name} component {j} is not defined in "
f"the water_sources database file.")
else:
# User provided list but no database - assume they know what they
# are doing
pass
for j in solute_list:
self.add_component(str(j), Solute())
# Define default value for mass density of solution
self.dens_mass_default = 1000*pyunits.kg/pyunits.m**3
# Define default value for dynamic viscosity of solution
self.visc_d_default = 0.001*pyunits.kg/pyunits.m/pyunits.s
# ---------------------------------------------------------------------
# Set default scaling factors
self.default_scaling_factor = {
("flow_vol"): 1e3,
("conc_mass_comp"): 1e2}
@classmethod
def define_metadata(cls, obj):
obj.add_default_units({
'time': pyunits.s,
'length': pyunits.m,
'mass': pyunits.kg,
'amount': pyunits.mol,
'temperature': pyunits.K,
})
obj.add_properties(
{'flow_mass_comp': {'method': None},
'flow_vol': {'method': '_flow_vol'},
'conc_mass_comp': {'method': '_conc_mass_comp'},
'dens_mass': {'method': '_dens_mass'},
'visc_d': {'method': '_visc_d'}})
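# Illustrative sketch only: attaching this parameter block to a flowsheet with
# a user-supplied solute list. `ConcreteModel` and `FlowsheetBlock` are assumed
# to come from Pyomo and IDAES respectively; they are not part of this module.
#
#     m = ConcreteModel()
#     m.fs = FlowsheetBlock(default={"dynamic": False})
#     m.fs.params = WaterParameterBlock(default={"solute_list": ["tds", "tss"]})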
class _WaterStateBlock(StateBlock):
"""
This Class contains methods which should be applied to Property Blocks as a
whole, rather than individual elements of indexed Property Blocks.
"""
def initialize(blk,
state_args=None,
state_vars_fixed=False,
hold_state=False,
outlvl=idaeslog.NOTSET,
solver=None,
optarg=None):
'''
Initialization routine for property package.
Keyword Arguments:
state_args : Dictionary with initial guesses for the state vars
chosen. Note that if this method is triggered
through the control volume, and if initial guesses
                        were not provided at the unit model level, the
control volume passes the inlet values as initial
                        guess. The keys for the state_args dictionary are:
flow_mol_comp : value at which to initialize component
flows (default=None)
pressure : value at which to initialize pressure
(default=None)
temperature : value at which to initialize temperature
(default=None)
outlvl : sets output level of initialization routine
state_vars_fixed: Flag to denote if state vars have already been
fixed.
- True - states have already been fixed and
initialization does not need to worry
about fixing and unfixing variables.
- False - states have not been fixed. The state
block will deal with fixing/unfixing.
optarg : solver options dictionary object (default=None, use
default solver options)
solver : str indicating which solver to use during
initialization (default = None, use default solver)
hold_state : flag indicating whether the initialization routine
should unfix any state variables fixed during
initialization (default=False).
                     - True - state variables are not unfixed, and
                              a dict is returned containing flags for
which states were fixed during
initialization.
- False - state variables are unfixed after
initialization by calling the
                     release_state method
Returns:
            If hold_state is True, returns a dict containing flags for
which states were fixed during initialization.
'''
        # For now, there are no constraints in the property package, so only
# fix state variables if required
init_log = idaeslog.getInitLogger(blk.name, outlvl, tag="properties")
init_log.info('Initialization Complete.')
if hold_state is True:
flags = fix_state_vars(blk, state_args)
return flags
else:
return
def release_state(blk, flags, outlvl=idaeslog.NOTSET):
'''
Method to release state variables fixed during initialization.
Keyword Arguments:
flags : dict containing information of which state variables
were fixed during initialization, and should now be
unfixed. This dict is returned by initialize if
hold_state=True.
            outlvl : sets output level of logging
'''
init_log = idaeslog.getInitLogger(blk.name, outlvl, tag="properties")
if flags is None:
return
# Unfix state variables
revert_state_vars(blk, flags)
init_log.info('State Released.')
@declare_process_block_class("WaterStateBlock",
block_class=_WaterStateBlock)
class WaterStateBlockData(StateBlockData):
"""
General purpose StateBlock for Zero-Order unit models.
"""
def build(self):
super().build()
# Create state variables
self.flow_mass_comp = Var(self.component_list,
initialize=1,
domain=PositiveReals,
doc='Mass flowrate of each component',
units=pyunits.kg/pyunits.s)
# -------------------------------------------------------------------------
# Other properties
def _conc_mass_comp(self):
def rule_cmc(blk, j):
return (blk.flow_mass_comp[j] /
                    sum(blk.flow_mass_comp[k] for k in blk.component_list) *
blk.dens_mass)
self.conc_mass_comp = Expression(self.component_list,
rule=rule_cmc)
def _dens_mass(self):
self.dens_mass = Param(initialize=self.params.dens_mass_default,
units=pyunits.kg/pyunits.m**3,
mutable=True,
doc="Mass density of flow")
def _flow_vol(self):
self.flow_vol = Expression(
expr=sum(self.flow_mass_comp[j] for j in self.component_list) /
self.dens_mass)
def _visc_d(self):
self.visc_d = Param(initialize=self.params.visc_d_default,
units=pyunits.kg/pyunits.m/pyunits.s,
mutable=True,
doc="Dynamic viscosity of solution")
def get_material_flow_terms(blk, p, j):
return blk.flow_mass_comp[j]
def get_enthalpy_flow_terms(blk, p):
raise NotImplementedError
def get_material_density_terms(blk, p, j):
return blk.conc_mass_comp[j]
def get_energy_density_terms(blk, p):
raise NotImplementedError
def default_material_balance_type(self):
return MaterialBalanceType.componentTotal
def default_energy_balance_type(self):
|
def define_state_vars(blk):
return {"flow_mass_comp": blk.flow_mass_comp}
def define_display_vars(blk):
return {"Volumetric Flowrate": blk.flow_vol,
"Mass Concentration": blk.conc_mass_comp}
def get_material_flow_basis(blk):
return MaterialFlowBasis.mass
def calculate_scaling_factors(self):
# Get default scale factors and do calculations from base classes
super().calculate_scaling_factors()
d_sf_Q = self.params.default_scaling_factor["flow_vol"]
d_sf_c = self.params.default_scaling_factor["conc_mass_comp"]
for j, v in self.flow_mass_comp.items():
if iscale.get_scaling_factor(v) is None:
iscale.set_scaling_factor(v, d_sf_Q*d_sf_c)
if self.is_property_constructed("flow_vol"):
if iscale.get_scaling_factor(self.flow_vol) is None:
iscale.set_scaling_factor(self.flow_vol, d_sf_Q)
if self.is_property_constructed("conc_mass_comp"):
for j, v in self.conc_mass_comp.items():
sf_c = iscale.get_scaling_factor(self.conc_mass_comp[j])
if sf_c is None:
try:
sf_c = self.params.default_scaling_factor[
("conc_mass_comp", j)]
except KeyError:
sf_c = d_sf_c
iscale.set_scaling_factor(self.conc_mass_comp[j], sf_c)
|
return EnergyBalanceType.none
|
mod.rs
|
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::CMD {
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
}
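// Illustrative usage sketch (assumes `cmd` is an instance of the CMD
// register): the closure receives a reset-value `W`, and the field proxies
// chain, so several fields can be set in a single write.
//
//     cmd.write(|w| w.rxen().set_bit().txen().set_bit());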
#[doc = r" Proxy"]
pub struct _RXENW<'a> {
w: &'a mut W,
}
impl<'a> _RXENW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _RXDISW<'a> {
w: &'a mut W,
}
impl<'a> _RXDISW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 1;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _TXENW<'a> {
w: &'a mut W,
}
impl<'a> _TXENW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 2;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _TXDISW<'a> {
w: &'a mut W,
}
impl<'a> _TXDISW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 3;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _MASTERENW<'a> {
w: &'a mut W,
}
impl<'a> _MASTERENW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 4;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _MASTERDISW<'a> {
w: &'a mut W,
}
impl<'a> _MASTERDISW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 5;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _RXBLOCKENW<'a> {
w: &'a mut W,
}
impl<'a> _RXBLOCKENW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 6;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _RXBLOCKDISW<'a> {
w: &'a mut W,
}
impl<'a> _RXBLOCKDISW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 7;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _TXTRIENW<'a> {
w: &'a mut W,
}
impl<'a> _TXTRIENW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 8;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _TXTRIDISW<'a> {
w: &'a mut W,
}
impl<'a> _TXTRIDISW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 9;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CLEARTXW<'a> {
w: &'a mut W,
}
impl<'a> _CLEARTXW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 10;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CLEARRXW<'a> {
w: &'a mut W,
}
impl<'a> _CLEARRXW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 11;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bit 0 - Receiver Enable"]
#[inline]
pub fn rxen(&mut self) -> _RXENW {
_RXENW { w: self }
}
#[doc = "Bit 1 - Receiver Disable"]
#[inline]
pub fn rxdis(&mut self) -> _RXDISW {
_RXDISW { w: self }
}
#[doc = "Bit 2 - Transmitter Enable"]
#[inline]
pub fn txen(&mut self) -> _TXENW {
_TXENW { w: self }
}
#[doc = "Bit 3 - Transmitter Disable"]
#[inline]
pub fn txdis(&mut self) -> _TXDISW {
_TXDISW { w: self }
}
#[doc = "Bit 4 - Master Enable"]
#[inline]
pub fn masteren(&mut self) -> _MASTERENW {
_MASTERENW { w: self }
}
#[doc = "Bit 5 - Master Disable"]
#[inline]
pub fn masterdis(&mut self) -> _MASTERDISW {
_MASTERDISW { w: self }
}
#[doc = "Bit 6 - Receiver Block Enable"]
#[inline]
pub fn rxblocken(&mut self) -> _RXBLOCKENW {
_RXBLOCKENW { w: self }
}
#[doc = "Bit 7 - Receiver Block Disable"]
#[inline]
pub fn rxblockdis(&mut self) -> _RXBLOCKDISW {
_RXBLOCKDISW { w: self }
|
pub fn txtrien(&mut self) -> _TXTRIENW {
_TXTRIENW { w: self }
}
#[doc = "Bit 9 - Transmitter Tristate Disable"]
#[inline]
pub fn txtridis(&mut self) -> _TXTRIDISW {
_TXTRIDISW { w: self }
}
#[doc = "Bit 10 - Clear TX"]
#[inline]
pub fn cleartx(&mut self) -> _CLEARTXW {
_CLEARTXW { w: self }
}
#[doc = "Bit 11 - Clear RX"]
#[inline]
pub fn clearrx(&mut self) -> _CLEARRXW {
_CLEARRXW { w: self }
}
}
|
}
#[doc = "Bit 8 - Transmitter Tristate Enable"]
#[inline]
|
interface.go
|
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
|
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was automatically generated by informer-gen
package cluster
import (
v1alpha1 "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1"
internalinterfaces "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces"
)
// Interface provides access to each of this group's versions.
type Interface interface {
// V1alpha1 provides access to shared informers for resources in V1alpha1.
V1alpha1() v1alpha1.Interface
}
type group struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// V1alpha1 returns a new v1alpha1.Interface.
func (g *group) V1alpha1() v1alpha1.Interface {
return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
}
| |
fake.go
|
/*
Copyright 2021 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package fake
import (
context "context"
filtered "knative.dev/net-gateway-api/pkg/client/gatewayapi/injection/informers/apis/v1alpha1/gatewayclass/filtered"
factoryfiltered "knative.dev/net-gateway-api/pkg/client/gatewayapi/injection/informers/factory/filtered"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
var Get = filtered.Get
func init()
|
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(factoryfiltered.LabelKey{})
if untyped == nil {
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := factoryfiltered.Get(ctx, selector)
inf := f.Networking().V1alpha1().GatewayClasses()
ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}
|
{
injection.Fake.RegisterFilteredInformers(withInformer)
}
|
alert_controller.go
|
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
kerrors "k8s.io/apimachinery/pkg/util/errors"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/fluxcd/pkg/apis/meta"
"github.com/fluxcd/pkg/runtime/conditions"
helper "github.com/fluxcd/pkg/runtime/controller"
"github.com/fluxcd/pkg/runtime/patch"
"github.com/fluxcd/pkg/runtime/predicates"
kuberecorder "k8s.io/client-go/tools/record"
"github.com/fluxcd/notification-controller/api/v1beta1"
)
var (
ProviderIndexKey = ".metadata.provider"
)
// AlertReconciler reconciles an Alert object
type AlertReconciler struct {
client.Client
helper.Metrics
kuberecorder.EventRecorder
Scheme *runtime.Scheme
}
type AlertReconcilerOptions struct {
MaxConcurrentReconciles int
}
func (r *AlertReconciler) SetupWithManager(mgr ctrl.Manager) error {
return r.SetupWithManagerAndOptions(mgr, AlertReconcilerOptions{})
}
func (r *AlertReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts AlertReconcilerOptions) error {
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &v1beta1.Alert{}, ProviderIndexKey,
func(o client.Object) []string {
alert := o.(*v1beta1.Alert)
return []string{
fmt.Sprintf("%s/%s", alert.GetNamespace(), alert.Spec.ProviderRef.Name),
}
}); err != nil {
return err
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1beta1.Alert{}).
WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{})).
Watches(
&source.Kind{Type: &v1beta1.Provider{}},
handler.EnqueueRequestsFromMapFunc(r.requestsForProviderChange),
builder.WithPredicates(predicate.GenerationChangedPredicate{}),
).
WithOptions(controller.Options{MaxConcurrentReconciles: opts.MaxConcurrentReconciles}).
Complete(r)
}
// +kubebuilder:rbac:groups=notification.toolkit.fluxcd.io,resources=alerts,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=notification.toolkit.fluxcd.io,resources=alerts/status,verbs=get;update;patch
func (r *AlertReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) {
start := time.Now()
log := ctrl.LoggerFrom(ctx)
alert := &v1beta1.Alert{}
if err := r.Get(ctx, req.NamespacedName, alert); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// record suspension metrics
r.RecordSuspend(ctx, alert, alert.Spec.Suspend)
|
return ctrl.Result{}, nil
}
patchHelper, err := patch.NewHelper(alert, r.Client)
if err != nil {
return ctrl.Result{}, err
}
defer func() {
patchOpts := []patch.Option{
patch.WithOwnedConditions{
Conditions: []string{
meta.ReadyCondition,
meta.ReconcilingCondition,
meta.StalledCondition,
},
},
}
if retErr == nil && (result.IsZero() || !result.Requeue) {
conditions.Delete(alert, meta.ReconcilingCondition)
patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
readyCondition := conditions.Get(alert, meta.ReadyCondition)
switch readyCondition.Status {
case metav1.ConditionFalse:
// As we are no longer reconciling and the end-state is not ready, the reconciliation has stalled
conditions.MarkStalled(alert, readyCondition.Reason, readyCondition.Message)
case metav1.ConditionTrue:
// As we are no longer reconciling and the end-state is ready, the reconciliation is no longer stalled
conditions.Delete(alert, meta.StalledCondition)
}
}
if err := patchHelper.Patch(ctx, alert, patchOpts...); err != nil {
retErr = kerrors.NewAggregate([]error{retErr, err})
}
r.Metrics.RecordReadiness(ctx, alert)
r.Metrics.RecordDuration(ctx, alert, start)
}()
return r.reconcile(ctx, alert)
}
func (r *AlertReconciler) reconcile(ctx context.Context, alert *v1beta1.Alert) (ctrl.Result, error) {
// Mark the resource as under reconciliation
conditions.MarkReconciling(alert, meta.ProgressingReason, "")
// validate alert spec and provider
if err := r.validate(ctx, alert); err != nil {
conditions.MarkFalse(alert, meta.ReadyCondition, v1beta1.ValidationFailedReason, err.Error())
return ctrl.Result{}, client.IgnoreNotFound(err)
}
conditions.MarkTrue(alert, meta.ReadyCondition, meta.SucceededReason, v1beta1.InitializedReason)
ctrl.LoggerFrom(ctx).Info("Alert initialized")
return ctrl.Result{}, nil
}
func (r *AlertReconciler) validate(ctx context.Context, alert *v1beta1.Alert) error {
provider := &v1beta1.Provider{}
providerName := types.NamespacedName{Namespace: alert.Namespace, Name: alert.Spec.ProviderRef.Name}
if err := r.Get(ctx, providerName, provider); err != nil {
// log not found errors since they get filtered out
ctrl.LoggerFrom(ctx).Error(err, "failed to get provider %s", providerName.String())
return fmt.Errorf("failed to get provider '%s': %w", providerName.String(), err)
}
if !conditions.IsReady(provider) {
return fmt.Errorf("provider %s is not ready", providerName.String())
}
return nil
}
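// requestsForProviderChange maps a changed Provider to reconcile requests for
// every Alert that references it, looked up via the ProviderIndexKey field index.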
func (r *AlertReconciler) requestsForProviderChange(o client.Object) []reconcile.Request {
provider, ok := o.(*v1beta1.Provider)
if !ok {
panic(fmt.Errorf("expected a provider, got %T", o))
}
ctx := context.Background()
var list v1beta1.AlertList
if err := r.List(ctx, &list, client.MatchingFields{
ProviderIndexKey: client.ObjectKeyFromObject(provider).String(),
}); err != nil {
return nil
}
var reqs []reconcile.Request
for _, i := range list.Items {
reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)})
}
return reqs
}
|
if alert.Spec.Suspend {
log.Info("Reconciliation is suspended for this object")
|
cmddiff_test.go
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmddiff_test
import (
"os"
"path/filepath"
"testing"
"github.com/GoogleContainerTools/kpt/internal/cmddiff"
"github.com/GoogleContainerTools/kpt/internal/cmdget"
"github.com/GoogleContainerTools/kpt/internal/printer/fake"
"github.com/GoogleContainerTools/kpt/internal/testutil"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
)
func TestMain(m *testing.M) {
os.Exit(testutil.ConfigureTestKptCache(m))
}
func TestCmdInvalidDiffType(t *testing.T) {
runner := cmddiff.NewRunner(fake.CtxWithDefaultPrinter(), "")
runner.C.SetArgs([]string{"--diff-type", "invalid"})
err := runner.C.Execute()
assert.EqualError(t,
err,
"invalid diff-type 'invalid': supported diff-types are: local, remote, combined, 3way")
}
func TestCmdInvalidDiffTool(t *testing.T)
|
func TestCmdExecute(t *testing.T) {
g, w, clean := testutil.SetupRepoAndWorkspace(t, testutil.Content{
Data: testutil.Dataset1,
Branch: "master",
})
defer clean()
defer testutil.Chdir(t, w.WorkspaceDirectory)()
dest := filepath.Join(w.WorkspaceDirectory, g.RepoName)
getRunner := cmdget.NewRunner(fake.CtxWithDefaultPrinter(), "")
getRunner.Command.SetArgs([]string{"file://" + g.RepoDirectory + ".git/", "./"})
err := getRunner.Command.Execute()
assert.NoError(t, err)
runner := cmddiff.NewRunner(fake.CtxWithDefaultPrinter(), "")
runner.C.SetArgs([]string{dest, "--diff-type", "local"})
err = runner.C.Execute()
assert.NoError(t, err)
}
func TestCmd_flagAndArgParsing_Symlink(t *testing.T) {
dir := t.TempDir()
defer testutil.Chdir(t, dir)()
err := os.MkdirAll(filepath.Join(dir, "path", "to", "pkg", "dir"), 0700)
assert.NoError(t, err)
err = os.Symlink(filepath.Join("path", "to", "pkg", "dir"), "foo")
assert.NoError(t, err)
// verify the branch ref is set to the correct value
r := cmddiff.NewRunner(fake.CtxWithDefaultPrinter(), "kpt")
r.C.RunE = NoOpRunE
r.C.SetArgs([]string{"foo" + "@refs/heads/foo"})
err = r.C.Execute()
assert.NoError(t, err)
cwd, err := os.Getwd()
assert.NoError(t, err)
assert.Equal(t, filepath.Join(cwd, "path", "to", "pkg", "dir"), r.Path)
}
var NoOpRunE = func(cmd *cobra.Command, args []string) error { return nil }
|
{
runner := cmddiff.NewRunner(fake.CtxWithDefaultPrinter(), "")
runner.C.SetArgs([]string{"--diff-tool", "nodiff"})
err := runner.C.Execute()
assert.EqualError(t,
err,
"diff-tool 'nodiff' not found in the PATH")
}
|
import_schedule.py
|
import datetime as dt
from xml.etree import ElementTree as ET
from django.core.management.base import BaseCommand
from django.db import transaction
from django_scopes import scopes_disabled
from pretalx.event.models import Event, Organiser, Team
from pretalx.person.models import User
class Command(BaseCommand):
help = "Imports a frab xml export"
def add_arguments(self, parser):
|
@transaction.atomic
def handle(self, *args, **options):
from pretalx.schedule.utils import process_frab
path = options.get("path")
tree = ET.parse(path)
root = tree.getroot()
event_data = root.find("conference")
event = Event.objects.filter(
slug__iexact=event_data.find("acronym").text
).first()
with scopes_disabled():
if not event:
event = self.create_event(event_data)
team = event.organiser.teams.filter(
can_change_teams=True,
can_change_organiser_settings=True,
can_change_event_settings=True,
can_change_submissions=True,
).first() or self.create_team(
str(event.name) + " Organisers", event.organiser
)
for user in User.objects.filter(is_administrator=True):
team.members.add(user)
team.save()
self.stdout.write(self.style.SUCCESS(process_frab(root, event)))
def create_event(self, event_data):
name = event_data.find("title").text
organiser = Organiser.objects.create(
name=name, slug=event_data.find("acronym").text
)
event = Event(
name=name,
organiser=organiser,
slug=event_data.find("acronym").text,
date_from=dt.datetime.strptime(
event_data.find("start").text, "%Y-%m-%d"
).date(),
date_to=dt.datetime.strptime(
event_data.find("end").text, "%Y-%m-%d"
).date(),
)
event.save()
self.create_team(name + " Organisers", organiser)
return event
def create_team(self, name, organiser):
return Team.objects.create(
name=name,
organiser=organiser,
can_change_teams=True,
can_change_organiser_settings=True,
can_change_event_settings=True,
can_change_submissions=True,
)
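# Illustrative invocation only (the command name follows this module's file
# name, import_schedule):
#
#     python manage.py import_schedule /path/to/schedule.xml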
|
parser.add_argument("path", type=str)
|
gorm.go
|
package initialize
import (
"gin-vue-admin/global"
"gin-vue-admin/model"
"go.uber.org/zap"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"os"
)
// Gorm initializes the database and produces the global database object
func Gorm() *gorm.DB {
switch global.GVA_CONFIG.System.DbType {
case "mysql":
return GormMysql()
default:
return GormMysql()
}
}
// MysqlTables registers the database tables (runs auto-migration)
func MysqlTables(db *gorm.DB) {
err := db.AutoMigrate(
model.SysUser{},
model.SysAuthority{},
model.SysApi{},
model.SysBaseMenu{},
model.SysBaseMenuParameter{},
model.JwtBlacklist{},
model.SysWorkflow{},
model.SysWorkflowStepInfo{},
model.SysDictionary{},
model.SysDictionaryDetail{},
model.ExaFileUploadAndDownload{},
model.ExaFile{},
model.ExaFileChunk{},
model.ExaSimpleUploader{},
model.ExaCustomer{},
model.SysOperationRecord{},
)
if err != nil {
global.GVA_LOG.Error("register table failed"
|
}
// GormMysql initializes the MySQL database
func GormMysql() *gorm.DB {
m := global.GVA_CONFIG.Mysql
dsn := m.Username + ":" + m.Password + "@tcp(" + m.Path + ")/" + m.Dbname + "?" + m.Config
mysqlConfig := mysql.Config{
DSN: dsn, // DSN data source name
		DefaultStringSize:         191,   // default length for string-type fields
		DisableDatetimePrecision:  true,  // disable datetime precision; not supported before MySQL 5.6
		DontSupportRenameIndex:    true,  // drop and re-create when renaming an index; renaming indexes is not supported before MySQL 5.7 or by MariaDB
		DontSupportRenameColumn:   true,  // rename columns with `change`; renaming columns is not supported before MySQL 8 or by MariaDB
		SkipInitializeWithVersion: false, // auto-configure according to the server version
}
if db, err := gorm.Open(mysql.New(mysqlConfig), gormConfig(m.LogMode)); err != nil {
global.GVA_LOG.Error("MySQL启动异常", zap.Any("err", err))
os.Exit(0)
return nil
} else {
sqlDB, _ := db.DB()
sqlDB.SetMaxIdleConns(m.MaxIdleConns)
sqlDB.SetMaxOpenConns(m.MaxOpenConns)
return db
}
}
// gormConfig decides whether to enable the logger based on configuration
func gormConfig(mod bool) *gorm.Config {
if mod {
return &gorm.Config{
Logger: logger.Default.LogMode(logger.Info),
DisableForeignKeyConstraintWhenMigrating: true,
}
} else {
return &gorm.Config{
Logger: logger.Default.LogMode(logger.Silent),
DisableForeignKeyConstraintWhenMigrating: true,
}
}
}
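// Illustrative usage sketch (assumes configuration has been loaded first and
// that the handle is stored in a global such as global.GVA_DB, which is an
// assumption rather than something shown in this file):
//
//	db := Gorm()
//	MysqlTables(db)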
|
, zap.Any("err", err))
os.Exit(0)
}
global.GVA_LOG.Info("register table success")
|
_configuration.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class FeatureClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for FeatureClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The Azure subscription ID.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2021-07-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(FeatureClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
self._configure(**kwargs)
def
|
(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
|
_configure
|
BAIR.py
|
import os
import io
import numpy as np
from PIL import Image
import torch
from torchvision.transforms import ToTensor
class BAIR (object):
"""Data Handler that loads robot pushing data."""
def __init__(self, data_root, train=True, seq_len=20, image_size=64):
self.root_dir = data_root
if train:
self.data_dir = '%s/processed_data/train' % self.root_dir
self.ordered = False
else:
self.data_dir = '%s/processed_data/test' % self.root_dir
self.ordered = True
self.dirs = []
for d1 in os.listdir(self.data_dir):
for d2 in os.listdir('%s/%s' % (self.data_dir, d1)):
self.dirs.append('%s/%s/%s' % (self.data_dir, d1, d2))
self.seq_len = seq_len
self.image_size = image_size
self.seed_is_set = False # multi threaded loading
self.d = 0
self.totensor = ToTensor()
def set_seed(self, seed):
if not self.seed_is_set:
self.seed_is_set = True
np.random.seed(seed)
def __len__(self):
return len(self.dirs)
def get_seq(self):
if self.ordered:
d = self.dirs[self.d]
if self.d == len(self.dirs) - 1:
self.d = 0
else:
self.d += 1
|
else:
d = self.dirs[np.random.randint(len(self.dirs))]
image_seq = []
for i in range(self.seq_len):
fname = '%s/%d.png' % (d, i)
# im = imread(fname).reshape(1, 64, 64, 3)
# im = np.array(Image.open(fname)).reshape((1, 3, 64, 64))
im = self.totensor(Image.open(fname)).reshape(1, 3, 64, 64)
image_seq.append(im)
image_seq = torch.cat(image_seq, axis=0)
return image_seq
def __getitem__(self, index):
self.set_seed(index)
return self.get_seq()
if __name__ == "__main__":
from torch.utils.data import DataLoader
train_dataset = BAIR('src/data/datasets/BAIR/raw', train=True)
    # Smoke test: wrap the dataset in a DataLoader and report sizes.
    train_dataloader = DataLoader(train_dataset, batch_size=4)
    print(len(train_dataset), len(train_dataloader))
| |
sw-property-list.spec.js
|
import { createLocalVue, shallowMount } from '@vue/test-utils';
import 'src/module/sw-property/page/sw-property-list';
function createWrapper(privileges = []) {
const localVue = createLocalVue();
localVue.directive('tooltip', {});
return shallowMount(Shopware.Component.build('sw-property-list'), {
localVue,
mocks: {
$tc: () => {},
$route: {
query: {
page: 1,
limit: 25
}
},
$router: {
replace: () => {}
}
},
provide: {
repositoryFactory: {
create: () => ({
search: () => {
return Promise.resolve([
{
id: '1a2b3c4e',
name: 'Test property',
                                sourceEntity: 'property'
}
]);
}
})
},
acl: {
can: (identifier) => {
if (!identifier) { return true; }
return privileges.includes(identifier);
}
}
},
stubs: {
'sw-page': `
<div class="sw-page">
<slot name="smart-bar-actions"></slot>
<slot name="content">CONTENT</slot>
<slot></slot>
|
'sw-entity-listing': {
props: ['items'],
template: `
<div>
<template v-for="item in items">
<slot name="actions" v-bind="{ item }"></slot>
</template>
</div>`
},
'sw-language-switch': true,
'sw-empty-state': true,
'sw-context-menu-item': true
}
});
}
describe('module/sw-property/page/sw-property-list', () => {
it('should be a Vue.JS component', async () => {
const wrapper = createWrapper();
await wrapper.vm.$nextTick();
expect(wrapper.isVueInstance()).toBe(true);
});
it('should not be able to create a new property', async () => {
const wrapper = createWrapper();
await wrapper.vm.$nextTick();
const createButton = wrapper.find('.sw-property-list__button-create');
expect(createButton.attributes().disabled).toBeTruthy();
});
it('should be able to create a new property', async () => {
const wrapper = createWrapper([
'property.creator'
]);
await wrapper.vm.$nextTick();
const createButton = wrapper.find('.sw-property-list__button-create');
expect(createButton.attributes().disabled).toBeFalsy();
});
it('should not be able to inline edit', async () => {
const wrapper = createWrapper();
await wrapper.vm.$nextTick();
const entityListing = wrapper.find('.sw-property-list-grid');
expect(entityListing.exists()).toBeTruthy();
expect(entityListing.attributes().allowinlineedit).toBeFalsy();
});
it('should be able to inline edit', async () => {
const wrapper = createWrapper([
'property.editor'
]);
await wrapper.vm.$nextTick();
const entityListing = wrapper.find('.sw-property-list-grid');
expect(entityListing.exists()).toBeTruthy();
expect(entityListing.attributes().allowinlineedit).toBeTruthy();
});
it('should not be able to delete', async () => {
const wrapper = createWrapper();
await wrapper.vm.$nextTick();
const deleteMenuItem = wrapper.find('.sw-property-list__delete-action');
expect(deleteMenuItem.attributes().disabled).toBeTruthy();
});
it('should be able to delete', async () => {
const wrapper = createWrapper([
'property.deleter'
]);
await wrapper.vm.$nextTick();
const deleteMenuItem = wrapper.find('.sw-property-list__delete-action');
expect(deleteMenuItem.attributes().disabled).toBeFalsy();
});
it('should not be able to edit', async () => {
const wrapper = createWrapper();
await wrapper.vm.$nextTick();
const editMenuItem = wrapper.find('.sw-property-list__edit-action');
expect(editMenuItem.attributes().disabled).toBeTruthy();
});
it('should be able to edit', async () => {
const wrapper = createWrapper([
'property.editor'
]);
await wrapper.vm.$nextTick();
const editMenuItem = wrapper.find('.sw-property-list__edit-action');
expect(editMenuItem.attributes().disabled).toBeFalsy();
});
});
|
</div>`,
'sw-button': true,
'sw-icon': true,
'sw-search-bar': true,
|
hosted.rs
|
// SPDX-FileCopyrightText: 2020 Sean Cross <[email protected]>
// SPDX-License-Identifier: Apache-2.0
pub mod irq;
pub mod mem;
pub mod process;
pub mod rand;
pub mod syscall;
use std::cell::RefCell;
use std::convert::TryInto;
use std::env;
use std::io::Read;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener, TcpStream, ToSocketAddrs};
use std::thread_local;
use crate::arch::process::Process;
use crate::services::SystemServices;
use crossbeam_channel::{unbounded, Receiver, RecvError, RecvTimeoutError, Sender};
use xous_kernel::{MemoryAddress, ProcessInit, ProcessKey, Result, SysCall, ThreadInit, PID, TID};
enum ThreadMessage {
SysCall(PID, TID, SysCall),
NewConnection(TcpStream, ProcessKey),
}
#[derive(Debug)]
enum NewPidMessage {
NewPid(PID),
}
#[derive(Debug)]
enum ExitMessage {
Exit,
}
thread_local!(static NETWORK_LISTEN_ADDRESS: RefCell<SocketAddr> = RefCell::new(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0)));
thread_local!(static SEND_ADDR: RefCell<Option<Sender<SocketAddr>>> = RefCell::new(None));
thread_local!(static PID1_KEY: RefCell<[u8; 16]> = RefCell::new([0u8; 16]));
#[cfg(test)]
pub fn set_pid1_key(new_key: [u8; 16]) {
PID1_KEY.with(|p1k| *p1k.borrow_mut() = new_key);
}
/// Set the network address for this particular thread.
#[cfg(test)]
pub fn set_listen_address(new_address: &SocketAddr) {
NETWORK_LISTEN_ADDRESS.with(|nla| {
let mut address = nla.borrow_mut();
*address = *new_address;
});
}
/// Set the network address for this particular thread.
#[allow(dead_code)]
pub fn set_send_addr(send_addr: Sender<SocketAddr>) {
SEND_ADDR.with(|sa| {
*sa.borrow_mut() = Some(send_addr);
});
}
#[cfg(not(test))]
fn generate_pid_key() -> [u8; 16] {
use ::rand::{thread_rng, Rng};
let mut process_key = [0u8; 16];
let mut rng = thread_rng();
for b in process_key.iter_mut() {
*b = rng.gen();
}
process_key
}
#[allow(dead_code)]
pub fn current_pid() -> PID {
crate::arch::process::current_pid()
}
/// Each client gets its own connection and its own thread, which is handled here.
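/// On the wire, each request is nine little-endian `usize` words: the thread
/// ID followed by the eight syscall arguments, optionally trailed by a memory
/// payload for message-passing and return-memory calls.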
fn handle_connection(
conn: TcpStream,
pid: PID,
chn: Sender<ThreadMessage>,
should_exit: std::sync::Arc<core::sync::atomic::AtomicBool>,
) {
enum ServerMessage {
Exit,
ServerPacket([usize; 9]),
ServerPacketWithData([usize; 9], Vec<u8>),
}
fn conn_thread(mut conn: TcpStream, sender: Sender<ServerMessage>, _pid: PID) {
loop {
|
eprintln!(
"KERNEL({}): client disconnected: {} -- shutting down virtual process",
_pid, _e
);
sender.send(ServerMessage::Exit).ok();
return;
}
let mut packet_data = [0usize; 9];
for (bytes, word) in raw_data
.chunks_exact(std::mem::size_of::<usize>())
.zip(packet_data.iter_mut())
{
*word = usize::from_le_bytes(bytes.try_into().unwrap());
}
sender
.send(
if (packet_data[1] == xous_kernel::syscall::SysCallNumber::SendMessage as _
|| packet_data[1]
== xous_kernel::syscall::SysCallNumber::TrySendMessage as _)
&& (packet_data[3] == 1 || packet_data[3] == 2 || packet_data[3] == 3)
{
let mut v = vec![0; packet_data[6]];
if conn.read_exact(&mut v).is_err() {
sender.send(ServerMessage::Exit).ok();
return;
}
ServerMessage::ServerPacketWithData(packet_data, v)
} else if packet_data[1]
== xous_kernel::syscall::SysCallNumber::ReturnMemory as _
{
let mut v = vec![0; packet_data[4]];
if conn.read_exact(&mut v).is_err() {
sender.send(ServerMessage::Exit).ok();
return;
}
ServerMessage::ServerPacketWithData(packet_data, v)
} else {
ServerMessage::ServerPacket(packet_data)
},
)
.unwrap();
}
}
let (sender, receiver) = unbounded();
let conn_sender = sender.clone();
std::thread::Builder::new()
.name(format!("PID {}: client connection thread", pid))
.spawn(move || {
conn_thread(conn, conn_sender, pid);
})
.unwrap();
std::thread::Builder::new()
.name(format!("PID {}: client should_exit thread", pid))
.spawn(move || loop {
if should_exit.load(core::sync::atomic::Ordering::Relaxed) {
// eprintln!("KERNEL: should_exit == 1");
sender.send(ServerMessage::Exit).ok();
return;
}
std::thread::park_timeout(std::time::Duration::from_secs(1));
})
.unwrap();
for msg in receiver {
match msg {
ServerMessage::Exit => {
#[cfg(not(test))]
eprintln!("KERNEL({}): Received ServerMessage::Exit", pid);
break;
}
ServerMessage::ServerPacket(pkt) => {
let thread_id = pkt[0];
let call = xous_kernel::SysCall::from_args(
pkt[1], pkt[2], pkt[3], pkt[4], pkt[5], pkt[6], pkt[7], pkt[8],
);
match call {
Err(e) => {
eprintln!("KERNEL({}): Received invalid syscall: {:?}", pid, e);
eprintln!(
"Raw packet: {:08x} {} {} {} {} {} {} {}",
pkt[0], pkt[1], pkt[2], pkt[3], pkt[4], pkt[5], pkt[6], pkt[7]
);
}
Ok(call) => chn
.send(ThreadMessage::SysCall(pid, thread_id, call))
.expect("couldn't make syscall"),
}
}
ServerMessage::ServerPacketWithData(pkt, data) => {
let thread_id = pkt[0];
let call = xous_kernel::SysCall::from_args(
pkt[1], pkt[2], pkt[3], pkt[4], pkt[5], pkt[6], pkt[7], pkt[8],
);
match call {
Err(e) => {
eprintln!("KERNEL({}): Received invalid syscall: {:?}", pid, e);
eprintln!(
"Raw packet: {:08x} {} {} {} {} {} {} {}",
pkt[0], pkt[1], pkt[2], pkt[3], pkt[4], pkt[5], pkt[6], pkt[7]
);
}
Ok(mut call) => {
// eprintln!(
// "Received packet: {:08x} {} {} {} {} {} {} {}: {:?}",
// pkt[0], pkt[1], pkt[2], pkt[3], pkt[4], pkt[5], pkt[6], pkt[7], call
// );
match call {
SysCall::SendMessage(ref _cid, ref mut envelope)
| SysCall::TrySendMessage(ref _cid, ref mut envelope) => {
match envelope {
xous_kernel::Message::MutableBorrow(msg)
| xous_kernel::Message::Borrow(msg)
| xous_kernel::Message::Move(msg) => {
// Update the address pointer. This will get turned back into a
// usable pointer by casting it back into a &[T] on the other
// side. This is just a pointer to the start of data
// as well as the index into the data it points at. The lengths
// should still be equal once we reconstitute the data in the
// other process.
// ::debug_here::debug_here!();
let sliced_data = data.into_boxed_slice();
assert_eq!(
sliced_data.len(),
msg.buf.len(),
"deconstructed data {} != message buf length {}",
sliced_data.len(),
msg.buf.len()
);
msg.buf.addr =
match MemoryAddress::new(Box::into_raw(sliced_data)
as *mut u8
as usize)
{
Some(a) => a,
_ => unreachable!(),
};
}
xous_kernel::Message::Scalar(_)
| xous_kernel::Message::BlockingScalar(_) => (),
}
}
SysCall::ReturnMemory(ref _sender, ref mut buf) => {
let sliced_data = data.into_boxed_slice();
assert_eq!(
sliced_data.len(),
buf.len(),
"deconstructed data {} != message buf length {}",
sliced_data.len(),
buf.len()
);
buf.addr = match MemoryAddress::new(Box::into_raw(sliced_data)
as *mut u8
as usize)
{
Some(a) => a,
_ => unreachable!(),
};
}
_ => panic!("unsupported message type"),
}
chn.send(ThreadMessage::SysCall(pid, thread_id, call))
.expect("couldn't make syscall");
}
}
}
}
}
#[cfg(not(test))]
eprintln!(
"KERNEL({}): Finished the thread so sending TerminateProcess",
pid
);
chn.send(ThreadMessage::SysCall(
pid,
1,
xous_kernel::SysCall::TerminateProcess,
))
.unwrap();
}
fn listen_thread(
listen_addr: SocketAddr,
chn: Sender<ThreadMessage>,
mut local_addr_sender: Option<Sender<SocketAddr>>,
new_pid_channel: Receiver<NewPidMessage>,
exit_channel: Receiver<ExitMessage>,
) {
let should_exit = std::sync::Arc::new(core::sync::atomic::AtomicBool::new(false));
// println!("KERNEL(1): Starting Xous server on {}...", listen_addr);
let listener = TcpListener::bind(listen_addr).unwrap_or_else(|e| {
panic!("Unable to create server: {}", e);
});
// Notify the host what our kernel address is, if a listener exists.
if let Some(las) = local_addr_sender.take() {
las.send(listener.local_addr().unwrap()).unwrap();
}
let mut clients = vec![];
fn accept_new_connection(
mut conn: TcpStream,
chn: &Sender<ThreadMessage>,
new_pid_channel: &Receiver<NewPidMessage>,
clients: &mut Vec<(std::thread::JoinHandle<()>, TcpStream)>,
should_exit: &std::sync::Arc<core::sync::atomic::AtomicBool>,
) -> bool {
let thr_chn = chn.clone();
// Read the challenge access key from the client
let mut access_key = [0u8; 16];
conn.read_exact(&mut access_key).unwrap();
// Spawn a new process. This process will start out in the "Allocated" state.
chn.send(ThreadMessage::NewConnection(
conn.try_clone()
.expect("couldn't make a copy of the network connection for the kernel"),
ProcessKey::new(access_key),
))
.expect("couldn't request a new PID");
// The kernel will immediately respond with a new PID.
let NewPidMessage::NewPid(new_pid) = new_pid_channel
.recv()
.expect("couldn't receive message from main thread");
// println!("KERNEL({}): New client connected from {}", new_pid, _addr);
let conn_copy = conn.try_clone().expect("couldn't duplicate connection");
let should_exit = should_exit.clone();
let jh = std::thread::Builder::new()
.name(format!("kernel PID {} listener", new_pid))
.spawn(move || handle_connection(conn, new_pid, thr_chn, should_exit))
.expect("couldn't spawn listen thread");
clients.push((jh, conn_copy));
false
}
fn exit_server(
should_exit: std::sync::Arc<core::sync::atomic::AtomicBool>,
clients: Vec<(std::thread::JoinHandle<()>, TcpStream)>,
) {
should_exit.store(true, core::sync::atomic::Ordering::Relaxed);
for (jh, conn) in clients {
use std::net::Shutdown;
conn.shutdown(Shutdown::Both).ok();
jh.join().expect("couldn't join client thread");
}
}
// Use `listener` in a nonblocking setup so that we can exit when doing tests
enum ClientMessage {
NewConnection(TcpStream),
Exit,
};
let (sender, receiver) = unbounded();
let tcp_sender = sender.clone();
let exit_sender = sender;
let (shutdown_listener, shutdown_listener_receiver) = unbounded();
// `listener.accept()` has no way to break, so we must put it in nonblocking mode
listener.set_nonblocking(true).unwrap();
std::thread::Builder::new()
.name("kernel accept thread".to_owned())
.spawn(move || loop {
match listener.accept() {
Ok((conn, _addr)) => {
conn.set_nonblocking(false).unwrap();
tcp_sender.send(ClientMessage::NewConnection(conn)).unwrap();
}
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
match shutdown_listener_receiver
.recv_timeout(std::time::Duration::from_millis(500))
{
Err(RecvTimeoutError::Timeout) => continue,
Ok(()) | Err(RecvTimeoutError::Disconnected) => {
return;
}
}
}
Err(e) => {
// Windows generates this error -- WSACancelBlockingCall -- when a
// connection is shut down while `accept()` is running. This should
// only happen when the system is shutting down, so ignore it.
if cfg!(windows) {
if let Some(10004) = e.raw_os_error() {
return;
}
}
eprintln!(
"error accepting connections: {} ({:?}) ({:?})",
e,
e,
e.kind()
);
return;
}
}
})
.unwrap();
// Spawn a thread to listen for the `exit` command, and relay that
// to the main thread. This prevents us from needing to poll, since
// all messages are coalesced into a single channel.
std::thread::Builder::new()
.name("kernel exit listener".to_owned())
.spawn(move || match exit_channel.recv() {
Ok(ExitMessage::Exit) => exit_sender.send(ClientMessage::Exit).unwrap(),
Err(RecvError) => eprintln!("error receiving exit command"),
})
.unwrap();
for msg in receiver {
match msg {
ClientMessage::NewConnection(conn) => {
if accept_new_connection(conn, &chn, &new_pid_channel, &mut clients, &should_exit) {
break;
}
}
ClientMessage::Exit => break,
}
}
shutdown_listener.send(()).unwrap();
exit_server(should_exit, clients);
}
/// The idle function is run when there are no directly-runnable processes
/// that kmain can activate. In a hosted environment, this is the primary
/// thread that handles network communications, and this function only
/// returns when the kernel is shutting down.
pub fn idle() -> bool {
// Start listening.
let (sender, message_receiver) = unbounded();
let (new_pid_sender, new_pid_receiver) = unbounded();
let (exit_sender, exit_receiver) = unbounded();
// Allocate PID1 with the key we were passed.
let pid1_key = PID1_KEY.with(|p1k| *p1k.borrow());
let pid1_init = ProcessInit {
key: ProcessKey::new(pid1_key),
};
let pid1 = SystemServices::with_mut(|ss| ss.create_process(pid1_init)).unwrap();
assert_eq!(pid1.get(), 1);
let listen_addr = env::var("XOUS_LISTEN_ADDR")
.map(|s| {
s.to_socket_addrs()
.expect("invalid server address")
.next()
.expect("unable to resolve server address")
})
.unwrap_or_else(|_| NETWORK_LISTEN_ADDRESS.with(|nla| *nla.borrow()));
#[cfg(not(test))]
let address_receiver = {
let (sender, receiver) = unbounded();
set_send_addr(sender);
receiver
};
let listen_thread_handle = SEND_ADDR.with(|sa| {
let sa = sa.borrow_mut().take();
std::thread::Builder::new()
.name("kernel network listener".to_owned())
.spawn(move || listen_thread(listen_addr, sender, sa, new_pid_receiver, exit_receiver))
.expect("couldn't spawn listen thread")
});
#[cfg(not(test))]
{
let address = address_receiver.recv().unwrap();
xous_kernel::arch::set_xous_address(address);
println!("KERNEL: Xous server listening on {}", address);
println!("KERNEL: Starting initial processes:");
let mut args = std::env::args();
args.next();
// Set the current PID to 1, which was created above. This ensures all init processes
// are owned by PID1.
crate::arch::process::set_current_pid(pid1);
// Go through each arg and spawn it as a new process. Failures here will
// halt the entire system.
println!(" PID | Command");
println!("-------+------------------");
for arg in args {
let process_key = generate_pid_key();
let init = xous_kernel::ProcessInit {
key: ProcessKey::new(process_key),
};
let new_pid = SystemServices::with_mut(|ss| ss.create_process(init)).unwrap();
println!(" {:^5} | {}", new_pid, arg);
let process_args = xous_kernel::ProcessArgs::new("program", arg);
xous_kernel::arch::create_process_post(process_args, init, new_pid)
.expect("couldn't spawn");
}
}
while let Ok(msg) = message_receiver.recv() {
match msg {
ThreadMessage::NewConnection(conn, access_key) => {
// The new process should already have a PID registered. Convert its access key
// into a PID, and register the connection with the server.
let new_pid =
crate::arch::process::register_connection_for_key(conn, access_key).unwrap();
// println!(
// "KERNEL: Access key {:?} mapped to PID {}",
// access_key, new_pid
// );
// Inform the backchannel of the new process ID.
new_pid_sender
.send(NewPidMessage::NewPid(new_pid))
.expect("couldn't send new pid to new connection");
// conn.write_all(&new_pid.get().to_le_bytes())
// .expect("couldn't send pid to new process");
// Switch to this process immediately, which moves it from `Setup(_)` to `Running(0)`.
// Note that in this system, multiple processes can be active at once. This is
// similar to having one core for each process
SystemServices::with_mut(|ss| {
ss.create_thread(new_pid, ThreadInit {})?;
ss.switch_to_thread(new_pid, None)
})
.unwrap();
}
ThreadMessage::SysCall(pid, thread_id, call) => {
// println!("KERNEL({}): Received syscall {:?}", pid, call);
crate::arch::process::set_current_pid(pid);
// println!("KERNEL({}): Now running as the new process", pid);
// If the call being made is to terminate the current process, we need to know
// because we won't be able to send a response.
let is_terminate = call == SysCall::TerminateProcess;
let is_shutdown = call == SysCall::Shutdown;
// For a "Shutdown" command, send the response before we issue the shutdown.
// This is because the "process" will be "terminated" (the network socket will be closed),
// and we won't be able to send the response after we're done.
if is_shutdown {
// println!("KERNEL: Detected shutdown -- sending final \"Ok\" to the client");
let mut process = Process::current();
let mut response_vec = Vec::new();
response_vec.extend_from_slice(&thread_id.to_le_bytes());
for word in Result::Ok.to_args().iter_mut() {
response_vec.extend_from_slice(&word.to_le_bytes());
}
process.send(&response_vec).unwrap_or_else(|_e| {
// If we're unable to send data to the process, assume it's dead and terminate it.
println!(
"Unable to send response to process: {:?} -- terminating",
_e
);
crate::syscall::handle(pid, thread_id, false, SysCall::TerminateProcess)
.ok();
});
// println!("KERNEL: Done sending");
}
// Handle the syscall within the Xous kernel
let response = crate::syscall::handle(pid, thread_id, false, call)
.unwrap_or_else(Result::Error);
// println!("KERNEL({}): Syscall response {:?}", pid, response);
// There's a response if it wasn't a blocked process and we're not terminating.
// Send the response back to the target.
if response != Result::BlockedProcess && !is_terminate && !is_shutdown {
// The syscall may change what the current process is, but we always
// want to send a response to the process where the request came from.
// For this block, switch to the original PID, send the message, then
// switch back.
let existing_pid = crate::arch::process::current_pid();
crate::arch::process::set_current_pid(pid);
let mut process = Process::current();
let mut response_vec = Vec::new();
response_vec.extend_from_slice(&thread_id.to_le_bytes());
for word in response.to_args().iter_mut() {
response_vec.extend_from_slice(&word.to_le_bytes());
}
if let Some(mem) = response.memory() {
let s = unsafe { core::slice::from_raw_parts(mem.as_ptr(), mem.len()) };
response_vec.extend_from_slice(s);
}
process.send(&response_vec).unwrap_or_else(|_e| {
// If we're unable to send data to the process, assume it's dead and terminate it.
eprintln!(
"KERNEL({}): Unable to send response to process: {:?} -- terminating",
pid, _e
);
crate::syscall::handle(pid, thread_id, false, SysCall::TerminateProcess)
.ok();
});
crate::arch::process::set_current_pid(existing_pid);
}
if is_shutdown {
exit_sender
.send(ExitMessage::Exit)
.expect("couldn't send shutdown signal");
break;
}
}
}
}
// println!("Exiting Xous because the listen thread channel has closed. Waiting for thread to finish...");
listen_thread_handle
.join()
.expect("error waiting for listen thread to return");
// println!("Thank you for using Xous!");
false
}
|
let mut raw_data = [0u8; 9 * std::mem::size_of::<usize>()];
if let Err(_e) = conn.read_exact(&mut raw_data) {
#[cfg(not(test))]
|
plots.rs
|
use std::collections::HashSet;
use abstutil::prettyprint_usize;
use geom::{Circle, Distance, Duration, Percent, Polygon, Pt2D, Time, UnitFmt};
use crate::{Color, EventCtx, GeomBatch, TextExt, Toggle, Widget};
pub struct PlotOptions<X: Axis<X>, Y: Axis<Y>> {
pub filterable: bool,
pub max_x: Option<X>,
pub max_y: Option<Y>,
pub disabled: HashSet<String>,
}
impl<X: Axis<X>, Y: Axis<Y>> PlotOptions<X, Y> {
pub fn filterable() -> PlotOptions<X, Y> {
PlotOptions {
filterable: true,
max_x: None,
max_y: None,
disabled: HashSet::new(),
}
}
pub fn fixed() -> PlotOptions<X, Y> {
PlotOptions {
filterable: false,
max_x: None,
max_y: None,
disabled: HashSet::new(),
}
}
}
pub trait Axis<T>: 'static + Copy + std::cmp::Ord {
// percent is [0.0, 1.0]
fn from_percent(&self, percent: f64) -> T;
fn to_percent(self, max: T) -> f64;
fn prettyprint(self) -> String;
// For order of magnitude calculations
fn to_f64(self) -> f64;
fn from_f64(&self, x: f64) -> T;
fn zero() -> T;
}
impl Axis<usize> for usize {
fn from_percent(&self, percent: f64) -> usize {
((*self as f64) * percent) as usize
}
fn to_percent(self, max: usize) -> f64 {
if max == 0 {
0.0
} else {
(self as f64) / (max as f64)
}
}
fn prettyprint(self) -> String {
prettyprint_usize(self)
}
fn to_f64(self) -> f64 {
self as f64
}
fn from_f64(&self, x: f64) -> usize {
x as usize
}
fn zero() -> usize {
0
}
}
impl Axis<Duration> for Duration {
fn from_percent(&self, percent: f64) -> Duration {
*self * percent
}
fn to_percent(self, max: Duration) -> f64 {
if max == Duration::ZERO {
0.0
} else {
self / max
}
}
fn prettyprint(self) -> String {
self.to_string(&UnitFmt {
metric: false,
round_durations: true,
})
}
fn to_f64(self) -> f64 {
self.inner_seconds()
}
fn from_f64(&self, x: f64) -> Duration {
Duration::seconds(x)
}
fn zero() -> Duration {
Duration::ZERO
}
}
impl Axis<Time> for Time {
fn from_percent(&self, percent: f64) -> Time {
self.percent_of(percent)
}
fn to_percent(self, max: Time) -> f64 {
if max == Time::START_OF_DAY {
0.0
} else {
self.to_percent(max)
}
}
fn prettyprint(self) -> String {
self.ampm_tostring()
}
fn to_f64(self) -> f64 {
self.inner_seconds()
}
fn from_f64(&self, x: f64) -> Time {
Time::START_OF_DAY + Duration::seconds(x)
}
fn zero() -> Time {
Time::START_OF_DAY
}
}
impl Axis<Distance> for Distance {
fn from_percent(&self, percent: f64) -> Distance {
*self * percent
}
fn to_percent(self, max: Distance) -> f64 {
if max == Distance::ZERO {
0.0
} else {
self / max
}
}
fn prettyprint(self) -> String {
self.to_string(&UnitFmt {
metric: false,
round_durations: true,
})
}
fn to_f64(self) -> f64 {
self.inner_meters()
}
fn from_f64(&self, x: f64) -> Distance {
Distance::meters(x)
}
fn zero() -> Distance {
Distance::ZERO
}
}
pub struct Series<X, Y> {
pub label: String,
pub color: Color,
// Assume this is sorted by X.
pub pts: Vec<(X, Y)>,
}
pub fn make_legend<X: Axis<X>, Y: Axis<Y>>(
ctx: &EventCtx,
series: &Vec<Series<X, Y>>,
opts: &PlotOptions<X, Y>,
) -> Widget {
let mut row = Vec::new();
let mut seen = HashSet::new();
for s in series {
if seen.contains(&s.label) {
continue;
}
seen.insert(s.label.clone());
if opts.filterable {
row.push(Toggle::colored_checkbox(
ctx,
&s.label,
s.color,
!opts.disabled.contains(&s.label),
));
} else {
let radius = 15.0;
row.push(Widget::row(vec![
GeomBatch::from(vec![(
s.color,
Circle::new(Pt2D::new(radius, radius), Distance::meters(radius)).to_polygon(),
)])
.into_widget(ctx),
s.label.clone().text_widget(ctx),
]));
}
}
Widget::custom_row(row).flex_wrap(ctx, Percent::int(24))
}
// TODO If this proves useful, lift to geom
pub fn
|
(pts: Vec<Pt2D>, width: Distance) -> Polygon {
use lyon::math::{point, Point};
use lyon::path::Path;
use lyon::tessellation::geometry_builder::{BuffersBuilder, Positions, VertexBuffers};
use lyon::tessellation::{StrokeOptions, StrokeTessellator};
let mut builder = Path::builder();
for (idx, pt) in pts.into_iter().enumerate() {
let pt = point(pt.x() as f32, pt.y() as f32);
if idx == 0 {
builder.move_to(pt);
} else {
builder.line_to(pt);
}
}
let path = builder.build();
let mut geom: VertexBuffers<Point, u32> = VertexBuffers::new();
let mut buffer = BuffersBuilder::new(&mut geom, Positions);
StrokeTessellator::new()
.tessellate(
&path,
&StrokeOptions::tolerance(0.01).with_line_width(width.inner_meters() as f32),
&mut buffer,
)
.unwrap();
Polygon::precomputed(
geom.vertices
.into_iter()
.map(|v| Pt2D::new(f64::from(v.x), f64::from(v.y)))
.collect(),
geom.indices.into_iter().map(|idx| idx as usize).collect(),
)
}
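// Example (illustrative only): stroke a two-point series into a polygon 0.5 meters wide.
//
// let poly = thick_lineseries(
//     vec![Pt2D::new(0.0, 0.0), Pt2D::new(10.0, 0.0)],
//     Distance::meters(0.5),
// );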
|
thick_lineseries
|
startOnBoot.js
|
/*
* @Author : tongzonghua
* @Date : 2020-10-21 01:07:30
* @LastEditors : tongzonghua
* @LastEditTime : 2020-10-21 03:02:31
* @Email : [email protected]
* @Description : Start the app on boot (Windows auto-start via registry)
* @FilePath : /cli/aggna-electron-template/src/main/startOnBoot.js
*/
// Import the winreg module
var WinReg = require('winreg')
var startOnBoot = {
// Enable auto start
enableAutoStart: function (name, file, callback) {
var key = getKey()
key.set(name, WinReg.REG_SZ, file, callback || noop)
},
// Disable auto start
disableAutoStart: function (name, callback) {
var key = getKey()
key.remove(name, callback || noop)
},
// Read the auto-start value from the registry
getAutoStartValue: function (name, callback) {
var key = getKey()
key.get(name, function (error, result) {
if (result) {
callback(null, result.value)
} else {
callback(error)
}
})
}
}
var RUN_LOCATION = '\\Software\\Microsoft\\Windows\\CurrentVersion\\Run'
// Get the registry key
function getKey() {
return new WinReg({
// hive: WinReg.HKCU, // Curren
|
hive: WinReg.HKLM, // LocalMachine,
key: RUN_LOCATION
})
}
// Default no-op callback; replace it with custom logic if needed
function noop() {
}
// Export the module
module.exports = startOnBoot
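// Example usage (illustrative; the app name and callback are placeholders):
// var startOnBoot = require('./startOnBoot')
// startOnBoot.enableAutoStart('MyElectronApp', process.execPath, function (error) {
//   if (error) console.error('Failed to enable auto start:', error)
// })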
|
tUser,
|
readers.py
|
"""
This module contains functions to read Caltrans PEMS station data and metadata files.
"""
import pandas as pd
import sys
from ..caada_typing import pathlike as _pathlike
def read_pems_station_csv(csv_file: _pathlike) -> pd.DataFrame:
|
def read_pems_station_meta(filename: _pathlike) -> pd.DataFrame:
"""Read a PEMS station metadata file.
Parameters
----------
filename
Path to the metadata tab-delimited file to read.
Returns
-------
pandas.DataFrame
A dataframe, indexed by site ID, containing the metadata from the requested file.
"""
try:
df = pd.read_csv(filename, sep='\t')
except pd.errors.ParserError:
print('Error parsing metadata file: {}'.format(filename), file=sys.stderr)
raise
df.set_index('ID', inplace=True)
df.rename(columns=lambda s: s.lower(), inplace=True)
return df
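# Example usage (illustrative; the file names below are placeholders):
#
#     station_df = read_pems_station_csv('d04_text_station_day_2020_01_01.csv')
#     meta_df = read_pems_station_meta('d04_text_meta_2020_01_01.txt')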
|
"""Read a Caltrans PEMS daily station .csv file
Parameters
----------
csv_file
The path to the PEMS file to read
Returns
-------
A dataframe containing the PEMS data with the correct header
"""
columns = ['timestamp', 'station', 'district', 'route', 'direction of travel', 'lane type', 'station length', 'samples',
'percent observed', 'total flow', 'delay 35', 'delay 40', 'delay 45', 'delay 50', 'delay 55', 'delay 60']
df = pd.read_csv(csv_file, header=None)
df.columns = columns
df['timestamp'] = pd.DatetimeIndex(df['timestamp'])
return df
|
thumbnail.py
|
from __future__ import unicode_literals
import os
from io import BytesIO
try:
from PIL import Image, ImageOps
except ImportError:
raise RuntimeError('Get Pillow at https://pypi.python.org/pypi/Pillow '
'or run command "pip install Pillow".')
from .utils import import_from_string, generate_filename, parse_size, aspect_to_string
class Thumbnail(object):
def __init__(self, app=None, configure_jinja=True):
self.app = app
self._configure_jinja = configure_jinja
|
self._default_root_url = '/'
self._default_thumbnail_root_url = '/'
self._default_format = 'JPEG'
self._default_storage_backend = 'sanic_thumbnails.storage_backends.FilesystemStorageBackend'
if app is not None:
self.init_app(app)
# init_app is synchronous: nothing inside needs to be awaited, and it is
# called directly from __init__.
def init_app(self, app):
if self.app is None:
self.app = app
app.thumbnail_instance = self
if not hasattr(app, 'extensions'):
app.extensions = {}
if 'thumbnail' in app.extensions:
raise RuntimeError('Sanic-thumbnail extension already initialized')
app.extensions['thumbnail'] = self
app.config.setdefault('THUMBNAIL_MEDIA_ROOT', self._default_root_directory)
app.config.setdefault('THUMBNAIL_MEDIA_THUMBNAIL_ROOT', self._default_thumbnail_directory)
app.config.setdefault('THUMBNAIL_MEDIA_URL', self._default_root_url)
app.config.setdefault('THUMBNAIL_MEDIA_THUMBNAIL_URL', self._default_thumbnail_root_url)
app.config.setdefault('THUMBNAIL_STORAGE_BACKEND', self._default_storage_backend)
app.config.setdefault('THUMBNAIL_DEFAULT_FORMAT', self._default_format)
if self._configure_jinja:
app.jinja_env.filters.update(
thumbnail=self.get_thumbnail,
)
@property
def root_directory(self):
path = self.app.config['THUMBNAIL_MEDIA_ROOT']
if os.path.isabs(path):
return path
else:
return os.path.join(self.app.root_path, path)
@property
def thumbnail_directory(self):
path = self.app.config['THUMBNAIL_MEDIA_THUMBNAIL_ROOT']
if os.path.isabs(path):
return path
else:
return os.path.join(self.app.root_path, path)
@property
def root_url(self):
return self.app.config['THUMBNAIL_MEDIA_URL']
@property
def thumbnail_url(self):
return self.app.config['THUMBNAIL_MEDIA_THUMBNAIL_URL']
@property
def storage_backend(self):
return self.app.config['THUMBNAIL_STORAGE_BACKEND']
async def get_storage_backend(self):
backend_class = import_from_string(self.storage_backend)
return backend_class(app=self.app)
async def get_thumbnail(self, original, size, **options):
storage = await self.get_storage_backend()
crop = options.get('crop', 'fit')
background = options.get('background')
quality = options.get('quality', 90)
thumbnail_size = parse_size(size)
original_path, original_filename = os.path.split(original)
thumbnail_filename = generate_filename(original_filename, aspect_to_string(size), crop, background, quality)
original_filepath = os.path.join(self.root_directory, original_path, original_filename)
thumbnail_filepath = os.path.join(self.thumbnail_directory, original_path, thumbnail_filename)
thumbnail_url = os.path.join(self.thumbnail_url, original_path, thumbnail_filename)
if storage.exists(thumbnail_filepath):
return thumbnail_url
image = Image.open(BytesIO(storage.read(original_filepath)))
try:
image.load()
except (IOError, OSError):
self.app.logger.warning('Thumbnail could not load image: %s', original_filepath)
return thumbnail_url
# get original image format
options['format'] = options.get('format', image.format)
image = await self._create_thumbnail(image, thumbnail_size, crop,
background=background)
raw_data = await self.get_raw_data(image, **options)
storage.save(thumbnail_filepath, raw_data)
return thumbnail_url
async def get_raw_data(self, image, **options):
data = {
'format': await self._get_format(image, **options),
'quality': options.get('quality', 90),
}
_file = BytesIO()
image.save(_file, **data)
return _file.getvalue()
@staticmethod
def colormode(image, colormode='RGB'):
if colormode == 'RGB' or colormode == 'RGBA':
if image.mode == 'RGBA':
return image
if image.mode == 'LA':
return image.convert('RGBA')
return image.convert(colormode)
if colormode == 'GRAY':
return image.convert('L')
return image.convert(colormode)
@staticmethod
def background(original_image, color=0xff):
size = (max(original_image.size),) * 2
image = Image.new('L', size, color)
# Paste box coordinates must be integers, so use floor division.
image.paste(original_image, tuple(map(lambda x: (x[0] - x[1]) // 2, zip(size, original_image.size))))
return image
async def _get_format(self, image, **options):
if options.get('format'):
return options.get('format')
if image.format:
return image.format
return self.app.config['THUMBNAIL_DEFAULT_FORMAT']
async def _create_thumbnail(self, image, size, crop='fit', background=None):
if crop == 'fit':
image = ImageOps.fit(image, size, Image.ANTIALIAS)
else:
image = image.copy()
image.thumbnail(size, resample=Image.ANTIALIAS)
if background is not None:
image = self.background(image)
image = self.colormode(image)
return image
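# Example usage (a minimal sketch; the app wiring, image path and size string
# are assumptions, not part of this module):
#
#     app = Sanic(__name__)
#     thumbnail = Thumbnail(app)
#     # ...later, inside an async handler:
#     url = await thumbnail.get_thumbnail('photos/cat.jpg', '200x200', crop='fit')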
|
self._default_root_directory = 'media'
self._default_thumbnail_directory = 'media'
|
defaultFunc.go
|
package good
import (
"time"
tf "github.com/tensorflow/tensorflow/tensorflow/go"
"github.com/zartbot/go_utils/map2value"
"github.com/zartbot/golap/api/datastream"
)
func DefaultModelFunc(d *datastream.DataStream, m *ModelContainer, sess *tf.Session)
|
{
startTime := time.Now()
imgBytes, err := map2value.MapToBytes(d.RecordMap, "image")
if err != nil {
d.RecordMap["State"] = "Failed decode image"
return
}
objlist, err := m.Prediction(imgBytes, sess)
if err != nil {
d.RecordMap["State"] = "Failed during detection"
return
}
ObjectCount := make(map[string]uint32)
for _, v := range objlist {
item, ok := ObjectCount[v.LabelStr]
if !ok {
ObjectCount[v.LabelStr] = uint32(1)
} else {
ObjectCount[v.LabelStr] = item + 1
}
}
d.RecordMap["detect_object"] = objlist
d.RecordMap["objectCount"] = ObjectCount
d.RecordMap["ElapsedTime_Prediction"] = time.Since(startTime)
// If you need to render the image:
/*
newimg, err := RenderObject(imgBytes, objlist)
if err != nil {
d.RecordMap["State"] = "Failed during render"
return
}
f, err := os.Create("/home/kevin/Desktop/d.jpg")
if err != nil {
fmt.Println(err)
return
}
n2, err := f.Write(newimg)
if err != nil {
fmt.Println(err)
f.Close()
return
}
fmt.Println(n2, "bytes written successfully")
err = f.Close()
if err != nil {
fmt.Println(err)
return
}
*/
d.RecordMap["State"] = "success"
delete(d.RecordMap, "image")
d.RecordMap["ElapsedTime"] = time.Since(startTime)
}
|
|
unix.rs
|
//! Unix specific definitions
use std::cmp;
use std::io::{self, ErrorKind, Read, Write};
use std::os::unix::io::{AsRawFd, RawFd};
use std::sync;
use std::sync::atomic::{AtomicBool, Ordering};
use log::{debug, warn};
use nix::poll::{self, PollFlags};
use nix::sys::signal;
use nix::sys::termios;
use nix::sys::termios::{SetArg, SpecialCharacterIndices as SCI, Termios};
use unicode_segmentation::UnicodeSegmentation;
use utf8parse::{Parser, Receiver};
use super::{width, RawMode, RawReader, Renderer, Term};
use crate::config::{BellStyle, ColorMode, Config, OutputStreamType};
use crate::error;
use crate::edit::Prompt;
use crate::highlight::Highlighter;
use crate::keys::{KeyCode as K, KeyEvent, KeyEvent as E, Modifiers as M};
use crate::layout::{Layout, Position};
use crate::line_buffer::LineBuffer;
use crate::{Cmd, Result};
use std::collections::HashMap;
use crate::tty::add_prompt_and_highlight;
const STDIN_FILENO: RawFd = libc::STDIN_FILENO;
/// Terminals that don't support RAW mode
const UNSUPPORTED_TERM: [&str; 3] = ["dumb", "cons25", "emacs"];
const BRACKETED_PASTE_ON: &[u8] = b"\x1b[?2004h";
const BRACKETED_PASTE_OFF: &[u8] = b"\x1b[?2004l";
impl AsRawFd for OutputStreamType {
fn as_raw_fd(&self) -> RawFd {
match self {
OutputStreamType::Stdout => libc::STDOUT_FILENO,
OutputStreamType::Stderr => libc::STDERR_FILENO,
}
}
}
nix::ioctl_read_bad!(win_size, libc::TIOCGWINSZ, libc::winsize);
#[allow(clippy::useless_conversion)]
fn get_win_size<T: AsRawFd + ?Sized>(fileno: &T) -> (usize, usize) {
use std::mem::zeroed;
if cfg!(test) {
return (80, 24);
}
unsafe {
let mut size: libc::winsize = zeroed();
match win_size(fileno.as_raw_fd(), &mut size) {
Ok(0) => {
// On Linux, pseudo-terminals are created with dimensions of
// zero. If the host application didn't initialize the correct
// size before start, we treat zero size as 80 columns and
// infinite rows.
let cols = if size.ws_col == 0 {
80
} else {
size.ws_col as usize
};
let rows = if size.ws_row == 0 {
usize::MAX
} else {
size.ws_row as usize
};
(cols, rows)
}
_ => (80, 24),
}
}
}
/// Check TERM environment variable to see if current term is in our
/// unsupported list
fn is_unsupported_term() -> bool {
match std::env::var("TERM") {
Ok(term) => {
for iter in &UNSUPPORTED_TERM {
if (*iter).eq_ignore_ascii_case(&term) {
return true;
}
}
false
}
Err(_) => false,
}
}
/// Return whether or not STDIN, STDOUT or STDERR is a TTY
fn is_a_tty(fd: RawFd) -> bool {
unsafe { libc::isatty(fd) != 0 }
}
pub type PosixKeyMap = HashMap<KeyEvent, Cmd>;
#[cfg(not(test))]
pub type KeyMap = PosixKeyMap;
#[must_use = "You must restore default mode (disable_raw_mode)"]
pub struct PosixMode {
termios: termios::Termios,
out: Option<OutputStreamType>,
}
#[cfg(not(test))]
pub type Mode = PosixMode;
impl RawMode for PosixMode {
/// Disable RAW mode for the terminal.
fn disable_raw_mode(&self) -> Result<()> {
termios::tcsetattr(STDIN_FILENO, SetArg::TCSADRAIN, &self.termios)?;
// disable bracketed paste
if let Some(out) = self.out {
write_and_flush(out, BRACKETED_PASTE_OFF)?;
}
Ok(())
}
}
// Rust std::io::Stdin is buffered with no way to know if bytes are available.
// So we use low-level stuff instead...
struct StdinRaw {}
impl Read for StdinRaw {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
loop {
let res = unsafe {
libc::read(
STDIN_FILENO,
buf.as_mut_ptr() as *mut libc::c_void,
buf.len() as libc::size_t,
)
};
if res == -1 {
let error = io::Error::last_os_error();
if error.kind() != ErrorKind::Interrupted || SIGWINCH.load(Ordering::Relaxed) {
return Err(error);
}
} else {
#[allow(clippy::cast_sign_loss)]
return Ok(res as usize);
}
}
}
}
/// Console input reader
pub struct PosixRawReader {
stdin: StdinRaw,
timeout_ms: i32,
buf: [u8; 1],
parser: Parser,
receiver: Utf8,
key_map: PosixKeyMap,
}
struct Utf8 {
c: Option<char>,
valid: bool,
}
const UP: char = 'A'; // kcuu1, kUP*
const DOWN: char = 'B'; // kcud1, kDN*
const RIGHT: char = 'C'; // kcuf1, kRIT*
const LEFT: char = 'D'; // kcub1, kLFT*
const END: char = 'F'; // kend*
const HOME: char = 'H'; // khom*
const INSERT: char = '2'; // kic*
const DELETE: char = '3'; // kdch1, kDC*
const PAGE_UP: char = '5'; // kpp, kPRV*
const PAGE_DOWN: char = '6'; // knp, kNXT*
const RXVT_HOME: char = '7';
const RXVT_END: char = '8';
const SHIFT: char = '2';
const ALT: char = '3';
const ALT_SHIFT: char = '4';
const CTRL: char = '5';
const CTRL_SHIFT: char = '6';
const CTRL_ALT: char = '7';
const CTRL_ALT_SHIFT: char = '8';
const RXVT_SHIFT: char = '$';
const RXVT_CTRL: char = '\x1e';
const RXVT_CTRL_SHIFT: char = '@';
impl PosixRawReader {
fn new(config: &Config, key_map: PosixKeyMap) -> Self {
Self {
stdin: StdinRaw {},
timeout_ms: config.keyseq_timeout(),
buf: [0; 1],
parser: Parser::new(),
receiver: Utf8 {
c: None,
valid: true,
},
key_map,
}
}
/// Handle \E <seq1> sequences
// https://invisible-island.net/xterm/xterm-function-keys.html
fn escape_sequence(&mut self) -> Result<KeyEvent> {
self._do_escape_sequence(true)
}
/// Don't call directly, call `PosixRawReader::escape_sequence` instead
fn _do_escape_sequence(&mut self, allow_recurse: bool) -> Result<KeyEvent> {
// Read the next byte representing the escape sequence.
let seq1 = self.next_char()?;
if seq1 == '[' {
// \E[ sequences. (CSI)
self.escape_csi()
} else if seq1 == 'O' {
// xterm
// \EO sequences. (SS3)
self.escape_o()
} else if seq1 == '\x1b' {
// \E\E — used by rxvt, iTerm (under default config), etc.
// ```
// \E\E[A => Alt-Up
// \E\E[B => Alt-Down
// \E\E[C => Alt-Right
// \E\E[D => Alt-Left
// ```
//
// In general this more or less works by just adding ALT to an existing
// key, but there is a wrinkle in that `ESC ESC` without anything
// following should be interpreted as the escape key.
//
// We handle this by polling to see if there's anything coming
// within our timeout, and if so, recursing once, but adding alt to
// what we read.
if !allow_recurse {
return Ok(E::ESC);
}
let timeout = if self.timeout_ms < 0 {
100
} else {
self.timeout_ms
};
match self.poll(timeout) {
// Ignore poll errors, it's very likely we'll pick them up on
// the next read anyway.
Ok(0) | Err(_) => Ok(E::ESC),
Ok(n) => {
debug_assert!(n > 0, "{}", n);
// recurse, and add the alt modifier.
let E(k, m) = self._do_escape_sequence(false)?;
Ok(E(k, m | M::ALT))
}
}
} else {
Ok(E::alt(seq1))
}
}
/// Handle \E[ <seq2> escape sequences
fn escape_csi(&mut self) -> Result<KeyEvent> {
let seq2 = self.next_char()?;
if seq2.is_digit(10) {
match seq2 {
'0' | '9' => {
debug!(target: "rustyline", "unsupported esc sequence: \\E[{:?}", seq2);
Ok(E(K::UnknownEscSeq, M::NONE))
}
_ => {
// Extended escape, read additional byte.
self.extended_escape(seq2)
}
}
} else if seq2 == '[' {
let seq3 = self.next_char()?;
// Linux console
Ok(match seq3 {
'A' => E(K::F(1), M::NONE),
'B' => E(K::F(2), M::NONE),
'C' => E(K::F(3), M::NONE),
'D' => E(K::F(4), M::NONE),
'E' => E(K::F(5), M::NONE),
_ => {
debug!(target: "rustyline", "unsupported esc sequence: \\E[[{:?}", seq3);
E(K::UnknownEscSeq, M::NONE)
}
})
} else {
// ANSI
Ok(match seq2 {
UP => E(K::Up, M::NONE),
DOWN => E(K::Down, M::NONE),
RIGHT => E(K::Right, M::NONE),
LEFT => E(K::Left, M::NONE),
//'E' => E(K::, M::), // Ignore
END => E(K::End, M::NONE),
//'G' => E(K::, M::), // Ignore
HOME => E(K::Home, M::NONE), // khome
//'J' => E(K::, M::), // clr_eos
//'K' => E(K::, M::), // clr_eol
//'L' => E(K::, M::), // il1
//'M' => E(K::, M::), // kmous
//'P' => E(K::Delete, M::NONE), // dch1
'Z' => E(K::BackTab, M::NONE),
'a' => E(K::Up, M::SHIFT), // rxvt: kind or kUP
'b' => E(K::Down, M::SHIFT), // rxvt: kri or kDN
'c' => E(K::Right, M::SHIFT), // rxvt
'd' => E(K::Left, M::SHIFT), // rxvt
_ => {
debug!(target: "rustyline", "unsupported esc sequence: \\E[{:?}", seq2);
E(K::UnknownEscSeq, M::NONE)
}
})
}
}
/// Handle \E[ <seq2:digit> escape sequences
#[allow(clippy::cognitive_complexity)]
fn extended_escape(&mut self, seq2: char) -> Result<KeyEvent> {
let seq3 = self.next_char()?;
if seq3 == '~' {
Ok(match seq2 {
'1' | RXVT_HOME => E(K::Home, M::NONE), // tmux, xrvt
INSERT => E(K::Insert, M::NONE),
DELETE => E(K::Delete, M::NONE),
'4' | RXVT_END => E(K::End, M::NONE), // tmux, xrvt
PAGE_UP => E(K::PageUp, M::NONE),
PAGE_DOWN => E(K::PageDown, M::NONE),
_ => {
debug!(target: "rustyline",
"unsupported esc sequence: \\E[{}~", seq2);
E(K::UnknownEscSeq, M::NONE)
}
})
} else if seq3.is_digit(10) {
let seq4 = self.next_char()?;
if seq4 == '~' {
Ok(match (seq2, seq3) {
('1', '1') => E(K::F(1), M::NONE), // rxvt-unicode
('1', '2') => E(K::F(2), M::NONE), // rxvt-unicode
('1', '3') => E(K::F(3), M::NONE), // rxvt-unicode
('1', '4') => E(K::F(4), M::NONE), // rxvt-unicode
('1', '5') => E(K::F(5), M::NONE), // kf5
('1', '7') => E(K::F(6), M::NONE), // kf6
('1', '8') => E(K::F(7), M::NONE), // kf7
('1', '9') => E(K::F(8), M::NONE), // kf8
('2', '0') => E(K::F(9), M::NONE), // kf9
('2', '1') => E(K::F(10), M::NONE), // kf10
('2', '3') => E(K::F(11), M::NONE), // kf11
('2', '4') => E(K::F(12), M::NONE), // kf12
//('6', '2') => KeyCode::ScrollUp,
//('6', '3') => KeyCode::ScrollDown,
_ => {
debug!(target: "rustyline",
"unsupported esc sequence: \\E[{}{}~", seq2, seq3);
E(K::UnknownEscSeq, M::NONE)
}
})
} else if seq4 == ';' {
let seq5 = self.next_char()?;
if seq5.is_digit(10) {
let seq6 = self.next_char()?;
if seq6.is_digit(10) {
self.next_char()?; // 'R' expected
Ok(E(K::UnknownEscSeq, M::NONE))
} else if seq6 == 'R' {
Ok(E(K::UnknownEscSeq, M::NONE))
} else if seq6 == '~' {
Ok(match (seq2, seq3, seq5) {
('1', '5', CTRL) => E(K::F(5), M::CTRL),
//('1', '5', '6') => E(K::F(17), M::CTRL),
('1', '7', CTRL) => E(K::F(6), M::CTRL),
//('1', '7', '6') => E(K::F(18), M::CTRL),
('1', '8', CTRL) => E(K::F(7), M::CTRL),
('1', '9', CTRL) => E(K::F(8), M::CTRL),
//('1', '9', '6') => E(K::F(19), M::CTRL),
('2', '0', CTRL) => E(K::F(9), M::CTRL),
//('2', '0', '6') => E(K::F(21), M::CTRL),
('2', '1', CTRL) => E(K::F(10), M::CTRL),
//('2', '1', '6') => E(K::F(22), M::CTRL),
('2', '3', CTRL) => E(K::F(11), M::CTRL),
//('2', '3', '6') => E(K::F(23), M::CTRL),
('2', '4', CTRL) => E(K::F(12), M::CTRL),
//('2', '4', '6') => E(K::F(24), M::CTRL),
_ => {
debug!(target: "rustyline",
"unsupported esc sequence: \\E[{}{};{}~", seq2, seq3, seq5);
E(K::UnknownEscSeq, M::NONE)
}
})
} else {
debug!(target: "rustyline",
"unsupported esc sequence: \\E[{}{};{}{}", seq2, seq3, seq5, seq6);
Ok(E(K::UnknownEscSeq, M::NONE))
}
} else {
debug!(target: "rustyline",
"unsupported esc sequence: \\E[{}{};{:?}", seq2, seq3, seq5);
Ok(E(K::UnknownEscSeq, M::NONE))
}
} else if seq4.is_digit(10) {
let seq5 = self.next_char()?;
if seq5 == '~' {
Ok(match (seq2, seq3, seq4) {
('2', '0', '0') => E(K::BracketedPasteStart, M::NONE),
('2', '0', '1') => E(K::BracketedPasteEnd, M::NONE),
_ => {
debug!(target: "rustyline",
"unsupported esc sequence: \\E[{}{}{}~", seq2, seq3, seq4);
E(K::UnknownEscSeq, M::NONE)
}
})
} else {
debug!(target: "rustyline",
"unsupported esc sequence: \\E[{}{}{}{}", seq2, seq3, seq4, seq5);
Ok(E(K::UnknownEscSeq, M::NONE))
}
} else {
debug!(target: "rustyline",
"unsupported esc sequence: \\E[{}{}{:?}", seq2, seq3, seq4);
Ok(E(K::UnknownEscSeq, M::NONE))
}
} else if seq3 == ';' {
let seq4 = self.next_char()?;
if seq4.is_digit(10) {
let seq5 = self.next_char()?;
if seq5.is_digit(10) {
self.next_char()?; // 'R' expected
//('1', '0', UP) => E(K::, M::), // Alt + Shift + Up
Ok(E(K::UnknownEscSeq, M::NONE))
} else if seq2 == '1' {
Ok(match (seq4, seq5) {
(SHIFT, UP) => E(K::Up, M::SHIFT), // ~ key_sr
(SHIFT, DOWN) => E(K::Down, M::SHIFT), // ~ key_sf
(SHIFT, RIGHT) => E(K::Right, M::SHIFT),
(SHIFT, LEFT) => E(K::Left, M::SHIFT),
(SHIFT, END) => E(K::End, M::SHIFT), // kEND
(SHIFT, HOME) => E(K::Home, M::SHIFT), // kHOM
//('2', 'P') => E(K::F(13), M::NONE),
//('2', 'Q') => E(K::F(14), M::NONE),
//('2', 'S') => E(K::F(16), M::NONE),
(ALT, UP) => E(K::Up, M::ALT),
(ALT, DOWN) => E(K::Down, M::ALT),
(ALT, RIGHT) => E(K::Right, M::ALT),
(ALT, LEFT) => E(K::Left, M::ALT),
(ALT, END) => E(K::End, M::ALT),
(ALT, HOME) => E(K::Home, M::ALT),
(ALT_SHIFT, UP) => E(K::Up, M::ALT_SHIFT),
(ALT_SHIFT, DOWN) => E(K::Down, M::ALT_SHIFT),
(ALT_SHIFT, RIGHT) => E(K::Right, M::ALT_SHIFT),
(ALT_SHIFT, LEFT) => E(K::Left, M::ALT_SHIFT),
(ALT_SHIFT, END) => E(K::End, M::ALT_SHIFT),
(ALT_SHIFT, HOME) => E(K::Home, M::ALT_SHIFT),
(CTRL, UP) => E(K::Up, M::CTRL),
(CTRL, DOWN) => E(K::Down, M::CTRL),
(CTRL, RIGHT) => E(K::Right, M::CTRL),
(CTRL, LEFT) => E(K::Left, M::CTRL),
(CTRL, END) => E(K::End, M::CTRL),
(CTRL, HOME) => E(K::Home, M::CTRL),
(CTRL, 'P') => E(K::F(1), M::CTRL),
(CTRL, 'Q') => E(K::F(2), M::CTRL),
(CTRL, 'S') => E(K::F(4), M::CTRL),
(CTRL, 'p') => E(K::Char('0'), M::CTRL),
(CTRL, 'q') => E(K::Char('1'), M::CTRL),
(CTRL, 'r') => E(K::Char('2'), M::CTRL),
(CTRL, 's') => E(K::Char('3'), M::CTRL),
(CTRL, 't') => E(K::Char('4'), M::CTRL),
(CTRL, 'u') => E(K::Char('5'), M::CTRL),
(CTRL, 'v') => E(K::Char('6'), M::CTRL),
(CTRL, 'w') => E(K::Char('7'), M::CTRL),
(CTRL, 'x') => E(K::Char('8'), M::CTRL),
(CTRL, 'y') => E(K::Char('9'), M::CTRL),
(CTRL_SHIFT, UP) => E(K::Up, M::CTRL_SHIFT),
(CTRL_SHIFT, DOWN) => E(K::Down, M::CTRL_SHIFT),
(CTRL_SHIFT, RIGHT) => E(K::Right, M::CTRL_SHIFT),
(CTRL_SHIFT, LEFT) => E(K::Left, M::CTRL_SHIFT),
(CTRL_SHIFT, END) => E(K::End, M::CTRL_SHIFT),
(CTRL_SHIFT, HOME) => E(K::Home, M::CTRL_SHIFT),
//('6', 'P') => E(K::F(13), M::CTRL),
//('6', 'Q') => E(K::F(14), M::CTRL),
//('6', 'S') => E(K::F(16), M::CTRL),
(CTRL_SHIFT, 'p') => E(K::Char('0'), M::CTRL_SHIFT),
(CTRL_SHIFT, 'q') => E(K::Char('1'), M::CTRL_SHIFT),
(CTRL_SHIFT, 'r') => E(K::Char('2'), M::CTRL_SHIFT),
(CTRL_SHIFT, 's') => E(K::Char('3'), M::CTRL_SHIFT),
(CTRL_SHIFT, 't') => E(K::Char('4'), M::CTRL_SHIFT),
(CTRL_SHIFT, 'u') => E(K::Char('5'), M::CTRL_SHIFT),
(CTRL_SHIFT, 'v') => E(K::Char('6'), M::CTRL_SHIFT),
(CTRL_SHIFT, 'w') => E(K::Char('7'), M::CTRL_SHIFT),
(CTRL_SHIFT, 'x') => E(K::Char('8'), M::CTRL_SHIFT),
(CTRL_SHIFT, 'y') => E(K::Char('9'), M::CTRL_SHIFT),
(CTRL_ALT, UP) => E(K::Up, M::CTRL_ALT),
(CTRL_ALT, DOWN) => E(K::Down, M::CTRL_ALT),
(CTRL_ALT, RIGHT) => E(K::Right, M::CTRL_ALT),
(CTRL_ALT, LEFT) => E(K::Left, M::CTRL_ALT),
(CTRL_ALT, END) => E(K::End, M::CTRL_ALT),
(CTRL_ALT, HOME) => E(K::Home, M::CTRL_ALT),
(CTRL_ALT, 'p') => E(K::Char('0'), M::CTRL_ALT),
(CTRL_ALT, 'q') => E(K::Char('1'), M::CTRL_ALT),
(CTRL_ALT, 'r') => E(K::Char('2'), M::CTRL_ALT),
(CTRL_ALT, 's') => E(K::Char('3'), M::CTRL_ALT),
(CTRL_ALT, 't') => E(K::Char('4'), M::CTRL_ALT),
(CTRL_ALT, 'u') => E(K::Char('5'), M::CTRL_ALT),
(CTRL_ALT, 'v') => E(K::Char('6'), M::CTRL_ALT),
(CTRL_ALT, 'w') => E(K::Char('7'), M::CTRL_ALT),
(CTRL_ALT, 'x') => E(K::Char('8'), M::CTRL_ALT),
(CTRL_ALT, 'y') => E(K::Char('9'), M::CTRL_ALT),
(CTRL_ALT_SHIFT, UP) => E(K::Up, M::CTRL_ALT_SHIFT),
(CTRL_ALT_SHIFT, DOWN) => E(K::Down, M::CTRL_ALT_SHIFT),
(CTRL_ALT_SHIFT, RIGHT) => E(K::Right, M::CTRL_ALT_SHIFT),
(CTRL_ALT_SHIFT, LEFT) => E(K::Left, M::CTRL_ALT_SHIFT),
(CTRL_ALT_SHIFT, END) => E(K::End, M::CTRL_ALT_SHIFT),
(CTRL_ALT_SHIFT, HOME) => E(K::Home, M::CTRL_ALT_SHIFT),
(CTRL_ALT_SHIFT, 'p') => E(K::Char('0'), M::CTRL_ALT_SHIFT),
(CTRL_ALT_SHIFT, 'q') => E(K::Char('1'), M::CTRL_ALT_SHIFT),
(CTRL_ALT_SHIFT, 'r') => E(K::Char('2'), M::CTRL_ALT_SHIFT),
(CTRL_ALT_SHIFT, 's') => E(K::Char('3'), M::CTRL_ALT_SHIFT),
(CTRL_ALT_SHIFT, 't') => E(K::Char('4'), M::CTRL_ALT_SHIFT),
(CTRL_ALT_SHIFT, 'u') => E(K::Char('5'), M::CTRL_ALT_SHIFT),
(CTRL_ALT_SHIFT, 'v') => E(K::Char('6'), M::CTRL_ALT_SHIFT),
(CTRL_ALT_SHIFT, 'w') => E(K::Char('7'), M::CTRL_ALT_SHIFT),
(CTRL_ALT_SHIFT, 'x') => E(K::Char('8'), M::CTRL_ALT_SHIFT),
(CTRL_ALT_SHIFT, 'y') => E(K::Char('9'), M::CTRL_ALT_SHIFT),
// Meta + arrow on (some?) Macs when using iTerm defaults
('9', UP) => E(K::Up, M::ALT),
('9', DOWN) => E(K::Down, M::ALT),
('9', RIGHT) => E(K::Right, M::ALT),
('9', LEFT) => E(K::Left, M::ALT),
_ => {
debug!(target: "rustyline",
"unsupported esc sequence: \\E[1;{}{:?}", seq4, seq5);
E(K::UnknownEscSeq, M::NONE)
}
})
} else if seq5 == '~' {
Ok(match (seq2, seq4) {
(INSERT, SHIFT) => E(K::Insert, M::SHIFT),
(INSERT, ALT) => E(K::Insert, M::ALT),
(INSERT, ALT_SHIFT) => E(K::Insert, M::ALT_SHIFT),
(INSERT, CTRL) => E(K::Insert, M::CTRL),
(INSERT, CTRL_SHIFT) => E(K::Insert, M::CTRL_SHIFT),
(INSERT, CTRL_ALT) => E(K::Insert, M::CTRL_ALT),
(INSERT, CTRL_ALT_SHIFT) => E(K::Insert, M::CTRL_ALT_SHIFT),
(DELETE, SHIFT) => E(K::Delete, M::SHIFT),
(DELETE, ALT) => E(K::Delete, M::ALT),
(DELETE, ALT_SHIFT) => E(K::Delete, M::ALT_SHIFT),
(DELETE, CTRL) => E(K::Delete, M::CTRL),
(DELETE, CTRL_SHIFT) => E(K::Delete, M::CTRL_SHIFT),
(DELETE, CTRL_ALT) => E(K::Delete, M::CTRL_ALT),
(DELETE, CTRL_ALT_SHIFT) => E(K::Delete, M::CTRL_ALT_SHIFT),
(PAGE_UP, SHIFT) => E(K::PageUp, M::SHIFT),
(PAGE_UP, ALT) => E(K::PageUp, M::ALT),
(PAGE_UP, ALT_SHIFT) => E(K::PageUp, M::ALT_SHIFT),
(PAGE_UP, CTRL) => E(K::PageUp, M::CTRL),
(PAGE_UP, CTRL_SHIFT) => E(K::PageUp, M::CTRL_SHIFT),
(PAGE_UP, CTRL_ALT) => E(K::PageUp, M::CTRL_ALT),
(PAGE_UP, CTRL_ALT_SHIFT) => E(K::PageUp, M::CTRL_ALT_SHIFT),
(PAGE_DOWN, SHIFT) => E(K::PageDown, M::SHIFT),
(PAGE_DOWN, ALT) => E(K::PageDown, M::ALT),
(PAGE_DOWN, ALT_SHIFT) => E(K::PageDown, M::ALT_SHIFT),
(PAGE_DOWN, CTRL) => E(K::PageDown, M::CTRL),
(PAGE_DOWN, CTRL_SHIFT) => E(K::PageDown, M::CTRL_SHIFT),
(PAGE_DOWN, CTRL_ALT) => E(K::PageDown, M::CTRL_ALT),
(PAGE_DOWN, CTRL_ALT_SHIFT) => E(K::PageDown, M::CTRL_ALT_SHIFT),
_ => {
debug!(target: "rustyline",
"unsupported esc sequence: \\E[{};{:?}~", seq2, seq4);
E(K::UnknownEscSeq, M::NONE)
}
})
} else {
debug!(target: "rustyline",
"unsupported esc sequence: \\E[{};{}{:?}", seq2, seq4, seq5);
Ok(E(K::UnknownEscSeq, M::NONE))
}
} else {
debug!(target: "rustyline",
"unsupported esc sequence: \\E[{};{:?}", seq2, seq4);
Ok(E(K::UnknownEscSeq, M::NONE))
}
} else {
Ok(match (seq2, seq3) {
(DELETE, RXVT_CTRL) => E(K::Delete, M::CTRL),
(DELETE, RXVT_CTRL_SHIFT) => E(K::Delete, M::CTRL_SHIFT),
(CTRL, UP) => E(K::Up, M::CTRL),
(CTRL, DOWN) => E(K::Down, M::CTRL),
(CTRL, RIGHT) => E(K::Right, M::CTRL),
(CTRL, LEFT) => E(K::Left, M::CTRL),
(PAGE_UP, RXVT_CTRL) => E(K::PageUp, M::CTRL),
(PAGE_UP, RXVT_SHIFT) => E(K::PageUp, M::SHIFT),
(PAGE_UP, RXVT_CTRL_SHIFT) => E(K::PageUp, M::CTRL_SHIFT),
(PAGE_DOWN, RXVT_CTRL) => E(K::PageDown, M::CTRL),
(PAGE_DOWN, RXVT_SHIFT) => E(K::PageDown, M::SHIFT),
(PAGE_DOWN, RXVT_CTRL_SHIFT) => E(K::PageDown, M::CTRL_SHIFT),
(RXVT_HOME, RXVT_CTRL) => E(K::Home, M::CTRL),
(RXVT_HOME, RXVT_SHIFT) => E(K::Home, M::SHIFT),
(RXVT_HOME, RXVT_CTRL_SHIFT) => E(K::Home, M::CTRL_SHIFT),
(RXVT_END, RXVT_CTRL) => E(K::End, M::CTRL), // kEND5 or kel
(RXVT_END, RXVT_SHIFT) => E(K::End, M::SHIFT),
(RXVT_END, RXVT_CTRL_SHIFT) => E(K::End, M::CTRL_SHIFT),
_ => {
debug!(target: "rustyline",
"unsupported esc sequence: \\E[{}{:?}", seq2, seq3);
E(K::UnknownEscSeq, M::NONE)
}
})
}
}
/// Handle \EO <seq2> escape sequences
fn escape_o(&mut self) -> Result<KeyEvent> {
let seq2 = self.next_char()?;
Ok(match seq2 {
UP => E(K::Up, M::NONE),
DOWN => E(K::Down, M::NONE),
RIGHT => E(K::Right, M::NONE),
LEFT => E(K::Left, M::NONE),
//'E' => E(K::, M::),// key_b2, kb2
END => E(K::End, M::NONE), // kend
HOME => E(K::Home, M::NONE), // khome
'M' => E::ENTER, // kent
'P' => E(K::F(1), M::NONE), // kf1
'Q' => E(K::F(2), M::NONE), // kf2
'R' => E(K::F(3), M::NONE), // kf3
'S' => E(K::F(4), M::NONE), // kf4
'a' => E(K::Up, M::CTRL),
'b' => E(K::Down, M::CTRL),
'c' => E(K::Right, M::CTRL), // rxvt
'd' => E(K::Left, M::CTRL), // rxvt
'l' => E(K::F(8), M::NONE),
't' => E(K::F(5), M::NONE), // kf5 or kb1
'u' => E(K::F(6), M::NONE), // kf6 or kb2
'v' => E(K::F(7), M::NONE), // kf7 or kb3
'w' => E(K::F(9), M::NONE), // kf9 or ka1
'x' => E(K::F(10), M::NONE), // kf10 or ka2
_ => {
debug!(target: "rustyline", "unsupported esc sequence: \\EO{:?}", seq2);
E(K::UnknownEscSeq, M::NONE)
}
})
}
fn poll(&mut self, timeout_ms: i32) -> ::nix::Result<i32> {
let mut fds = [poll::PollFd::new(STDIN_FILENO, PollFlags::POLLIN)];
let r = poll::poll(&mut fds, timeout_ms);
match r {
Ok(_) => r,
Err(nix::errno::Errno::EINTR) => {
if SIGWINCH.load(Ordering::Relaxed) {
r
} else {
Ok(0) // Ignore EINTR while polling
}
}
Err(_) => r,
}
}
}
impl RawReader for PosixRawReader {
fn next_key(&mut self, single_esc_abort: bool) -> Result<KeyEvent> {
let c = self.next_char()?;
let mut key = KeyEvent::new(c, M::NONE);
if key == E::ESC {
let timeout_ms = if single_esc_abort && self.timeout_ms == -1 {
0
} else {
self.timeout_ms
};
match self.poll(timeout_ms) {
Ok(n) if n == 0 => {
// single escape
}
Ok(_) => {
// escape sequence
key = self.escape_sequence()?
}
// Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
Err(e) => return Err(e.into()),
}
}
debug!(target: "rustyline", "c: {:?} => key: {:?}", c, key);
Ok(key)
}
fn next_char(&mut self) -> Result<char> {
loop {
let n = self.stdin.read(&mut self.buf)?;
if n == 0 {
return Err(error::ReadlineError::Eof);
}
let b = self.buf[0];
self.parser.advance(&mut self.receiver, b);
if !self.receiver.valid {
return Err(error::ReadlineError::Utf8Error);
} else if let Some(c) = self.receiver.c.take() {
return Ok(c);
}
}
}
fn read_pasted_text(&mut self) -> Result<String> {
let mut buffer = String::new();
loop {
match self.next_char()? {
'\x1b' => {
let key = self.escape_sequence()?;
if key == E(K::BracketedPasteEnd, M::NONE) {
break;
} else {
continue; // TODO validate
}
}
c => buffer.push(c),
};
}
let buffer = buffer.replace("\r\n", "\n");
let buffer = buffer.replace('\r', "\n");
Ok(buffer)
}
fn find_binding(&self, key: &KeyEvent) -> Option<Cmd> {
let cmd = self.key_map.get(key).cloned();
if let Some(ref cmd) = cmd {
debug!(target: "rustyline", "terminal key binding: {:?} => {:?}", key, cmd);
}
cmd
}
}
impl Receiver for Utf8 {
/// Called whenever a code point is parsed successfully
fn codepoint(&mut self, c: char) {
self.c = Some(c);
self.valid = true;
}
/// Called when an invalid_sequence is detected
fn invalid_sequence(&mut self) {
|
/// Console output writer
pub struct PosixRenderer {
out: OutputStreamType,
cols: usize, // Number of columns in terminal
buffer: String,
tab_stop: usize,
colors_enabled: bool,
bell_style: BellStyle,
}
impl PosixRenderer {
fn new(
out: OutputStreamType,
tab_stop: usize,
colors_enabled: bool,
bell_style: BellStyle,
) -> Self {
let (cols, _) = get_win_size(&out);
Self {
out,
cols,
buffer: String::with_capacity(1024),
tab_stop,
colors_enabled,
bell_style,
}
}
fn clear_old_rows(&mut self, layout: &Layout) {
use std::fmt::Write;
let current_row = layout.cursor.row;
let old_rows = layout.end.row;
// old_rows < cursor_row if the prompt spans multiple lines and if
// this is the default State.
let cursor_row_movement = old_rows.saturating_sub(current_row);
// move the cursor down as required
if cursor_row_movement > 0 {
write!(self.buffer, "\x1b[{}B", cursor_row_movement).unwrap();
}
// clear old rows
for _ in 0..old_rows {
self.buffer.push_str("\r\x1b[0K\x1b[A");
}
// clear the line
self.buffer.push_str("\r\x1b[0K");
}
}
impl Renderer for PosixRenderer {
type Reader = PosixRawReader;
fn move_cursor(&mut self, old: Position, new: Position) -> Result<()> {
use std::fmt::Write;
self.buffer.clear();
let row_ordering = new.row.cmp(&old.row);
if row_ordering == cmp::Ordering::Greater {
// move down
let row_shift = new.row - old.row;
if row_shift == 1 {
self.buffer.push_str("\x1b[B");
} else {
write!(self.buffer, "\x1b[{}B", row_shift).unwrap();
}
} else if row_ordering == cmp::Ordering::Less {
// move up
let row_shift = old.row - new.row;
if row_shift == 1 {
self.buffer.push_str("\x1b[A");
} else {
write!(self.buffer, "\x1b[{}A", row_shift).unwrap();
}
}
let col_ordering = new.col.cmp(&old.col);
if col_ordering == cmp::Ordering::Greater {
// move right
let col_shift = new.col - old.col;
if col_shift == 1 {
self.buffer.push_str("\x1b[C");
} else {
write!(self.buffer, "\x1b[{}C", col_shift).unwrap();
}
} else if col_ordering == cmp::Ordering::Less {
// move left
let col_shift = old.col - new.col;
if col_shift == 1 {
self.buffer.push_str("\x1b[D");
} else {
write!(self.buffer, "\x1b[{}D", col_shift).unwrap();
}
}
self.write_and_flush(self.buffer.as_bytes())
}
fn refresh_line(
&mut self,
prompt: &Prompt,
line: &LineBuffer,
hint: Option<&str>,
old_layout: &Layout,
new_layout: &Layout,
highlighter: Option<&dyn Highlighter>,
) -> Result<()> {
use std::fmt::Write;
self.buffer.clear();
let cursor = new_layout.cursor;
let end_pos = new_layout.end;
self.clear_old_rows(old_layout);
add_prompt_and_highlight(|s| self.buffer.push_str(s),
highlighter, line, prompt);
// display hint
if let Some(hint) = hint {
if let Some(highlighter) = highlighter {
self.buffer.push_str(&highlighter.highlight_hint(hint));
} else {
self.buffer.push_str(hint);
}
}
// we have to generate our own newline on line wrap
if end_pos.col == 0
&& end_pos.row > 0
&& !hint
.map(|h| h.ends_with('\n'))
.unwrap_or_else(|| line.ends_with('\n'))
{
self.buffer.push('\n');
}
// position the cursor
let new_cursor_row_movement = end_pos.row - cursor.row;
// move the cursor up as required
if new_cursor_row_movement > 0 {
write!(self.buffer, "\x1b[{}A", new_cursor_row_movement).unwrap();
}
// position the cursor within the line
if cursor.col > 0 {
write!(self.buffer, "\r\x1b[{}C", cursor.col).unwrap();
} else {
self.buffer.push('\r');
}
self.write_and_flush(self.buffer.as_bytes())?;
Ok(())
}
fn write_and_flush(&self, buf: &[u8]) -> Result<()> {
write_and_flush(self.out, buf)
}
/// Control characters are treated as having zero width.
/// Characters with 2 column width are correctly handled (not split).
fn calculate_position(&self, s: &str, orig: Position, left_margin: usize)
-> Position
{
let mut pos = orig;
let mut esc_seq = 0;
for c in s.graphemes(true) {
if c == "\n" {
pos.row += 1;
pos.col = left_margin;
continue;
}
let cw = if c == "\t" {
self.tab_stop - (pos.col % self.tab_stop)
} else {
width(c, &mut esc_seq)
};
pos.col += cw;
if pos.col > self.cols {
pos.row += 1;
pos.col = cw;
}
}
if pos.col == self.cols {
pos.col = 0;
pos.row += 1;
}
pos
}
fn beep(&mut self) -> Result<()> {
match self.bell_style {
BellStyle::Audible => {
io::stderr().write_all(b"\x07")?;
io::stderr().flush()?;
Ok(())
}
_ => Ok(()),
}
}
/// Clear the screen. Used to handle ctrl+l
fn clear_screen(&mut self) -> Result<()> {
self.write_and_flush(b"\x1b[H\x1b[2J")
}
/// Check if a SIGWINCH signal has been received
fn sigwinch(&self) -> bool {
SIGWINCH
.compare_exchange(true, false, Ordering::SeqCst, Ordering::SeqCst)
.unwrap_or(false)
}
/// Try to update the number of columns in the current terminal.
fn update_size(&mut self) {
let (cols, _) = get_win_size(&self.out);
self.cols = cols;
}
fn get_columns(&self) -> usize {
self.cols
}
/// Try to get the number of rows in the current terminal,
/// or assume 24 if it fails.
fn get_rows(&self) -> usize {
let (_, rows) = get_win_size(&self.out);
rows
}
fn colors_enabled(&self) -> bool {
self.colors_enabled
}
fn move_cursor_at_leftmost(&mut self, rdr: &mut PosixRawReader) -> Result<()> {
if rdr.poll(0)? != 0 {
debug!(target: "rustyline", "cannot request cursor location");
return Ok(());
}
/* Report cursor location */
self.write_and_flush(b"\x1b[6n")?;
/* Read the response: ESC [ rows ; cols R */
if rdr.poll(100)? == 0
|| rdr.next_char()? != '\x1b'
|| rdr.next_char()? != '['
|| read_digits_until(rdr, ';')?.is_none()
{
warn!(target: "rustyline", "cannot read initial cursor location");
return Ok(());
}
let col = read_digits_until(rdr, 'R')?;
debug!(target: "rustyline", "initial cursor location: {:?}", col);
if col != Some(1) {
self.write_and_flush(b"\n")?;
}
Ok(())
}
}
fn read_digits_until(rdr: &mut PosixRawReader, sep: char) -> Result<Option<u32>> {
let mut num: u32 = 0;
loop {
match rdr.next_char()? {
digit @ '0'..='9' => {
num = num
.saturating_mul(10)
.saturating_add(digit.to_digit(10).unwrap());
continue;
}
c if c == sep => break,
_ => return Ok(None),
}
}
Ok(Some(num))
}
static SIGWINCH_ONCE: sync::Once = sync::Once::new();
static SIGWINCH: AtomicBool = AtomicBool::new(false);
fn install_sigwinch_handler() {
SIGWINCH_ONCE.call_once(|| unsafe {
let sigwinch = signal::SigAction::new(
signal::SigHandler::Handler(sigwinch_handler),
signal::SaFlags::empty(),
signal::SigSet::empty(),
);
let _ = signal::sigaction(signal::SIGWINCH, &sigwinch);
});
}
extern "C" fn sigwinch_handler(_: libc::c_int) {
SIGWINCH.store(true, Ordering::SeqCst);
debug!(target: "rustyline", "SIGWINCH");
}
fn map_key(key_map: &mut HashMap<KeyEvent, Cmd>, raw: &Termios, index: SCI, name: &str, cmd: Cmd) {
let cc = char::from(raw.control_chars[index as usize]);
let key = KeyEvent::new(cc, M::NONE);
debug!(target: "rustyline", "{}: {:?}", name, key);
key_map.insert(key, cmd);
}
#[cfg(not(test))]
pub type Terminal = PosixTerminal;
#[derive(Clone, Debug)]
pub struct PosixTerminal {
unsupported: bool,
stdin_isatty: bool,
stdstream_isatty: bool,
pub(crate) color_mode: ColorMode,
stream_type: OutputStreamType,
tab_stop: usize,
bell_style: BellStyle,
enable_bracketed_paste: bool,
}
impl PosixTerminal {
fn colors_enabled(&self) -> bool {
match self.color_mode {
ColorMode::Enabled => self.stdstream_isatty,
ColorMode::Forced => true,
ColorMode::Disabled => false,
}
}
}
impl Term for PosixTerminal {
type KeyMap = PosixKeyMap;
type Mode = PosixMode;
type Reader = PosixRawReader;
type Writer = PosixRenderer;
fn new(
color_mode: ColorMode,
stream_type: OutputStreamType,
tab_stop: usize,
bell_style: BellStyle,
enable_bracketed_paste: bool,
) -> Self {
let term = Self {
unsupported: is_unsupported_term(),
stdin_isatty: is_a_tty(STDIN_FILENO),
stdstream_isatty: is_a_tty(stream_type.as_raw_fd()),
color_mode,
stream_type,
tab_stop,
bell_style,
enable_bracketed_paste,
};
if !term.unsupported && term.stdin_isatty && term.stdstream_isatty {
install_sigwinch_handler();
}
term
}
// Init checks:
/// Check if current terminal can provide a rich line-editing user
/// interface.
fn is_unsupported(&self) -> bool {
self.unsupported
}
/// check if stdin is connected to a terminal.
fn is_stdin_tty(&self) -> bool {
self.stdin_isatty
}
fn is_output_tty(&self) -> bool {
self.stdstream_isatty
}
// Interactive loop:
fn enable_raw_mode(&mut self) -> Result<(Self::Mode, PosixKeyMap)> {
use nix::errno::Errno::ENOTTY;
use nix::sys::termios::{ControlFlags, InputFlags, LocalFlags};
if !self.stdin_isatty {
return Err(ENOTTY.into());
}
let original_mode = termios::tcgetattr(STDIN_FILENO)?;
let mut raw = original_mode.clone();
// disable BREAK interrupt, CR to NL conversion on input,
// input parity check, strip high bit (bit 8), output flow control
raw.input_flags &= !(InputFlags::BRKINT
| InputFlags::ICRNL
| InputFlags::INPCK
| InputFlags::ISTRIP
| InputFlags::IXON);
// we don't want raw output, it turns newlines into straight line feeds
// disable all output processing
// raw.c_oflag = raw.c_oflag & !(OutputFlags::OPOST);
// character-size mark (8 bits)
raw.control_flags |= ControlFlags::CS8;
// disable echoing, canonical mode, extended input processing and signals
raw.local_flags &=
!(LocalFlags::ECHO | LocalFlags::ICANON | LocalFlags::IEXTEN | LocalFlags::ISIG);
raw.control_chars[SCI::VMIN as usize] = 1; // One character-at-a-time input
raw.control_chars[SCI::VTIME as usize] = 0; // with blocking read
let mut key_map: HashMap<KeyEvent, Cmd> = HashMap::with_capacity(4);
map_key(&mut key_map, &raw, SCI::VEOF, "VEOF", Cmd::EndOfFile);
map_key(&mut key_map, &raw, SCI::VINTR, "VINTR", Cmd::Interrupt);
map_key(&mut key_map, &raw, SCI::VQUIT, "VQUIT", Cmd::Interrupt);
map_key(&mut key_map, &raw, SCI::VSUSP, "VSUSP", Cmd::Suspend);
termios::tcsetattr(STDIN_FILENO, SetArg::TCSADRAIN, &raw)?;
// enable bracketed paste
let out = if !self.enable_bracketed_paste {
None
} else if let Err(e) = write_and_flush(self.stream_type, BRACKETED_PASTE_ON) {
debug!(target: "rustyline", "Cannot enable bracketed paste: {}", e);
None
} else {
Some(self.stream_type)
};
Ok((
PosixMode {
termios: original_mode,
out,
},
key_map,
))
}
/// Create a RAW reader
fn create_reader(&self, config: &Config, key_map: PosixKeyMap) -> Result<PosixRawReader> {
Ok(PosixRawReader::new(config, key_map))
}
fn create_writer(&self) -> PosixRenderer {
PosixRenderer::new(
self.stream_type,
self.tab_stop,
self.colors_enabled(),
self.bell_style,
)
}
}
#[cfg(not(test))]
pub fn suspend() -> Result<()> {
use nix::unistd::Pid;
// suspend the whole process group
signal::kill(Pid::from_raw(0), signal::SIGTSTP)?;
Ok(())
}
fn write_and_flush(out: OutputStreamType, buf: &[u8]) -> Result<()> {
match out {
OutputStreamType::Stdout => {
io::stdout().write_all(buf)?;
io::stdout().flush()?;
}
OutputStreamType::Stderr => {
io::stderr().write_all(buf)?;
io::stderr().flush()?;
}
}
Ok(())
}
#[cfg(test)]
mod test {
use super::{Position, PosixRenderer, PosixTerminal, Renderer};
use crate::config::{BellStyle, OutputStreamType};
use crate::line_buffer::LineBuffer;
use crate::edit::Prompt;
#[test]
#[ignore]
fn prompt_with_ansi_escape_codes() {
let out = PosixRenderer::new(OutputStreamType::Stdout, 4, true, BellStyle::default());
let pos = out.calculate_position("\x1b[1;32m>>\x1b[0m ", Position::default(), 0);
assert_eq!(3, pos.col);
assert_eq!(0, pos.row);
}
#[test]
fn test_unsupported_term() {
::std::env::set_var("TERM", "xterm");
assert!(!super::is_unsupported_term());
::std::env::set_var("TERM", "dumb");
assert!(super::is_unsupported_term());
}
#[test]
fn test_send() {
fn assert_send<T: Send>() {}
assert_send::<PosixTerminal>();
}
#[test]
fn test_sync() {
fn assert_sync<T: Sync>() {}
assert_sync::<PosixTerminal>();
}
#[test]
fn test_line_wrap() {
let mut out = PosixRenderer::new(OutputStreamType::Stdout, 4, true, BellStyle::default());
let prompt = Prompt {
text: "> ",
is_default: true,
size: out.calculate_position("> ", Position::default(), 0),
has_continuation: false,
};
let mut line = LineBuffer::init("", 0, None);
let old_layout = out.compute_layout(&prompt, &line, None);
assert_eq!(Position { col: 2, row: 0 }, old_layout.cursor);
assert_eq!(old_layout.cursor, old_layout.end);
assert_eq!(Some(true), line.insert('a', out.cols - prompt.size.col + 1));
let new_layout = out.compute_layout(&prompt, &line, None);
assert_eq!(Position { col: 1, row: 1 }, new_layout.cursor);
assert_eq!(new_layout.cursor, new_layout.end);
out.refresh_line(&prompt, &line, None, &old_layout, &new_layout, None)
.unwrap();
#[rustfmt::skip]
assert_eq!(
"\r\u{1b}[0K> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\r\u{1b}[1C",
out.buffer
);
}
}
|
self.c = None;
self.valid = false;
}
}
|
specfunc.go
|
package main
import (
"fmt"
"strings"
)
const refString = "Mary*had,a%little_lamb"
func main()
|
{
// The splitFunc is called for each
// rune in the string. If the rune
// equals any character in "*%,_",
// the refString is split.
words := strings.FieldsFunc(refString, func(r rune) bool {
return strings.ContainsRune("*%,_", r)
})
for idx, word := range words {
fmt.Printf("Word %d is: %s\n", idx, word)
}
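// Expected output:
// Word 0 is: Mary
// Word 1 is: had
// Word 2 is: a
// Word 3 is: little
// Word 4 is: lamb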
}
|
|
progress.go
|
package commands
import (
"fmt"
"time"
)
type progress struct {
start time.Time
every int
count int
i int
}
func newProgress(every int) *progress
|
func (p *progress) rate() int {
dur := time.Since(p.start)
return int(1000.0 / float64(dur/time.Millisecond) * float64(p.count))
}
func (p *progress) inc() {
p.count++
p.i++
if p.i == p.every {
p.i = 0
fmt.Printf("\r%d", p.count)
}
}
func (p *progress) done() {
fmt.Printf("\r%d done (%d/sec)\n", p.count, p.rate())
}
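// Example usage (illustrative; `items` and `process` are placeholders):
//
//	p := newProgress(1000)
//	for _, item := range items {
//		process(item)
//		p.inc()
//	}
//	p.done()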
|
{
p := &progress{start: time.Now(), every: every}
return p
}
|
waiter.go
|
// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package botanist
import (
"context"
"fmt"
"net"
"time"
"github.com/gardener/gardener/pkg/operation/common"
kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
"github.com/gardener/gardener/pkg/utils/retry"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
// WaitUntilNginxIngressServiceIsReady waits until the external load balancer of the nginx ingress controller has been created.
func (b *Botanist) WaitUntilNginxIngressServiceIsReady(ctx context.Context) error {
const timeout = 10 * time.Minute
loadBalancerIngress, err := kutil.WaitUntilLoadBalancerIsReady(ctx, b.K8sShootClient, metav1.NamespaceSystem, "addons-nginx-ingress-controller", timeout, b.Logger)
if err != nil {
return err
}
b.SetNginxIngressAddress(loadBalancerIngress, b.K8sSeedClient.Client())
return nil
}
// WaitUntilVpnShootServiceIsReady waits until the external load balancer of the VPN has been created.
func (b *Botanist) WaitUntilVpnShootServiceIsReady(ctx context.Context) error {
const timeout = 10 * time.Minute
_, err := kutil.WaitUntilLoadBalancerIsReady(ctx, b.K8sShootClient, metav1.NamespaceSystem, "vpn-shoot", timeout, b.Logger)
return err
}
// WaitUntilTunnelConnectionExists waits until a port forward connection to the tunnel pod (vpn-shoot) in the kube-system
// namespace of the Shoot cluster can be established.
func (b *Botanist) WaitUntilTunnelConnectionExists(ctx context.Context) error {
timeoutCtx, cancel := context.WithTimeout(ctx, 15*time.Minute)
defer cancel()
return retry.Until(timeoutCtx, 5*time.Second, func(ctx context.Context) (bool, error) {
done, err := CheckTunnelConnection(ctx, b.K8sShootClient, b.Logger, common.VPNTunnel)
// If the tunnel connection check failed but is not yet "done" (i.e., it will be retried
// because it didn't fail with a severe error), and the classic VPN solution is used for
// the shoot cluster, then try to fetch the last events of the vpn-shoot service
// (potentially indicating an error with the load balancer service).
if err != nil &&
!done &&
!b.Shoot.ReversedVPNEnabled {
b.Logger.Errorf("error %v occurred while checking the tunnel connection", err)
service := &corev1.Service{
TypeMeta: metav1.TypeMeta{
APIVersion: corev1.SchemeGroupVersion.String(),
Kind: "Service",
},
ObjectMeta: metav1.ObjectMeta{
Name: "vpn-shoot",
Namespace: metav1.NamespaceSystem,
},
}
eventsErrorMessage, err2 := kutil.FetchEventMessages(ctx, b.K8sShootClient.Client().Scheme(), b.K8sShootClient.Client(), service, corev1.EventTypeWarning, 2)
if err2 != nil {
b.Logger.Errorf("error %v occurred while fetching events for VPN load balancer service", err2)
return retry.SevereError(fmt.Errorf("'%w' occurred but could not fetch events for more information", err))
}
if eventsErrorMessage != ""
|
}
return done, err
})
}
// WaitUntilNodesDeleted waits until no nodes exist in the shoot cluster anymore.
func (b *Botanist) WaitUntilNodesDeleted(ctx context.Context) error {
return retry.Until(ctx, 5*time.Second, func(ctx context.Context) (done bool, err error) {
nodesList := &corev1.NodeList{}
if err := b.K8sShootClient.Client().List(ctx, nodesList); err != nil {
return retry.SevereError(err)
}
if len(nodesList.Items) == 0 {
return retry.Ok()
}
b.Logger.Infof("Waiting until all nodes have been deleted in the shoot cluster...")
return retry.MinorError(fmt.Errorf("not all nodes have been deleted in the shoot cluster"))
})
}
// WaitUntilNoPodRunning waits until there is no running Pod in the shoot cluster.
func (b *Botanist) WaitUntilNoPodRunning(ctx context.Context) error {
b.Logger.Info("waiting until there are no running Pods in the shoot cluster...")
return retry.Until(ctx, 5*time.Second, func(ctx context.Context) (done bool, err error) {
podList := &corev1.PodList{}
if err := b.K8sShootClient.Client().List(ctx, podList); err != nil {
return retry.SevereError(err)
}
for _, pod := range podList.Items {
if pod.Status.Phase == corev1.PodRunning {
msg := fmt.Sprintf("waiting until there are no running Pods in the shoot cluster... "+
"there is still at least one running Pod in the shoot cluster: %s/%s", pod.Namespace, pod.Name)
b.Logger.Info(msg)
return retry.MinorError(fmt.Errorf(msg))
}
}
return retry.Ok()
})
}
// WaitUntilEndpointsDoNotContainPodIPs waits until no Endpoints in the shoot cluster contain IPs from the Shoot's PodCIDR.
func (b *Botanist) WaitUntilEndpointsDoNotContainPodIPs(ctx context.Context) error {
b.Logger.Info("waiting until there are no Endpoints containing Pod IPs in the shoot cluster...")
var podsNetwork *net.IPNet
if val := b.Shoot.Info.Spec.Networking.Pods; val != nil {
var err error
_, podsNetwork, err = net.ParseCIDR(*val)
if err != nil {
return fmt.Errorf("unable to check if there are still Endpoints containing Pod IPs in the shoot cluster. Shoots's Pods network could not be parsed: %+v", err)
}
} else {
return fmt.Errorf("unable to check if there are still Endpoints containing Pod IPs in the shoot cluster. Shoot's Pods network is empty")
}
return retry.Until(ctx, 5*time.Second, func(ctx context.Context) (done bool, err error) {
endpointsList := &corev1.EndpointsList{}
if err := b.K8sShootClient.Client().List(ctx, endpointsList); err != nil {
return retry.SevereError(err)
}
serviceList := &corev1.ServiceList{}
if err := b.K8sShootClient.Client().List(ctx, serviceList); err != nil {
return retry.SevereError(err)
}
epsNotReconciledByKCM := sets.NewString()
for _, service := range serviceList.Items {
// if service.Spec.Selector is empty or nil, kube-controller-manager will not reconcile Endpoints for this Service
if len(service.Spec.Selector) == 0 {
epsNotReconciledByKCM.Insert(fmt.Sprintf("%s/%s", service.Namespace, service.Name))
}
}
for _, endpoints := range endpointsList.Items {
if epsNotReconciledByKCM.Has(fmt.Sprintf("%s/%s", endpoints.Namespace, endpoints.Name)) {
continue
}
for _, subset := range endpoints.Subsets {
for _, address := range subset.Addresses {
if podsNetwork.Contains(net.ParseIP(address.IP)) {
msg := fmt.Sprintf("waiting until there are no Endpoints containing Pod IPs in the shoot cluster... "+
"There is still at least one Endpoints object containing a Pod's IP: %s/%s, IP: %s", endpoints.Namespace, endpoints.Name, address.IP)
b.Logger.Info(msg)
return retry.MinorError(fmt.Errorf(msg))
}
}
}
}
return retry.Ok()
})
}
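// Illustrative example (hypothetical values, not from the source): with a
// Pods CIDR of "100.96.0.0/11", an Endpoints address such as 100.96.1.5 lies
// inside podsNetwork and keeps the retry loop above polling, while a
// node-network address such as 10.250.0.7 lies outside the CIDR and is
// ignored by the check.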
// WaitUntilRequiredExtensionsReady waits until all the extensions required for a shoot reconciliation are ready
func (b *Botanist) WaitUntilRequiredExtensionsReady(ctx context.Context) error {
return retry.UntilTimeout(ctx, 5*time.Second, time.Minute, func(ctx context.Context) (done bool, err error) {
if err := b.RequiredExtensionsReady(ctx); err != nil {
b.Logger.Infof("Waiting until all the required extension controllers are ready (%+v)", err)
return retry.MinorError(err)
}
return retry.Ok()
})
}
|
{
return retry.SevereError(fmt.Errorf("%s\n\n%s", err.Error(), eventsErrorMessage))
}
|
BillboardDeviceCapability.rs
|
// This file is part of security-keys-rust. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/security-keys-rust/master/COPYRIGHT. No part of security-keys-rust, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2021 The developers of security-keys-rust. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/security-keys-rust/master/COPYRIGHT.
/// Billboard device capability.
#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct BillboardDeviceCapability
{
url: Option<LocalizedStrings>,
vconn_power: Option<BillboardVconnPowerInWatts>,
version: Version,
device_container_failed_because: Option<BillboardDeviceContainerFailedBecause>,
alternate_modes: Vec<BillboardAlternateMode>,
preferred_alternate_mode_index: u8,
}
impl BillboardDeviceCapability
{
#[allow(missing_docs)]
#[inline(always)]
pub const fn url(&self) -> Option<&LocalizedStrings>
{
self.url.as_ref()
}
#[allow(missing_docs)]
#[inline(always)]
pub const fn vconn_power(&self) -> Option<BillboardVconnPowerInWatts>
{
self.vconn_power
}
#[allow(missing_docs)]
#[inline(always)]
pub const fn version(&self) -> Version
{
self.version
}
#[allow(missing_docs)]
#[inline(always)]
pub const fn device_container_failed_because(&self) -> Option<BillboardDeviceContainerFailedBecause>
{
self.device_container_failed_because
}
#[allow(missing_docs)]
#[inline(always)]
pub fn alternate_modes(&self) -> &[BillboardAlternateMode]
{
|
#[allow(missing_docs)]
#[inline(always)]
pub fn preferred_alternate_mode(&self) -> &BillboardAlternateMode
{
self.alternate_modes.get_unchecked_safe(self.preferred_alternate_mode_index)
}
const MinimumBLength: usize = 44;
#[inline(always)]
pub(super) fn parse(device_capability_bytes: &[u8], device_connection: &DeviceConnection) -> Result<DeadOrAlive<Self>, BillboardDeviceCapabilityParseError>
{
use BillboardDeviceCapabilityParseError::*;
const MinimumBLength: usize = BillboardDeviceCapability::MinimumBLength;
let length = device_capability_bytes.len();
const MinimumSize: usize = minimum_size::<MinimumBLength>();
if unlikely!(length < MinimumSize)
{
return Err(ShorterThanMinimumSize)
}
let number_of_alternate_modes = BillboardAlternateMode::parse_number_of_alternate_modes(device_capability_bytes)?;
let preferred_alternate_mode_index = device_capability_bytes.u8(capability_descriptor_index::<5>());
if unlikely!((preferred_alternate_mode_index as usize) >= number_of_alternate_modes)
{
return Err(PreferredAlternateModeIndexTooLarge { preferred_alternate_mode_index, number_of_alternate_modes: number_of_alternate_modes as u8 })
}
let configuration_result = device_capability_bytes.bytes(capability_descriptor_index::<8>(), 32);
let alternate_modes = return_ok_if_dead!(BillboardAlternateMode::parse_alternate_modes(device_capability_bytes.get_unchecked_range_safe(MinimumSize .. ), number_of_alternate_modes, configuration_result, device_connection)?);
let version = device_capability_bytes.version(capability_descriptor_index::<40>()).map_err(VersionParse)?;
Ok
(
Alive
(
Self
{
url: return_ok_if_dead!(device_connection.find_string(device_capability_bytes.u8(capability_descriptor_index::<4>())).map_err(InvalidAdditionalInformationUrl)?),
version,
vconn_power: BillboardVconnPowerInWatts::parse(device_capability_bytes),
device_container_failed_because: BillboardDeviceContainerFailedBecause::parse(version, device_capability_bytes),
alternate_modes,
preferred_alternate_mode_index,
}
)
)
}
}
|
&self.alternate_modes
}
|
indexer.go
|
package indexeres
import (
"github.com/rancher/harvester-server/pkg/apis/harvester.cattle.io/v1alpha1"
"github.com/rancher/harvester-server/pkg/config"
rbacv1 "k8s.io/api/rbac/v1"
)
const (
UserNameIndex = "auth.harvester.cattle.io/user-username-index"
RbByRoleAndSubjectIndex = "auth.harvester.cattle.io/crb-by-role-and-subject"
)
func
|
(scaled *config.Scaled) {
informer := scaled.Management.HarvesterFactory.Harvester().V1alpha1().User().Cache()
informer.AddIndexer(UserNameIndex, indexUserByUsername)
}
func RegisterManagementIndexers(management *config.Management) {
informer := management.RbacFactory.Rbac().V1().ClusterRoleBinding().Cache()
informer.AddIndexer(RbByRoleAndSubjectIndex, rbByRoleAndSubject)
}
func indexUserByUsername(obj *v1alpha1.User) ([]string, error) {
return []string{obj.Username}, nil
}
func rbByRoleAndSubject(obj *rbacv1.ClusterRoleBinding) ([]string, error) {
var keys []string
for _, s := range obj.Subjects {
keys = append(keys, RbRoleSubjectKey(obj.RoleRef.Name, s))
}
return keys, nil
}
func RbRoleSubjectKey(roleName string, subject rbacv1.Subject) string {
return roleName + "." + subject.Kind + "." + subject.Name
}
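// Example (hypothetical values): RbRoleSubjectKey("harvester-admin",
// rbacv1.Subject{Kind: "User", Name: "alice"}) returns
// "harvester-admin.User.alice", the same key format that rbByRoleAndSubject
// stores for each subject of a ClusterRoleBinding, so cache lookups must be
// built with this helper.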
|
RegisterScaledIndexers
|
verilog_gen.rs
|
use evalexpr::ContextWithMutableVariables;
use num_bigint::BigUint;
use regex::Regex;
use crate::core::ast::{
VerilogBlock, VerilogBlockOrConditional, VerilogCase, VerilogConditional, VerilogExpression,
VerilogLink, VerilogLiteral, VerilogLoop, VerilogMatch, VerilogOp, VerilogOpUnary,
};
use crate::core::code_writer::CodeWriter;
use crate::core::verilog_visitor::{walk_block, VerilogVisitor};
struct LoopVariable {
variable: String,
value: usize,
}
pub struct VerilogCodeGenerator {
io: CodeWriter,
loops: Vec<LoopVariable>,
links: Vec<VerilogLink>,
}
impl VerilogCodeGenerator {
pub fn new() -> VerilogCodeGenerator {
Self {
io: CodeWriter::new(),
loops: vec![],
links: vec![],
}
}
fn array_index_simplification(&self, a: &str) -> String {
let re = Regex::new(r"\[([^\]]*)\]").unwrap();
let mut context = evalexpr::HashMapContext::new();
for lvar in &self.loops {
let _ = context.set_value(lvar.variable.clone(), (lvar.value as i64).into());
}
for x in re.captures(a) {
if x.len() == 2 {
if let Some(txt) = x.get(1) {
let arg = evalexpr::eval_with_context(txt.as_str(), &context).unwrap();
return re.replace(a, format!("$${}", arg)).to_string();
}
}
}
a.to_string()
}
fn ident_fixup(&self, a: &str) -> String {
let mut x = a.to_owned();
for index in &self.loops {
if x == index.variable {
x = format!("{}", index.value);
}
}
if x.starts_with(".") {
x.remove(0);
}
x = x
.replace(".", "$")
.replace("::", "$")
.trim_end_matches("$next")
.to_owned();
if x.contains('[') {
x = self.array_index_simplification(&x);
}
x
}
}
impl ToString for VerilogCodeGenerator {
fn to_string(&self) -> String {
self.io.to_string()
}
}
pub fn verilog_combinatorial(code: &VerilogBlock) -> String {
let mut gen = VerilogCodeGenerator::new();
gen.visit_block(code);
// add forward links to the code
let links = gen
.links
.iter()
.map(|x| {
match x {
VerilogLink::Forward(x) => {
format!(
"always @(*) {}${} = {}${};",
x.other_name.replace("[", "$").replace("]", ""),
x.my_name,
x.owner_name.replace("[", "$").replace("]", ""),
x.my_name
)
}
VerilogLink::Backward(x) => {
format!(
"always @(*) {}${} = {}${};",
x.owner_name.replace("[", "$").replace("]", ""),
x.my_name,
x.other_name.replace("[", "$").replace("]", ""),
x.my_name
)
}
VerilogLink::Bidirectional(x) => {
if x.my_name.is_empty() {
format!("assign {} = {};", x.owner_name, x.other_name)
} else {
format!(
"assign {}${} = {}${};",
x.owner_name, x.my_name, x.other_name, x.my_name
)
}
}
}
.to_string()
})
.collect::<Vec<_>>()
.join("\n");
/*
let links = gen
.links
.iter()
.map(|x| {
x.replace("link!(", "")
.replace(")", "")
.replace(",", "=")
.replace("self.", "")
.replace(".", "_")
})
.map(|x| format!("assign {};", x))
.collect::<Vec<_>>()
.join("\n");
*/
format!("always @(*) {}\n{}", gen.to_string(), links)
// format!("always @(*) {}\n", gen.to_string())
}
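// Example (hypothetical names): a VerilogLink::Forward with other_name "dev",
// owner_name "bus" and my_name "addr" is lowered by the mapping above to
// `always @(*) dev$addr = bus$addr;`; the replace() calls first turn indexed
// names like "bus[0]" into the flattened identifier "bus$0".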
impl VerilogVisitor for VerilogCodeGenerator {
fn visit_block(&mut self, b: &VerilogBlock) {
self.io.writeln("begin");
self.io.push();
walk_block(self, b);
self.io.pop();
self.io.add_line("end");
}
fn visit_loop(&mut self, a: &VerilogLoop) {
let start = a.from.as_usize();
let end = a.to.as_usize();
for i in start..end {
self.loops.push(LoopVariable {
variable: a.index.clone(),
value: i,
});
walk_block(self, &a.block);
self.loops.pop();
}
}
fn visit_slice_assignment(
&mut self,
base: &str,
width: &usize,
offset: &VerilogExpression,
replacement: &VerilogExpression,
) {
self.io.write(format!("{}[(", base));
self.visit_expression(offset);
self.io.write(format!(")+:({})] = ", width));
self.visit_expression(replacement);
self.io.writeln(";");
}
fn visit_conditional(&mut self, c: &VerilogConditional) {
self.io.write("if (");
self.visit_expression(&c.test);
self.io.write(") ");
self.visit_block(&c.then);
self.visit_block_or_conditional(&c.otherwise);
}
fn visit_block_or_conditional(&mut self, o: &VerilogBlockOrConditional) {
match &o {
VerilogBlockOrConditional::Block(b) => {
self.io.write("else ");
self.visit_block(&b);
}
VerilogBlockOrConditional::Conditional(c) => {
self.io.write("else ");
self.visit_statement(c);
}
VerilogBlockOrConditional::None => {}
}
}
fn visit_match(&mut self, m: &VerilogMatch) {
self.io.write("case (");
self.visit_expression(&m.test);
self.io.writeln(")");
self.io.push();
m.cases.iter().for_each(|x| self.visit_case(x));
self.io.pop();
self.io.writeln("endcase")
}
fn visit_comment(&mut self, x: &str) {
self.io.add(format!("// {}", x));
}
fn visit_signal(&mut self, sig: &str) {
self.io.write(self.ident_fixup(sig));
}
fn visit_literal(&mut self, v: &VerilogLiteral) {
self.io.write(v.to_string());
}
fn visit_link(&mut self, l: &[VerilogLink]) {
for link in l {
self.links.push(link.clone());
}
}
fn visit_case(&mut self, c: &VerilogCase) {
self.io.write(self.ident_fixup(&c.condition));
self.io.writeln(":");
self.io.push();
self.visit_block(&c.block);
self.io.pop();
}
fn visit_binop(&mut self, l: &VerilogExpression, o: &VerilogOp, r: &VerilogExpression) {
self.visit_expression(l);
self.io.write(" ");
self.io.write(match o {
VerilogOp::Add => "+",
VerilogOp::Sub => "-",
VerilogOp::Mul => "*",
VerilogOp::LogicalAnd => "&&",
VerilogOp::LogicalOr => "||",
VerilogOp::BitXor => "^",
VerilogOp::BitAnd => "&",
VerilogOp::BitOr => "|",
VerilogOp::Shl => "<<",
VerilogOp::Shr => ">>",
VerilogOp::Eq => "==",
VerilogOp::Lt => "<",
VerilogOp::Le => "<=",
VerilogOp::Ne => "!=",
VerilogOp::Ge => ">=",
VerilogOp::Gt => ">",
});
self.io.write(" ");
self.visit_expression(r);
}
|
fn visit_unop(&mut self, o: &VerilogOpUnary, r: &VerilogExpression) {
self.io.write(match o {
VerilogOpUnary::Not => "~",
VerilogOpUnary::Neg => "-",
VerilogOpUnary::All => "&",
VerilogOpUnary::Any => "|",
VerilogOpUnary::Xor => "^",
});
self.visit_expression(r);
}
fn visit_assignment(&mut self, l: &VerilogExpression, r: &VerilogExpression) {
self.visit_expression(l);
self.io.write(" = ");
self.visit_expression(r);
self.io.writeln(";");
}
fn visit_paren(&mut self, e: &VerilogExpression) {
self.io.write("(");
self.visit_expression(e);
self.io.write(")");
}
fn visit_cast(&mut self, e: &VerilogExpression, bits: &usize) {
self.io.write("(");
self.visit_expression(e);
let mask = (BigUint::from(1_u32) << bits) - 1_u32;
self.io.write(format!(") & {}'h{:x}", bits, mask))
}
fn visit_index(&mut self, a: &VerilogExpression, b: &VerilogExpression) {
self.visit_expression(a);
self.io.write("[");
self.visit_expression(b);
self.io.write("]");
}
fn visit_slice(&mut self, sig: &VerilogExpression, width: &usize, offset: &VerilogExpression) {
self.visit_expression(sig);
self.io.write("[(");
self.visit_expression(offset);
self.io.write(format!(")+:({})]", width));
}
fn visit_index_replace(
&mut self,
sig: &VerilogExpression,
ndx: &VerilogExpression,
val: &VerilogExpression,
) {
self.io.write("(");
self.visit_expression(sig);
self.io.write(" & ~(1 << (");
self.visit_expression(ndx);
self.io.write(")) | ((");
self.visit_expression(val);
self.io.write(") << (");
self.visit_expression(ndx);
self.io.write(")))");
}
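// The expression emitted by visit_index_replace performs a single-bit
// read-modify-write: (sig & ~(1 << ndx)) | (val << ndx) clears bit `ndx` of
// `sig`, then ORs `val` back in at that position. This assumes `val` is a
// one-bit value; a wider `val` would need masking before the shift.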
}
#[test]
fn test_array_replacement() {
let re = Regex::new(r"\[([^\]]*)\]").unwrap();
let test = "a[((i+1))]";
let captures = re.captures(test);
let mut context = evalexpr::HashMapContext::new();
context.set_value("i".to_string(), 5.into()).unwrap();
for x in re.captures(test) {
println!("Match {:?}", x);
if x.len() == 2 {
if let Some(txt) = x.get(1) {
let arg = evalexpr::eval_with_context(txt.as_str(), &context).unwrap();
println!("Replace {} -> {}", txt.as_str(), arg);
println!("Update {}", re.replace(test, format!("$${}", arg)))
}
}
}
assert!(captures.is_some());
}
pub fn filter_blackbox_directives(t: &str) -> String {
let mut in_black_box = false;
let mut ret = vec![];
for line in t.split("\n") {
in_black_box = in_black_box || line.starts_with("(* blackbox *)");
if !in_black_box {
ret.push(line);
}
if line.starts_with("endmodule") {
in_black_box = false;
}
}
ret.join("\n")
}
#[test]
fn test_filter_bb_directives() {
let p = r#"
blah
more code
goes here
(* blackbox *)
module my_famous_module(
super_secret_arg1,
super_secret_arg2,
super_secret_arg3);
/* Comment */
endmodule
stuff
"#;
let q = filter_blackbox_directives(p);
println!("{}", q);
assert!(!q.contains("blackbox"));
assert!(!q.contains("module"));
assert!(!q.contains("endmodule"));
assert!(q.contains("more code"));
assert!(q.contains("stuff"));
}
| |
pfr.rs
|
#[doc = "Register `PFR[%s]` reader"]
pub struct R(crate::R<PFR_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<PFR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<PFR_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<PFR_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `PFR[%s]` writer"]
pub struct W(crate::W<PFR_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<PFR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<PFR_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<PFR_SPEC>) -> Self
|
}
impl W {
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Processor Feature Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pfr](index.html) module"]
pub struct PFR_SPEC;
impl crate::RegisterSpec for PFR_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [pfr::R](R) reader structure"]
impl crate::Readable for PFR_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [pfr::W](W) writer structure"]
impl crate::Writable for PFR_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets PFR[%s]
to value 0"]
impl crate::Resettable for PFR_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
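// Sketch of typical usage of this generated API (the peripheral handle and
// register-array access below are hypothetical; the exact path depends on the
// rest of the generated crate, and PFR is normally read-only):
//
//     let raw: u32 = periph.pfr[0].read().bits();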
|
{
W(writer)
}
|
test_tabs.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from libcst._tabs import expand_tabs
from libcst.testing.utils import UnitTest, data_provider
class ExpandTabsTest(UnitTest):
@data_provider(
[
("\t", " " * 8),
("\t\t", " " * 16),
|
("abcd\t", "abcd "),
("abcdefg\t", "abcdefg "),
("abcdefgh\t", "abcdefgh "),
("\tsuffix", " suffix"),
]
)
def test_expand_tabs(self, input: str, output: str) -> None:
self.assertEqual(expand_tabs(input), output)
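# Note on the cases above: expand_tabs pads to 8-column tab stops, so the
# number of spaces substituted for a tab depends on the column where the tab
# occurs (e.g. "abcd\t" gains 4 spaces while "abcdefg\t" gains 1), rather
# than a fixed 8 spaces per tab.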
|
(" \t", " " * 8),
("\t ", " " * 12),
|
mod.rs
|
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, clashing_extern_declarations, clippy::all)]
#[repr(C)]
#[doc = "*Required features: 'Win32_UI_Shell_Common', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub struct COMDLG_FILTERSPEC {
pub pszName: super::super::super::Foundation::PWSTR,
pub pszSpec: super::super::super::Foundation::PWSTR,
}
#[cfg(feature = "Win32_Foundation")]
impl ::core::marker::Copy for COMDLG_FILTERSPEC {}
#[cfg(feature = "Win32_Foundation")]
impl ::core::clone::Clone for COMDLG_FILTERSPEC {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub type DEVICE_SCALE_FACTOR = i32;
|
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const DEVICE_SCALE_FACTOR_INVALID: DEVICE_SCALE_FACTOR = 0i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SCALE_100_PERCENT: DEVICE_SCALE_FACTOR = 100i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SCALE_120_PERCENT: DEVICE_SCALE_FACTOR = 120i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SCALE_125_PERCENT: DEVICE_SCALE_FACTOR = 125i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SCALE_140_PERCENT: DEVICE_SCALE_FACTOR = 140i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SCALE_150_PERCENT: DEVICE_SCALE_FACTOR = 150i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SCALE_160_PERCENT: DEVICE_SCALE_FACTOR = 160i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SCALE_175_PERCENT: DEVICE_SCALE_FACTOR = 175i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SCALE_180_PERCENT: DEVICE_SCALE_FACTOR = 180i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SCALE_200_PERCENT: DEVICE_SCALE_FACTOR = 200i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SCALE_225_PERCENT: DEVICE_SCALE_FACTOR = 225i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SCALE_250_PERCENT: DEVICE_SCALE_FACTOR = 250i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SCALE_300_PERCENT: DEVICE_SCALE_FACTOR = 300i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SCALE_350_PERCENT: DEVICE_SCALE_FACTOR = 350i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SCALE_400_PERCENT: DEVICE_SCALE_FACTOR = 400i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SCALE_450_PERCENT: DEVICE_SCALE_FACTOR = 450i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SCALE_500_PERCENT: DEVICE_SCALE_FACTOR = 500i32;
pub type IObjectArray = *mut ::core::ffi::c_void;
pub type IObjectCollection = *mut ::core::ffi::c_void;
#[repr(C)]
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub struct ITEMIDLIST {
pub mkid: SHITEMID,
}
impl ::core::marker::Copy for ITEMIDLIST {}
impl ::core::clone::Clone for ITEMIDLIST {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub type PERCEIVED = i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVED_TYPE_FIRST: PERCEIVED = -3i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVED_TYPE_CUSTOM: PERCEIVED = -3i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVED_TYPE_UNSPECIFIED: PERCEIVED = -2i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVED_TYPE_FOLDER: PERCEIVED = -1i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVED_TYPE_UNKNOWN: PERCEIVED = 0i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVED_TYPE_TEXT: PERCEIVED = 1i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVED_TYPE_IMAGE: PERCEIVED = 2i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVED_TYPE_AUDIO: PERCEIVED = 3i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVED_TYPE_VIDEO: PERCEIVED = 4i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVED_TYPE_COMPRESSED: PERCEIVED = 5i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVED_TYPE_DOCUMENT: PERCEIVED = 6i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVED_TYPE_SYSTEM: PERCEIVED = 7i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVED_TYPE_APPLICATION: PERCEIVED = 8i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVED_TYPE_GAMEMEDIA: PERCEIVED = 9i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVED_TYPE_CONTACTS: PERCEIVED = 10i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVED_TYPE_LAST: PERCEIVED = 10i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVEDFLAG_GDIPLUS: u32 = 16u32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVEDFLAG_HARDCODED: u32 = 2u32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVEDFLAG_NATIVESUPPORT: u32 = 4u32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVEDFLAG_SOFTCODED: u32 = 1u32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVEDFLAG_UNDEFINED: u32 = 0u32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVEDFLAG_WMSDK: u32 = 32u32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const PERCEIVEDFLAG_ZIPFOLDER: u32 = 64u32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub type SHCOLSTATE = i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_DEFAULT: SHCOLSTATE = 0i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_TYPE_STR: SHCOLSTATE = 1i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_TYPE_INT: SHCOLSTATE = 2i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_TYPE_DATE: SHCOLSTATE = 3i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_TYPEMASK: SHCOLSTATE = 15i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_ONBYDEFAULT: SHCOLSTATE = 16i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_SLOW: SHCOLSTATE = 32i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_EXTENDED: SHCOLSTATE = 64i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_SECONDARYUI: SHCOLSTATE = 128i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_HIDDEN: SHCOLSTATE = 256i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_PREFER_VARCMP: SHCOLSTATE = 512i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_PREFER_FMTCMP: SHCOLSTATE = 1024i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_NOSORTBYFOLDERNESS: SHCOLSTATE = 2048i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_VIEWONLY: SHCOLSTATE = 65536i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_BATCHREAD: SHCOLSTATE = 131072i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_NO_GROUPBY: SHCOLSTATE = 262144i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_FIXED_WIDTH: SHCOLSTATE = 4096i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_NODPISCALE: SHCOLSTATE = 8192i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_FIXED_RATIO: SHCOLSTATE = 16384i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const SHCOLSTATE_DISPLAYMASK: SHCOLSTATE = 61440i32;
#[repr(C, packed(1))]
#[doc = "*Required features: 'Win32_UI_Shell_Common', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub struct SHELLDETAILS {
pub fmt: i32,
pub cxChar: i32,
pub str: STRRET,
}
#[cfg(feature = "Win32_Foundation")]
impl ::core::marker::Copy for SHELLDETAILS {}
#[cfg(feature = "Win32_Foundation")]
impl ::core::clone::Clone for SHELLDETAILS {
fn clone(&self) -> Self {
*self
}
}
#[repr(C, packed(1))]
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub struct SHITEMID {
pub cb: u16,
pub abID: [u8; 1],
}
impl ::core::marker::Copy for SHITEMID {}
impl ::core::clone::Clone for SHITEMID {
fn clone(&self) -> Self {
*self
}
}
#[repr(C)]
#[doc = "*Required features: 'Win32_UI_Shell_Common', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub struct STRRET {
pub uType: u32,
pub Anonymous: STRRET_0,
}
#[cfg(feature = "Win32_Foundation")]
impl ::core::marker::Copy for STRRET {}
#[cfg(feature = "Win32_Foundation")]
impl ::core::clone::Clone for STRRET {
fn clone(&self) -> Self {
*self
}
}
#[repr(C)]
#[doc = "*Required features: 'Win32_UI_Shell_Common', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub union STRRET_0 {
pub pOleStr: super::super::super::Foundation::PWSTR,
pub uOffset: u32,
pub cStr: [u8; 260],
}
#[cfg(feature = "Win32_Foundation")]
impl ::core::marker::Copy for STRRET_0 {}
#[cfg(feature = "Win32_Foundation")]
impl ::core::clone::Clone for STRRET_0 {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub type STRRET_TYPE = i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const STRRET_WSTR: STRRET_TYPE = 0i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const STRRET_OFFSET: STRRET_TYPE = 1i32;
#[doc = "*Required features: 'Win32_UI_Shell_Common'*"]
pub const STRRET_CSTR: STRRET_TYPE = 2i32;
| |
consume.go
|
package alert
import (
"bytes"
"encoding/json"
"fmt"
"os/exec"
"sort"
"strconv"
"strings"
"time"
"github.com/didi/nightingale/v5/cache"
"github.com/didi/nightingale/v5/config"
"github.com/didi/nightingale/v5/judge"
"github.com/didi/nightingale/v5/models"
"github.com/toolkits/pkg/concurrent/semaphore"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/net/httplib"
"github.com/toolkits/pkg/sys"
)
func popEvent() {
sema := semaphore.NewSemaphore(config.Config.Alert.NotifyConcurrency)
duration := time.Duration(100) * time.Millisecond
for {
events := judge.EventQueue.PopBackBy(200)
if len(events) < 1 {
time.Sleep(duration)
continue
}
consume(events, sema)
}
}
func consume(events []interface{}, sema *semaphore.Semaphore) {
for i := range events {
if events[i] == nil {
continue
}
event := events[i].(*models.AlertEvent)
alertRule, exists := cache.AlertRules.Get(event.RuleId)
if !exists {
logger.Errorf("event_consume: alert rule not found, event:%+v", event)
continue
}
logger.Debugf("[event_consume_success][type:%v][event:%+v]", event.IsPromePull, event)
if isNoneffective(event, alertRule) {
// the alert rule is outside its effective time window
continue
}
event.RuleName = alertRule.Name
event.RuleNote = alertRule.Note
event.NotifyChannels = alertRule.NotifyChannels
classpaths := cache.ResClasspath.GetValues(event.ResIdent)
event.ResClasspaths = strings.Join(classpaths, " ")
enrichTag(event, alertRule)
if isEventMute(event) {
// the event is muted
event.MarkMuted()
if config.Config.Alert.MutedAlertPersist {
err := event.Add()
if err != nil {
logger.Warningf("event_consume: insert muted event err:%v, event:%+v", err, event)
}
}
continue
}
// persist to the database
persist(event)
// trigger callbacks for both alerts and recoveries; the receiving end handles them itself
if alertRule.Callbacks != "" {
go callback(event, alertRule)
}
uids := genNotifyUserIDs(alertRule)
if len(uids) == 0 {
logger.Warningf("event_consume: notify users not found, event:%+v", event)
continue
}
users := cache.UserCache.GetByIds(uids)
if len(users) == 0 {
logger.Warningf("event_consume: notify users not found, event:%+v", event)
continue
}
alertMsg := AlertMsg{
Event: event,
Rule: alertRule,
Users: users,
}
logger.Infof("event_consume: notify alert:%+v", alertMsg)
sema.Acquire()
go func(alertMsg AlertMsg) {
defer sema.Release()
notify(alertMsg)
}(alertMsg)
}
}
func genNotifyUserIDs(alertRule *models.AlertRule) []int64 {
uidMap := make(map[int64]struct{})
groupIds := strings.Fields(alertRule.NotifyGroups)
for _, groupId := range groupIds {
gid, err := strconv.ParseInt(groupId, 10, 64)
if err != nil {
logger.Warningf("event_consume: strconv groupid(%s) fail: %v", groupId, err)
continue
}
um, exists := cache.UserGroupMember.Get(gid)
if !exists {
continue
}
for uid := range um {
uidMap[uid] = struct{}{}
}
}
userIds := strings.Fields(alertRule.NotifyUsers)
for _, userId := range userIds {
uid, err := strconv.ParseInt(userId, 10, 64)
if err != nil {
logger.Warningf("event_consume: strconv userid(%s) fail: %v", userId, err)
continue
}
uidMap[uid] = struct{}{}
}
uids := make([]int64, 0, len(uidMap))
for uid := range uidMap {
uids = append(uids, uid)
}
return uids
}
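// Example (hypothetical values): with alertRule.NotifyGroups = "1 2" and
// alertRule.NotifyUsers = "30", the result is the union of the member IDs of
// user groups 1 and 2 plus user 30, deduplicated through uidMap above.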
// if it is an alert, store it in the database; if it is a recovery, delete it from the table of unrecovered alerts
func persist(event *models.AlertEvent) {
if event.IsRecov() {
err := event.DelByHashId()
if err != nil {
logger.Warningf("event_consume: delete recovery event err:%v, event:%+v", err, event)
}
} else {
err := event.Add()
if err != nil {
logger.Warningf("event_consume: insert alert event err:%v, event:%+v", err, event)
}
}
}
type AlertMsg struct {
Event *models.AlertEvent `json:"event"`
Rule *models.AlertRule `json:"rule"`
Users []*models.User `json:"users"`
}
func notify(alertMsg AlertMsg) {
// add concurrency control
bs, err := json.Marshal(alertMsg)
if err != nil {
logger.Errorf("notify: marshal alert %+v err:%v", alertMsg, err)
return
}
fpath := config.Config.Alert.NotifyScriptPath
cmd := exec.Command(fpath)
cmd.Stdin = bytes.NewReader(bs)
// combine stdout and stderr
var buf bytes.Buffer
cmd.Stdout = &buf
cmd.Stderr = &buf
err = cmd.Start()
if err != nil {
logger.Errorf("notify: run cmd err:%v", err)
return
}
err, isTimeout := sys.WrapTimeout(cmd, time.Duration(10)*time.Second)
if isTimeout {
if err == nil {
logger.Errorf("notify: timeout and killed process %s", fpath)
}
if err != nil {
logger.Errorf("notify: kill process %s occur error %v", fpath, err)
}
return
}
if err != nil {
logger.Errorf("notify: exec script %s occur error: %v", fpath, err)
return
}
logger.Infof("notify: exec %s output: %s", fpath, buf.String())
}
func callback(event *models.AlertEvent, alertRule *models.AlertRule) {
urls := strings.Fields(alertRule.Callbacks)
for _, url := range urls {
if url == "" {
continue
}
if !(strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://")) {
url = "http://" + url
}
resp, code, err := httplib.PostJSON(url, 5*time.Second, event, map[string]string{})
if err != nil {
logger.Errorf("callback[%s] fail, callback content: %+v, resp: %s, err: %v, code:%d", url, event, string(resp), err, code)
} else {
logger.Infof("callback[%s] succ, callback content: %+v, resp: %s, code:%d", url, event, string(resp), code)
}
}
}
func isNoneffective(event *models.AlertEvent, alertRule *models.AlertRule) bool {
// filter by effective time window
if alertRule.Status == models.ALERT_RULE_DISABLED {
logger.Debugf("event:%+v alert rule:%+v disable", event, alertRule)
return true
}
tm := time.Unix(event.TriggerTime, 0)
triggerTime := tm.Format("15:04")
triggerWeek := strconv.Itoa(int(tm.Weekday()))
if alertRule.EnableStime <= alertRule.EnableEtime {
if triggerTime < alertRule.EnableStime || triggerTime > alertRule.EnableEtime {
logger.Debugf("event:%+v alert rule:%+v triggerTime Noneffective", event, alertRule)
return true
}
} else {
if triggerTime < alertRule.EnableStime && triggerTime > alertRule.EnableEtime {
logger.Debugf("event:%+v alert rule:%+v triggerTime Noneffective", event, alertRule)
return true
}
}
if !strings.Contains(alertRule.EnableDaysOfWeek, triggerWeek) {
logger.Debugf("event:%+v alert rule:%+v triggerWeek Noneffective", event, alertRule)
return true
}
return false
}
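// Example (hypothetical values): EnableStime "22:00" with EnableEtime "06:00"
// describes a window that crosses midnight, so the else branch applies: a
// trigger at "23:30" or "05:00" is effective, while one at "12:00" satisfies
// both comparisons and is filtered out as noneffective.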
// An event's tags come from several sources: the ident as a tag, the tags of the data itself (the resource's tags were already appended to the data tags earlier), and the rule's tags
func enrichTag(event *models.AlertEvent, alertRule *models.AlertRule) {
if event.ResIdent != "" {
event.TagMap["ident"] = event.ResIdent
}
if alertRule.AppendTags != "" {
|
arr := strings.Split(tag, "=")
if len(arr) != 2 {
logger.Warningf("alertRule AppendTags:%+v illagel", alertRule.AppendTags)
continue
}
event.TagMap[arr[0]] = arr[1]
}
}
var tagList []string
for key, value := range event.TagMap {
tagList = append(tagList, fmt.Sprintf("%s=%s", key, value))
}
sort.Strings(tagList)
event.Tags = strings.Join(tagList, " ")
}
|
appendTags := strings.Fields(alertRule.AppendTags)
for _, tag := range appendTags {
|
settings.py
|
from bizfriendly import app
from flask.ext.heroku import Heroku
import os
heroku = Heroku(app) # Sets CONFIG automagically
app.config.update(
# DEBUG = True,
# SQLALCHEMY_DATABASE_URI = 'postgres://hackyourcity@localhost/howtocity',
# SQLALCHEMY_DATABASE_URI = 'postgres://postgres:root@localhost/howtocity',
# SECRET_KEY = '123456'
)
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY')
app.config['MAIL_GUN_KEY'] = os.environ.get('MAIL_GUN_KEY')
app.config['AWS_ACCESS_KEY_ID'] = os.environ.get('AWS_ACCESS_KEY_ID')
app.config['AWS_SECRET_ACCESS_KEY'] = os.environ.get('AWS_SECRET_ACCESS_KEY')
app.config['S3_BUCKET_NAME'] = os.environ.get('S3_BUCKET_NAME')
def add_cors_header(response):
|
app.after_request(add_cors_header)
|
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'Authorization, Content-Type'
response.headers['Access-Control-Allow-Methods'] = 'POST, GET, PUT, PATCH, DELETE, OPTIONS'
return response
|
mysqld.go
|
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Commands for controlling an external mysql process.
Some commands are issued as exec'd tools, some are handled by connecting via
the mysql protocol.
*/
package mysqlctl
import (
"bufio"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"sync"
"time"
log "github.com/golang/glog"
"github.com/youtube/vitess/go/mysql"
"github.com/youtube/vitess/go/sqldb"
"github.com/youtube/vitess/go/stats"
"github.com/youtube/vitess/go/vt/dbconfigs"
"github.com/youtube/vitess/go/vt/dbconnpool"
vtenv "github.com/youtube/vitess/go/vt/env"
"github.com/youtube/vitess/go/vt/hook"
"github.com/youtube/vitess/go/vt/mysqlctl/mysqlctlclient"
"golang.org/x/net/context"
)
var (
dbaPoolSize = flag.Int("dba_pool_size", 20, "Size of the connection pool for dba connections")
dbaIdleTimeout = flag.Duration("dba_idle_timeout", time.Minute, "Idle timeout for dba connections")
appPoolSize = flag.Int("app_pool_size", 40, "Size of the connection pool for app connections")
appIdleTimeout = flag.Duration("app_idle_timeout", time.Minute, "Idle timeout for app connections")
socketFile = flag.String("mysqlctl_socket", "", "socket file to use for remote mysqlctl actions (empty for local actions)")
// masterConnectRetry is used in 'SET MASTER' commands
masterConnectRetry = flag.Duration("master_connect_retry", 10*time.Second, "how long to wait in between slave -> master connection attempts. Only precise to the second.")
)
// Mysqld is the object that represents a mysqld daemon running on this server.
type Mysqld struct {
config *Mycnf
dba *sqldb.ConnParams
dbApp *sqldb.ConnParams
dbaPool *dbconnpool.ConnectionPool
appPool *dbconnpool.ConnectionPool
replParams *sqldb.ConnParams
dbaMysqlStats *stats.Timings
tabletDir string
// mutex protects the fields below.
mutex sync.Mutex
mysqlFlavor MysqlFlavor
onTermFuncs []func()
cancelWaitCmd chan struct{}
}
// NewMysqld creates a Mysqld object based on the provided configuration
// and connection parameters.
// dbaName and appName are the base for stats exports, use 'Dba' and 'App', except in tests
func NewMysqld(dbaName, appName string, config *Mycnf, dba, app, repl *sqldb.ConnParams) *Mysqld {
if *dba == dbconfigs.DefaultDBConfigs.Dba {
dba.UnixSocket = config.SocketFile
}
// create and open the connection pool for dba access
dbaMysqlStatsName := ""
dbaPoolName := ""
if dbaName != "" {
dbaMysqlStatsName = "Mysql" + dbaName
dbaPoolName = dbaName + "ConnPool"
}
dbaMysqlStats := stats.NewTimings(dbaMysqlStatsName)
dbaPool := dbconnpool.NewConnectionPool(dbaPoolName, *dbaPoolSize, *dbaIdleTimeout)
dbaPool.Open(dbconnpool.DBConnectionCreator(dba, dbaMysqlStats))
// create and open the connection pool for app access
appMysqlStatsName := ""
appPoolName := ""
if appName != "" {
appMysqlStatsName = "Mysql" + appName
appPoolName = appName + "ConnPool"
}
appMysqlStats := stats.NewTimings(appMysqlStatsName)
appPool := dbconnpool.NewConnectionPool(appPoolName, *appPoolSize, *appIdleTimeout)
appPool.Open(dbconnpool.DBConnectionCreator(app, appMysqlStats))
return &Mysqld{
config: config,
dba: dba,
dbApp: app,
dbaPool: dbaPool,
appPool: appPool,
replParams: repl,
dbaMysqlStats: dbaMysqlStats,
tabletDir: path.Dir(config.DataDir),
}
}
// Cnf returns the mysql config for the daemon
func (mysqld *Mysqld) Cnf() *Mycnf {
return mysqld.config
}
// TabletDir returns the main tablet directory.
// It's a method so it can be accessed through the MysqlDaemon interface.
func (mysqld *Mysqld) TabletDir() string {
return mysqld.tabletDir
}
// RunMysqlUpgrade will run the mysql_upgrade program on the current install.
// Will not be called when mysqld is running.
func (mysqld *Mysqld) RunMysqlUpgrade() error {
// Execute as remote action on mysqlctld if requested.
if *socketFile != "" {
log.Infof("executing Mysqld.RunMysqlUpgrade() remotely via mysqlctld server: %v", *socketFile)
client, err := mysqlctlclient.New("unix", *socketFile)
if err != nil {
return fmt.Errorf("can't dial mysqlctld: %v", err)
}
defer client.Close()
return client.RunMysqlUpgrade(context.TODO())
}
// find mysql_upgrade. If not there, we do nothing.
dir, err := vtenv.VtMysqlRoot()
if err != nil {
log.Warningf("VT_MYSQL_ROOT not set, skipping mysql_upgrade step: %v", err)
return nil
}
name := path.Join(dir, "bin/mysql_upgrade")
if _, err := os.Stat(name); err != nil {
log.Warningf("mysql_upgrade binary not present, skipping it: %v", err)
return nil
}
// run the program, if it fails, we fail
args := []string{
// --defaults-file=* must be the first arg.
"--defaults-file=" + mysqld.config.path,
"--socket", mysqld.config.SocketFile,
"--user", mysqld.dba.Uname,
"--force", // Don't complain if it's already been upgraded.
}
if mysqld.dba.Pass != "" {
// --password must be omitted entirely if empty, or else it will prompt.
args = append(args, "--password", mysqld.dba.Pass)
}
cmd := exec.Command(name, args...)
cmd.Env = []string{os.ExpandEnv("LD_LIBRARY_PATH=$VT_MYSQL_ROOT/lib/mysql")}
out, err := cmd.CombinedOutput()
log.Infof("mysql_upgrade output: %s", out)
return err
}
// Start will start the mysql daemon, either by running the 'mysqld_start'
// hook, or by running mysqld_safe in the background.
// If a mysqlctld address is provided in a flag, Start will run remotely.
func (mysqld *Mysqld) Start(ctx context.Context) error {
// Execute as remote action on mysqlctld if requested.
if *socketFile != "" {
log.Infof("executing Mysqld.Start() remotely via mysqlctld server: %v", *socketFile)
client, err := mysqlctlclient.New("unix", *socketFile)
if err != nil {
return fmt.Errorf("can't dial mysqlctld: %v", err)
}
defer client.Close()
return client.Start(ctx)
}
var name string
ts := fmt.Sprintf("Mysqld.Start(%v)", time.Now().Unix())
// try the mysqld start hook, if any
switch hr := hook.NewSimpleHook("mysqld_start").Execute(); hr.ExitStatus {
case hook.HOOK_SUCCESS:
// hook exists and worked, we can keep going
name = "mysqld_start hook"
case hook.HOOK_DOES_NOT_EXIST:
// hook doesn't exist, run mysqld_safe ourselves
log.Infof("%v: No mysqld_start hook, running mysqld_safe directly", ts)
dir, err := vtenv.VtMysqlRoot()
if err != nil {
return err
}
name = path.Join(dir, "bin/mysqld_safe")
arg := []string{
"--defaults-file=" + mysqld.config.path}
env := []string{os.ExpandEnv("LD_LIBRARY_PATH=$VT_MYSQL_ROOT/lib/mysql")}
cmd := exec.Command(name, arg...)
cmd.Dir = dir
cmd.Env = env
log.Infof("%v %#v", ts, cmd)
stderr, err := cmd.StderrPipe()
if err != nil {
return err
}
stdout, err := cmd.StdoutPipe()
if err != nil {
return err
}
go func() {
scanner := bufio.NewScanner(stderr)
for scanner.Scan() {
log.Infof("%v stderr: %v", ts, scanner.Text())
}
}()
go func() {
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
log.Infof("%v stdout: %v", ts, scanner.Text())
}
}()
err = cmd.Start()
if err != nil {
return err
}
mysqld.mutex.Lock()
mysqld.cancelWaitCmd = make(chan struct{})
go func(cancel <-chan struct{}) {
// Wait regardless of cancel, so we don't generate defunct processes.
err := cmd.Wait()
log.Infof("%v exit: %v", ts, err)
// The process exited. Trigger OnTerm callbacks, unless we were cancelled.
select {
case <-cancel:
default:
mysqld.mutex.Lock()
for _, callback := range mysqld.onTermFuncs {
go callback()
}
mysqld.mutex.Unlock()
}
}(mysqld.cancelWaitCmd)
mysqld.mutex.Unlock()
default:
// hook failed, we report error
return fmt.Errorf("mysqld_start hook failed: %v", hr.String())
}
return mysqld.Wait(ctx)
}
// Wait returns nil when mysqld is up and accepting connections.
func (mysqld *Mysqld) Wait(ctx context.Context) error {
log.Infof("Waiting for mysqld socket file (%v) to be ready...", mysqld.config.SocketFile)
for {
select {
case <-ctx.Done():
return errors.New("deadline exceeded waiting for mysqld socket file to appear: " + mysqld.config.SocketFile)
default:
}
_, statErr := os.Stat(mysqld.config.SocketFile)
if statErr == nil {
// Make sure the socket file isn't stale.
// Use a user that exists even before we apply the init_db_sql_file.
conn, connErr := mysql.Connect(sqldb.ConnParams{
Uname: "root",
Charset: "utf8",
UnixSocket: mysqld.config.SocketFile,
})
if connErr == nil {
conn.Close()
return nil
}
log.Infof("mysqld socket file exists, but can't connect: %v", connErr)
} else if !os.IsNotExist(statErr) {
return fmt.Errorf("can't stat mysqld socket file: %v", statErr)
}
time.Sleep(100 * time.Millisecond)
}
}
// Shutdown will stop the mysqld daemon that is running in the background.
//
// waitForMysqld: should the function block until mysqld has stopped?
// This can actually take a *long* time if the buffer cache needs to be fully
// flushed - on the order of 20-30 minutes.
//
// If a mysqlctld address is provided in a flag, Shutdown will run remotely.
func (mysqld *Mysqld) Shutdown(ctx context.Context, waitForMysqld bool) error {
log.Infof("Mysqld.Shutdown")
// Execute as remote action on mysqlctld if requested.
if *socketFile != "" {
log.Infof("executing Mysqld.Shutdown() remotely via mysqlctld server: %v", *socketFile)
client, err := mysqlctlclient.New("unix", *socketFile)
if err != nil {
return fmt.Errorf("can't dial mysqlctld: %v", err)
}
defer client.Close()
return client.Shutdown(ctx, waitForMysqld)
}
// We're shutting down on purpose. We no longer want to be notified when
// mysqld terminates.
mysqld.mutex.Lock()
if mysqld.cancelWaitCmd != nil {
close(mysqld.cancelWaitCmd)
mysqld.cancelWaitCmd = nil
}
mysqld.mutex.Unlock()
// possibly mysql is already shutdown, check for a few files first
_, socketPathErr := os.Stat(mysqld.config.SocketFile)
_, pidPathErr := os.Stat(mysqld.config.PidFile)
if os.IsNotExist(socketPathErr) && os.IsNotExist(pidPathErr) {
log.Warningf("assuming mysqld already shut down - no socket, no pid file found")
return nil
}
// try the mysqld shutdown hook, if any
h := hook.NewSimpleHook("mysqld_shutdown")
hr := h.Execute()
switch hr.ExitStatus {
case hook.HOOK_SUCCESS:
// hook exists and worked, we can keep going
case hook.HOOK_DOES_NOT_EXIST:
// hook doesn't exist, try mysqladmin
log.Infof("No mysqld_shutdown hook, running mysqladmin directly")
dir, err := vtenv.VtMysqlRoot()
if err != nil {
return err
}
name := path.Join(dir, "bin/mysqladmin")
arg := []string{
"-u", mysqld.dba.Uname, "-S", mysqld.config.SocketFile,
"shutdown"}
env := []string{
os.ExpandEnv("LD_LIBRARY_PATH=$VT_MYSQL_ROOT/lib/mysql"),
}
_, _, err = execCmd(name, arg, env, dir, nil)
if err != nil {
return err
}
default:
// hook failed, we report error
return fmt.Errorf("mysqld_shutdown hook failed: %v", hr.String())
}
// Wait for mysqld to really stop. Use the socket and pid files as a
// proxy for that since we can't call wait() in a process we
// didn't start.
if waitForMysqld {
log.Infof("Mysqld.Shutdown: waiting for socket file (%v) and pid file (%v) to disappear",
mysqld.config.SocketFile, mysqld.config.PidFile)
for {
select {
case <-ctx.Done():
return errors.New("gave up waiting for mysqld to stop")
default:
}
_, socketPathErr = os.Stat(mysqld.config.SocketFile)
_, pidPathErr = os.Stat(mysqld.config.PidFile)
if os.IsNotExist(socketPathErr) && os.IsNotExist(pidPathErr) {
return nil
}
time.Sleep(100 * time.Millisecond)
}
}
return nil
}
// execCmd searches the PATH for a command and runs it, logging the output.
// If input is not nil, pipe it to the command's stdin.
func execCmd(name string, args, env []string, dir string, input io.Reader) (cmd *exec.Cmd, output string, err error) {
cmdPath, _ := exec.LookPath(name)
log.Infof("execCmd: %v %v %v", name, cmdPath, args)
cmd = exec.Command(cmdPath, args...)
cmd.Env = env
cmd.Dir = dir
if input != nil {
cmd.Stdin = input
}
out, err := cmd.CombinedOutput()
output = string(out)
if err != nil {
err = errors.New(name + ": " + output)
}
log.Infof("execCmd: command returned: %v", output)
return cmd, output, err
}
// Init will create the default directory structure for the mysqld process,
// generate / configure a my.cnf file, install a skeleton database,
// and apply the provided initial SQL file.
func (mysqld *Mysqld) Init(ctx context.Context, initDBSQLFile string) error {
log.Infof("mysqlctl.Init")
err := mysqld.createDirs()
if err != nil {
log.Errorf("%s", err.Error())
return err
}
root, err := vtenv.VtRoot()
if err != nil {
log.Errorf("%s", err.Error())
return err
}
// Set up config files.
if err = mysqld.initConfig(root); err != nil {
log.Errorf("failed creating %v: %v", mysqld.config.path, err)
return err
}
// Install data dir.
if err = mysqld.installDataDir(); err != nil {
return err
}
// Start mysqld.
if err = mysqld.Start(ctx); err != nil {
log.Errorf("failed starting mysqld (check %v for more info): %v", mysqld.config.ErrorLogPath, err)
return err
}
// Run initial SQL file.
sqlFile, err := os.Open(initDBSQLFile)
if err != nil {
return fmt.Errorf("can't open init_db_sql_file (%v): %v", initDBSQLFile, err)
}
defer sqlFile.Close()
if err := mysqld.executeMysqlScript("root", sqlFile); err != nil {
return fmt.Errorf("can't run init_db_sql_file (%v): %v", initDBSQLFile, err)
}
return nil
}
func (mysqld *Mysqld) installDataDir() error {
mysqlRoot, err := vtenv.VtMysqlRoot()
if err != nil {
log.Errorf("%v", err)
return err
}
// Check mysqld version.
_, version, err := execCmd(path.Join(mysqlRoot, "sbin/mysqld"),
[]string{"--version"}, nil, mysqlRoot, nil)
if err != nil {
return err
}
if strings.Contains(version, "Ver 5.7.") {
// MySQL 5.7 GA and up have deprecated mysql_install_db.
// Instead, initialization is built into mysqld.
log.Infof("Installing data dir with mysqld --initialize-insecure")
args := []string{
"--defaults-file=" + mysqld.config.path,
"--basedir=" + mysqlRoot,
"--initialize-insecure", // Use empty 'root'@'localhost' password.
}
if _, _, err = execCmd(path.Join(mysqlRoot, "sbin/mysqld"), args, nil, mysqlRoot, nil); err != nil {
log.Errorf("mysqld --initialize-insecure failed: %v", err)
return err
}
return nil
}
log.Infof("Installing data dir with mysql_install_db")
args := []string{
"--defaults-file=" + mysqld.config.path,
"--basedir=" + mysqlRoot,
}
if _, _, err = execCmd(path.Join(mysqlRoot, "bin/mysql_install_db"), args, nil, mysqlRoot, nil); err != nil {
log.Errorf("mysql_install_db failed: %v", err)
return err
}
return nil
}
func (mysqld *Mysqld) initConfig(root string) error {
var err error
var configData string
switch hr := hook.NewSimpleHook("make_mycnf").Execute(); hr.ExitStatus {
case hook.HOOK_DOES_NOT_EXIST:
log.Infof("make_mycnf hook doesn't exist, reading default template files")
cnfTemplatePaths := []string{
path.Join(root, "config/mycnf/default.cnf"),
path.Join(root, "config/mycnf/master.cnf"),
path.Join(root, "config/mycnf/replica.cnf"),
}
if extraCnf := os.Getenv("EXTRA_MY_CNF"); extraCnf != "" {
parts := strings.Split(extraCnf, ":")
cnfTemplatePaths = append(cnfTemplatePaths, parts...)
}
configData, err = mysqld.config.makeMycnf(cnfTemplatePaths)
case hook.HOOK_SUCCESS:
configData, err = mysqld.config.fillMycnfTemplate(hr.Stdout)
default:
return fmt.Errorf("make_mycnf hook failed(%v): %v", hr.ExitStatus, hr.Stderr)
}
if err != nil {
return err
}
return ioutil.WriteFile(mysqld.config.path, []byte(configData), 0664)
}
func (mysqld *Mysqld) createDirs() error {
log.Infof("creating directory %s", mysqld.tabletDir)
if err := os.MkdirAll(mysqld.tabletDir, os.ModePerm); err != nil {
return err
}
for _, dir := range TopLevelDirs() {
if err := mysqld.createTopDir(dir); err != nil {
return err
}
}
for _, dir := range mysqld.config.directoryList() {
log.Infof("creating directory %s", dir)
if err := os.MkdirAll(dir, os.ModePerm); err != nil {
return err
}
// FIXME(msolomon) validate permissions?
}
return nil
}
// createTopDir creates a top level directory under TabletDir.
// However, if a directory of the same name already exists under
// vtenv.VtDataRoot(), it creates a directory named after the tablet
// id under that directory, and then creates a symlink under TabletDir
// that points to the newly created directory. For example, if
// /vt/data is present, it will create the following structure:
// /vt/data/vt_xxxx /vt/vt_xxxx/data -> /vt/data/vt_xxxx
func (mysqld *Mysqld) createTopDir(dir string) error {
vtname := path.Base(mysqld.tabletDir)
target := path.Join(vtenv.VtDataRoot(), dir)
_, err := os.Lstat(target)
if err != nil {
if os.IsNotExist(err) {
topdir := path.Join(mysqld.tabletDir, dir)
log.Infof("creating directory %s", topdir)
return os.MkdirAll(topdir, os.ModePerm)
}
return err
}
linkto := path.Join(target, vtname)
source := path.Join(mysqld.tabletDir, dir)
log.Infof("creating directory %s", linkto)
err = os.MkdirAll(linkto, os.ModePerm)
if err != nil {
return err
}
log.Infof("creating symlink %s -> %s", source, linkto)
return os.Symlink(linkto, source)
}
// Teardown will shutdown the running daemon, and delete the root directory.
func (mysqld *Mysqld) Teardown(ctx context.Context, force bool) error {
log.Infof("mysqlctl.Teardown")
if err := mysqld.Shutdown(ctx, true); err != nil {
log.Warningf("failed mysqld shutdown: %v", err.Error())
if !force {
return err
}
}
var removalErr error
for _, dir := range TopLevelDirs() {
qdir := path.Join(mysqld.tabletDir, dir)
if err := deleteTopDir(qdir); err != nil {
removalErr = err
}
}
return removalErr
}
func
|
(dir string) (removalErr error) {
fi, err := os.Lstat(dir)
if err != nil {
log.Errorf("error deleting dir %v: %v", dir, err.Error())
removalErr = err
} else if fi.Mode()&os.ModeSymlink != 0 {
target, err := filepath.EvalSymlinks(dir)
if err != nil {
log.Errorf("could not resolve symlink %v: %v", dir, err.Error())
removalErr = err
}
log.Infof("remove data dir (symlinked) %v", target)
if err = os.RemoveAll(target); err != nil {
log.Errorf("failed removing %v: %v", target, err.Error())
removalErr = err
}
}
log.Infof("remove data dir %v", dir)
if err = os.RemoveAll(dir); err != nil {
log.Errorf("failed removing %v: %v", dir, err.Error())
removalErr = err
}
return
}
// executeMysqlCommands executes some SQL commands,
// using the mysql command line tool.
func (mysqld *Mysqld) executeMysqlCommands(user, sql string) error {
return mysqld.executeMysqlScript(user, strings.NewReader(sql))
}
// executeMysqlScript executes a .sql script file with the mysql command line tool.
func (mysqld *Mysqld) executeMysqlScript(user string, sql io.Reader) error {
dir, err := vtenv.VtMysqlRoot()
if err != nil {
return err
}
name := path.Join(dir, "bin/mysql")
arg := []string{"--batch", "-u", user, "-S", mysqld.config.SocketFile}
env := []string{
"LD_LIBRARY_PATH=" + path.Join(dir, "lib/mysql"),
}
_, _, err = execCmd(name, arg, env, dir, sql)
if err != nil {
return err
}
return nil
}
// GetAppConnection returns a connection from the app pool.
// Recycle needs to be called on the result.
func (mysqld *Mysqld) GetAppConnection(ctx context.Context) (dbconnpool.PoolConnection, error) {
return mysqld.appPool.Get(ctx)
}
// GetDbaConnection creates a new DBConnection.
func (mysqld *Mysqld) GetDbaConnection() (*dbconnpool.DBConnection, error) {
return dbconnpool.NewDBConnection(mysqld.dba, mysqld.dbaMysqlStats)
}
// Close will close this instance of Mysqld. It will wait for all dba
// queries to be finished.
func (mysqld *Mysqld) Close() {
mysqld.dbaPool.Close()
mysqld.appPool.Close()
}
// OnTerm registers a function to be called if mysqld terminates for any
// reason other than a call to Mysqld.Shutdown(). This only works if mysqld
// was actually started by calling Start() on this Mysqld instance.
func (mysqld *Mysqld) OnTerm(f func()) {
mysqld.mutex.Lock()
defer mysqld.mutex.Unlock()
mysqld.onTermFuncs = append(mysqld.onTermFuncs, f)
}
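// Example (hypothetical): a caller can register a hook so a supervisor learns
// about unexpected exits; the callback fires only for terminations that were
// not initiated through Shutdown():
//
//	mysqld.OnTerm(func() { log.Warningf("mysqld exited unexpectedly") })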
|
deleteTopDir
|
cilrs_collect_data.py
|
import os
from functools import partial
import PIL
import lmdb
import numpy as np
from ding.envs import SyncSubprocessEnvManager
from ding.utils.default_helper import deep_merge_dicts
from easydict import EasyDict
from tqdm import tqdm
from haco.DIDrive_core.data import CarlaBenchmarkCollector, BenchmarkDatasetSaver
from haco.DIDrive_core.envs import SimpleCarlaEnv, CarlaEnvWrapper
from haco.DIDrive_core.policy import AutoPIDPolicy
from haco.DIDrive_core.utils.others.tcp_helper import parse_carla_tcp
config = dict(
env=dict(
env_num=5,
simulator=dict(
disable_two_wheels=True,
planner=dict(
type='behavior',
resolution=1,
),
obs=(
dict(
name='rgb',
type='rgb',
size=[400, 300],
position=[1.3, 0.0, 2.3],
fov=100,
),
),
verbose=True,
),
col_is_failure=True,
stuck_is_failure=True,
ran_light_is_failure=True,
manager=dict(
auto_reset=False,
shared_memory=False,
context='spawn',
max_retry=1,
),
wrapper=dict(
speed_factor=25.,
scale=1,
crop=256,
),
),
server=[
dict(carla_host='localhost', carla_ports=[9000, 9010, 2]),
],
policy=dict(
target_speed=25,
tl_threshold=13,
noise=True,
noise_kwargs=dict(),
collect=dict(
n_episode=100,
dir_path='./datasets_train/cilrs_datasets_train',
preloads_name='cilrs_datasets_train.npy',
collector=dict(
suite='FullTown01-v1',
nocrash=True,
),
)
),
)
main_config = EasyDict(config)
def cilrs_postprocess(observation, scale=1, crop=256):
rgb = observation['rgb'].copy()
im = PIL.Image.fromarray(rgb)
(width, height) = (int(im.width // scale), int(im.height // scale))
rgb = im.resize((width, height))
rgb = np.asarray(rgb)
start_x = height // 2 - crop // 2
start_y = width // 2 - crop // 2
rgb = rgb[start_x:start_x + crop, start_y:start_y + crop]
sensor_data = {'rgb': rgb}
others = {}
return sensor_data, others
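# Hedged usage sketch (assumes an observation dict holding an HxWx3 uint8 'rgb' array):
#   obs = {'rgb': np.zeros((300, 400, 3), dtype=np.uint8)}
#   sensor_data, others = cilrs_postprocess(obs, scale=1, crop=256)
#   sensor_data['rgb'].shape  # -> (256, 256, 3)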
def wrapped_env(env_cfg, wrapper_cfg, host, port, tm_port=None):
return CarlaEnvWrapper(SimpleCarlaEnv(env_cfg, host, port, tm_port), wrapper_cfg)
def post_process(config):
epi_folder = [x for x in os.listdir(config.policy.collect.dir_path) if x.startswith('epi')]
all_img_list = []
all_mea_list = []
for item in tqdm(epi_folder):
lmdb_file = lmdb.open(os.path.join(config.policy.collect.dir_path, item, 'measurements.lmdb')).begin(write=False)
png_files = [
x for x in os.listdir(os.path.join(config.policy.collect.dir_path, item)) if (x.endswith('png') and x.startswith('rgb'))
]
png_files.sort()
for png_file in png_files:
index = png_file.split('_')[1].split('.')[0]
measurements = np.frombuffer(lmdb_file.get(('measurements_%05d' % int(index)).encode()), np.float32)
data = {}
data['control'] = np.array([measurements[15], measurements[16], measurements[17]]).astype(np.float32)
data['speed'] = measurements[10] / config.env.wrapper.speed_factor
data['command'] = float(measurements[11])
new_dict = {}
new_dict['brake'] = data['control'][2]
new_dict['steer'] = (data['control'][0] + 1) / 2
new_dict['throttle'] = data['control'][1]
new_dict['speed'] = data['speed']
new_dict['command'] = data['command']
all_img_list.append(os.path.join(item, png_file))
all_mea_list.append(new_dict)
if not os.path.exists('_preloads'):
os.mkdir('_preloads')
np.save('_preloads/{}'.format(config.policy.collect.preloads_name), [all_img_list, all_mea_list])
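# Hedged sketch of the preload layout saved above (folder/file names are hypothetical):
#   all_img_list[i] -> 'episode_00001/rgb_00042.png'  (image path relative to dir_path)
#   all_mea_list[i] -> {'brake': ..., 'steer': ..., 'throttle': ..., 'speed': ..., 'command': ...}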
def main(cfg, seed=0):
|
if __name__ == '__main__':
main(main_config)
|
cfg.env.manager = deep_merge_dicts(SyncSubprocessEnvManager.default_config(), cfg.env.manager)
tcp_list = parse_carla_tcp(cfg.server)
env_num = cfg.env.env_num
assert len(tcp_list) >= env_num, \
"Carla server not enough! Need {} servers but only found {}.".format(env_num, len(tcp_list))
collector_env = SyncSubprocessEnvManager(
env_fn=[partial(wrapped_env, cfg.env, cfg.env.wrapper, *tcp_list[i]) for i in range(env_num)],
cfg=cfg.env.manager,
)
policy = AutoPIDPolicy(cfg.policy)
collector = CarlaBenchmarkCollector(cfg.policy.collect.collector, collector_env, policy.collect_mode)
if not os.path.exists(cfg.policy.collect.dir_path):
os.makedirs(cfg.policy.collect.dir_path)
collected_episodes = 0
data_postprocess = lambda x: cilrs_postprocess(x, scale=cfg.env.wrapper.scale, crop=cfg.env.wrapper.crop)
saver = BenchmarkDatasetSaver(cfg.policy.collect.dir_path, cfg.env.simulator.obs, data_postprocess)
print('[MAIN] Start collecting data')
saver.make_dataset_path(cfg.policy.collect)
while collected_episodes < cfg.policy.collect.n_episode:
# Sampling data from environments
n_episode = min(cfg.policy.collect.n_episode - collected_episodes, env_num * 2)
new_data = collector.collect(n_episode=n_episode)
saver.save_episodes_data(new_data, start_episode=collected_episodes)
collected_episodes += n_episode
print('[MAIN] Current collected: ', collected_episodes, '/', cfg.policy.collect.n_episode)
collector_env.close()
saver.make_index()
print('[MAIN] Making preloads')
post_process(cfg)
|
eeprom.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Inky display-type EEPROM tools."""
import datetime
import struct
EEP_ADDRESS = 0x50
EEP_WP = 12
DISPLAY_VARIANT = [
None,
'Red pHAT (High-Temp)',
'Yellow wHAT',
'Black wHAT',
'Black pHAT',
'Yellow pHAT',
'Red wHAT',
'Red wHAT (High-Temp)',
'Red wHAT',
None,
'Black pHAT (SSD1608)',
'Red pHAT (SSD1608)',
'Yellow pHAT (SSD1608)',
None,
'7-Colour (UC8159)'
]
class EPDType:
"""Class to represent EPD EEPROM structure."""
valid_colors = [None, 'black', 'red', 'yellow', None, '7colour']
def __init__(self, width, height, color, pcb_variant, display_variant, write_time=None):
"""Initialise new EEPROM data structure."""
self.width = width
self.height = height
self.color = color
        if isinstance(color, str):
self.set_color(color)
self.pcb_variant = pcb_variant
self.display_variant = display_variant
self.eeprom_write_time = str(datetime.datetime.now()) if write_time is None else write_time
def __repr__(self):
"""Return string representation of EEPROM data structure."""
return """Display: {}x{}
Color: {}
PCB Variant: {}
Display Variant: {}
Time: {}""".format(self.width,
self.height,
self.get_color(),
self.pcb_variant / 10.0,
self.display_variant,
self.eeprom_write_time)
@classmethod
def from_bytes(class_object, data):
"""Initialise new EEPROM data structure from a bytes-like object or list."""
data = bytearray(data)
data = struct.unpack('<HHBBB22p', data)
return class_object(*data)
def update_eeprom_write_time(self):
"""Update the stored write time."""
self.eeprom_write_time = str(datetime.datetime.now())
def
|
(self):
"""Return a bytearray representing the EEPROM data structure."""
return struct.pack('<HHBBB22p',
self.width,
self.height,
self.color,
self.pcb_variant,
self.display_variant,
str(datetime.datetime.now()).encode("ASCII"))
def to_list(self):
"""Return a list of bytes representing the EEPROM data structure."""
        return list(self.encode())
def set_color(self, color):
"""Set the stored colour value."""
try:
self.color = self.valid_colors.index(color)
        except ValueError:
raise ValueError('Invalid colour: {}'.format(color))
def get_color(self):
"""Get the stored colour value."""
try:
return self.valid_colors[self.color]
except IndexError:
return None
def get_variant(self):
"""Return text name of the display variant."""
try:
return DISPLAY_VARIANT[self.display_variant]
except IndexError:
return None
# Normal Yellow wHAT
yellow_what_1_E = EPDType(400, 300, color='yellow', pcb_variant=12, display_variant=2)
# Normal Black wHAT
black_what_1_E = EPDType(400, 300, color='black', pcb_variant=12, display_variant=3)
# Normal Black pHAT
black_phat_1_E = EPDType(212, 104, color='black', pcb_variant=12, display_variant=4)
# Hightemp Red pHAT
red_small_1_E = EPDType(212, 104, color='red', pcb_variant=12, display_variant=1)
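# Hedged round-trip sketch (follows the '<HHBBB22p' layout above; 29 bytes total):
#   raw = black_phat_1_E.encode()
#   clone = EPDType.from_bytes(raw)
#   print(clone)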
def read_eeprom(i2c_bus=None):
"""Return a class representing EEPROM contents, or none."""
try:
if i2c_bus is None:
try:
from smbus2 import SMBus
except ImportError:
raise ImportError('This library requires the smbus2 module\nInstall with: sudo pip install smbus2')
i2c_bus = SMBus(1)
i2c_bus.write_i2c_block_data(EEP_ADDRESS, 0x00, [0x00])
return EPDType.from_bytes(i2c_bus.read_i2c_block_data(EEP_ADDRESS, 0, 29))
except IOError:
return None
def main(args):
"""EEPROM Test Function."""
print(read_eeprom())
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
|
encode
|
cli.rs
|
#![allow(non_camel_case_types)]
use assert_cmd::prelude::*;
use predicates::prelude::*;
use std::convert::TryInto;
use std::fs;
use std::io::{ErrorKind, Write};
use std::mem;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use std::process::Command;
use std::slice;
// nginx bindings
type u_char = std::os::raw::c_uchar;
type u_short = std::os::raw::c_ushort;
type time_t = std::os::raw::c_long;
type ngx_uint_t = usize;
// #[derive(Debug)] // Requires rust 1.47 due to std::array::LengthAtMost32
#[repr(C)]
struct ngx_http_file_cache_header_t {
version: ngx_uint_t,
valid_sec: time_t,
updating_sec: time_t,
error_sec: time_t,
last_modified: time_t,
date: time_t,
crc32: u32,
valid_msec: u_short,
header_start: u_short,
body_start: u_short,
etag_len: u_char,
etag: [u_char; 128_usize],
vary_len: u_char,
vary: [u_char; 128_usize],
variant: [u_char; 16_usize],
}
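// Hedged layout note (mirrors the struct above): an nginx cache file starts with
// ngx_http_file_cache_header_t, followed by the key line (e.g. "\nKEY: <url>"),
// with header_start pointing just past the key; create_cache_file below fakes exactly that.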
const CLI_NAME: &str = "nginx-ecm-rs";
#[link(name = "c")]
extern "C" {
fn geteuid() -> u32;
}
fn create_cache_file(
tmp_dir: &std::path::Path,
version: usize,
key: Option<&[u8]>,
) -> Result<std::path::PathBuf, Box<dyn std::error::Error>> {
let mut file_path = tmp_dir.to_path_buf();
let mut file = Option::None;
// Create unique file if called multiple times from single test (3 attempts)
for i in 1..=3 {
file_path = tmp_dir.join(format!("cache-file-{}", i));
file = match fs::OpenOptions::new().write(true).create_new(true).open(&file_path) {
Ok(f) => Some(f),
Err(e) => {
if e.kind() == ErrorKind::AlreadyExists {
continue;
}
panic!("{:#?}", e);
}
};
if file.is_some() {
break;
}
}
let mut file = file.unwrap();
let key = match key {
Some(key) => key,
None => b"\nKEY: .",
};
let mut cache_header: ngx_http_file_cache_header_t = unsafe { mem::zeroed() };
let cache_header_size = mem::size_of::<ngx_http_file_cache_header_t>();
cache_header.version = version;
cache_header.header_start = (cache_header_size + key.len()).try_into().unwrap();
unsafe {
let cache_header_slice = slice::from_raw_parts_mut(&mut cache_header as *mut _ as *mut u8, cache_header_size);
file.write_all(cache_header_slice)?;
}
file.write_all(key)?;
Ok(file_path)
}
#[test]
fn cache_dir_doesnt_exist() -> Result<(), Box<dyn std::error::Error>> {
let mut cmd = Command::cargo_bin("nginx-ecm-rs")?;
cmd.arg("/no/such/dir/");
cmd.assert()
.failure()
.stderr(predicate::str::contains("No such file or directory"));
Ok(())
}
#[test]
fn cache_dir_no_permission() -> Result<(), Box<dyn std::error::Error>> {
let euid = unsafe { geteuid() };
// skip test if root
if euid == 0 {
assert!(true, "SKIP")
} else {
let dir = tempfile::tempdir().unwrap();
let path = dir.path();
let metadata = path.metadata()?;
let mut permissions = metadata.permissions();
permissions.set_mode(0o000);
fs::set_permissions(path, permissions)?;
let mut cmd = Command::cargo_bin("nginx-ecm-rs")?;
cmd.arg(path);
cmd.assert()
.failure()
.stderr(predicate::str::contains("Permission denied"));
}
Ok(())
}
#[test]
fn special_file() -> Result<(), Box<dyn std::error::Error>> {
let dir = tempfile::tempdir()?;
Command::new("mkfifo").arg(dir.path().join("fifo")).status().unwrap();
let mut cmd = Command::cargo_bin(CLI_NAME)?;
cmd.arg(dir.path());
cmd.assert()
.failure()
.stderr(predicate::str::contains("neither file nor directory"));
Ok(())
}
#[test]
fn cache_file_no_permission() -> Result<(), Box<dyn std::error::Error>> {
let euid = unsafe { geteuid() };
// skip test if root
if euid == 0 {
assert!(true, "SKIP")
} else {
let dir = tempfile::tempdir().unwrap();
let path = dir.path();
let file_path = dir.path().join("cache-test.bin");
fs::File::create(&file_path)?;
let metadata = file_path.metadata()?;
let mut permissions = metadata.permissions();
permissions.set_mode(0o000);
fs::set_permissions(file_path, permissions)?;
let mut cmd = Command::cargo_bin("nginx-ecm-rs")?;
cmd.arg(path);
cmd.assert()
.failure()
.stderr(predicate::str::contains("Permission denied"));
}
Ok(())
}
#[test]
fn short_cache_file() -> Result<(), Box<dyn std::error::Error>> {
let dir = tempfile::tempdir()?;
let file_path = dir.path().join("cache-test.bin");
let mut file = fs::File::create(&file_path)?;
writeln!(file, ".")?;
let mut cmd = Command::cargo_bin(CLI_NAME)?;
cmd.arg(dir.path());
cmd.assert()
.success()
.stderr(predicate::str::contains("failed to fill whole buffer"));
Ok(())
}
#[test]
fn valid_nginx_cache_file() -> Result<(), Box<dyn std::error::Error>> {
let dir_path = Path::new("tests/data");
let mut cmd = Command::cargo_bin(CLI_NAME)?;
cmd.arg(dir_path).arg("--match-key").arg("zimage.example.com");
cmd.assert()
.success()
.stdout(predicate::str::contains("77cefbbd3b90b3f68899b6c7aa02d007"));
Ok(())
}
#[test]
fn valid_generated_cache_file() -> Result<(), Box<dyn std::error::Error>> {
let tmp_dir = tempfile::tempdir()?;
create_cache_file(tmp_dir.path(), 5, Some(b"\nKEY: http://example.com/hello.html"))?;
let mut cmd = Command::cargo_bin(CLI_NAME)?;
cmd.arg(tmp_dir.path()).arg("--match-key").arg("example.com");
cmd.assert().success().stdout(predicate::str::contains("example"));
Ok(())
}
#[test]
fn invalid_cache_header_version() -> Result<(), Box<dyn std::error::Error>> {
let tmp_dir = tempfile::tempdir()?;
create_cache_file(tmp_dir.path(), 6, None)?;
let mut cmd = Command::cargo_bin(CLI_NAME)?;
cmd.arg(tmp_dir.path());
cmd.assert()
.failure()
.stderr(predicate::str::contains("header version mismatch"));
Ok(())
}
#[test]
fn invalid_key_string() -> Result<(), Box<dyn std::error::Error>>
|
#[test]
fn purge_purges_only_matched() -> Result<(), Box<dyn std::error::Error>> {
let tmp_dir = tempfile::tempdir()?;
let tmp_dir = tmp_dir.path();
let fcom = create_cache_file(tmp_dir, 5, Some(b"\nKEY: http://example.com/hello.html"))?;
let forg = create_cache_file(tmp_dir, 5, Some(b"\nKEY: http://example.org/hello.html"))?;
assert!(fcom.exists());
assert!(forg.exists());
let mut cmd = Command::cargo_bin(CLI_NAME)?;
cmd.arg(tmp_dir).arg("--match-key").arg("example.com").arg("--purge");
cmd.assert().success().stdout(predicate::str::contains("example.com"));
assert!(!fcom.exists());
assert!(forg.exists());
Ok(())
}
|
{
let tmp_dir = tempfile::tempdir()?;
    // 0xfe is an invalid UTF-8 sequence
// \n K E Y : \s
let bytes: &'static [u8] = &[0x0a, 0x4b, 0x45, 0x59, 0x3a, 0x20, 0xfe, 0x0a];
let key = Some(bytes);
let file_path = create_cache_file(tmp_dir.path(), 5, key)?;
let mut cmd = Command::cargo_bin(CLI_NAME)?;
cmd.arg(tmp_dir.path()).arg("--match-key").arg("example.com");
let res = cmd.assert().success().try_stderr(predicate::str::contains("Utf8Error"));
if let Err(e) = res {
let bad_file = tempfile::NamedTempFile::new().unwrap();
let bad_path = bad_file.into_temp_path();
let bad_path = bad_path.keep()?;
fs::copy(&file_path, &bad_path).unwrap();
panic!("Test file preserved as {} due to {}", bad_path.display(), e);
}
Ok(())
}
|
set.go
|
package termstore
import (
i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e "time"
i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization"
i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87 "github.com/microsoftgraph/msgraph-sdk-go/models/microsoft/graph"
)
// Set represents a set of terms in the term store.
type Set struct {
i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.Entity
// Children terms of set in term [store].
children []Termable;
// Date and time of set creation. Read-only.
createdDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time;
// Description that gives details on the term usage.
description *string;
// Name of the set for each languageTag.
localizedNames []LocalizedNameable;
    // The parent group of the set.
parentGroup Groupable;
// Custom properties for the set.
properties []i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.KeyValueable;
// Indicates which terms have been pinned or reused directly under the set.
relations []Relationable;
// All the terms under the set.
terms []Termable;
}
// NewSet instantiates a new set and sets the default values.
func
|
()(*Set) {
m := &Set{
Entity: *i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.NewEntity(),
}
return m
}
// CreateSetFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateSetFromDiscriminatorValue(parseNode i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable, error) {
return NewSet(), nil
}
// GetChildren gets the children property value. Children terms of set in term [store].
func (m *Set) GetChildren()([]Termable) {
if m == nil {
return nil
} else {
return m.children
}
}
// GetCreatedDateTime gets the createdDateTime property value. Date and time of set creation. Read-only.
func (m *Set) GetCreatedDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
if m == nil {
return nil
} else {
return m.createdDateTime
}
}
// GetDescription gets the description property value. Description that gives details on the term usage.
func (m *Set) GetDescription()(*string) {
if m == nil {
return nil
} else {
return m.description
}
}
// GetFieldDeserializers returns the deserialization information for the current model
func (m *Set) GetFieldDeserializers()(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error)) {
res := m.Entity.GetFieldDeserializers()
res["children"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetCollectionOfObjectValues(CreateTermFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
res := make([]Termable, len(val))
for i, v := range val {
res[i] = v.(Termable)
}
m.SetChildren(res)
}
return nil
}
res["createdDateTime"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetTimeValue()
if err != nil {
return err
}
if val != nil {
m.SetCreatedDateTime(val)
}
return nil
}
res["description"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetDescription(val)
}
return nil
}
res["localizedNames"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetCollectionOfObjectValues(CreateLocalizedNameFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
res := make([]LocalizedNameable, len(val))
for i, v := range val {
res[i] = v.(LocalizedNameable)
}
m.SetLocalizedNames(res)
}
return nil
}
res["parentGroup"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetObjectValue(CreateGroupFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
m.SetParentGroup(val.(Groupable))
}
return nil
}
res["properties"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetCollectionOfObjectValues(i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.CreateKeyValueFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
res := make([]i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.KeyValueable, len(val))
for i, v := range val {
res[i] = v.(i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.KeyValueable)
}
m.SetProperties(res)
}
return nil
}
res["relations"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetCollectionOfObjectValues(CreateRelationFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
res := make([]Relationable, len(val))
for i, v := range val {
res[i] = v.(Relationable)
}
m.SetRelations(res)
}
return nil
}
res["terms"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetCollectionOfObjectValues(CreateTermFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
res := make([]Termable, len(val))
for i, v := range val {
res[i] = v.(Termable)
}
m.SetTerms(res)
}
return nil
}
return res
}
// GetLocalizedNames gets the localizedNames property value. Name of the set for each languageTag.
func (m *Set) GetLocalizedNames()([]LocalizedNameable) {
if m == nil {
return nil
} else {
return m.localizedNames
}
}
// GetParentGroup gets the parentGroup property value.
func (m *Set) GetParentGroup()(Groupable) {
if m == nil {
return nil
} else {
return m.parentGroup
}
}
// GetProperties gets the properties property value. Custom properties for the set.
func (m *Set) GetProperties()([]i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.KeyValueable) {
if m == nil {
return nil
} else {
return m.properties
}
}
// GetRelations gets the relations property value. Indicates which terms have been pinned or reused directly under the set.
func (m *Set) GetRelations()([]Relationable) {
if m == nil {
return nil
} else {
return m.relations
}
}
// GetTerms gets the terms property value. All the terms under the set.
func (m *Set) GetTerms()([]Termable) {
if m == nil {
return nil
} else {
return m.terms
}
}
func (m *Set) IsNil()(bool) {
return m == nil
}
// Serialize serializes information about the current object
func (m *Set) Serialize(writer i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.SerializationWriter)(error) {
err := m.Entity.Serialize(writer)
if err != nil {
return err
}
if m.GetChildren() != nil {
cast := make([]i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable, len(m.GetChildren()))
for i, v := range m.GetChildren() {
cast[i] = v.(i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable)
}
err = writer.WriteCollectionOfObjectValues("children", cast)
if err != nil {
return err
}
}
{
err = writer.WriteTimeValue("createdDateTime", m.GetCreatedDateTime())
if err != nil {
return err
}
}
{
err = writer.WriteStringValue("description", m.GetDescription())
if err != nil {
return err
}
}
if m.GetLocalizedNames() != nil {
cast := make([]i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable, len(m.GetLocalizedNames()))
for i, v := range m.GetLocalizedNames() {
cast[i] = v.(i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable)
}
err = writer.WriteCollectionOfObjectValues("localizedNames", cast)
if err != nil {
return err
}
}
{
err = writer.WriteObjectValue("parentGroup", m.GetParentGroup())
if err != nil {
return err
}
}
if m.GetProperties() != nil {
cast := make([]i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable, len(m.GetProperties()))
for i, v := range m.GetProperties() {
cast[i] = v.(i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable)
}
err = writer.WriteCollectionOfObjectValues("properties", cast)
if err != nil {
return err
}
}
if m.GetRelations() != nil {
cast := make([]i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable, len(m.GetRelations()))
for i, v := range m.GetRelations() {
cast[i] = v.(i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable)
}
err = writer.WriteCollectionOfObjectValues("relations", cast)
if err != nil {
return err
}
}
if m.GetTerms() != nil {
cast := make([]i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable, len(m.GetTerms()))
for i, v := range m.GetTerms() {
cast[i] = v.(i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable)
}
err = writer.WriteCollectionOfObjectValues("terms", cast)
if err != nil {
return err
}
}
return nil
}
// SetChildren sets the children property value. Children terms of set in term [store].
func (m *Set) SetChildren(value []Termable)() {
if m != nil {
m.children = value
}
}
// SetCreatedDateTime sets the createdDateTime property value. Date and time of set creation. Read-only.
func (m *Set) SetCreatedDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
if m != nil {
m.createdDateTime = value
}
}
// SetDescription sets the description property value. Description that gives details on the term usage.
func (m *Set) SetDescription(value *string)() {
if m != nil {
m.description = value
}
}
// SetLocalizedNames sets the localizedNames property value. Name of the set for each languageTag.
func (m *Set) SetLocalizedNames(value []LocalizedNameable)() {
if m != nil {
m.localizedNames = value
}
}
// SetParentGroup sets the parentGroup property value.
func (m *Set) SetParentGroup(value Groupable)() {
if m != nil {
m.parentGroup = value
}
}
// SetProperties sets the properties property value. Custom properties for the set.
func (m *Set) SetProperties(value []i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.KeyValueable)() {
if m != nil {
m.properties = value
}
}
// SetRelations sets the relations property value. Indicates which terms have been pinned or reused directly under the set.
func (m *Set) SetRelations(value []Relationable)() {
if m != nil {
m.relations = value
}
}
// SetTerms sets the terms property value. All the terms under the set.
func (m *Set) SetTerms(value []Termable)() {
if m != nil {
m.terms = value
}
}
|
NewSet
|
total_variation.py
|
import numpy as np
from seisflows.tools import unix
from seisflows.tools.array import loadnpy, savenpy
from seisflows.tools.array import grid2mesh, mesh2grid, stack
from seisflows.tools.code import exists
from seisflows.tools.config import SeisflowsParameters, SeisflowsPaths, \
ParameterError, custom_import
from seisflows.tools.math import nabla, tv
PAR = SeisflowsParameters()
PATH = SeisflowsPaths()
import system
import solver
class total_variation(custom_import('postprocess', 'regularize')):
|
""" Adds regularization options to base class
So far, can only be used for 2D inversion, because the required spatial
derivative operator "nabla" is not yet available for 3D grids.
"""
def check(self):
""" Checks parameters and paths
"""
super(total_variation, self).check()
if not PAR.LAMBDA:
raise ValueError
if not hasattr(PAR, 'EPSILON'):
setattr(PAR, 'EPSILON', 0.)
def nabla(self, mesh, m, g):
M, grid = mesh2grid(g, mesh)
DM = tv(M, epsilon=PAR.EPSILON)
dm = grid2mesh(DM, grid, mesh)
return dm/np.mean(m)
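# Hedged note: `tv` is assumed to return a total-variation derivative of the gridded
# model M; grid2mesh maps it back onto the solver mesh, and dividing by mean(m)
# keeps the regularization roughly scale-invariant with respect to the model values.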
|
|
main.rs
|
//! Application may have multiple data objects that are shared across
//! all handlers within same Application. Data could be added
//! with `App::data()` method, multiple different data objects could be added.
//!
//! > **Note**: http server accepts an application factory rather than an
//! > application instance. Http server constructs an application instance for
//! > each thread, thus application data must be constructed multiple times.
//! > If you want to share data between different threads, a shared object
//! > should be used, e.g. `Arc`.
|
use std::io;
use std::sync::Mutex;
use actix_web::{middleware, web, App, HttpRequest, HttpResponse, HttpServer};
/// simple handler
async fn index(state: web::Data<Mutex<usize>>, req: HttpRequest) -> HttpResponse {
    println!("{:?}", req);
    let mut counter = state.lock().unwrap();
    *counter += 1;
    HttpResponse::Ok().body(format!("Num of requests: {}", *counter))
}
#[actix_rt::main]
async fn main() -> io::Result<()> {
std::env::set_var("RUST_LOG", "actix_web=info");
env_logger::init();
let counter = web::Data::new(Mutex::new(0usize));
//move is necessary to give closure below ownership of counter
HttpServer::new(move || {
App::new()
.app_data(counter.clone()) // <- create app with shared state
// enable logger
.wrap(middleware::Logger::default())
// register simple handler, handle all methods
.service(web::resource("/").to(index))
})
.bind("127.0.0.1:8080")?
.run()
.await
}
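// Hedged usage sketch: with the server running,
//   curl http://127.0.0.1:8080/
// should report an increasing "Num of requests: N" across requests and worker threads.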
|
//!
//! Check [user guide](https://actix.rs/docs/application/#state) for more info.
|
directive-metadata.ts
|
import {CategorizedClassDoc} from './dgeni-definitions';
import {
ArrayLiteralExpression,
CallExpression,
ObjectLiteralExpression,
PropertyAssignment,
StringLiteral, SyntaxKind
} from 'typescript';
/**
* Determines the component or directive metadata from the specified Dgeni class doc. The resolved
* directive metadata will be stored in a Map.
*
* Currently only string literal assignments and array literal assignments are supported. Other
* value types are not necessary because they are not needed for any user-facing documentation.
*
* ```ts
* @Component({
* inputs: ["red", "blue"],
* exportAs: "test"
* })
* export class MyComponent {}
* ```
*/
export function
|
(classDoc: CategorizedClassDoc): Map<string, any> | null {
const declaration = classDoc.symbol.valueDeclaration;
if (!declaration || !declaration.decorators) {
return null;
}
const directiveDecorator = declaration.decorators
.filter(decorator => decorator.expression)
.filter(decorator => decorator.expression.kind === SyntaxKind.CallExpression)
.find(decorator => (decorator.expression as any).expression.getText() === 'Component' ||
(decorator.expression as any).expression.getText() === 'Directive');
if (!directiveDecorator) {
return null;
}
// Since the actual decorator expression is by default a LeftHandSideExpression, and TypeScript
// doesn't allow casting it directly to a CallExpression, we have to cast it to "any" first.
const expression = (directiveDecorator.expression as any) as CallExpression;
// The argument length of the CallExpression needs to be exactly one, because it's the single
// JSON object in the @Component/@Directive decorator.
if (expression.arguments.length !== 1) {
return null;
}
const objectExpression = expression.arguments[0] as ObjectLiteralExpression;
const resultMetadata = new Map<string, any>();
objectExpression.properties.forEach((prop: PropertyAssignment) => {
// Support ArrayLiteralExpression assignments in the directive metadata.
if (prop.initializer.kind === SyntaxKind.ArrayLiteralExpression) {
const arrayData = (prop.initializer as ArrayLiteralExpression).elements
.map((literal: StringLiteral) => literal.text);
resultMetadata.set(prop.name.getText(), arrayData);
}
// Support normal StringLiteral and NoSubstitutionTemplateLiteral assignments
if (prop.initializer.kind === SyntaxKind.StringLiteral ||
prop.initializer.kind === SyntaxKind.NoSubstitutionTemplateLiteral) {
resultMetadata.set(prop.name.getText(), (prop.initializer as StringLiteral).text);
}
});
return resultMetadata;
}
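// Hedged usage sketch (hypothetical Dgeni class doc for the component in the comment above):
//   const metadata = getDirectiveMetadata(classDoc);
//   metadata?.get('exportAs');  // -> "test"
//   metadata?.get('inputs');    // -> ["red", "blue"]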
|
getDirectiveMetadata
|
cnn_utils.py
|
import torch.nn as nn
__author__ = "Sachin Mehta"
__version__ = "1.0.1"
__maintainer__ = "Sachin Mehta"
class CBR(nn.Module):
'''
This class defines the convolution layer with batch normalization and PReLU activation
'''
def __init__(self, nIn, nOut, kSize, stride=1, groups=1):
'''
:param nIn: number of input channels
:param nOut: number of output channels
:param kSize: kernel size
:param stride: stride rate for down-sampling. Default is 1
'''
super().__init__()
padding = int((kSize - 1) / 2)
self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=padding, bias=False, groups=groups)
self.bn = nn.BatchNorm2d(nOut)
self.act = nn.PReLU(nOut)
def forward(self, input):
'''
:param input: input feature map
:return: transformed feature map
'''
output = self.conv(input)
# output = self.conv1(output)
output = self.bn(output)
output = self.act(output)
return output
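# Hedged usage sketch (assumes `import torch`): a 3x3 CBR keeps spatial size at stride 1
#   block = CBR(nIn=3, nOut=16, kSize=3)
#   block(torch.randn(1, 3, 64, 64)).shape  # -> torch.Size([1, 16, 64, 64])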
class BR(nn.Module):
'''
This class groups the batch normalization and PReLU activation
'''
def __init__(self, nOut):
'''
:param nOut: output feature maps
'''
super().__init__()
self.bn = nn.BatchNorm2d(nOut)
self.act = nn.PReLU(nOut)
def forward(self, input):
'''
:param input: input feature map
:return: normalized and thresholded feature map
'''
output = self.bn(input)
output = self.act(output)
return output
class CB(nn.Module):
'''
This class groups the convolution and batch normalization
'''
def __init__(self, nIn, nOut, kSize, stride=1, groups=1):
'''
:param nIn: number of input channels
:param nOut: number of output channels
:param kSize: kernel size
        :param stride: optional stride for down-sampling
'''
super().__init__()
padding = int((kSize - 1) / 2)
self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=padding, bias=False,
groups=groups)
self.bn = nn.BatchNorm2d(nOut)
def forward(self, input):
'''
:param input: input feature map
:return: transformed feature map
'''
output = self.conv(input)
output = self.bn(output)
return output
class C(nn.Module):
'''
This class is for a convolutional layer.
'''
def __init__(self, nIn, nOut, kSize, stride=1, groups=1):
'''
:param nIn: number of input channels
:param nOut: number of output channels
:param kSize: kernel size
:param stride: optional stride rate for down-sampling
'''
super().__init__()
padding = int((kSize - 1) / 2)
self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=padding, bias=False,
groups=groups)
def forward(self, input):
'''
:param input: input feature map
:return: transformed feature map
'''
output = self.conv(input)
return output
class CDilated(nn.Module):
'''
|
This class defines the dilated convolution.
'''
def __init__(self, nIn, nOut, kSize, stride=1, d=1, groups=1):
'''
:param nIn: number of input channels
:param nOut: number of output channels
:param kSize: kernel size
:param stride: optional stride rate for down-sampling
:param d: optional dilation rate
'''
super().__init__()
padding = int((kSize - 1) / 2) * d
        self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=padding, bias=False,
dilation=d, groups=groups)
def forward(self, input):
'''
:param input: input feature map
:return: transformed feature map
'''
output = self.conv(input)
return output
class CDilatedB(nn.Module):
'''
This class defines the dilated convolution with batch normalization.
'''
def __init__(self, nIn, nOut, kSize, stride=1, d=1, groups=1):
'''
:param nIn: number of input channels
:param nOut: number of output channels
:param kSize: kernel size
:param stride: optional stride rate for down-sampling
:param d: optional dilation rate
'''
super().__init__()
padding = int((kSize - 1) / 2) * d
        self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=padding, bias=False,
dilation=d, groups=groups)
self.bn = nn.BatchNorm2d(nOut)
def forward(self, input):
'''
:param input: input feature map
:return: transformed feature map
'''
return self.bn(self.conv(input))
| |
database.rs
|
// imports
use sofa::{Client, Database};
use std::env;
// function to establish couchdb connection and get a database
pub fn establish_connection() -> Database
|
{
    // create the couchdb client
let client = Client::new(
env::var("iodine_database_url").expect("Environment variable iodine_database_url not set"),
)
.expect("Failed to connect to database");
// get the database
client
.db(env::var("iodine_database_name")
.expect("Environment variable iodine_database_name not set"))
.unwrap()
}
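// Hedged usage sketch (requires iodine_database_url and iodine_database_name in the env):
//   let db = establish_connection();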
|
|
getAddOnConfig.js
|
export default config => ({
autoCollapseDisabled: false,
collapsedBreakpoint: 'md',
secondaryAutoCollapseDisabled: false,
secondaryCollapseBreakpoint: 'md',
heightAdjustmentDisabled: false,
...config,
|
});
|
|
struct_data_point.go
|
package ecs
|
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DataPoint is a nested struct in ecs response
type DataPoint struct {
TimeStamp string `json:"TimeStamp" xml:"TimeStamp"`
Size int64 `json:"Size" xml:"Size"`
}
|
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
|
constants.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@File :constants.py
@Author :[email protected]
@Blog : http://blog.frankdevhub.site
@Date :2021/9/4 18:34
"""
class Xpath:
"""
selenium dicts
web element attribute name
    common attribute names for page objects
"""
def __init__(self):
pass
ATTRIBUTE_NAME = "name"
ATTRIBUTE_TITLE = "title"
ATTRIBUTE_TARGET = "target"
ATTRIBUTE_HREF = "href"
ATTRIBUTE_CLASS = "class"
ATTRIBUTE_VALUE = "value"
class WebLinks:
"""网络资源相关链接地址"""
def __init__(self):
pass
    BAI_DU = "https://www.baidu.com"  # Baidu homepage URL
    BLOG_EXAMPLE_1 = "https://blog.51cto.com/oldboy"  # 51CTO blog site example (https://blog.51cto.com/oldboy)
class BusinessConstants:
"""业务常用字段"""
# 接口规范常用字段
def __init__(s
|
s
    SUCCESS = "success"  # success response
# selenium configuration
    SELENIUM_CACHE_ROOT_NULL = "selenium cache root directory path should not be null"  # driver cache root path is not configured
    SELENIUM_CACHE_ROOT_NOT_EXISTS = "selenium cache root directory not exist"  # driver cache config file does not exist
    SELENIUM_CACHE_FILE_NAME_NULL = "selenium cache file name should not be null"  # driver cache path config is empty
    SELENIUM_WEB_DRIVER_PATH_NULL = "selenium web driver path should not be null"  # cannot load driver, path config is empty
    SELENIUM_WEB_DRIVER_NOT_EXIST = "selenium web driver not exist"  # cannot load driver, driver does not exist
    # character constants
    CHARACTER_NULL_ARGUMENT = "character should not be null"  # character must not be null
    INVALID_CHINESE_CHARACTER = "not a chinese character"  # exception for non-Chinese characters
    INVALID_ENGLISH_CHARACTER = "not an english character"  # exception for non-English characters
#
# if __name__ == '__main__':
# url_link = BusinessConstants.BLOG_EXAMPLE_1
# print(url_link)
|
elf):
pas
|
integers.rs
|
extern crate asn1;
use asn1::aper::{self, APerElement, UNCONSTRAINED};
use std::i32;
#[test]
fn unconstrained_negative() {
let data = b"\x04\xff\xff\xff\xd5";
let mut d = aper::Decoder::new(data);
assert_eq!(-43, d.decode_int(None, None).unwrap());
}
#[test]
fn
|
() {
let data = b"\x02\x10\x00";
let mut d = aper::Decoder::new(data);
assert_eq!(4096, d.decode_int(None, None).unwrap());
}
#[test]
fn constrained_bounds() {
let data = b"\x00";
let mut d = aper::Decoder::new(data);
assert_eq!(4000, d.decode_int(Some(4000), Some(4255)).unwrap());
}
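// Hedged note on the tests above and below: with a 256-value range (4000..=4255)
// a single padded byte encodes the offset from the lower bound (\x00 -> 4000),
// while a 3-value range (10..=12) needs only two unpadded bits per value.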
#[test]
fn constrained_bounds_unpadded() {
let data = b"\x60";
let mut d = aper::Decoder::new(data);
assert_eq!(11, d.decode_int(Some(10), Some(12)).unwrap());
assert_eq!(12, d.decode_int(Some(10), Some(12)).unwrap());
}
#[test]
fn semiconstrained_bounds() {
let data = b"\x02\x10\x01";
let mut d = aper::Decoder::new(data);
assert_eq!(4096, d.decode_int(Some(-1), None).unwrap());
}
#[test]
fn std_i8() {
let data_min = b"\x00"; // i8::MIN
let data_med = b"\xab"; // 43
let data_max = b"\xff"; // i8::MAX
let mut d = aper::Decoder::new(data_min);
assert_eq!(std::i8::MIN, i8::from_aper(&mut d, UNCONSTRAINED).unwrap());
d = aper::Decoder::new(data_med);
assert_eq!(43 as i8, i8::from_aper(&mut d, UNCONSTRAINED).unwrap());
d = aper::Decoder::new(data_max);
assert_eq!(std::i8::MAX, i8::from_aper(&mut d, UNCONSTRAINED).unwrap());
}
#[test]
fn std_i16() {
let data_min = b"\x00\x00"; // i16::MIN
let data_med = b"\x80\x2b"; // 43
let data_max = b"\xff\xff"; // i16::MAX
let mut d = aper::Decoder::new(data_min);
assert_eq!(std::i16::MIN, i16::from_aper(&mut d, UNCONSTRAINED).unwrap());
d = aper::Decoder::new(data_med);
assert_eq!(43 as i16, i16::from_aper(&mut d, UNCONSTRAINED).unwrap());
d = aper::Decoder::new(data_max);
assert_eq!(std::i16::MAX, i16::from_aper(&mut d, UNCONSTRAINED).unwrap());
}
#[test]
fn std_i32() {
let data_min = b"\x04\x00\x00\x00\x00"; // i32::MIN
let data_med = b"\x04\x80\x00\x00\x2b"; // 43
let data_max = b"\x04\xff\xff\xff\xff"; // i32::MAX
let mut d = aper::Decoder::new(data_min);
assert_eq!(std::i32::MIN, i32::from_aper(&mut d, UNCONSTRAINED).unwrap());
d = aper::Decoder::new(data_med);
assert_eq!(43 as i32, i32::from_aper(&mut d, UNCONSTRAINED).unwrap());
d = aper::Decoder::new(data_max);
assert_eq!(std::i32::MAX, i32::from_aper(&mut d, UNCONSTRAINED).unwrap());
}
#[test]
fn std_u8() {
let data_min = b"\x00"; // u8::MIN
let data_med = b"\x2b"; // 43
let data_max = b"\xff"; // u8::MAX
let mut d = aper::Decoder::new(data_min);
assert_eq!(std::u8::MIN, u8::from_aper(&mut d, UNCONSTRAINED).unwrap());
d = aper::Decoder::new(data_med);
assert_eq!(43 as u8, u8::from_aper(&mut d, UNCONSTRAINED).unwrap());
d = aper::Decoder::new(data_max);
assert_eq!(std::u8::MAX, u8::from_aper(&mut d, UNCONSTRAINED).unwrap());
}
#[test]
fn std_u16() {
let data_min = b"\x00\x00"; // u16::MIN
let data_med = b"\x00\x2b"; // 43
let data_max = b"\xff\xff"; // u16::MAX
let mut d = aper::Decoder::new(data_min);
assert_eq!(std::u16::MIN, u16::from_aper(&mut d, UNCONSTRAINED).unwrap());
d = aper::Decoder::new(data_med);
assert_eq!(43 as u16, u16::from_aper(&mut d, UNCONSTRAINED).unwrap());
d = aper::Decoder::new(data_max);
assert_eq!(std::u16::MAX, u16::from_aper(&mut d, UNCONSTRAINED).unwrap());
}
#[test]
fn std_u32() {
let data_min = b"\x04\x00\x00\x00\x00"; // u32::MIN
let data_med = b"\x04\x00\x00\x00\x2b"; // 43
let data_max = b"\x04\xff\xff\xff\xff"; // u32::MAX
let mut d = aper::Decoder::new(data_min);
assert_eq!(std::u32::MIN, u32::from_aper(&mut d, UNCONSTRAINED).unwrap());
d = aper::Decoder::new(data_med);
assert_eq!(43 as u32, u32::from_aper(&mut d, UNCONSTRAINED).unwrap());
d = aper::Decoder::new(data_max);
assert_eq!(std::u32::MAX, u32::from_aper(&mut d, UNCONSTRAINED).unwrap());
}
|
unconstrained_positive
|
specialization_graph.rs
|
use super::OverlapError;
use crate::hir::def_id::DefId;
use crate::ich::{self, StableHashingContext};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
StableHasherResult};
use crate::traits;
use crate::ty::{self, TyCtxt, TypeFoldable};
use crate::ty::fast_reject::{self, SimplifiedType};
use syntax::ast::Ident;
use crate::util::captures::Captures;
use crate::util::nodemap::{DefIdMap, FxHashMap};
/// A per-trait graph of impls in specialization order. At the moment, this
/// graph forms a tree rooted with the trait itself, with all other nodes
/// representing impls, and parent-child relationships representing
/// specializations.
///
/// The graph provides two key services:
///
/// - Construction. This implicitly checks for overlapping impls (i.e., impls
/// that overlap but where neither specializes the other) -- an artifact of the
/// simple "chain" rule.
///
/// - Parent extraction. In particular, the graph can give you the *immediate*
/// parents of a given specializing impl, which is needed for extracting
/// default items amongst other things. In the simple "chain" rule, every impl
/// has at most one parent.
#[derive(RustcEncodable, RustcDecodable)]
pub struct Graph {
// All impls have a parent; the "root" impls have as their parent the `def_id`
// of the trait.
parent: DefIdMap<DefId>,
// The "root" impls are found by looking up the trait's def_id.
children: DefIdMap<Children>,
}
/// Children of a given impl, grouped into blanket/non-blanket varieties as is
/// done in `TraitDef`.
#[derive(Default, RustcEncodable, RustcDecodable)]
struct Children {
// Impls of a trait (or specializations of a given impl). To allow for
// quicker lookup, the impls are indexed by a simplified version of their
// `Self` type: impls with a simplifiable `Self` are stored in
// `nonblanket_impls` keyed by it, while all other impls are stored in
// `blanket_impls`.
//
// A similar division is used within `TraitDef`, but the lists there collect
// together *all* the impls for a trait, and are populated prior to building
// the specialization graph.
/// Impls of the trait.
nonblanket_impls: FxHashMap<fast_reject::SimplifiedType, Vec<DefId>>,
/// Blanket impls associated with the trait.
blanket_impls: Vec<DefId>,
}
#[derive(Copy, Clone, Debug)]
pub enum FutureCompatOverlapErrorKind {
Issue43355,
Issue33140,
}
#[derive(Debug)]
pub struct FutureCompatOverlapError {
pub error: OverlapError,
pub kind: FutureCompatOverlapErrorKind
}
/// The result of attempting to insert an impl into a group of children.
enum Inserted {
/// The impl was inserted as a new child in this group of children.
BecameNewSibling(Option<FutureCompatOverlapError>),
/// The impl should replace existing impls [X1, ..], because the impl specializes X1, X2, etc.
ReplaceChildren(Vec<DefId>),
/// The impl is a specialization of an existing child.
ShouldRecurseOn(DefId),
}
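// Hedged note: `insert` below maps each pairwise comparison onto one of these
// outcomes; a new impl that specializes an existing child recurses into it, while
// one that generalizes existing children splices itself in as their new parent.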
impl<'tcx> Children {
/// Insert an impl into this set of children without comparing to any existing impls.
fn
|
(&mut self, tcx: TyCtxt<'tcx>, impl_def_id: DefId) {
let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
if let Some(sty) = fast_reject::simplify_type(tcx, trait_ref.self_ty(), false) {
debug!("insert_blindly: impl_def_id={:?} sty={:?}", impl_def_id, sty);
self.nonblanket_impls.entry(sty).or_default().push(impl_def_id)
} else {
debug!("insert_blindly: impl_def_id={:?} sty=None", impl_def_id);
self.blanket_impls.push(impl_def_id)
}
}
/// Removes an impl from this set of children. Used when replacing
/// an impl with a parent. The impl must be present in the list of
/// children already.
fn remove_existing(&mut self, tcx: TyCtxt<'tcx>, impl_def_id: DefId) {
let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
let vec: &mut Vec<DefId>;
if let Some(sty) = fast_reject::simplify_type(tcx, trait_ref.self_ty(), false) {
debug!("remove_existing: impl_def_id={:?} sty={:?}", impl_def_id, sty);
vec = self.nonblanket_impls.get_mut(&sty).unwrap();
} else {
debug!("remove_existing: impl_def_id={:?} sty=None", impl_def_id);
vec = &mut self.blanket_impls;
}
let index = vec.iter().position(|d| *d == impl_def_id).unwrap();
vec.remove(index);
}
/// Attempt to insert an impl into this set of children, while comparing for
/// specialization relationships.
fn insert(
&mut self,
tcx: TyCtxt<'tcx>,
impl_def_id: DefId,
simplified_self: Option<SimplifiedType>,
) -> Result<Inserted, OverlapError> {
let mut last_lint = None;
let mut replace_children = Vec::new();
debug!(
"insert(impl_def_id={:?}, simplified_self={:?})",
impl_def_id,
simplified_self,
);
let possible_siblings = match simplified_self {
Some(sty) => PotentialSiblings::Filtered(self.filtered(sty)),
None => PotentialSiblings::Unfiltered(self.iter()),
};
for possible_sibling in possible_siblings {
debug!(
"insert: impl_def_id={:?}, simplified_self={:?}, possible_sibling={:?}",
impl_def_id,
simplified_self,
possible_sibling,
);
let overlap_error = |overlap: traits::coherence::OverlapResult<'_>| {
// Found overlap, but no specialization; error out.
let trait_ref = overlap.impl_header.trait_ref.unwrap();
let self_ty = trait_ref.self_ty();
OverlapError {
with_impl: possible_sibling,
trait_desc: trait_ref.to_string(),
// Only report the `Self` type if it has at least
// some outer concrete shell; otherwise, it's
// not adding much information.
self_desc: if self_ty.has_concrete_skeleton() {
Some(self_ty.to_string())
} else {
None
},
intercrate_ambiguity_causes: overlap.intercrate_ambiguity_causes,
involves_placeholder: overlap.involves_placeholder,
}
};
let tcx = tcx.global_tcx();
let (le, ge) = traits::overlapping_impls(
tcx,
possible_sibling,
impl_def_id,
traits::IntercrateMode::Issue43355,
|overlap| {
if let Some(overlap_kind) =
tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling)
{
match overlap_kind {
ty::ImplOverlapKind::Permitted => {}
ty::ImplOverlapKind::Issue33140 => {
last_lint = Some(FutureCompatOverlapError {
error: overlap_error(overlap),
kind: FutureCompatOverlapErrorKind::Issue33140
});
}
}
return Ok((false, false));
}
let le = tcx.specializes((impl_def_id, possible_sibling));
let ge = tcx.specializes((possible_sibling, impl_def_id));
if le == ge {
Err(overlap_error(overlap))
} else {
Ok((le, ge))
}
},
|| Ok((false, false)),
)?;
if le && !ge {
debug!("descending as child of TraitRef {:?}",
tcx.impl_trait_ref(possible_sibling).unwrap());
// The impl specializes `possible_sibling`.
return Ok(Inserted::ShouldRecurseOn(possible_sibling));
} else if ge && !le {
debug!("placing as parent of TraitRef {:?}",
tcx.impl_trait_ref(possible_sibling).unwrap());
replace_children.push(possible_sibling);
} else {
if let None = tcx.impls_are_allowed_to_overlap(
impl_def_id, possible_sibling)
{
// do future-compat checks for overlap. Have issue #33140
// errors overwrite issue #43355 errors when both are present.
traits::overlapping_impls(
tcx,
possible_sibling,
impl_def_id,
traits::IntercrateMode::Fixed,
|overlap| {
last_lint = Some(FutureCompatOverlapError {
error: overlap_error(overlap),
kind: FutureCompatOverlapErrorKind::Issue43355
});
},
|| (),
);
}
// no overlap (error bailed already via ?)
}
}
if !replace_children.is_empty() {
return Ok(Inserted::ReplaceChildren(replace_children));
}
// No overlap with any potential siblings, so add as a new sibling.
debug!("placing as new sibling");
self.insert_blindly(tcx, impl_def_id);
Ok(Inserted::BecameNewSibling(last_lint))
}
fn iter(&mut self) -> impl Iterator<Item = DefId> + '_ {
let nonblanket = self.nonblanket_impls.iter_mut().flat_map(|(_, v)| v.iter());
self.blanket_impls.iter().chain(nonblanket).cloned()
}
fn filtered(&mut self, sty: SimplifiedType) -> impl Iterator<Item = DefId> + '_ {
let nonblanket = self.nonblanket_impls.entry(sty).or_default().iter();
self.blanket_impls.iter().chain(nonblanket).cloned()
}
}
// A custom iterator used by Children::insert
enum PotentialSiblings<I, J>
where I: Iterator<Item = DefId>,
J: Iterator<Item = DefId>
{
Unfiltered(I),
Filtered(J)
}
impl<I, J> Iterator for PotentialSiblings<I, J>
where I: Iterator<Item = DefId>,
J: Iterator<Item = DefId>
{
type Item = DefId;
fn next(&mut self) -> Option<Self::Item> {
match *self {
PotentialSiblings::Unfiltered(ref mut iter) => iter.next(),
PotentialSiblings::Filtered(ref mut iter) => iter.next()
}
}
}
impl<'tcx> Graph {
pub fn new() -> Graph {
Graph {
parent: Default::default(),
children: Default::default(),
}
}
/// Insert a local impl into the specialization graph. If an existing impl
/// conflicts with it (has overlap, but neither specializes the other),
/// information about the area of overlap is returned in the `Err`.
pub fn insert(
&mut self,
tcx: TyCtxt<'tcx>,
impl_def_id: DefId,
) -> Result<Option<FutureCompatOverlapError>, OverlapError> {
assert!(impl_def_id.is_local());
let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
let trait_def_id = trait_ref.def_id;
debug!("insert({:?}): inserting TraitRef {:?} into specialization graph",
impl_def_id, trait_ref);
// If the reference itself contains an earlier error (e.g., due to a
// resolution failure), then we just insert the impl at the top level of
// the graph and claim that there's no overlap (in order to suppress
// bogus errors).
if trait_ref.references_error() {
debug!("insert: inserting dummy node for erroneous TraitRef {:?}, \
impl_def_id={:?}, trait_def_id={:?}",
trait_ref, impl_def_id, trait_def_id);
self.parent.insert(impl_def_id, trait_def_id);
self.children.entry(trait_def_id).or_default()
.insert_blindly(tcx, impl_def_id);
return Ok(None);
}
let mut parent = trait_def_id;
let mut last_lint = None;
let simplified = fast_reject::simplify_type(tcx, trait_ref.self_ty(), false);
// Descend the specialization tree, where `parent` is the current parent node.
loop {
use self::Inserted::*;
let insert_result = self.children.entry(parent).or_default()
.insert(tcx, impl_def_id, simplified)?;
match insert_result {
BecameNewSibling(opt_lint) => {
last_lint = opt_lint;
break;
}
ReplaceChildren(grand_children_to_be) => {
// We currently have
//
// P
// |
// G
//
// and we are inserting the impl N. We want to make it:
//
// P
// |
// N
// |
// G
// Adjust P's list of children: remove G and then add N.
{
let siblings = self.children
.get_mut(&parent)
.unwrap();
for &grand_child_to_be in &grand_children_to_be {
siblings.remove_existing(tcx, grand_child_to_be);
}
siblings.insert_blindly(tcx, impl_def_id);
}
// Set G's parent to N and N's parent to P.
for &grand_child_to_be in &grand_children_to_be {
self.parent.insert(grand_child_to_be, impl_def_id);
}
self.parent.insert(impl_def_id, parent);
// Add G as N's child.
for &grand_child_to_be in &grand_children_to_be {
self.children.entry(impl_def_id).or_default()
.insert_blindly(tcx, grand_child_to_be);
}
break;
}
ShouldRecurseOn(new_parent) => {
parent = new_parent;
}
}
}
self.parent.insert(impl_def_id, parent);
Ok(last_lint)
}
/// Insert cached metadata mapping from a child impl back to its parent.
pub fn record_impl_from_cstore(&mut self, tcx: TyCtxt<'tcx>, parent: DefId, child: DefId) {
if self.parent.insert(child, parent).is_some() {
bug!("When recording an impl from the crate store, information about its parent \
was already present.");
}
self.children.entry(parent).or_default().insert_blindly(tcx, child);
}
/// The parent of a given impl, which is the `DefId` of the trait when the
/// impl is a "specialization root".
pub fn parent(&self, child: DefId) -> DefId {
*self.parent.get(&child).unwrap()
}
}
/// A node in the specialization graph is either an impl or a trait
/// definition; either can serve as a source of item definitions.
/// There is always exactly one trait definition node: the root.
#[derive(Debug, Copy, Clone)]
pub enum Node {
Impl(DefId),
Trait(DefId),
}
impl<'tcx> Node {
pub fn is_from_trait(&self) -> bool {
match *self {
Node::Trait(..) => true,
_ => false,
}
}
/// Iterate over the items defined directly by the given (impl or trait) node.
pub fn items(&self, tcx: TyCtxt<'tcx>) -> ty::AssocItemsIterator<'tcx> {
tcx.associated_items(self.def_id())
}
pub fn def_id(&self) -> DefId {
match *self {
Node::Impl(did) => did,
Node::Trait(did) => did,
}
}
}
pub struct Ancestors<'tcx> {
trait_def_id: DefId,
specialization_graph: &'tcx Graph,
current_source: Option<Node>,
}
impl Iterator for Ancestors<'_> {
type Item = Node;
fn next(&mut self) -> Option<Node> {
let cur = self.current_source.take();
if let Some(Node::Impl(cur_impl)) = cur {
let parent = self.specialization_graph.parent(cur_impl);
self.current_source = if parent == self.trait_def_id {
Some(Node::Trait(parent))
} else {
Some(Node::Impl(parent))
};
}
cur
}
}
pub struct NodeItem<T> {
pub node: Node,
pub item: T,
}
impl<T> NodeItem<T> {
pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> NodeItem<U> {
NodeItem {
node: self.node,
item: f(self.item),
}
}
}
impl<'tcx> Ancestors<'tcx> {
/// Search the items from the given ancestors, returning each definition
/// with the given name and the given kind.
// FIXME(#35870): avoid closures being unexported due to `impl Trait`.
#[inline]
pub fn defs(
self,
tcx: TyCtxt<'tcx>,
trait_item_name: Ident,
trait_item_kind: ty::AssocKind,
trait_def_id: DefId,
) -> impl Iterator<Item = NodeItem<ty::AssocItem>> + Captures<'tcx> + 'tcx {
self.flat_map(move |node| {
use crate::ty::AssocKind::*;
node.items(tcx).filter(move |impl_item| match (trait_item_kind, impl_item.kind) {
| (Const, Const)
| (Method, Method)
| (Type, Type)
| (Type, Existential)
=> tcx.hygienic_eq(impl_item.ident, trait_item_name, trait_def_id),
| (Const, _)
| (Method, _)
| (Type, _)
| (Existential, _)
=> false,
}).map(move |item| NodeItem { node: node, item: item })
})
}
}
/// Walk up the specialization ancestors of a given impl, starting with that
/// impl itself.
pub fn ancestors(
tcx: TyCtxt<'tcx>,
trait_def_id: DefId,
start_from_impl: DefId,
) -> Ancestors<'tcx> {
let specialization_graph = tcx.specialization_graph_of(trait_def_id);
Ancestors {
trait_def_id,
specialization_graph,
current_source: Some(Node::Impl(start_from_impl)),
}
}
impl<'a> HashStable<StableHashingContext<'a>> for Children {
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a>,
hasher: &mut StableHasher<W>) {
let Children {
ref nonblanket_impls,
ref blanket_impls,
} = *self;
ich::hash_stable_trait_impls(hcx, hasher, blanket_impls, nonblanket_impls);
}
}
impl_stable_hash_for!(struct self::Graph {
parent,
children
});
|
insert_blindly
|
GenBelongsTo.py
|
from GenObj import *
class GenBelongsTo(GenObj):
def __init__(self, name, stmtIndex):
super(GenBelongsTo, self).__init__(name)
self.stmtIndex = stmtIndex
def getStmtIndex(self):
return self.stmtIndex
|
def setStmtIndex(self, stmtIndex):
self.stmtIndex = stmtIndex
|
|
search.go
|
package storage
import (
"fmt"
"io"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storagepacelimiter"
)
// BlockRef references a Block.
//
// BlockRef is valid only until the corresponding Search is valid,
// i.e. it becomes invalid after Search.MustClose is called.
type BlockRef struct {
p *part
bh blockHeader
}
func (br *BlockRef) reset() {
br.p = nil
br.bh = blockHeader{}
}
func (br *BlockRef) init(p *part, bh *blockHeader) {
br.p = p
br.bh = *bh
}
// MustReadBlock reads block from br to dst.
//
// if fetchData is false, then only block header is read, otherwise all the data is read.
func (br *BlockRef) MustReadBlock(dst *Block, fetchData bool) {
dst.Reset()
dst.bh = br.bh
if !fetchData {
return
}
dst.timestampsData = bytesutil.Resize(dst.timestampsData[:0], int(br.bh.TimestampsBlockSize))
br.p.timestampsFile.MustReadAt(dst.timestampsData, int64(br.bh.TimestampsBlockOffset))
dst.valuesData = bytesutil.Resize(dst.valuesData[:0], int(br.bh.ValuesBlockSize))
br.p.valuesFile.MustReadAt(dst.valuesData, int64(br.bh.ValuesBlockOffset))
}
// MetricBlockRef contains reference to time series block for a single metric.
type MetricBlockRef struct {
// The metric name
MetricName []byte
// The block reference. Call BlockRef.MustReadBlock in order to obtain the block.
BlockRef *BlockRef
}
// MetricBlock is a time series block for a single metric.
type MetricBlock struct {
// MetricName is metric name for the given Block.
MetricName []byte
// Block is a block for the given MetricName
Block Block
}
// Marshal marshals MetricBlock to dst
func (mb *MetricBlock) Marshal(dst []byte) []byte {
dst = encoding.MarshalBytes(dst, mb.MetricName)
return MarshalBlock(dst, &mb.Block)
}
// MarshalBlock marshals b to dst.
//
// b.MarshalData must be called on b before calling MarshalBlock.
func MarshalBlock(dst []byte, b *Block) []byte {
dst = b.bh.Marshal(dst)
dst = encoding.MarshalBytes(dst, b.timestampsData)
dst = encoding.MarshalBytes(dst, b.valuesData)
return dst
}
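// Hedged layout sketch (assuming encoding.MarshalBytes length-prefixes each slice):
// a marshaled MetricBlock is
//   MetricName | blockHeader | timestampsData | valuesData
// and Unmarshal/UnmarshalBlock below read the fields back in the same order.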
// Unmarshal unmarshals MetricBlock from src
func (mb *MetricBlock) Unmarshal(src []byte) ([]byte, error) {
mb.Block.Reset()
tail, mn, err := encoding.UnmarshalBytes(src)
if err != nil {
return tail, fmt.Errorf("cannot unmarshal MetricName: %w", err)
}
mb.MetricName = append(mb.MetricName[:0], mn...)
src = tail
return UnmarshalBlock(&mb.Block, src)
}
// UnmarshalBlock unmarshals Block from src to dst.
//
// dst.UnmarshalData isn't called on the block.
func
|
(dst *Block, src []byte) ([]byte, error) {
tail, err := dst.bh.Unmarshal(src)
if err != nil {
return tail, fmt.Errorf("cannot unmarshal blockHeader: %w", err)
}
src = tail
tail, tds, err := encoding.UnmarshalBytes(src)
if err != nil {
return tail, fmt.Errorf("cannot unmarshal timestampsData: %w", err)
}
dst.timestampsData = append(dst.timestampsData[:0], tds...)
src = tail
tail, vd, err := encoding.UnmarshalBytes(src)
if err != nil {
return tail, fmt.Errorf("cannot unmarshal valuesData: %w", err)
}
dst.valuesData = append(dst.valuesData[:0], vd...)
src = tail
return src, nil
}
// Search is a search for time series.
type Search struct {
// MetricBlockRef is updated with each Search.NextMetricBlock call.
MetricBlockRef MetricBlockRef
idb *indexDB
ts tableSearch
	// tr contains the time range used in the search.
tr TimeRange
// tfss contains tag filters used in the search.
tfss []*TagFilters
// deadline in unix timestamp seconds for the current search.
deadline uint64
err error
needClosing bool
loops int
prevMetricID uint64
}
func (s *Search) reset() {
s.MetricBlockRef.MetricName = s.MetricBlockRef.MetricName[:0]
s.MetricBlockRef.BlockRef = nil
s.idb = nil
s.ts.reset()
s.tr = TimeRange{}
s.tfss = nil
s.deadline = 0
s.err = nil
s.needClosing = false
s.loops = 0
s.prevMetricID = 0
}
// Init initializes s from the given storage, tfss and tr.
//
// MustClose must be called when the search is done.
//
// Init returns the upper bound on the number of found time series.
func (s *Search) Init(storage *Storage, tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) int {
if s.needClosing {
logger.Panicf("BUG: missing MustClose call before the next call to Init")
}
s.reset()
s.tr = tr
s.tfss = tfss
s.deadline = deadline
s.needClosing = true
	// Look up the TSIDs matching the queried metrics and label filters.
tsids, err := storage.searchTSIDs(tfss, tr, maxMetrics, deadline)
if err == nil {
err = storage.prefetchMetricNames(tsids, deadline)
}
	// It is ok to call s.ts.Init even on error from storage.searchTSIDs.
	// s.ts.Init must be called before returning, since Search.MustClose
	// would fail otherwise.
s.ts.Init(storage.tb, tsids, tr)
if err != nil {
s.err = err
return 0
}
s.idb = storage.idb()
return len(tsids)
}
// MustClose closes the Search.
func (s *Search) MustClose() {
if !s.needClosing {
logger.Panicf("BUG: missing Init call before MustClose")
}
s.ts.MustClose()
s.reset()
}
// Error returns the last error from s.
func (s *Search) Error() error {
if s.err == io.EOF || s.err == nil {
return nil
}
return fmt.Errorf("error when searching for tagFilters=%s on the time range %s: %w", s.tfss, s.tr.String(), s.err)
}
// NextMetricBlock proceeds to the next MetricBlockRef.
func (s *Search) NextMetricBlock() bool {
if s.err != nil {
return false
}
for s.ts.NextBlock() {
if s.loops&paceLimiterSlowIterationsMask == 0 {
if err := checkSearchDeadlineAndPace(s.deadline); err != nil {
s.err = err
return false
}
}
s.loops++
tsid := &s.ts.BlockRef.bh.TSID
if tsid.MetricID != s.prevMetricID {
var err error
s.MetricBlockRef.MetricName, err = s.idb.searchMetricNameWithCache(s.MetricBlockRef.MetricName[:0], tsid.MetricID, tsid.AccountID, tsid.ProjectID)
if err != nil {
if err == io.EOF {
// Skip missing metricName for tsid.MetricID.
// It should be automatically fixed. See indexDB.searchMetricNameWithCache for details.
continue
}
s.err = err
return false
}
s.prevMetricID = tsid.MetricID
}
s.MetricBlockRef.BlockRef = s.ts.BlockRef
return true
}
if err := s.ts.Error(); err != nil {
s.err = err
return false
}
s.err = io.EOF
return false
}
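// A typical consumer (hypothetical sketch; the concrete storage, filters and
// deadline come from the caller) drives a Search as follows:
//
//	var s Search
//	s.Init(storage, tfss, tr, maxMetrics, deadline)
//	for s.NextMetricBlock() {
//		var b Block
//		s.MetricBlockRef.BlockRef.MustReadBlock(&b, true)
//		// process b for s.MetricBlockRef.MetricName
//	}
//	if err := s.Error(); err != nil {
//		// handle the search error
//	}
//	s.MustClose()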
// SearchQuery is used for sending search queries from vmselect to vmstorage.
type SearchQuery struct {
AccountID uint32
ProjectID uint32
MinTimestamp int64
MaxTimestamp int64
TagFilterss [][]TagFilter
}
// NewSearchQuery creates new search query for the given args.
func NewSearchQuery(accountID, projectID uint32, start, end int64, tagFilterss [][]TagFilter) *SearchQuery {
return &SearchQuery{
AccountID: accountID,
ProjectID: projectID,
MinTimestamp: start,
MaxTimestamp: end,
TagFilterss: tagFilterss,
}
}
// TagFilter represents a single tag filter from SearchQuery.
type TagFilter struct {
Key []byte
Value []byte
IsNegative bool
IsRegexp bool
}
// String returns string representation of tf.
func (tf *TagFilter) String() string {
var bb bytesutil.ByteBuffer
fmt.Fprintf(&bb, "{Key=%q, Value=%q, IsNegative: %v, IsRegexp: %v}", tf.Key, tf.Value, tf.IsNegative, tf.IsRegexp)
return string(bb.B)
}
// Marshal appends marshaled tf to dst and returns the result.
func (tf *TagFilter) Marshal(dst []byte) []byte {
dst = encoding.MarshalBytes(dst, tf.Key)
dst = encoding.MarshalBytes(dst, tf.Value)
x := 0
if tf.IsNegative {
x = 2
}
if tf.IsRegexp {
x |= 1
}
dst = append(dst, byte(x))
return dst
}
// Unmarshal unmarshals tf from src and returns the tail.
func (tf *TagFilter) Unmarshal(src []byte) ([]byte, error) {
tail, k, err := encoding.UnmarshalBytes(src)
if err != nil {
return tail, fmt.Errorf("cannot unmarshal Key: %w", err)
}
tf.Key = append(tf.Key[:0], k...)
src = tail
tail, v, err := encoding.UnmarshalBytes(src)
if err != nil {
return tail, fmt.Errorf("cannot unmarshal Value: %w", err)
}
tf.Value = append(tf.Value[:0], v...)
src = tail
if len(src) < 1 {
return src, fmt.Errorf("cannot unmarshal IsNegative+IsRegexp from empty src")
}
x := src[0]
switch x {
case 0:
tf.IsNegative = false
tf.IsRegexp = false
case 1:
tf.IsNegative = false
tf.IsRegexp = true
case 2:
tf.IsNegative = true
tf.IsRegexp = false
case 3:
tf.IsNegative = true
tf.IsRegexp = true
default:
return src, fmt.Errorf("unexpected value for IsNegative+IsRegexp: %d; must be in the range [0..3]", x)
}
src = src[1:]
return src, nil
}
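// The trailing flag byte packs both booleans, so a negative regexp filter
// round-trips as follows (hypothetical values):
//
//	tf := TagFilter{Key: []byte("job"), Value: []byte("vm.*"), IsNegative: true, IsRegexp: true}
//	data := tf.Marshal(nil) // the last byte is 3 (IsNegative -> 2, IsRegexp -> 1)
//	var tf2 TagFilter
//	tail, err := tf2.Unmarshal(data) // err == nil, len(tail) == 0, tf2 mirrors tf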
// String returns string representation of the search query.
func (sq *SearchQuery) String() string {
var bb bytesutil.ByteBuffer
fmt.Fprintf(&bb, "AccountID=%d, ProjectID=%d, MinTimestamp=%s, MaxTimestamp=%s, TagFilters=[\n",
sq.AccountID, sq.ProjectID, timestampToTime(sq.MinTimestamp), timestampToTime(sq.MaxTimestamp))
for _, tagFilters := range sq.TagFilterss {
for _, tf := range tagFilters {
fmt.Fprintf(&bb, "%s", tf.String())
}
fmt.Fprintf(&bb, "\n")
}
fmt.Fprintf(&bb, "]")
return string(bb.B)
}
// Marshal appends marshaled sq to dst and returns the result.
func (sq *SearchQuery) Marshal(dst []byte) []byte {
dst = encoding.MarshalUint32(dst, sq.AccountID)
dst = encoding.MarshalUint32(dst, sq.ProjectID)
dst = encoding.MarshalVarInt64(dst, sq.MinTimestamp)
dst = encoding.MarshalVarInt64(dst, sq.MaxTimestamp)
dst = encoding.MarshalVarUint64(dst, uint64(len(sq.TagFilterss)))
for _, tagFilters := range sq.TagFilterss {
dst = encoding.MarshalVarUint64(dst, uint64(len(tagFilters)))
for i := range tagFilters {
dst = tagFilters[i].Marshal(dst)
}
}
return dst
}
// Unmarshal unmarshals sq from src and returns the tail.
func (sq *SearchQuery) Unmarshal(src []byte) ([]byte, error) {
if len(src) < 4 {
return src, fmt.Errorf("cannot unmarshal AccountID: too short src len: %d; must be at least %d bytes", len(src), 4)
}
sq.AccountID = encoding.UnmarshalUint32(src)
src = src[4:]
if len(src) < 4 {
return src, fmt.Errorf("cannot unmarshal ProjectID: too short src len: %d; must be at least %d bytes", len(src), 4)
}
sq.ProjectID = encoding.UnmarshalUint32(src)
src = src[4:]
tail, minTs, err := encoding.UnmarshalVarInt64(src)
if err != nil {
return src, fmt.Errorf("cannot unmarshal MinTimestamp: %w", err)
}
sq.MinTimestamp = minTs
src = tail
tail, maxTs, err := encoding.UnmarshalVarInt64(src)
if err != nil {
return src, fmt.Errorf("cannot unmarshal MaxTimestamp: %w", err)
}
sq.MaxTimestamp = maxTs
src = tail
tail, tfssCount, err := encoding.UnmarshalVarUint64(src)
if err != nil {
return src, fmt.Errorf("cannot unmarshal the count of TagFilterss: %w", err)
}
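	// Grow sq.TagFilterss up to tfssCount items, reusing any existing
	// capacity so repeated Unmarshal calls avoid extra allocations.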
if n := int(tfssCount) - cap(sq.TagFilterss); n > 0 {
sq.TagFilterss = append(sq.TagFilterss[:cap(sq.TagFilterss)], make([][]TagFilter, n)...)
}
sq.TagFilterss = sq.TagFilterss[:tfssCount]
src = tail
for i := 0; i < int(tfssCount); i++ {
tail, tfsCount, err := encoding.UnmarshalVarUint64(src)
if err != nil {
return src, fmt.Errorf("cannot unmarshal the count of TagFilters: %w", err)
}
src = tail
tagFilters := sq.TagFilterss[i]
if n := int(tfsCount) - cap(tagFilters); n > 0 {
tagFilters = append(tagFilters[:cap(tagFilters)], make([]TagFilter, n)...)
}
tagFilters = tagFilters[:tfsCount]
for j := 0; j < int(tfsCount); j++ {
tail, err := tagFilters[j].Unmarshal(src)
if err != nil {
return tail, fmt.Errorf("cannot unmarshal TagFilter #%d: %w", j, err)
}
src = tail
}
sq.TagFilterss[i] = tagFilters
}
return src, nil
}
func checkSearchDeadlineAndPace(deadline uint64) error {
if fasttime.UnixTimestamp() > deadline {
return ErrDeadlineExceeded
}
storagepacelimiter.Search.WaitIfNeeded()
return nil
}
const (
paceLimiterFastIterationsMask = 1<<16 - 1
paceLimiterMediumIterationsMask = 1<<14 - 1
paceLimiterSlowIterationsMask = 1<<12 - 1
)
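// Since the masks above have the form 1<<n - 1, loops&mask == 0 holds once
// every 2^16, 2^14 and 2^12 iterations respectively, bounding how often the
// deadline check and the pace limiter are consulted on hot search paths.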
|
UnmarshalBlock
|
main.ts
|
import { NestFactory } from '@nestjs/core';
import { AppModule } from './app/app.module';
|
await app.listen(3000);
}
bootstrap();
|
import { ValidationPipe } from '@nestjs/common';
async function bootstrap() {
const app = await NestFactory.create(AppModule);
app.useGlobalPipes(new ValidationPipe());
|
test_multiple_deploys.py
|
import logging
import contextlib
import threading
from typing import (
TYPE_CHECKING,
Generator,
)
import pytest
import conftest
from rnode_testing.common import TestingContext
from rnode_testing.rnode import (
docker_network_with_started_bootstrap,
started_peer,
)
from rnode_testing.wait import (
wait_for_blocks_count_at_least,
wait_for_approved_block_received_handler_state,
)
if TYPE_CHECKING:
from _pytest.fixtures import SubRequest
from docker.client import DockerClient
from rnode_testing.rnode import Node
class DeployThread(threading.Thread):
def __init__(self, name, node, contract, count):
threading.Thread.__init__(self)
self.name = name
self.node = node
self.contract = contract
self.count = count
logging.info(f"Setup thread - {self.contract} to node {self.name}, amount {count}.")
def run(self):
for i in range(self.count):
logging.info(f"[{self.name}]-[{i}] Will deploy {self.contract}.")
d = self.node.deploy(self.contract)
logging.info(f"[{self.name}]-[{i}] Deploy {self.contract}: {d}")
p = self.node.propose()
logging.info(f"[{self.name}]-[{i}] Proposed {self.contract}: {p}")
s = self.node.show_blocks_with_depth(1)
logging.info(f"[{self.name}]-[{i}] Show blocks: {s}")
BOOTSTRAP_NODE_KEYS = conftest.KeyPair(private_key='80366db5fbb8dad7946f27037422715e4176dda41d582224db87b6c3b783d709', public_key='1cd8bf79a2c1bd0afa160f6cdfeb8597257e48135c9bf5e4823f2875a1492c97')
BONDED_VALIDATOR_KEY_1 = conftest.KeyPair(private_key='120d42175739387af0264921bb117e4c4c05fbe2ce5410031e8b158c6e414bb5', public_key='02ab69930f74b931209df3ce54e3993674ab3e7c98f715608a5e74048b332821')
BONDED_VALIDATOR_KEY_2 = conftest.KeyPair(private_key='120d42175739387af0264921bb117e4c4c05fbe2ce5410031e8b158c6e414bb5', public_key='02ab69930f74b931209df3ce54e3993674ab3e7c98f715608a5e74048b332821')
BONDED_VALIDATOR_KEY_3 = conftest.KeyPair(private_key='120d42175739387af0264921bb117e4c4c05fbe2ce5410031e8b158c6e414bb5', public_key='02ab69930f74b931209df3ce54e3993674ab3e7c98f715608a5e74048b332821')
@contextlib.contextmanager
def started_bonded_validator(context: TestingContext, bootstrap_node: "Node", no, key_pair) -> Generator["Node", None, None]:
with started_peer(
context=context,
network=bootstrap_node.network,
name='bonded-validator-' + str(no),
bootstrap=bootstrap_node,
key_pair=key_pair,
) as bonded_validator:
wait_for_approved_block_received_handler_state(bonded_validator, context.node_startup_timeout)
yield bonded_validator
@pytest.mark.xfail
def test_multiple_deploys_at_once(command_line_options_fixture, docker_client_fixture) -> None:
contract_path = '/opt/docker/examples/hello_world_again.rho'
peers_keypairs = [BONDED_VALIDATOR_KEY_1, BONDED_VALIDATOR_KEY_2, BONDED_VALIDATOR_KEY_3]
with conftest.testing_context(command_line_options_fixture, docker_client_fixture, bootstrap_keypair=BOOTSTRAP_NODE_KEYS, peers_keypairs=peers_keypairs) as context:
with docker_network_with_started_bootstrap(context=context) as bootstrap_node:
with started_bonded_validator(context, bootstrap_node, 1, BONDED_VALIDATOR_KEY_1) as no1:
with started_bonded_validator(context, bootstrap_node, 2, BONDED_VALIDATOR_KEY_2) as no2:
with started_bonded_validator(context, bootstrap_node, 3, BONDED_VALIDATOR_KEY_3) as no3:
deploy1 = DeployThread("node1", no1, contract_path, 1)
deploy1.start()
expected_blocks_count = 1
max_retrieved_blocks = 1
wait_for_blocks_count_at_least(
no1,
expected_blocks_count,
max_retrieved_blocks,
expected_blocks_count * 10,
)
deploy2 = DeployThread("node2", no2, contract_path, 3)
deploy2.start()
deploy3 = DeployThread("node3", no3, contract_path, 3)
deploy3.start()
expected_blocks_count = 7
max_retrieved_blocks = 7
wait_for_blocks_count_at_least(
no1,
expected_blocks_count,
max_retrieved_blocks,
480
)
wait_for_blocks_count_at_least(
no2,
expected_blocks_count,
max_retrieved_blocks,
expected_blocks_count * 10,
)
wait_for_blocks_count_at_least(
no3,
|
expected_blocks_count * 10,
)
deploy1.join()
deploy2.join()
deploy3.join()
|
expected_blocks_count,
max_retrieved_blocks,
|
gtimer_z_unit_1_test.go
|
// Copyright 2018 gf Author(https://github.com/gogf/gf). All Rights Reserved.
//
// This Source Code Form is subject to the terms of the MIT License.
// If a copy of the MIT was not distributed with this file,
// You can obtain one at https://github.com/gogf/gf.
// Timer Operations
package gtimer_test
import (
"github.com/gogf/gf/g/container/garray"
"github.com/gogf/gf/g/os/gtimer"
"github.com/gogf/gf/g/test/gtest"
"testing"
"time"
)
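// New returns a timer with 10 wheel slots and a 10ms tick interval,
// giving the tests below a roughly 10ms scheduling resolution.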
func New() *gtimer.Timer {
return gtimer.New(10, 10*time.Millisecond)
}
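// TestTimer_Add_Close verifies the firing timeline: at ~250ms the two 200ms
// entries have each fired once (len 2); at ~500ms they have fired twice and
// the 400ms entry once (len 5); after Close the length no longer grows.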
func TestTimer_Add_Close(t *testing.T) {
gtest.Case(t, func() {
timer := New()
array := garray.New()
//fmt.Println("start", time.Now())
timer.Add(200*time.Millisecond, func() {
//fmt.Println("entry1", time.Now())
array.Append(1)
})
timer.Add(200*time.Millisecond, func() {
//fmt.Println("entry2", time.Now())
array.Append(1)
})
timer.Add(400*time.Millisecond, func() {
//fmt.Println("entry3", time.Now())
array.Append(1)
})
time.Sleep(250*time.Millisecond)
gtest.Assert(array.Len(), 2)
time.Sleep(250*time.Millisecond)
gtest.Assert(array.Len(), 5)
timer.Close()
time.Sleep(250*time.Millisecond)
fixedLength := array.Len()
time.Sleep(250*time.Millisecond)
gtest.Assert(array.Len(), fixedLength)
})
}
func TestTimer_Start_Stop_Close(t *testing.T) {
gtest.Case(t, func() {
timer := New()
array := garray.New()
timer.Add(200*time.Millisecond, func() {
//glog.Println("add...")
array.Append(1)
})
gtest.Assert(array.Len(), 0)
time.Sleep(300*time.Millisecond)
gtest.Assert(array.Len(), 1)
timer.Stop()
time.Sleep(1000*time.Millisecond)
gtest.Assert(array.Len(), 1)
timer.Start()
time.Sleep(200*time.Millisecond)
gtest.Assert(array.Len(), 2)
timer.Close()
time.Sleep(1000*time.Millisecond)
gtest.Assert(array.Len(), 2)
})
}
func TestTimer_AddSingleton(t *testing.T) {
gtest.Case(t, func() {
timer := New()
array := garray.New()
timer.AddSingleton(200*time.Millisecond, func() {
array.Append(1)
time.Sleep(10*time.Second)
})
time.Sleep(250*time.Millisecond)
gtest.Assert(array.Len(), 1)
time.Sleep(500*time.Millisecond)
gtest.Assert(array.Len(), 1)
})
}
func TestTimer_AddOnce(t *testing.T) {
gtest.Case(t, func() {
timer := New()
array := garray.New()
timer.AddOnce(200*time.Millisecond, func() {
array.Append(1)
})
timer.AddOnce(200*time.Millisecond, func() {
array.Append(1)
})
time.Sleep(250*time.Millisecond)
gtest.Assert(array.Len(), 2)
time.Sleep(250*time.Millisecond)
gtest.Assert(array.Len(), 2)
timer.Close()
time.Sleep(250*time.Millisecond)
fixedLength := array.Len()
time.Sleep(250*time.Millisecond)
gtest.Assert(array.Len(), fixedLength)
})
}
func TestTimer_AddTimes(t *testing.T) {
gtest.Case(t, func() {
timer := New()
array := garray.New()
timer.AddTimes(200*time.Millisecond, 2, func() {
array.Append(1)
})
time.Sleep(1000*time.Millisecond)
gtest.Assert(array.Len(), 2)
})
}
func TestTimer_DelayAdd(t *testing.T) {
gtest.Case(t, func() {
timer := New()
array := garray.New()
timer.DelayAdd(200*time.Millisecond, 200*time.Millisecond, func() {
array.Append(1)
})
time.Sleep(250*time.Millisecond)
gtest.Assert(array.Len(), 0)
time.Sleep(250*time.Millisecond)
gtest.Assert(array.Len(), 1)
})
}
func TestTimer_DelayAddEntry(t *testing.T) {
gtest.Case(t, func() {
timer := New()
array := garray.New()
timer.DelayAddEntry(200*time.Millisecond, 200*time.Millisecond, func() {
array.Append(1)
}, false, 100, gtimer.STATUS_READY)
time.Sleep(250*time.Millisecond)
gtest.Assert(array.Len(), 0)
time.Sleep(250*time.Millisecond)
gtest.Assert(array.Len(), 1)
})
}
func TestTimer_DelayAddSingleton(t *testing.T) {
gtest.Case(t, func() {
|
array.Append(1)
time.Sleep(10*time.Second)
})
time.Sleep(250*time.Millisecond)
gtest.Assert(array.Len(), 0)
time.Sleep(1000*time.Millisecond)
gtest.Assert(array.Len(), 1)
})
}
func TestTimer_DelayAddOnce(t *testing.T) {
gtest.Case(t, func() {
timer := New()
array := garray.New()
timer.DelayAddOnce(200*time.Millisecond, 200*time.Millisecond, func() {
array.Append(1)
})
time.Sleep(250*time.Millisecond)
gtest.Assert(array.Len(), 0)
time.Sleep(250*time.Millisecond)
gtest.Assert(array.Len(), 1)
time.Sleep(500*time.Millisecond)
gtest.Assert(array.Len(), 1)
})
}
func TestTimer_DelayAddTimes(t *testing.T) {
gtest.Case(t, func() {
timer := New()
array := garray.New()
timer.DelayAddTimes(200*time.Millisecond, 500*time.Millisecond, 2, func() {
array.Append(1)
})
time.Sleep(200*time.Millisecond)
gtest.Assert(array.Len(), 0)
time.Sleep(600*time.Millisecond)
gtest.Assert(array.Len(), 1)
time.Sleep(600*time.Millisecond)
gtest.Assert(array.Len(), 2)
time.Sleep(1000*time.Millisecond)
gtest.Assert(array.Len(), 2)
})
}
func TestTimer_AddLessThanInterval(t *testing.T) {
gtest.Case(t, func() {
timer := gtimer.New(10, 100*time.Millisecond)
array := garray.New()
timer.Add(20*time.Millisecond, func() {
array.Append(1)
})
time.Sleep(50*time.Millisecond)
gtest.Assert(array.Len(), 0)
time.Sleep(110*time.Millisecond)
gtest.Assert(array.Len(), 1)
time.Sleep(110*time.Millisecond)
gtest.Assert(array.Len(), 2)
})
}
func TestTimer_AddLeveledEntry1(t *testing.T) {
gtest.Case(t, func() {
timer := New()
array := garray.New()
//glog.Println("start")
timer.DelayAdd(1000*time.Millisecond, 1001*time.Millisecond, func() {
//glog.Println("add")
array.Append(1)
})
time.Sleep(1500*time.Millisecond)
gtest.Assert(array.Len(), 0)
time.Sleep(1300*time.Millisecond)
//glog.Println("check")
gtest.Assert(array.Len(), 1)
})
}
func TestTimer_Exit(t *testing.T) {
gtest.Case(t, func() {
timer := New()
array := garray.New()
timer.Add(200*time.Millisecond, func() {
array.Append(1)
gtimer.Exit()
})
time.Sleep(1000*time.Millisecond)
gtest.Assert(array.Len(), 1)
})
}
|
timer := New()
array := garray.New()
timer.DelayAddSingleton(200*time.Millisecond, 200*time.Millisecond, func() {
|
submatch.go
|
package strings
import (
"math"
)
// base is the radix whose powers are used when computing the hash.
// The greater the base, the lower the chance of hash collisions.
|
const base = 128
type hashCache map[int]uint64
// Contains returns true if the substring exists in the source.
// It uses Rabin-Karp Substring algorithm to do so.
func Contains(source, substring string) bool {
substrLength := len(substring)
if substrLength > len(source) {
return false
}
// cache is just to avoid repeated computations of base Powers
var cache = make(hashCache)
for i := 0; i < substrLength; i++ {
cache[i] = uint64(math.Pow(base, float64(i)))
}
expectedHash := cache.compute(substring)
computedHash := cache.compute(source[:substrLength])
if computedHash == expectedHash {
if substring == source[:substrLength] {
return true
}
}
highestPower := substrLength - 1
for i := substrLength; i < len(source); i++ {
computedHash = (computedHash-cache.code(source[i-substrLength], highestPower))*base + cache.code(source[i], 0)
if computedHash == expectedHash {
if substring == source[i-substrLength+1:i+1] {
return true
}
}
}
return false
}
func (c hashCache) compute(str string) uint64 {
var result uint64
for i, j := len(str)-1, 0; i >= 0; i, j = i-1, j+1 {
result += c.code(str[i], j)
}
return result
}
func (c hashCache) code(char byte, position int) uint64 {
return c[position] * uint64(char)
}
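// Usage sketch: a matching hash alone is never trusted; Contains always
// confirms a candidate with a direct string comparison, e.g.:
//
//	Contains("hello world", "lo wo") // true
//	Contains("hello world", "xyz")   // false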
|
//
// If s is the length of the substring and b is the length of the source
// string, the average time complexity is O(s + b); the worst case is O(s*b).
|
enospc_error_notsupported.go
|
// IsErrNoSpace() on plan9 always returns false because
// plan9 does not support the syscall.ENOSPC error.
func IsErrNoSpace(cause error) (isNoSpc bool) {
isNoSpc = false
return
}
|
//go:build plan9
// +build plan9
package fserrors
|
|
num_inflowing_neighbours.rs
|
/*
This tool is part of the WhiteboxTools geospatial analysis library.
Authors: Dr. John Lindsay
Created: 25/06/2017
Last Modified: 12/10/2018
License: MIT
*/
use whitebox_raster::*;
use whitebox_common::structures::Array2D;
use crate::tools::*;
use num_cpus;
use std::env;
use std::f64;
use std::io::{Error, ErrorKind};
use std::path;
use std::sync::mpsc;
use std::sync::Arc;
use std::thread;
/// This tool calculates the number of inflowing neighbours for each grid cell in a raster file. The user
/// must specify the names of an input digital elevation model (DEM) file (`--dem`) and the output raster
/// file (`--output`). The tool calculates the D8 pointer file internally in order to identify inflowing
/// neighbouring cells.
///
/// Grid cells in the input DEM that contain the NoData value will be assigned the NoData value in the
/// output image. The output image is of the integer data type and continuous data scale.
///
/// # See Also
/// `NumDownslopeNeighbours`, `NumUpslopeNeighbours`
pub struct NumInflowingNeighbours {
name: String,
description: String,
toolbox: String,
parameters: Vec<ToolParameter>,
example_usage: String,
}
impl NumInflowingNeighbours {
pub fn new() -> NumInflowingNeighbours {
// public constructor
let name = "NumInflowingNeighbours".to_string();
let toolbox = "Hydrological Analysis".to_string();
let description = "Computes the number of inflowing neighbours to each cell in an input DEM based on the D8 algorithm.".to_string();
let mut parameters = vec![];
parameters.push(ToolParameter {
name: "Input DEM File".to_owned(),
flags: vec!["-i".to_owned(), "--dem".to_owned()],
description: "Input raster DEM file.".to_owned(),
parameter_type: ParameterType::ExistingFile(ParameterFileType::Raster),
default_value: None,
optional: false,
});
parameters.push(ToolParameter {
name: "Output File".to_owned(),
flags: vec!["-o".to_owned(), "--output".to_owned()],
description: "Output raster file.".to_owned(),
parameter_type: ParameterType::NewFile(ParameterFileType::Raster),
default_value: None,
optional: false,
});
let sep: String = path::MAIN_SEPARATOR.to_string();
let e = format!("{}", env::current_exe().unwrap().display());
let mut parent = env::current_exe().unwrap();
parent.pop();
let p = format!("{}", parent.display());
let mut short_exe = e
.replace(&p, "")
.replace(".exe", "")
.replace(".", "")
.replace(&sep, "");
if e.contains(".exe") {
short_exe += ".exe";
}
let usage = format!(
">>.*{0} -r={1} -v --wd=\"*path*to*data*\" -i=DEM.tif -o=output.tif",
short_exe, name
)
.replace("*", &sep);
NumInflowingNeighbours {
name: name,
description: description,
toolbox: toolbox,
parameters: parameters,
example_usage: usage,
}
}
}
impl WhiteboxTool for NumInflowingNeighbours {
fn get_source_file(&self) -> String {
String::from(file!())
}
fn get_tool_name(&self) -> String {
self.name.clone()
}
fn get_tool_description(&self) -> String {
self.description.clone()
}
fn get_tool_parameters(&self) -> String {
match serde_json::to_string(&self.parameters) {
Ok(json_str) => return format!("{{\"parameters\":{}}}", json_str),
Err(err) => return format!("{:?}", err),
}
}
fn get_example_usage(&self) -> String {
self.example_usage.clone()
}
fn get_toolbox(&self) -> String {
self.toolbox.clone()
}
fn run<'a>(
&self,
args: Vec<String>,
working_directory: &'a str,
verbose: bool,
) -> Result<(), Error> {
let mut input_file = String::new();
let mut output_file = String::new();
if args.len() == 0 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with no parameters.",
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
if vec[0].to_lowercase() == "-i"
|| vec[0].to_lowercase() == "--input"
|| vec[0].to_lowercase() == "--dem"
{
if keyval {
input_file = vec[1].to_string();
} else {
input_file = args[i + 1].to_string();
}
} else if vec[0].to_lowercase() == "-o" || vec[0].to_lowercase() == "--output" {
if keyval {
output_file = vec[1].to_string();
} else {
output_file = args[i + 1].to_string();
}
}
}
if verbose {
let tool_name = self.get_tool_name();
let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28);
// 28 = length of the 'Powered by' statement.
println!("{}", "*".repeat(welcome_len));
println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len()));
println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28));
println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23));
println!("{}", "*".repeat(welcome_len));
}
let sep: String = path::MAIN_SEPARATOR.to_string();
let mut progress: usize;
let mut old_progress: usize = 1;
if !input_file.contains(&sep) && !input_file.contains("/") {
input_file = format!("{}{}", working_directory, input_file);
}
if !output_file.contains(&sep) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
if verbose {
println!("Reading data...")
};
|
let input = Arc::new(Raster::new(&input_file, "r")?);
// calculate the flow direction
let start = Instant::now();
let rows = input.configs.rows as isize;
let columns = input.configs.columns as isize;
let nodata = input.configs.nodata;
let cell_size_x = input.configs.resolution_x;
let cell_size_y = input.configs.resolution_y;
let diag_cell_size = (cell_size_x * cell_size_x + cell_size_y * cell_size_y).sqrt();
let mut flow_dir: Array2D<i8> = Array2D::new(rows, columns, -1, -1)?;
let mut num_procs = num_cpus::get() as isize;
let configs = whitebox_common::configs::get_configs()?;
let max_procs = configs.max_procs;
if max_procs > 0 && max_procs < num_procs {
num_procs = max_procs;
}
let (tx, rx) = mpsc::channel();
for tid in 0..num_procs {
let input = input.clone();
let tx = tx.clone();
thread::spawn(move || {
let nodata = input.configs.nodata;
let d_x = [1, 1, 1, 0, -1, -1, -1, 0];
let d_y = [-1, 0, 1, 1, 1, 0, -1, -1];
let grid_lengths = [
diag_cell_size,
cell_size_x,
diag_cell_size,
cell_size_y,
diag_cell_size,
cell_size_x,
diag_cell_size,
cell_size_y,
];
let (mut z, mut z_n): (f64, f64);
let (mut max_slope, mut slope): (f64, f64);
let mut dir: i8;
let mut neighbouring_nodata: bool;
let mut interior_pit_found = false;
for row in (0..rows).filter(|r| r % num_procs == tid) {
let mut data: Vec<i8> = vec![-1i8; columns as usize];
for col in 0..columns {
z = input[(row, col)];
if z != nodata {
dir = 0i8;
max_slope = f64::MIN;
neighbouring_nodata = false;
for i in 0..8 {
z_n = input[(row + d_y[i], col + d_x[i])];
if z_n != nodata {
slope = (z - z_n) / grid_lengths[i];
if slope > max_slope && slope > 0f64 {
max_slope = slope;
dir = i as i8;
}
} else {
neighbouring_nodata = true;
}
}
if max_slope >= 0f64 {
data[col as usize] = dir;
} else {
data[col as usize] = -1i8;
if !neighbouring_nodata {
interior_pit_found = true;
}
}
} else {
data[col as usize] = -1i8;
}
}
tx.send((row, data, interior_pit_found)).unwrap();
}
});
}
let mut interior_pit_found = false;
for r in 0..rows {
let (row, data, pit) = rx.recv().expect("Error receiving data from thread.");
flow_dir.set_row_data(row, data); //(data.0, data.1);
if pit {
interior_pit_found = true;
}
if verbose {
progress = (100.0_f64 * r as f64 / (rows - 1) as f64) as usize;
if progress != old_progress {
println!("Flow directions: {}%", progress);
old_progress = progress;
}
}
}
// calculate the number of inflowing cells
let flow_dir = Arc::new(flow_dir);
// let mut num_inflowing: Array2D<i8> = Array2D::new(rows, columns, -1, -1)?;
let (tx, rx) = mpsc::channel();
for tid in 0..num_procs {
let input = input.clone();
let flow_dir = flow_dir.clone();
let tx = tx.clone();
thread::spawn(move || {
let d_x = [1, 1, 1, 0, -1, -1, -1, 0];
let d_y = [-1, 0, 1, 1, 1, 0, -1, -1];
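                // inflowing_vals[i] is the D8 code that a neighbour at offset
                // (d_x[i], d_y[i]) must carry for its flow to point back into
                // the current cell, i.e. the opposite direction (i + 4) % 8.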
let inflowing_vals: [i8; 8] = [4, 5, 6, 7, 0, 1, 2, 3];
let mut z: f64;
let mut count: f64;
for row in (0..rows).filter(|r| r % num_procs == tid) {
let mut data: Vec<f64> = vec![nodata; columns as usize];
for col in 0..columns {
z = input[(row, col)];
if z != nodata {
count = 0f64;
for i in 0..8 {
if flow_dir[(row + d_y[i], col + d_x[i])] == inflowing_vals[i] {
count += 1f64;
}
}
data[col as usize] = count;
}
}
tx.send((row, data)).unwrap();
}
});
}
let mut output = Raster::initialize_using_file(&output_file, &input);
for r in 0..rows {
let (row, data) = rx.recv().expect("Error receiving data from thread.");
output.set_row_data(row, data);
if verbose {
progress = (100.0_f64 * r as f64 / (rows - 1) as f64) as usize;
if progress != old_progress {
println!("Num. inflowing neighbours: {}%", progress);
old_progress = progress;
}
}
}
output.configs.palette = "grey.plt".to_string();
let elapsed_time = get_formatted_elapsed_time(start);
output.add_metadata_entry(format!(
"Created by whitebox_tools\' {} tool",
self.get_tool_name()
));
output.add_metadata_entry(format!("Input file: {}", input_file));
output.add_metadata_entry(format!("Elapsed Time (excluding I/O): {}", elapsed_time));
if verbose {
println!("Saving data...")
};
let _ = match output.write() {
Ok(_) => {
if verbose {
println!("Output file written")
}
}
Err(e) => return Err(e),
};
if verbose {
println!(
"{}",
&format!("Elapsed Time (excluding I/O): {}", elapsed_time)
);
}
if interior_pit_found {
println!("**********************************************************************************");
println!("WARNING: Interior pit cells were found within the input DEM. It is likely that the
DEM needs to be processed to remove topographic depressions and flats prior to
running this tool.");
println!("**********************************************************************************");
}
Ok(())
}
}
| |
StaticLinkedList.py
|
# static linked list
class StaticNode:
def __init__(self,value):
self.next = -1
self.value = value
class StaticLinkedList:
def __init__(self,numNodes):
|
    def push(self, node):
        if self.head == -2:
            self.head = 0
            self.array[0] = node
        else:
            curr = self.head
            while self.array[curr].next != -1:
                curr = self.array[curr].next
            self.array[curr].next = curr + 1
            self.array[curr + 1] = node
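# Usage sketch (hypothetical): nodes occupy contiguous slots from index 0.
# sll = StaticLinkedList(4)
# sll.push(StaticNode(10))
# sll.push(StaticNode(20))  # stored at index 1, linked from index 0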
|
        self.head = -2
        self.array = [StaticNode(-1) for _ in range(numNodes)]
|
globalState.js
|
import React, { Component } from 'react';
import api from '../utils/api';
import UserContext from './user-context';
class
|
extends Component {
state = {
user: {}
};
async componentDidMount() {
await this.getUser();
this.timer = setInterval(() => this.getUser(), 2 * 60 * 1000);
}
getUser = async () => {
const user = await api.get('user');
this.setState({ user });
return user;
};
render() {
return (
<UserContext.Provider value={{
user: this.state.user,
getUser: this.getUser
}}>
{this.props.children}
</UserContext.Provider>
);
}
}
export default GlobalState;
|
GlobalState
|
black_and_white.py
|
import argparse
import subprocess
from os import listdir, makedirs
from os.path import isfile, join, exists
import multiprocessing
parser = argparse.ArgumentParser(description='Generate black and white renditions')
parser.add_argument('-i', "--input", action='store', help='Folder where the renditions are', type=str,
required=True)
parser.add_argument('-o', "--output", action='store', help='Folder where the black and white renditions will be',
type=str, required=True)
parser.add_argument('-r', "--reprocess", action='store', help='Input file with files to reprocess', type=str,
required=False)
args = parser.parse_args()
reprocess = False
file_to_reprocess = None
if args.reprocess is not None:
reprocess = True
file_to_reprocess = args.reprocess
input_path = args.input
output_path = args.output
output_folders = {
'1080p': '1080p_black_and_white',
'720p': '720p_black_and_white',
'480p': '480p_black_and_white',
'360p': '360p_black_and_white',
'240p': '240p_black_and_white',
'144p': '144p_black_and_white',
}
cpu_count = multiprocessing.cpu_count()
cpu_to_use = 1 if reprocess else int(round(cpu_count / len(output_folders)))
input_folders = [
'1080p',
'720p',
'480p',
'360p',
'240p',
'144p',
]
def create_folders():
for key, value in output_folders.items():
folder = output_path + '/' + value
if not exists(folder):
makedirs(folder)
def get_files_from_file(input_path, reprocess_file):
file_list = []
with open(reprocess_file) as file_reprocess:
for file_name in file_reprocess:
full_file = join(input_path, file_name.strip())
if isfile(full_file):
file_list.append(file_name.strip())
else:
print('File not found {}'.format(full_file))
print('{} files to reprocess in {}'.format(len(file_list), input_path))
return file_list
def get_input_output_jobs():
jobs = []
for folder in input_folders:
input_folder = join(input_path, folder)
output_folder = join(output_path, output_folders[folder])
if reprocess:
files = get_files_from_file(input_folder, file_to_reprocess)
else:
files = [f for f in listdir(input_folder) if isfile(join(input_folder, f)) and not f.startswith('.')]
for file in files:
full_input_file = join(input_folder, file)
full_output_file = join(output_folder, file)
jobs.append((full_input_file, full_output_file))
return jobs
def format_command(full_input_file, full_output_file):
print('processing {} {}'.format(full_input_file, full_output_file))
command = ['ffmpeg', '-y', '-i', '"' + full_input_file + '"', '-vf', 'hue=s=0', '-c:a',
'copy', '"' + full_output_file + '"'
]
return command
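# For example, format_command('in/1080p/a.mp4', 'out/1080p_black_and_white/a.mp4')
# (hypothetical paths) builds a command equivalent to:
#   ffmpeg -y -i "in/1080p/a.mp4" -vf hue=s=0 -c:a copy "out/1080p_black_and_white/a.mp4"
# where hue=s=0 zeroes the saturation, producing the black and white rendition.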
def worker(full_input_file, full_output_file):
|
if __name__=="__main__":
    create_folders()
jobs = get_input_output_jobs()
with multiprocessing.Pool(cpu_to_use) as pool:
pool.starmap(worker, jobs)
|
try:
ffmpeg_command = format_command(full_input_file, full_output_file)
ffmpeg = subprocess.Popen(' '.join(ffmpeg_command), stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
out, err = ffmpeg.communicate()
except Exception as e:
print(full_input_file, full_output_file)
print(e)
|
resourceShadow_test.go
|
package service_test
import (
|
"github.com/panjf2000/ants/v2"
"github.com/plgd-dev/hub/v2/grpc-gateway/pb"
"github.com/plgd-dev/hub/v2/pkg/log"
kitNetGrpc "github.com/plgd-dev/hub/v2/pkg/net/grpc"
"github.com/plgd-dev/hub/v2/resource-aggregate/commands"
"github.com/plgd-dev/hub/v2/resource-aggregate/cqrs/eventbus/nats/subscriber"
natsTest "github.com/plgd-dev/hub/v2/resource-aggregate/cqrs/eventbus/nats/test"
"github.com/plgd-dev/hub/v2/resource-aggregate/cqrs/utils"
"github.com/plgd-dev/hub/v2/resource-aggregate/events"
"github.com/plgd-dev/hub/v2/resource-directory/service"
"github.com/plgd-dev/hub/v2/test"
"github.com/plgd-dev/hub/v2/test/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
)
func TestResourceShadowGetResources(t *testing.T) {
type args struct {
req *pb.GetResourcesRequest
}
tests := []struct {
name string
args args
want map[string]*pb.Resource
}{
{
name: "list unauthorized device",
args: args{
req: &pb.GetResourcesRequest{
DeviceIdFilter: []string{Resource0.DeviceId},
},
},
},
{
name: "filter by resource Id",
args: args{
req: &pb.GetResourcesRequest{
ResourceIdFilter: []string{
Resource1.ToResourceIDString(),
Resource2.ToResourceIDString(),
},
},
},
want: map[string]*pb.Resource{
Resource1.Href: {
Data: &events.ResourceChanged{
ResourceId: &commands.ResourceId{
DeviceId: Resource1.DeviceId,
Href: Resource1.Href,
},
Content: Resource1.Content,
},
Types: Resource1.ResourceTypes,
},
Resource2.Href: {
Data: &events.ResourceChanged{
ResourceId: &commands.ResourceId{
DeviceId: Resource2.DeviceId,
Href: Resource2.Href,
},
Content: Resource2.Content,
},
Types: Resource2.ResourceTypes,
},
},
},
{
name: "filter by device Id",
args: args{
req: &pb.GetResourcesRequest{
DeviceIdFilter: []string{Resource1.DeviceId},
},
},
want: map[string]*pb.Resource{
Resource1.Href: {
Data: &events.ResourceChanged{
ResourceId: &commands.ResourceId{
DeviceId: Resource1.DeviceId,
Href: Resource1.Href,
},
Content: Resource1.Content,
},
Types: Resource1.ResourceTypes,
},
Resource3.Href: {
Data: &events.ResourceChanged{
ResourceId: &commands.ResourceId{
DeviceId: Resource3.DeviceId,
Href: Resource3.Href,
},
Content: Resource3.Content,
},
Types: Resource3.ResourceTypes,
},
},
},
{
name: "filter by type",
args: args{
req: &pb.GetResourcesRequest{
TypeFilter: []string{Resource2.ResourceTypes[0]},
},
},
want: map[string]*pb.Resource{
Resource1.Href: {
Data: &events.ResourceChanged{
ResourceId: &commands.ResourceId{
DeviceId: Resource1.DeviceId,
Href: Resource1.Href,
},
Content: Resource1.Content,
},
Types: Resource1.ResourceTypes,
},
Resource2.Href: {
Data: &events.ResourceChanged{
ResourceId: &commands.ResourceId{
DeviceId: Resource2.DeviceId,
Href: Resource2.Href,
},
Content: Resource2.Content,
},
Types: Resource2.ResourceTypes,
},
},
},
{
name: "filter by device ID and type",
args: args{
req: &pb.GetResourcesRequest{
DeviceIdFilter: []string{Resource1.DeviceId},
TypeFilter: []string{Resource1.ResourceTypes[0]},
},
},
want: map[string]*pb.Resource{
Resource1.Href: {
Data: &events.ResourceChanged{
ResourceId: &commands.ResourceId{
DeviceId: Resource1.DeviceId,
Href: Resource1.Href,
},
Content: Resource1.Content,
},
Types: Resource1.ResourceTypes,
},
},
},
{
name: "list all resources of user",
args: args{
req: &pb.GetResourcesRequest{},
},
want: map[string]*pb.Resource{
Resource1.Href: {
Data: &events.ResourceChanged{
ResourceId: &commands.ResourceId{
DeviceId: Resource1.DeviceId,
Href: Resource1.Href,
},
Content: Resource1.Content,
},
Types: Resource1.ResourceTypes,
},
Resource2.Href: {
Data: &events.ResourceChanged{
ResourceId: &commands.ResourceId{
DeviceId: Resource2.DeviceId,
Href: Resource2.Href,
},
Content: Resource2.Content,
},
Types: Resource2.ResourceTypes,
},
Resource3.Href: {
Data: &events.ResourceChanged{
ResourceId: &commands.ResourceId{
DeviceId: Resource3.DeviceId,
Href: Resource3.Href,
},
Content: Resource3.Content,
},
Types: Resource3.ResourceTypes,
},
},
},
}
logger := log.NewLogger(log.MakeDefaultConfig())
pool, err := ants.NewPool(1)
require.NoError(t, err)
naClient, resourceSubscriber, err := natsTest.NewClientAndSubscriber(config.MakeSubscriberConfig(),
logger,
subscriber.WithGoPool(pool.Submit),
subscriber.WithUnmarshaler(utils.Unmarshal),
)
require.NoError(t, err)
defer func() {
resourceSubscriber.Close()
naClient.Close()
}()
ctx := kitNetGrpc.CtxWithIncomingToken(context.Background(), "b")
mf := service.NewEventStoreModelFactory()
resourceProjection, err := service.NewProjection(ctx, "test", testCreateEventstore(), resourceSubscriber, mf, time.Second)
require.NoError(t, err)
rd := service.NewResourceShadow(resourceProjection, []string{ /*Resource0.DeviceId,*/ Resource1.DeviceId, Resource2.DeviceId})
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fmt.Println(tt.name)
var s testGrpcGateway_GetResourcesServer
err := rd.GetResources(tt.args.req, &s)
assert.NoError(t, err)
test.CheckProtobufs(t, tt.want, s.got, test.AssertToCheckFunc(assert.Equal))
})
}
}
type testGrpcGateway_GetResourcesServer struct {
got map[string]*pb.Resource
grpc.ServerStream
}
func (s *testGrpcGateway_GetResourcesServer) Context() context.Context {
return context.Background()
}
func (s *testGrpcGateway_GetResourcesServer) Send(d *pb.Resource) error {
if s.got == nil {
s.got = make(map[string]*pb.Resource)
}
d.Data.AuditContext = nil
d.Data.EventMetadata = nil
s.got[d.GetData().GetResourceId().GetHref()] = d
return nil
}
|
"context"
"fmt"
"testing"
"time"
|
panel.js
|
// @flow
import React from 'react';
import PlayerInfo from './player-info';
|
const GamePanel = () => (
<div className={styles.container}>
<PlayerInfo/>
</div>
);
export default GamePanel;
|
import styles from './panel.scss';
|
root.go
|
package cmd
import (
"fmt"
"github.com/keptn/keptn/cli/pkg/logging"
"github.com/keptn/keptn/cli/pkg/version"
homedir "github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"os"
)
var cfgFile string
var verboseLogging bool
var quietLogging bool
var mocking bool
var insecureSkipTLSVerify bool
var kubectlOptions string
var namespace string
const authErrorMsg = "This command requires you to be authenticated. See \"keptn auth\" for details"
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
Use: "keptn",
Short: "The CLI for using Keptn",
Long: `The CLI allows interaction with a Keptn installation to manage Keptn, to trigger workflows, and to get details.
`,
// Uncomment the following line if your bare application
// has an action associated with it:
// Run: func(cmd *cobra.Command, args []string) {},
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
// Set LogLevel to QuietLevel
currentLogLevel := logging.LogLevel
logging.LogLevel = logging.QuietLevel
isHelp := false
for _, n := range os.Args {
if n == "-h" || n == "--help" {
isHelp = true
}
}
if !isHelp {
runVersionCheck()
}
// Set LogLevel back to previous state
logging.LogLevel = currentLogLevel
if err := rootCmd.Execute(); err != nil {
os.Exit(1)
}
}
func init()
|
// initConfig reads in config file and ENV variables if set.
func initConfig() {
logging.LogLevel = logging.InfoLevel
if verboseLogging && quietLogging {
fmt.Println("Verbose logging and quiet output are mutually exclusive flags. Please use only one.")
os.Exit(1)
}
if verboseLogging {
logging.LogLevel = logging.VerboseLevel
}
if quietLogging {
logging.LogLevel = logging.QuietLevel
}
if cfgFile != "" {
// Use config file from the flag.
viper.SetConfigFile(cfgFile)
} else {
// Find home directory.
home, err := homedir.Dir()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
// Search config in home directory with name ".cli" (without extension).
viper.AddConfigPath(home)
viper.SetConfigName(".cli")
}
viper.AutomaticEnv() // read in environment variables that match
// If a config file is found, read it in.
if err := viper.ReadInConfig(); err == nil {
logging.PrintLog(fmt.Sprintf("Using config file: %s", viper.ConfigFileUsed()), logging.InfoLevel)
}
}
type options []string
func (s *options) appendIfNotEmpty(newOption string) {
if newOption != "" {
*s = append(*s, newOption)
}
}
func runVersionCheck() {
var cliMsgPrinted, cliChecked, keptnMsgPrinted, keptnChecked bool
vChecker := version.NewVersionChecker()
cliChecked, cliMsgPrinted = vChecker.CheckCLIVersion(Version, true)
if cliMsgPrinted {
fmt.Println("* Your Keptn CLI version: " + Version)
}
clusterVersion, err := getKeptnServerVersion()
if err != nil {
fmt.Fprintf(os.Stderr, "* Warning: could not check Keptn server version: %s\n", err.Error())
} else {
kvChecker := version.NewKeptnVersionChecker()
keptnChecked, keptnMsgPrinted = kvChecker.CheckKeptnVersion(Version, clusterVersion, true)
if keptnMsgPrinted {
fmt.Fprintf(os.Stderr, "* Your Keptn cluster version: %s\n", clusterVersion)
}
if clusterVersion != Version {
fmt.Fprintf(os.Stderr, "* Warning: Your Keptn CLI version (%s) and Keptn cluster version (%s) don't match. This can lead to problems. Please make sure to use the same versions.\n", Version, clusterVersion)
}
}
if cliMsgPrinted || keptnMsgPrinted {
fmt.Fprintf(os.Stderr, setVersionCheckMsg, "disable", "false")
}
if cliChecked || keptnChecked {
updateLastVersionCheck()
}
}
|
{
rootCmd.PersistentFlags().BoolVarP(&verboseLogging, "verbose", "v", false, "Enables verbose logging to print debug messages")
rootCmd.PersistentFlags().BoolVarP(&quietLogging, "quiet", "q", false, "Suppresses debug and info messages")
rootCmd.PersistentFlags().BoolVarP(&mocking, "mock", "", false, "Disables communication to a Keptn endpoint")
rootCmd.PersistentFlags().StringVarP(&namespace, "namespace", "n", "keptn",
"Specify the namespace where Keptn should be installed, used and uninstalled in (default keptn).")
cobra.OnInitialize(initConfig)
}
|
lib.rs
|
#![allow(unknown_lints)]
#![deny(unused_variables)]
#![deny(unused_mut)]
#![deny(clippy)]
#![deny(clippy_pedantic)]
#![allow(stutter)]
#![recursion_limit = "128"]
//!
//! Neon-serde
//! ==========
//!
//! This crate is a utility to easily convert values between
//!
//! A `Handle<JsValue>` from the `neon` crate
//! and any value implementing `serde::{Serialize, Deserialize}`
//!
//! ## Usage
//!
//! #### `neon_serde::from_value`
//! Convert a `Handle<js::JsValue>` to
//! a type implementing `serde::Deserialize`
//!
//! #### `neon_serde::to_value`
//! Convert a value implementing `serde::Serialize` to
//! a `Handle<JsValue>`
//!
//!
//! ## Example
//!
//! ```rust,no_run
//! # #![allow(dead_code)]
//! extern crate neon_serde;
//! extern crate neon;
//! #[macro_use]
//! extern crate serde_derive;
//!
//! use neon::prelude::*;
//!
//! #[derive(Serialize, Debug, Deserialize)]
//! struct AnObject {
//! a: u32,
//! b: Vec<f64>,
//! c: String,
//! }
//!
//! fn deserialize_something(mut cx: FunctionContext) -> JsResult<JsValue> {
//! let arg0 = cx.argument::<JsValue>(0)?;
//!
//! let arg0_value :AnObject = neon_serde::from_value(&mut cx, arg0)
//! .or_else(|e| cx.throw_error(e.to_string()))
//! .unwrap();
//! println!("{:?}", arg0_value);
//!
//! Ok(JsUndefined::new(&mut cx).upcast())
//! }
//!
//! fn serialize_something(mut cx: FunctionContext) -> JsResult<JsValue> {
//! let value = AnObject {
//! a: 1,
//! b: vec![2f64, 3f64, 4f64],
//! c: "a string".into()
//! };
//!
//! let js_value = neon_serde::to_value(&mut cx, &value)
//! .or_else(|e| cx.throw_error(e.to_string()))
//! .unwrap();
//! Ok(js_value)
//! }
//!
//! # fn main () {
//! # }
//!
//! ```
//!
#[macro_use]
extern crate error_chain;
extern crate neon;
extern crate num;
#[macro_use]
extern crate serde;
pub mod de;
pub mod errors;
pub mod ser;
mod macros;
pub use de::from_value;
pub use de::from_value_opt;
pub use ser::to_value;
#[cfg(test)]
mod tests {
use super::*;
use neon::prelude::*;
#[test]
fn test_it_compiles() {
fn check<'j>(mut cx: FunctionContext<'j>) -> JsResult<'j, JsValue> {
let result: () = {
let arg: Handle<'j, JsValue> = cx.argument::<JsValue>(0)?;
let () = from_value(&mut cx, arg)
.or_else(|e| cx.throw_error(e.to_string()))
.unwrap();
()
};
let result: Handle<'j, JsValue> = to_value(&mut cx, &result)
.or_else(|e| cx.throw_error(e.to_string()))
.unwrap();
Ok(result)
}
let _ = check;
}
#[test]
fn test_it_compiles_2() {
fn check<'j>(mut cx: FunctionContext<'j>) -> JsResult<'j, JsValue>
|
let _ = check;
}
}
|
{
let result: () = {
let arg: Option<Handle<'j, JsValue>> = cx.argument_opt(0);
let () = from_value_opt(&mut cx, arg)
.or_else(|e| cx.throw_error(e.to_string()))
.unwrap();
};
let result: Handle<'j, JsValue> = to_value(&mut cx, &result)
.or_else(|e| cx.throw_error(e.to_string()))
.unwrap();
Ok(result)
}
|
test_analytics_uve.py
|
#!/usr/bin/env python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# analytics_uvetest.py
#
# UVE and Alarm tests
#
import os
import sys
import threading
threading._DummyThread._Thread__stop = lambda x: 42
import signal
import gevent
from gevent import monkey
monkey.patch_all()
import unittest
import testtools
import fixtures
import socket
from utils.util import obj_to_dict, find_buildroot
from utils.analytics_fixture import AnalyticsFixture
from utils.generator_fixture import GeneratorFixture
from mockredis import mockredis
from mockzoo import mockzoo
import logging
import time
from opserver.sandesh.viz.constants import *
from opserver.sandesh.viz.constants import _OBJECT_TABLES
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames
import platform
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
builddir = find_buildroot(os.getcwd())
class AnalyticsUveTest(testtools.TestCase, fixtures.TestWithFixtures):
@classmethod
def setUpClass(cls):
if (os.getenv('LD_LIBRARY_PATH', '').find('build/lib') < 0):
if (os.getenv('DYLD_LIBRARY_PATH', '').find('build/lib') < 0):
assert(False)
cls.redis_port = AnalyticsUveTest.get_free_port()
mockredis.start_redis(cls.redis_port)
@classmethod
def tearDownClass(cls):
mockredis.stop_redis(cls.redis_port)
#@unittest.skip('Skipping non-cassandra test with vizd')
def test_00_nocassandra(self):
'''
This test starts redis,vizd,opserver and qed
Then it checks that the collector UVE (via redis)
can be accessed from opserver.
'''
logging.info("%%% test_00_nocassandra %%%")
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, self.__class__.redis_port, 0))
assert vizd_obj.verify_on_setup()
return True
# end test_00_nocassandra
#@unittest.skip('Skipping VM UVE test')
def test_01_vm_uve(self):
'''
This test starts redis, vizd, opserver, qed, and a python generator
that simulates vrouter and sends UveVirtualMachineAgentTrace messages.
Then it checks that the VM UVE (via redis) can be accessed from
opserver.
'''
logging.info("%%% test_01_vm_uve %%%")
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, self.__class__.redis_port, 0))
assert vizd_obj.verify_on_setup()
collectors = [vizd_obj.get_collector()]
generator_obj = self.useFixture(
GeneratorFixture("contrail-vrouter-agent", collectors,
logging, vizd_obj.get_opserver_port()))
assert generator_obj.verify_on_setup()
generator_obj.send_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
assert generator_obj.verify_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
# Delete the VM UVE and verify that the deleted flag is set
# in the UVE cache
generator_obj.delete_vm_uve('abcd')
assert generator_obj.verify_vm_uve_cache(vm_id='abcd', delete=True)
# Add the VM UVE with the same vm_id and verify that the deleted flag
# is cleared in the UVE cache
generator_obj.send_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
assert generator_obj.verify_vm_uve_cache(vm_id='abcd')
assert generator_obj.verify_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
# Generate VM with vm_id containing XML control character
generator_obj.send_vm_uve(vm_id='<abcd&>', num_vm_ifs=2, msg_count=2)
assert generator_obj.verify_vm_uve(vm_id='<abcd&>', num_vm_ifs=2,
msg_count=2)
return True
# end test_01_vm_uve
#@unittest.skip('Skipping VM UVE test')
def test_02_vm_uve_with_password(self):
'''
This test starts redis, vizd, opserver, qed, and a python generator
that simulates vrouter and sends UveVirtualMachineAgentTrace messages.
Then it checks that the VM UVE (via redis) can be accessed from
opserver.
'''
logging.info("%%% test_02_vm_uve_with_password %%%")
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1, 0,
redis_password='contrail'))
assert vizd_obj.verify_on_setup()
collectors = [vizd_obj.get_collector()]
generator_obj = self.useFixture(
GeneratorFixture("contrail-vrouter-agent", collectors,
logging, vizd_obj.get_opserver_port()))
assert generator_obj.verify_on_setup()
generator_obj.send_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
assert generator_obj.verify_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
return True
# end test_02_vm_uve_with_password
#@unittest.skip('verify redis-uve restart')
def test_03_redis_uve_restart(self):
logging.info('%%% test_03_redis_uve_restart %%%')
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1, 0,
start_kafka = True))
assert vizd_obj.verify_on_setup()
collectors = [vizd_obj.get_collector()]
alarm_gen1 = self.useFixture(
GeneratorFixture('vrouter-agent', collectors, logging,
None, hostname=socket.gethostname()))
alarm_gen1.verify_on_setup()
# send vrouter UVE without build_info !!!
# check for PartialSysinfo alarm
alarm_gen1.send_vrouterinfo("myvrouter1")
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
self.verify_uve_resync(vizd_obj)
# Alarm should return after redis restart
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
# should there be a return True here?
# end test_03_redis_uve_restart
#@unittest.skip('verify redis-uve restart')
def test_04_redis_uve_restart_with_password(self):
        logging.info('%%% test_04_redis_uve_restart_with_password %%%')
vizd_obj = self.useFixture(
AnalyticsFixture(logging,
builddir, -1, 0,
redis_password='contrail'))
self.verify_uve_resync(vizd_obj)
return True
    # end test_04_redis_uve_restart_with_password
def verify_uve_resync(self, vizd_obj):
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_collector_redis_uve_connection(
vizd_obj.collectors[0])
assert vizd_obj.verify_opserver_redis_uve_connection(
vizd_obj.opserver)
# verify redis-uve list
host = socket.gethostname()
gen_list = [host+':Analytics:contrail-collector:0',
host+':Analytics:contrail-query-engine:0',
host+':Analytics:contrail-analytics-api:0']
assert vizd_obj.verify_generator_uve_list(gen_list)
# stop redis-uve
vizd_obj.redis_uves[0].stop()
assert vizd_obj.verify_collector_redis_uve_connection(
vizd_obj.collectors[0], False)
assert vizd_obj.verify_opserver_redis_uve_connection(
vizd_obj.opserver, False)
# start redis-uve and verify that contrail-collector and Opserver are
# connected to the redis-uve
vizd_obj.redis_uves[0].start()
assert vizd_obj.verify_collector_redis_uve_connection(
vizd_obj.collectors[0])
assert vizd_obj.verify_opserver_redis_uve_connection(
vizd_obj.opserver)
# verify that UVEs are resynced with redis-uve
assert vizd_obj.verify_generator_uve_list(gen_list)
#@unittest.skip('Skipping contrail-collector HA test')
def test_05_collector_ha(self):
logging.info('%%% test_05_collector_ha %%%')
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1, 0,
collector_ha_test=True))
assert vizd_obj.verify_on_setup()
# OpServer, AlarmGen and QE are started with collectors[0] as
# primary and collectors[1] as secondary
exp_genlist = ['contrail-collector', 'contrail-analytics-api',
'contrail-query-engine']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
exp_genlist)
# start the contrail-vrouter-agent with collectors[1] as primary and
# collectors[0] as secondary
collectors = [vizd_obj.collectors[1].get_addr(),
vizd_obj.collectors[0].get_addr()]
vr_agent = self.useFixture(
GeneratorFixture("contrail-vrouter-agent", collectors,
logging, vizd_obj.get_opserver_port()))
assert vr_agent.verify_on_setup()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
exp_genlist)
# stop collectors[0] and verify that OpServer, AlarmGen and QE switch
# from primary to secondary collector
vizd_obj.collectors[0].stop()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
'contrail-analytics-api',
'contrail-query-engine']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
exp_genlist)
# start collectors[0]
vizd_obj.collectors[0].start()
exp_genlist = ['contrail-collector']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
exp_genlist)
# verify that the old UVEs are flushed from redis when collector restarts
exp_genlist = [vizd_obj.collectors[0].get_generator_id()]
assert vizd_obj.verify_generator_list_in_redis(\
vizd_obj.collectors[0].get_redis_uve(),
exp_genlist)
# stop collectors[1] and verify that OpServer, AlarmGen and QE switch
# from secondary to primary and contrail-vrouter-agent from primary to
# secondary
vizd_obj.collectors[1].stop()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
'contrail-analytics-api',
'contrail-query-engine']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
exp_genlist)
# verify the generator list in redis
exp_genlist = [vizd_obj.collectors[0].get_generator_id(),
vr_agent.get_generator_id(),
vizd_obj.opserver.get_generator_id(),
vizd_obj.query_engine.get_generator_id()]
assert vizd_obj.verify_generator_list_in_redis(\
vizd_obj.collectors[0].get_redis_uve(),
exp_genlist)
# stop QE
vizd_obj.query_engine.stop()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
'contrail-analytics-api']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
exp_genlist)
# verify the generator list in redis
exp_genlist = [vizd_obj.collectors[0].get_generator_id(),
vizd_obj.opserver.get_generator_id(),
vr_agent.get_generator_id()]
assert vizd_obj.verify_generator_list_in_redis(\
vizd_obj.collectors[0].get_redis_uve(),
exp_genlist)
# start a python generator and QE with collectors[1] as the primary and
# collectors[0] as the secondary. On generator startup, verify
# that they connect to the secondary collector if the
# connection to the primary fails
vr2_collectors = [vizd_obj.collectors[1].get_addr(),
vizd_obj.collectors[0].get_addr()]
vr2_agent = self.useFixture(
GeneratorFixture("contrail-snmp-collector", collectors,
logging, vizd_obj.get_opserver_port()))
assert vr2_agent.verify_on_setup()
vizd_obj.query_engine.set_primary_collector(
vizd_obj.collectors[1].get_addr())
vizd_obj.query_engine.set_secondary_collector(
vizd_obj.collectors[0].get_addr())
vizd_obj.query_engine.start()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
'contrail-analytics-api', 'contrail-snmp-collector',
'contrail-query-engine']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
exp_genlist)
# stop the collectors[0] - both collectors[0] and collectors[1] are down
# send the VM UVE and verify that the VM UVE is synced after connection
# to the collector
vizd_obj.collectors[0].stop()
# Make sure the connection to the collector is torn down before
# sending the VM UVE
while True:
if vr_agent.verify_on_setup() is False:
break
vr_agent.send_vm_uve(vm_id='abcd-1234-efgh-5678',
num_vm_ifs=5, msg_count=5)
vizd_obj.collectors[1].start()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
'contrail-analytics-api', 'contrail-snmp-collector',
'contrail-query-engine']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
exp_genlist)
assert vr_agent.verify_vm_uve(vm_id='abcd-1234-efgh-5678',
num_vm_ifs=5, msg_count=5)
# end test_05_collector_ha
#@unittest.skip('Skipping AlarmGen basic test')
def test_06_alarmgen_basic(self):
'''
This test starts the analytics processes.
It enables partition 0 on alarmgen, and confirms
that it got enabled
'''
logging.info("%%% test_06_alarmgen_basic %%%")
if AnalyticsUveTest._check_skip_kafka() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, self.__class__.redis_port, 0,
start_kafka = True))
assert vizd_obj.verify_on_setup()
assert(vizd_obj.verify_uvetable_alarm("ObjectCollectorInfo",
"ObjectCollectorInfo:" + socket.gethostname(), "process-status"))
# setup generator for sending Vrouter build_info
collector = vizd_obj.collectors[0].get_addr()
alarm_gen1 = self.useFixture(
GeneratorFixture('vrouter-agent', [collector], logging,
None, hostname=socket.gethostname()))
alarm_gen1.verify_on_setup()
# send vrouter UVE without build_info !!!
# check for PartialSysinfo alarm
alarm_gen1.send_vrouterinfo("myvrouter1")
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute",
rules=[{"and_list": [{
"condition": {
"operation": "==",
"operand1": "ObjectVRouter.build_info",
"operand2": {
"json_value": "null"
}
},
"match": [{"json_operand1_value": "null"}]
}]}]
))
# Now try to clear the alarm by sending build_info
alarm_gen1.send_vrouterinfo("myvrouter1", b_info = True)
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute", is_set = False))
# send vrouter UVE without build_info !!!
# check for PartialSysinfo alarm
alarm_gen1.send_vrouterinfo("myvrouter1", deleted = True)
alarm_gen1.send_vrouterinfo("myvrouter1")
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
# Now try to clear the alarm by deleting the UVE
alarm_gen1.send_vrouterinfo("myvrouter1", deleted = True)
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute", is_set = False))
alarm_gen2 = self.useFixture(
GeneratorFixture('vrouter-agent', [collector], logging,
None, hostname=socket.gethostname(), inst = "1"))
alarm_gen2.verify_on_setup()
# send vrouter UVE without build_info !!!
# check for PartialSysinfo alarm
alarm_gen2.send_vrouterinfo("myvrouter2")
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter2", "partial-sysinfo-compute"))
# Now try to clear the alarm by disconnecting the generator
alarm_gen2._sandesh_instance._client._connection.set_admin_state(\
down=True)
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter2", "partial-sysinfo-compute", is_set = False))
# send vrouter UVE of myvrouter without build_info again !!!
# check for PartialSysinfo alarm
alarm_gen1.send_vrouterinfo("myvrouter1")
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
# Verify that we can give up partition ownership
assert(vizd_obj.set_alarmgen_partition(0,0) == 'true')
assert(vizd_obj.verify_alarmgen_partition(0,'false'))
# Give up the other partitions
assert(vizd_obj.set_alarmgen_partition(1,0) == 'true')
assert(vizd_obj.set_alarmgen_partition(2,0) == 'true')
assert(vizd_obj.set_alarmgen_partition(3,0) == 'true')
# Confirm that alarms are all gone
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
None, None))
# Get the partitions again
assert(vizd_obj.set_alarmgen_partition(0,1) == 'true')
assert(vizd_obj.set_alarmgen_partition(1,1) == 'true')
assert(vizd_obj.set_alarmgen_partition(2,1) == 'true')
assert(vizd_obj.set_alarmgen_partition(3,1) == 'true')
assert(vizd_obj.verify_alarmgen_partition(0,'true'))
# The PartialSysinfo alarm on myvrouter1 should return
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
return True
# end test_06_alarmgen_basic
#@unittest.skip('Skipping Alarm test')
def test_07_alarm(self):
|
# end test_07_alarm
#@unittest.skip('Skipping UVE/Alarm Filter test')
def test_08_uve_alarm_filter(self):
'''
This test verifies the filter options kfilt, sfilt, mfilt and cfilt
in the UVE/Alarm GET and POST methods.
'''
logging.info('%%% test_08_uve_alarm_filter %%%')
if AnalyticsUveTest._check_skip_kafka() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1, 0,
collector_ha_test=True, start_kafka = True))
assert vizd_obj.verify_on_setup()
collectors = [vizd_obj.collectors[0].get_addr(),
vizd_obj.collectors[1].get_addr()]
api_server_name = socket.gethostname()+'_1'
api_server = self.useFixture(
GeneratorFixture('contrail-api', [collectors[0]], logging,
None, node_type='Config',
hostname=api_server_name))
vr_agent_name = socket.gethostname()+'_2'
vr_agent = self.useFixture(
GeneratorFixture('contrail-vrouter-agent', [collectors[1]],
logging, None, node_type='Compute',
hostname=vr_agent_name))
alarm_gen1_name = socket.gethostname()+'_1'
alarm_gen1 = self.useFixture(
GeneratorFixture('contrail-alarm-gen', [collectors[0]], logging,
None, node_type='Analytics',
hostname=alarm_gen1_name))
alarm_gen2_name = socket.gethostname()+'_3'
alarm_gen2 = self.useFixture(
GeneratorFixture('contrail-alarm-gen', [collectors[1]], logging,
None, node_type='Analytics',
hostname=alarm_gen2_name))
api_server.verify_on_setup()
vr_agent.verify_on_setup()
alarm_gen1.verify_on_setup()
alarm_gen2.verify_on_setup()
vn_list = ['default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&']
# generate UVEs for the filter test
api_server.send_vn_config_uve(name=vn_list[0],
partial_conn_nw=[vn_list[1]],
num_acl_rules=2)
api_server.send_vn_config_uve(name=vn_list[1],
num_acl_rules=3)
vr_agent.send_vn_agent_uve(name=vn_list[1], num_acl_rules=3,
ipkts=2, ibytes=1024)
vr_agent.send_vn_agent_uve(name=vn_list[2], ipkts=4, ibytes=128)
vr_agent.send_vn_agent_uve(name=vn_list[3], ipkts=8, ibytes=256)
# generate Alarms for the filter test
alarms = alarm_gen1.create_alarm('InPktsThreshold')
alarms += alarm_gen1.create_alarm('InBytesThreshold', ack=True)
alarm_gen1.send_alarm(vn_list[1], alarms, VN_TABLE)
alarms = alarm_gen2.create_alarm('ConfigNotPresent', ack=False)
alarm_gen2.send_alarm(vn_list[2], alarms, VN_TABLE)
alarms = alarm_gen2.create_alarm('ConfigNotPresent', ack=False)
alarm_gen2.send_alarm(vn_list[3], alarms, VN_TABLE)
filt_test = [
# no filter
{
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'get_alarms': {
'virtual-network': [
{ 'name' : 'default-domain:project1:vn2',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
} }
},
{ 'name' : 'default-domain:project2:vn1',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
{ 'name' : 'default-domain:project2:vn1&',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
]
},
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
# kfilt
{
'kfilt': ['*'],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': ['default-domain:project1:*',
'default-domain:project2:*'],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': ['default-domain:project1:vn1',
'default-domain:project2:*'],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': [
'default-domain:project2:*',
'invalid-vn:*'
],
'get_alarms': {
'virtual-network': [
{ 'name' : 'default-domain:project2:vn1',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
{ 'name' : 'default-domain:project2:vn1&',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
]
},
'uve_list_get': [
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': [
'default-domain:project1:vn2',
'default-domain:project2:vn1&',
'invalid-vn'
],
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': ['invalid-vn'],
'uve_list_get': [],
'uve_get_post': {'value': []},
},
# sfilt
{
'sfilt': socket.gethostname()+'_1',
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
}
]
},
},
{
'sfilt': socket.gethostname()+'_3',
'uve_list_get': [
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'sfilt': 'invalid_source',
'uve_list_get': [],
'uve_get_post': {'value': []},
},
# mfilt
{
'mfilt': 'Config:contrail-api:0',
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
}
}
}
]
},
},
{
'mfilt': 'Analytics:contrail-alarm-gen:0',
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'mfilt': 'Analytics:contrail-invalid:0',
'uve_list_get': [],
'uve_get_post': {'value': []},
},
# cfilt
{
'cfilt': ['UveVirtualNetworkAgent'],
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
}
}
}
]
},
},
{
'cfilt': [
'UveVirtualNetworkAgent:total_acl_rules',
'UveVirtualNetworkConfig:partially_connected_networks'
],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
]
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'total_acl_rules': 3
}
}
}
]
},
},
{
'cfilt': [
'UveVirtualNetworkConfig:invalid',
'UveVirtualNetworkAgent:in_tpkts',
'UVEAlarms:alarms'
],
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'cfilt': [
'UveVirtualNetworkAgent:invalid',
'UVEAlarms:invalid_alarms',
'invalid'
],
'uve_list_get': [],
'uve_get_post': {'value': []},
},
# ackfilt
{
'ackfilt': True,
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
}
}
]
},
},
{
'ackfilt': False,
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'get_alarms': {
'virtual-network': [
{ 'name' : 'default-domain:project1:vn2',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
]
} }
},
{ 'name' : 'default-domain:project2:vn1',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
{ 'name' : 'default-domain:project2:vn1&',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
]
},
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
# kfilt + sfilt
{
'kfilt': [
'default-domain:project1:*',
'default-domain:project2:vn1',
'default-domain:invalid'
],
'sfilt': socket.gethostname()+'_2',
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
}
}
}
]
},
},
# kfilt + sfilt + ackfilt
{
'kfilt': [
'default-domain:project1:vn1',
'default-domain:project2:*',
'default-domain:invalid'
],
'sfilt': socket.gethostname()+'_2',
'ackfilt': True,
'uve_list_get': [
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
}
}
}
]
},
},
# kfilt + sfilt + cfilt
{
'kfilt': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'sfilt': socket.gethostname()+'_1',
'cfilt': [
'UveVirtualNetworkAgent',
'UVEAlarms',
'UveVirtualNetworkConfig:Invalid'
],
'uve_list_get': [
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
}
]
},
},
# kfilt + mfilt + cfilt
{
'kfilt': ['*'],
'mfilt': 'Config:contrail-api:0',
'cfilt': [
'UveVirtualNetworkAgent',
'UVEAlarms:alarms'
],
'uve_list_get': [],
'uve_get_post': {'value': []},
},
# kfilt + sfilt + mfilt + cfilt
{
'kfilt': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:*'
],
'sfilt': socket.gethostname()+'_1',
'mfilt': 'Config:contrail-api:0',
'cfilt': [
'UveVirtualNetworkConfig:partially_connected_networks',
'UveVirtualNetworkConfig:total_acl_rules',
'UVEAlarms'
],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
}
}
]
},
},
{
'kfilt': [
'default-domain:project1:*',
'default-domain:project2:vn1',
'default-domain:project2:invalid'
],
'sfilt': socket.gethostname()+'_3',
'mfilt': 'Analytics:contrail-alarm-gen:0',
'cfilt': [
'UveVirtualNetworkConfig',
'UVEAlarms:alarms',
'UveVirtualNetworkAgent'
],
'uve_list_get': [
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
# kfilt + sfilt + mfilt + cfilt + ackfilt
{
'kfilt': [
'default-domain:project1:*',
'default-domain:project2:vn1&',
'default-domain:project2:invalid'
],
'sfilt': socket.gethostname()+'_3',
'mfilt': 'Analytics:contrail-alarm-gen:0',
'cfilt': [
'UveVirtualNetworkConfig',
'UVEAlarms:alarms',
'UveVirtualNetworkAgent'
],
'ackfilt': True,
'uve_list_get': [
'default-domain:project2:vn1&'
],
'uve_get_post': {'value': []},
}
]
vn_table = _OBJECT_TABLES[VN_TABLE].log_query_name
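# For each filter combination, verify that the UVE list (GET), the
# multi-UVE GET and the UVE POST all return the expected results.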
for i in range(len(filt_test)):
filters = dict(kfilt=filt_test[i].get('kfilt'),
sfilt=filt_test[i].get('sfilt'),
mfilt=filt_test[i].get('mfilt'),
cfilt=filt_test[i].get('cfilt'),
ackfilt=filt_test[i].get('ackfilt'))
assert(vizd_obj.verify_uve_list(vn_table,
filts=filters, exp_uve_list=filt_test[i]['uve_list_get']))
assert(vizd_obj.verify_multi_uve_get(vn_table,
filts=filters, exp_uves=filt_test[i]['uve_get_post']))
assert(vizd_obj.verify_uve_post(vn_table,
filts=filters, exp_uves=filt_test[i]['uve_get_post']))
if 'get_alarms' in filt_test[i]:
filters['tablefilt'] = 'virtual-network'
assert(vizd_obj.verify_get_alarms(vn_table,
filts=filters, exp_uves=filt_test[i]['get_alarms']))
# end test_08_uve_alarm_filter
@staticmethod
def get_free_port():
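# Bind to port 0 so the OS assigns a free ephemeral port, then
# release the socket and return the port number.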
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cs.bind(("", 0))
cport = cs.getsockname()[1]
cs.close()
return cport
@staticmethod
def _check_skip_kafka():
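# Skip Kafka-dependent tests on Ubuntu 12.x and CentOS 6.x, where
# Kafka is presumably not supported.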
(PLATFORM, VERSION, EXTRA) = platform.linux_distribution()
if PLATFORM.lower() == 'ubuntu':
if VERSION.find('12.') == 0:
return True
if PLATFORM.lower() == 'centos':
if VERSION.find('6.') == 0:
return True
return False
def _term_handler(*_):
raise IntSignal()
if __name__ == '__main__':
gevent.signal(signal.SIGINT, _term_handler)
unittest.main(catchbreak=True)
|
'''
This test starts redis, collectors, analytics-api and
python generators that simulates alarm generator. This
test sends alarms from alarm generators and verifies the
retrieval of alarms from analytics-api.
'''
logging.info('%%% test_07_alarm %%%')
if AnalyticsUveTest._check_skip_kafka() is True:
return True
# collector_ha_test flag is set to True, because we want to test
# retrieval of alarms across multiple redis servers.
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1, 0,
collector_ha_test=True,
start_kafka = True))
assert vizd_obj.verify_on_setup()
# create alarm-generator and attach it to the first collector.
collectors = [vizd_obj.collectors[0].get_addr(),
vizd_obj.collectors[1].get_addr()]
alarm_gen1 = self.useFixture(
GeneratorFixture('contrail-alarm-gen', [collectors[0]], logging,
None, hostname=socket.gethostname()+'_1'))
alarm_gen1.verify_on_setup()
# send process state alarm for analytics-node
alarms = alarm_gen1.create_process_state_alarm(
'contrail-query-engine')
alarm_gen1.send_alarm(socket.gethostname()+'_1', alarms,
COLLECTOR_INFO_TABLE)
analytics_tbl = _OBJECT_TABLES[COLLECTOR_INFO_TABLE].log_query_name
# send process state alarm for control-node
alarms = alarm_gen1.create_process_state_alarm('contrail-dns')
alarm_gen1.send_alarm('<&'+socket.gethostname()+'_1>', alarms,
BGP_ROUTER_TABLE)
control_tbl = _OBJECT_TABLES[BGP_ROUTER_TABLE].log_query_name
# create another alarm-generator and attach it to the second collector.
alarm_gen2 = self.useFixture(
GeneratorFixture('contrail-alarm-gen', [collectors[1]], logging,
None, hostname=socket.gethostname()+'_2'))
alarm_gen2.verify_on_setup()
# send process state alarm for analytics-node
alarms = alarm_gen2.create_process_state_alarm(
'contrail-topology')
alarm_gen2.send_alarm(socket.gethostname()+'_2', alarms,
COLLECTOR_INFO_TABLE)
keys = [socket.gethostname()+'_1', socket.gethostname()+'_2']
assert(vizd_obj.verify_alarm_list_include(analytics_tbl,
expected_alarms=keys))
assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
assert(vizd_obj.verify_alarm(analytics_tbl, keys[1], obj_to_dict(
alarm_gen2.alarms[COLLECTOR_INFO_TABLE][keys[1]].data)))
keys = ['<&'+socket.gethostname()+'_1>']
assert(vizd_obj.verify_alarm_list_include(control_tbl, expected_alarms=keys))
assert(vizd_obj.verify_alarm(control_tbl, keys[0], obj_to_dict(
alarm_gen1.alarms[BGP_ROUTER_TABLE][keys[0]].data)))
# delete analytics-node alarm generated by alarm_gen2
alarm_gen2.delete_alarm(socket.gethostname()+'_2',
COLLECTOR_INFO_TABLE)
# verify analytics-node alarms
keys = [socket.gethostname()+'_1']
assert(vizd_obj.verify_alarm_list_include(analytics_tbl,
expected_alarms=keys))
ukeys = [socket.gethostname()+'_2']
assert(vizd_obj.verify_alarm_list_exclude(analytics_tbl,
unexpected_alms=ukeys))
assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
assert(vizd_obj.verify_alarm(analytics_tbl, ukeys[0], {}))
# Disconnect alarm_gen1 from the Collector and verify that all
# alarms generated by alarm_gen1 are removed by the Collector.
alarm_gen1.disconnect_from_collector()
ukeys = [socket.gethostname()+'_1']
assert(vizd_obj.verify_alarm_list_exclude(analytics_tbl,
unexpected_alms=ukeys))
assert(vizd_obj.verify_alarm(analytics_tbl, ukeys[0], {}))
ukeys = ['<&'+socket.gethostname()+'_1>']
assert(vizd_obj.verify_alarm_list_exclude(control_tbl,
unexpected_alms=ukeys))
assert(vizd_obj.verify_alarm(control_tbl, ukeys[0], {}))
# update analytics-node alarm in disconnect state
alarms = alarm_gen1.create_process_state_alarm(
'contrail-snmp-collector')
alarm_gen1.send_alarm(socket.gethostname()+'_1', alarms,
COLLECTOR_INFO_TABLE)
# Connect alarm_gen1 to the Collector and verify that all
# alarms generated by alarm_gen1 are synced with the Collector.
alarm_gen1.connect_to_collector()
keys = [socket.gethostname()+'_1']
assert(vizd_obj.verify_alarm_list_include(analytics_tbl,
expected_alarms=keys))
assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
keys = ['<&'+socket.gethostname()+'_1>']
assert(vizd_obj.verify_alarm_list_include(control_tbl,
expected_alarms=keys))
assert(vizd_obj.verify_alarm(control_tbl, keys[0], obj_to_dict(
alarm_gen1.alarms[BGP_ROUTER_TABLE][keys[0]].data)))
|
grid.tsx
|
import * as React from 'react';
import { Grid, GridItem } from '@patternfly/react-core';
import { useRefWidth } from '../utils';
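// Width in px below which the dashboard collapses into a single column;
// 992px matches PatternFly's "lg" breakpoint.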
export const MEDIA_QUERY_LG = 992;
export const DashboardGrid: React.FC<DashboardGridProps> = ({ mainCards, leftCards, rightCards }) => {
const [containerRef, width] = useRefWidth();
const grid = width <= MEDIA_QUERY_LG ?
(
<Grid className="co-dashboard-grid">
<GridItem lg={12} md={12} sm={12}>
{mainCards}
</GridItem>
<GridItem key="left" lg={12} md={12} sm={12}>
{leftCards}
</GridItem>
<GridItem key="right" lg={12} md={12} sm={12}>
{rightCards}
</GridItem>
</Grid>
) : (
<Grid className="co-dashboard-grid">
<GridItem key="left" lg={3} md={3} sm={3}>
{leftCards}
</GridItem>
<GridItem lg={6} md={6} sm={6}>
{mainCards}
</GridItem>
<GridItem key="right" lg={3} md={3} sm={3}>
{rightCards}
</GridItem>
</Grid>
);
return <div ref={containerRef}>{grid}</div>;
|
};
type DashboardGridProps = {
mainCards: React.ReactNode,
leftCards?: React.ReactNode,
rightCards?: React.ReactNode,
};
| |
client.go
|
//
// Copyright 2021 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hashivault
import (
"context"
"crypto"
"encoding/base64"
"encoding/json"
"fmt"
"log"
"os"
"path/filepath"
"regexp"
"time"
"github.com/ReneKroon/ttlcache/v2"
vault "github.com/hashicorp/vault/api"
"github.com/mitchellh/go-homedir"
"github.com/pkg/errors"
"github.com/sigstore/sigstore/pkg/cryptoutils"
)
type hashivaultClient struct {
client *vault.Client
keyPath string
transitSecretEnginePath string
keyCache *ttlcache.Cache
}
var (
errReference = errors.New("kms specification should be in the format hashivault://<key>")
referenceRegex = regexp.MustCompile(`^hashivault://(?P<path>\w(([\w-.]+)?\w)?)$`)
prefixRegex = regexp.MustCompile("vault:v[0-9]+:")
)
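// For example, "hashivault://my-key" is a valid reference whose "path"
// capture group is "my-key"; prefixRegex matches the "vault:v<N>:" version
// prefix that Vault prepends to transit signatures.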
const (
vaultV1DataPrefix = "vault:v1:"
// use a consistent key for cache lookups
CacheKey = "signer"
ReferenceScheme = "hashivault://"
)
func ValidReference(ref string) error {
if !referenceRegex.MatchString(ref) {
return errReference
}
return nil
}
func parseReference(resourceID string) (keyPath string, err error) {
i := referenceRegex.SubexpIndex("path")
v := referenceRegex.FindStringSubmatch(resourceID)
if len(v) < i+1 {
err = errors.Errorf("invalid vault format %q", resourceID)
return
}
keyPath = v[i]
return
}
func newHashivaultClient(keyResourceID string) (*hashivaultClient, error) {
keyPath, err := parseReference(keyResourceID)
if err != nil {
return nil, err
}
address := os.Getenv("VAULT_ADDR")
if address == "" {
return nil, errors.New("VAULT_ADDR is not set")
}
token := os.Getenv("VAULT_TOKEN")
if token == "" {
log.Printf("VAULT_TOKEN is not set, trying to read token from file at path ~/.vault-token")
homeDir, err := homedir.Dir()
if err != nil {
return nil, errors.Wrap(err, "get home directory")
}
tokenFromFile, err := os.ReadFile(filepath.Join(homeDir, ".vault-token"))
if err != nil {
return nil, errors.Wrap(err, "read .vault-token file")
}
token = string(tokenFromFile)
}
client, err := vault.NewClient(&vault.Config{
Address: address,
})
if err != nil {
return nil, errors.Wrap(err, "new vault client")
}
client.SetToken(token)
transitSecretEnginePath := os.Getenv("TRANSIT_SECRET_ENGINE_PATH")
if transitSecretEnginePath == "" {
transitSecretEnginePath = "transit"
}
hvClient := &hashivaultClient{
client: client,
keyPath: keyPath,
transitSecretEnginePath: transitSecretEnginePath,
keyCache: ttlcache.NewCache(),
}
hvClient.keyCache.SetLoaderFunction(hvClient.keyCacheLoaderFunction)
hvClient.keyCache.SkipTTLExtensionOnHit(true)
return hvClient, nil
}
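// keyCacheLoaderFunction is invoked on cache misses: it fetches the public
// key from Vault and caches it for five minutes.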
func (h *hashivaultClient) keyCacheLoaderFunction(key string) (data interface{}, ttl time.Duration, err error) {
ttl = time.Second * 300
var pubKey crypto.PublicKey
pubKey, err = h.fetchPublicKey(context.Background())
if err != nil {
data = nil
return
}
data = pubKey
return data, ttl, err
}
func (h *hashivaultClient) fetchPublicKey(_ context.Context) (crypto.PublicKey, error) {
client := h.client.Logical()
keyResult, err := client.Read(fmt.Sprintf("/%s/keys/%s", h.transitSecretEnginePath, h.keyPath))
if err != nil {
return nil, errors.Wrap(err, "public key")
}
keysData, hasKeys := keyResult.Data["keys"]
latestVersion, hasVersion := keyResult.Data["latest_version"]
if !hasKeys || !hasVersion {
return nil, errors.New("Failed to read transit key keys: corrupted response")
}
keys, ok := keysData.(map[string]interface{})
if !ok {
return nil, errors.New("Failed to read transit key keys: Invalid keys map")
}
keyVersion := latestVersion.(json.Number)
keyData, ok := keys[string(keyVersion)]
if !ok {
return nil, errors.New("Failed to read transit key keys: corrupted response")
}
publicKeyPem, ok := keyData.(map[string]interface{})["public_key"]
if !ok {
return nil, errors.New("Failed to read transit key keys: corrupted response")
}
return cryptoutils.UnmarshalPEMToPublicKey([]byte(publicKeyPem.(string)))
}
func (h *hashivaultClient) public() (crypto.PublicKey, error) {
return h.keyCache.Get(CacheKey)
}
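// sign asks the transit engine to sign the digest; "prehashed" tells Vault
// the input is already a digest (crypto.Hash(0) means sign the raw payload).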
func (h hashivaultClient) sign(digest []byte, alg crypto.Hash) ([]byte, error) {
client := h.client.Logical()
signResult, err := client.Write(fmt.Sprintf("/%s/sign/%s%s", h.transitSecretEnginePath, h.keyPath, hashString(alg)), map[string]interface{}{
"input": base64.StdEncoding.Strict().EncodeToString(digest),
"prehashed": alg != crypto.Hash(0),
})
if err != nil {
return nil, errors.Wrap(err, "Transit: failed to sign payload")
}
encodedSignature, ok := signResult.Data["signature"]
if !ok {
return nil, errors.New("Transit: response corrupted in-transit")
}
return vaultDecode(encodedSignature)
}
func (h hashivaultClient) verify(sig, digest []byte, alg crypto.Hash) error {
client := h.client.Logical()
encodedSig := base64.StdEncoding.EncodeToString(sig)
vaultDataPrefix := os.Getenv("VAULT_KEY_PREFIX")
if vaultDataPrefix == "" {
vaultDataPrefix = vaultV1DataPrefix
}
result, err := client.Write(fmt.Sprintf("/%s/verify/%s/%s", h.transitSecretEnginePath, h.keyPath, hashString(alg)), map[string]interface{}{
"input": base64.StdEncoding.EncodeToString(digest),
"signature": fmt.Sprintf("%s%s", vaultDataPrefix, encodedSig),
})
if err != nil {
return errors.Wrap(err, "verify")
}
valid, ok := result.Data["valid"]
if !ok
|
if isValid, ok := valid.(bool); !ok || !isValid {
return errors.New("Failed vault verification")
}
return nil
}
// Vault likes to prefix base64 data with a version prefix
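// e.g. "vault:v1:MEUCIQ..." base64-decodes the payload after the prefix.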
func vaultDecode(data interface{}) ([]byte, error) {
encoded, ok := data.(string)
if !ok {
return nil, errors.New("Received non-string data")
}
return base64.StdEncoding.DecodeString(prefixRegex.ReplaceAllString(encoded, ""))
}
func hashString(h crypto.Hash) string {
var hashStr string
switch h {
case crypto.SHA224:
hashStr = "/sha2-224"
case crypto.SHA256:
hashStr = "/sha2-256"
case crypto.SHA384:
hashStr = "/sha2-384"
case crypto.SHA512:
hashStr = "/sha2-512"
default:
hashStr = ""
}
return hashStr
}
func (h hashivaultClient) createKey(typeStr string) (crypto.PublicKey, error) {
client := h.client.Logical()
if _, err := client.Write(fmt.Sprintf("/%s/keys/%s", h.transitSecretEnginePath, h.keyPath), map[string]interface{}{
"type": typeStr,
}); err != nil {
return nil, errors.Wrap(err, "Failed to create transit key")
}
return h.public()
}
|
{
return errors.New("corrupted response")
}
|
script_test.go
|
// This file tests script primitives.
package awk
import (
"bufio"
"bytes"
"fmt"
"io"
"regexp"
"sort"
"strings"
"testing"
)
// TestReadRecordNewline tests reading newline-separated records.
func TestReadRecordNewline(t *testing.T) {
// Define the basic test we plan to repeat.
allRecords := []string{"X", "Word", "More than one word", "", "More text"}
allRecordsStr := strings.Join(allRecords, "\n")
scr := NewScript()
doTest := func() {
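// Wire up the record scanner by hand (instead of calling Run) so the
// test can invoke readRecord directly with the configured separator.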
scr.input = bufio.NewReader(strings.NewReader(allRecordsStr))
scr.SetRS("\n")
scr.rsScanner = bufio.NewScanner(scr.input)
scr.rsScanner.Split(scr.makeRecordSplitter())
for _, oneRecord := range allRecords {
rec, err := scr.readRecord()
if err != nil {
t.Fatal(err)
}
if rec != oneRecord {
t.Fatalf("Expected %q but received %q", oneRecord, rec)
}
}
}
// Test with no trailing newline.
doTest()
// Test with a trailing newline.
allRecordsStr += "\n"
doTest()
}
// TestReadRecordWhitespace tests reading whitespace-separated records.
func TestReadRecordWhitespace(t *testing.T) {
allRecordsStr := " banana banana banana banana banana banana\tbanana banana\nbanana banana"
want := []string{
"",
"",
"banana",
"banana",
"banana",
"",
"banana",
"",
"",
"banana",
"banana\tbanana",
"banana\nbanana",
"banana",
}
scr := NewScript()
scr.input = bufio.NewReader(strings.NewReader(allRecordsStr))
scr.SetRS(" ")
scr.rsScanner = bufio.NewScanner(scr.input)
scr.rsScanner.Split(scr.makeRecordSplitter())
for _, str := range want {
rec, err := scr.readRecord()
if err != nil {
t.Fatal(err)
}
if rec != str {
t.Fatalf("Expected %q but received %q", str, rec)
}
}
}
// TestReadRecordRE tests reading regular-expression-separated records.
func TestReadRecordRE(t *testing.T) {
allRecordsStr := "hello<foo>howdy</foo>hello<bar>yellow</bar>hello<baz>goodbye</baz>"
scr := NewScript()
scr.input = bufio.NewReader(strings.NewReader(allRecordsStr))
scr.SetRS(`<[^>]+>[^<]*<[^>]+>`)
scr.rsScanner = bufio.NewScanner(scr.input)
scr.rsScanner.Split(scr.makeRecordSplitter())
for i := 0; i < 3; i++ {
rec, err := scr.readRecord()
if err != nil {
t.Fatal(err)
}
if rec != "hello" {
t.Fatalf("Expected %q but received %q", "hello", rec)
}
}
}
// TestSplitRecordWhitespace tests splitting a record into whitespace-separated
// fields.
func TestSplitRecordWhitespace(t *testing.T) {
recordStr := "The woods are lovely, dark and deep,"
fields := regexp.MustCompile(`\s+`).Split(recordStr, -1)
scr := NewScript()
scr.splitRecord(recordStr)
for i, f := range fields {
if scr.F(i+1).String() != f {
t.Fatalf("Expected %q but received %q", f, scr.F(i+1))
}
}
}
// TestSplitRecordComma tests splitting a record into comma-separated fields.
func TestSplitRecordComma(t *testing.T) {
recordStr := "The woods are lovely, dark and deep,"
fields := strings.Split(recordStr, ",")
scr := NewScript()
scr.SetFS(",")
scr.splitRecord(recordStr)
for i, f := range fields {
if scr.F(i+1).String() != f {
t.Fatalf("Expected %q but received %q", f, scr.F(i+1))
}
}
}
// TestSplitFieldRE tests splitting a field based on a regular expression.
func TestSplitFieldRE(t *testing.T) {
// Determine what we want to provide and see in return.
recordStr := "foo-bar---baz------------quux--corge-grault---garply-"
re, err := regexp.Compile(`\w+`)
if err != nil {
t.Fatal(err)
}
words := re.FindAllString(recordStr, -1)
words = append(words, "")
// Split the record.
scr := NewScript()
scr.SetFS("-+")
scr.splitRecord(recordStr)
// Check the result.
for i := 1; i <= scr.NF; i++ {
f := scr.F(i).String()
if f != words[i-1] {
t.Fatalf("Expected %q for field %d but received %q", words[i-1], i, f)
}
}
}
// TestSplitFieldREIgnCase tests splitting a field based on a case-insensitive
// regular expression.
func TestSplitFieldREIgnCase(t *testing.T) {
// Determine what we want to provide and see in return.
recordStr := "fooxbarXxxbazxxXXxxxXxxXxquucksxXcorgexgraultxxxgarplyx"
re, err := regexp.Compile(`[fobarzqucksgeltpy]+`)
if err != nil {
t.Fatal(err)
}
words := re.FindAllString(recordStr, -1)
words = append(words, "")
// Split the record.
scr := NewScript()
scr.SetFS("x+")
scr.IgnoreCase(true)
err = scr.splitRecord(recordStr)
if err != nil {
t.Fatal(err)
}
// Check the result.
for i := 1; i <= scr.NF; i++ {
f := scr.F(i).String()
if f != words[i-1] {
t.Fatalf("Expected %q for field %d but received %q", words[i-1], i, f)
}
}
}
// TestSplitFieldFixed tests splitting a field based on fixed-width columns.
func TestSplitFieldFixed(t *testing.T) {
// Determine what we want to provide and see in return.
inputStr := "CeterumcenseoCarthaginemessedelendam."
desiredOutput := []string{"Ceterum", "censeo", "Carthaginem", "esse", "delendam."}
// Split the record.
scr := NewScript()
scr.SetFieldWidths([]int{7, 6, 11, 4, 123})
err := scr.splitRecord(inputStr)
if err != nil {
t.Fatal(err)
}
// Check the result.
for i := 1; i <= scr.NF; i++ {
f := scr.F(i).String()
if f != desiredOutput[i-1] {
t.Fatalf("Expected %q for field %d but received %q", desiredOutput[i-1], i, f)
}
}
}
// TestSplitFieldREPat tests splitting a field based on a field-matching
// regular expression.
func TestSplitFieldREPat(t *testing.T) {
// Determine what we want to provide and see in return.
inputStr := "23 Skidoo. 3-2-1 blast off! 99 red balloons."
desiredOutput := 122
// Split the record.
scr := NewScript()
scr.SetFPat(`-?\d+`)
err := scr.splitRecord(inputStr)
if err != nil {
t.Fatal(err)
}
// Check the result.
output := 0
for i := 1; i <= scr.NF; i++ {
t.Log(scr.F(i))
output += scr.F(i).Int()
}
if output != desiredOutput {
t.Fatalf("Expected %d but received %d", desiredOutput, output)
}
}
// TestBeginEnd tests creating and running a script that contains a BEGIN
// action and an END action.
func TestBeginEnd(t *testing.T) {
scr := NewScript()
val := 123
scr.Begin = func(s *Script) { val *= 10 }
scr.End = func(s *Script) { val += 4 }
err := scr.Run(strings.NewReader("dummy data"))
if err != nil {
t.Fatal(err)
}
if val != 1234 {
t.Fatalf("Expected 1234 but received %d", val)
}
}
// TestSimpleSum tests adding up a column of numbers.
func TestSimpleSum(t *testing.T) {
scr := NewScript()
sum := 0
scr.AppendStmt(nil, func(s *Script) { sum += s.F(1).Int() })
err := scr.Run(strings.NewReader("2\n4\n6\n8\n"))
if err != nil {
t.Fatal(err)
}
if sum != 20 {
t.Fatalf("Expected 20 but received %d", sum)
}
}
// TestRunTwice tests running the same script twice.
func TestRunTwice(t *testing.T) {
// Run once.
scr := NewScript()
sum := 0
scr.AppendStmt(nil, func(s *Script) { sum += s.F(1).Int() * s.NR })
err := scr.Run(strings.NewReader("1\n3\n5\n7\n"))
if err != nil {
t.Fatal(err)
}
if sum != 50 {
t.Fatalf("Expected 50 but received %d on the first trial", sum)
}
// Run again.
sum = 0
err = scr.Run(strings.NewReader("1\n3\n5\n7\n"))
if err != nil {
t.Fatal(err)
}
if sum != 50 {
t.Fatalf("Expected 50 but received %d on the second trial", sum)
}
}
// TestFieldCreation tests creating ("autovivifying" in Perl-speak) new fields.
func TestFieldCreation(t *testing.T) {
scr := NewScript()
sum := 0
scr.AppendStmt(nil, func(s *Script) { sum += 1 << uint(s.F(2).Int()) })
err := scr.Run(strings.NewReader("x 3\ny 2\n\nz 1\n"))
if err != nil {
t.Fatal(err)
}
if sum != 15 {
t.Fatalf("Expected 15 but received %d", sum)
}
}
// TestRecordReplacement tests overwriting field 0 with a new record.
func TestRecordReplacement(t *testing.T) {
scr := NewScript()
sum := 0
scr.AppendStmt(nil, func(s *Script) {
sum += s.F(2).Int()
s.SetF(0, s.NewValue("10 20 30 40 50"))
sum += s.F(5).Int()
})
err := scr.Run(strings.NewReader("x 3\ny 2\n\nz 1\n"))
if err != nil {
t.Fatal(err)
}
if sum != 206 {
t.Fatalf("Expected 206 but received %d", sum)
}
}
// TestRecordChangeCase tests changing IgnoreCase during the execution of a
// script.
func TestRecordChangeCase(t *testing.T) {
scr := NewScript()
sum := 0
scr.AppendStmt(func(s *Script) bool { return s.F(1).Int()%2 == 0 },
func(s *Script) { sum += s.F(1).Int() })
scr.AppendStmt(func(s *Script) bool { return s.NR == 3 },
func(s *Script) { s.IgnoreCase(true) })
scr.SetRS("EOL")
err := scr.Run(strings.NewReader("1EOL2EOL3EOL4Eol5eol6eoL"))
if err != nil {
t.Fatal(err)
}
if sum != 12 {
t.Fatalf("Expected 12 but received %d", sum)
}
}
// TestRecordBlankLines tests the AWK special case of blank-line-separated
// records.
func TestRecordBlankLines(t *testing.T) {
recordStr := "uno\ndos\n\ntres\ncuatro\n\ncinco,seis,siete\nocho\n\nnueve,diez\n\n"
expected := regexp.MustCompile(`[\n,]+`).Split(recordStr, -1)
expected = expected[:len(expected)-1] // Skip empty final record.
actual := make([]string, 0, 10)
scr := NewScript()
scr.SetRS("")
scr.SetFS(",")
scr.AppendStmt(nil, func(s *Script) {
for i := 1; i <= s.NF; i++ {
actual = append(actual, s.F(i).String())
}
})
err := scr.Run(strings.NewReader(recordStr))
if err != nil {
t.Fatal(err)
}
for i, s1 := range expected {
s2 := actual[i]
if s1 != s2 {
t.Fatalf("Expected %v but received %v", expected, actual)
}
}
}
// TestExit tests premature script termination.
func TestExit(t *testing.T) {
scr := NewScript()
sum := 0
scr.Begin = func(s *Script) { s.IgnoreCase(true) }
scr.AppendStmt(nil, func(s *Script) { sum += s.F(1).Int() })
scr.AppendStmt(func(s *Script) bool { return s.F(1).StrEqual("stop") },
func(s *Script) { s.Exit() })
err := scr.Run(strings.NewReader("111\n222\n333\n444\nSTOP\n555\n666\n"))
if err != nil {
t.Fatal(err)
}
if sum != 1110 {
t.Fatalf("Expected 1110 but received %d", sum)
}
}
// TestRecordRange tests range patterns.
func TestRecordRange(t *testing.T) {
scr := NewScript()
all := []string{
"bad",
"terrible",
"BEGIN",
"good",
"great",
"fantastic",
"END",
"awful",
"dreadful",
}
want := []string{
"BEGIN",
"good",
"great",
"fantastic",
"END",
}
got := make([]string, 0, 10)
scr.AppendStmt(Range(func(s *Script) bool { return s.F(1).Match("BEGIN") },
func(s *Script) bool { return s.F(1).Match("END") }),
func(s *Script) { got = append(got, s.F(1).String()) })
err := scr.Run(strings.NewReader(strings.Join(all, "\n")))
if err != nil {
t.Fatal(err)
}
for i, s1 := range want {
s2 := got[i]
if s1 != s2 {
t.Fatalf("Expected %q but received %q", s1, s2)
}
}
}
// TestSplitRecordRE tests splitting the input string into regexp-separated
// records.
func TestSplitRecordRE(t *testing.T) {
scr := NewScript()
pluses := 0
scr.Begin = func(s *Script) { s.SetRS(`\++`) }
scr.AppendStmt(nil, func(s *Script) { pluses += len(s.RT) })
err := scr.Run(strings.NewReader("a++++++a++a++++a+++a+++++a+"))
if err != nil {
t.Fatal(err)
}
if pluses != 21 {
t.Fatalf("Expected 21 but received %d", pluses)
}
}
// TestDefaultAction tests the default printing action.
func TestDefaultAction(t *testing.T) {
// Define a script and some test input.
scr := NewScript()
scr.Output = new(bytes.Buffer)
scr.IgnoreCase(true)
scr.AppendStmt(func(s *Script) bool { return s.F(1).StrEqual("Duck") }, nil)
inputStr := `Duck 1
duck 2
duck 3
duck 4
Goose! 5
Duck 6
duck 7
DUCK 8
duck 9
Goose!
`
// Test with the default record separator.
err := scr.Run(strings.NewReader(inputStr))
if err != nil {
t.Fatal(err)
}
outputStr := string(scr.Output.(*bytes.Buffer).Bytes())
desiredOutputStr := `Duck 1
duck 2
duck 3
duck 4
Duck 6
duck 7
DUCK 8
duck 9
`
if outputStr != desiredOutputStr {
t.Fatalf("Expected %#v but received %#v", desiredOutputStr, outputStr)
}
// Test with a modified record separator.
scr.Output.(*bytes.Buffer).Reset()
scr.SetORS("|")
err = scr.Run(strings.NewReader(inputStr))
if err != nil {
t.Fatal(err)
}
outputStr = string(scr.Output.(*bytes.Buffer).Bytes())
desiredOutputStr = `Duck 1|duck 2|duck 3|duck 4|Duck 6|duck 7|DUCK 8|duck 9|`
if outputStr != desiredOutputStr {
t.Fatalf("Expected %#v but received %#v", desiredOutputStr, outputStr)
}
}
// TestFInts tests the bulk conversion of fields to ints.
func TestFInts(t *testing.T) {
// Define a script and some test inputs and outputs.
scr := NewScript()
inputStr := "8675309"
desiredOutput := []int{0, 3, 5, 6, 7, 8, 9}
var output []int
scr.SetFS("")
scr.AppendStmt(nil, func(s *Script) {
iList := s.FInts()
sort.Ints(iList)
output = iList
})
// Run the script.
err := scr.Run(strings.NewReader(inputStr))
if err != nil {
t.Fatal(err)
}
// Validate the output.
for i, val := range desiredOutput {
if val != output[i] {
t.Fatalf("Expected %v but received %v", desiredOutput, output)
}
}
}
// TestFieldCreation0 ensures that field creation updates F(0).
func TestFieldCreation0(t *testing.T) {
// Define a script and some test inputs and outputs.
input := "spam egg spam spam bacon spam"
desiredOutput := "spam,egg,spam,spam,bacon,spam,,,,,sausage"
var output string
scr := NewScript()
scr.Begin = func(s *Script) { scr.SetOFS(",") }
scr.AppendStmt(nil, func(s *Script) {
scr.SetF(scr.NF+5, scr.NewValue("sausage"))
output = scr.F(0).String()
})
// Run the script and validate the output.
err := scr.Run(strings.NewReader(input))
if err != nil {
t.Fatal(err)
}
if output != desiredOutput {
t.Fatalf("Expected %q but received %q", desiredOutput, output)
}
}
// TestFieldModification0 ensures that field modification updates F(0).
func TestFieldModification0(t *testing.T) {
// Define a script and some test inputs and outputs.
input := "spam egg spam spam bacon spam"
desiredOutput := "spam,egg,sausage,spam,bacon,spam"
var output string
scr := NewScript()
scr.Begin = func(s *Script) { scr.SetOFS(",") }
scr.AppendStmt(nil, func(s *Script) {
scr.SetF(3, scr.NewValue("sausage"))
output = scr.F(0).String()
})
// Run the script and validate the output.
err := scr.Run(strings.NewReader(input))
if err != nil {
t.Fatal(err)
}
if output != desiredOutput {
t.Fatalf("Expected %q but received %q", desiredOutput, output)
}
}
// TestNFModification0 ensures that modifying NF updates F(0).
func TestNFModification0(t *testing.T) {
// Define a script and some test inputs and outputs.
input := "spam egg spam spam bacon spam"
desiredOutput := "spam egg spam"
var output string
scr := NewScript()
scr.AppendStmt(nil, func(s *Script) {
scr.NF = 3
output = scr.F(0).String()
})
// Run the script and validate the output.
err := scr.Run(strings.NewReader(input))
if err != nil {
t.Fatal(err)
}
if output != desiredOutput {
t.Fatalf("Expected %q but received %q", desiredOutput, output)
}
}
// TestAutoInt tests the Auto function with an int argument.
func TestAutoInt(t *testing.T) {
// Define a script and some test inputs and outputs.
input := strings.Replace("It does not matter how slowly you go as long as you do not stop.", " ", "\n", -1)
var output string
desiredOutput := "go"
scr := NewScript()
scr.AppendStmt(Auto(8), func(s *Script) { output = s.F(1).String() })
// Run the script and validate the output.
err := scr.Run(strings.NewReader(input))
if err != nil {
t.Fatal(err)
}
if output != desiredOutput {
t.Fatalf("Expected %q but received %q", desiredOutput, output)
}
}
// TestAutoRegexp tests the Auto function with a Regexp argument.
func TestAutoRegexp(t *testing.T) {
// Define a script and some test inputs and outputs.
input := strings.Replace("It does not matter how slowly you go as long as you do not stop.", " ", "\n", -1)
var output string
desiredOutput := "go"
scr := NewScript()
re := regexp.MustCompile("Go")
scr.Begin = func(s *Script) { scr.IgnoreCase(true) }
scr.AppendStmt(Auto(re), func(s *Script) { output = s.F(1).String() })
// Run the script and validate the output.
err := scr.Run(strings.NewReader(input))
if err != nil {
t.Fatal(err)
}
if output != desiredOutput {
t.Fatalf("Expected %q but received %q", desiredOutput, output)
}
}
// TestAutoString tests the Auto function with a string argument.
func TestAutoString(t *testing.T) {
// Define a script and some test inputs and outputs.
input := strings.Replace("It does not matter how slowly you go as long as you do not stop.", " ", "\n", -1)
var output string
desiredOutput := "go"
scr := NewScript()
scr.Begin = func(s *Script) { scr.IgnoreCase(true) }
scr.AppendStmt(Auto("Go"), func(s *Script) { output = s.F(1).String() })
// Run the script and validate the output.
err := scr.Run(strings.NewReader(input))
if err != nil {
t.Fatal(err)
}
if output != desiredOutput {
t.Fatalf("Expected %q but received %q", desiredOutput, output)
}
}
// TestAutoIntRange tests the Auto function with a range of int arguments.
func TestAutoIntRange(t *testing.T) {
// Define a script and some test inputs and outputs.
input := strings.Replace("10 20 30 40 50 60 70 80 90 100", " ", "\n", -1)
var output int
desiredOutput := 150
scr := NewScript()
scr.AppendStmt(Auto(4, 6), func(s *Script) { output += s.F(1).Int() })
// Run the script and validate the output.
err := scr.Run(strings.NewReader(input))
if err != nil {
t.Fatal(err)
}
if output != desiredOutput {
t.Fatalf("Expected %d but received %d", desiredOutput, output)
}
}
// TestAutoIntRanges tests the Auto function with multiple ranges of int
// arguments.
func TestAutoIntRanges(t *testing.T) {
// Define a script and some test inputs and outputs.
input := strings.Replace("Don't be afraid to give up the good to go for the great.", " ", "\n", -1)
output := make([]string, 0, 15)
desiredOutput := strings.Split("Don't be afraid to go", " ")
scr := NewScript()
scr.Begin = func(s *Script) { scr.IgnoreCase(true) }
scr.AppendStmt(Auto(1, 3, 9, 10), func(s *Script) { output = append(output, s.F(1).String()) })
// Run the script and validate the output.
err := scr.Run(strings.NewReader(input))
if err != nil {
t.Fatal(err)
}
if len(output) != len(desiredOutput) {
t.Fatalf("Expected %v but received %v", desiredOutput, output)
}
for i, o := range desiredOutput {
if output[i] != o {
t.Fatalf("Expected %v but received %v", desiredOutput, output)
}
}
}
// TestCatchSetRSError tests that we properly catch invalid uses of SetRS.
func TestCatchSetRSError(t *testing.T) {
// Define a script.
scr := NewScript()
scr.Begin = func(s *Script) { scr.IgnoreCase(true) }
scr.AppendStmt(nil, func(s *Script) { s.SetRS("/") })
expected := "SetRS was called from a running script"
// Run the script and ensure it threw the expected error.
err := scr.Run(strings.NewReader("The progress of rivers to the ocean is not so rapid as that of man to error."))
if err == nil {
t.Fatalf("Expected error %q, but no error was returned", expected)
}
if err.Error() != expected {
t.Fatalf("Expected error %q, but received error %q", expected, err.Error())
}
}
// TestNext tests that Next immediately stops the current action and
// immediately continues with the next record.
func TestNext(t *testing.T) {
// Define a script.
var output []string
scr := NewScript()
scr.Begin = func(s *Script) { output = make([]string, 0, 3) }
scr.AppendStmt(nil, func(s *Script) {
output = append(output, s.F(0).String())
s.Next()
t.Fatal("Next did not immediately exit the current action")
})
scr.AppendStmt(nil, func(s *Script) {
t.Fatal("Next did not immediately go to the next record")
})
// Define our input and desired output.
input := []string{
"追いかけ", // Oikake
"待ち伏せ", // Machibuse
"気まぐれ", // Kimagure
"お惚け", // Otoboke
}
desiredOutput := strings.Join(input, " ")
// Run the script and validate the output.
err := scr.Run(strings.NewReader(strings.Join(input, "\n")))
if err != nil {
t.Fatal(err)
}
outputStr := strings.Join(output, " ")
if outputStr != desiredOutput {
t.Fatalf("Expected %q but received %q", desiredOutput, outputStr)
}
}
// TestGetLineSelf tests that GetLine can read the next record from the current
// input stream.
func TestGetLineSelf(t *testing.T) {
// Define a script.
var output []string
scr := NewScript()
scr.Begin = func(s *Script) { output = nil }
scr.AppendStmt(Auto("skip"), func(s *Script) {
nSkip := s.F(2).Int()
for i := 0; i < nSkip; i++ {
_, err := s.GetLine(nil)
if err != nil && err != io.EOF {
t.Fatal(err)
}
}
s.Next()
})
scr.AppendStmt(nil, func(s *Script) {
output = append(output, s.F(0).String())
})
// Define our input and desired output.
input := []string{
"apple",
"boy",
"skip 1",
"cat",
"skip 1",
"dog",
"east",
"five",
"skip 2",
"goat",
"house",
"skip 1",
"ice cream",
"July",
"skip 1",
"skip 1",
"king",
"lemon",
}
desiredOutput := []string{
"apple",
"boy",
"east",
"five",
"July",
"king",
"lemon",
}
// Run the script and validate the output.
err := scr.Run(strings.NewReader(strings.Join(input, "\n")))
if err != nil {
t.Fatal(err)
}
if len(output) != len(desiredOutput) {
t.Fatalf("Expected %v (length %d) but received %v (length %d)", desiredOutput, len(desiredOutput), output, len(output))
}
for i, o := range desiredOutput {
if output[i] != o {
t.Fatalf("Expected %v but received %v", desiredOutput, output)
}
}
// Repeat the test, but attempt to skip past the end of the file. The
// error check after the GetLine call is supposed to ignore EOF, not
// fail.
input = append(input, "skip 5")
err = scr.Run(strings.NewReader(strings.Join(input, "\n")))
if err != nil {
t.Fatal(err)
}
if len(output) != len(desiredOutput) {
t.Fatalf("Expected %v (length %d) but received %v (length %d)", desiredOutput, len(desiredOutput), output, len(output))
}
for i, o := range desiredOutput {
if output[i] != o {
t.Fatalf("Expected %v but received %v", desiredOutput, output)
}
}
}
// TestGetLineOther tests that GetLine can read the next record from an
// alternative input stream.
func TestGetLineOther(t *testing.T) {
// Define our inputs and desired output.
input := []string{
"INSERT",
"Boston",
"Chicago",
"Denver",
"INSERT",
"Frank",
"INSERT",
"INSERT",
"Ida",
"John",
"King",
"INSERT",
}
inserts := []string{
"Adams",
"Easy",
"George",
"Henry",
"Lincoln",
}
desiredOutput := []string{
"Adams",
"Boston",
"Chicago",
"Denver",
"Easy",
"Frank",
"George",
"Henry",
"Ida",
"John",
"King",
"Lincoln",
}
// Define a script.
var output []string
insertsStrm := strings.NewReader(strings.Join(inserts, "\n"))
scr := NewScript()
scr.Begin = func(s *Script) { output = nil }
scr.AppendStmt(Auto("INSERT"), func(s *Script) {
ins, err := s.GetLine(insertsStrm)
if err != nil {
t.Fatal(err)
}
output = append(output, ins.String())
s.Next()
})
scr.AppendStmt(nil, func(s *Script) {
output = append(output, s.F(0).String())
})
// Run the script and validate the output.
err := scr.Run(strings.NewReader(strings.Join(input, "\n")))
if err != nil {
t.Fatal(err)
}
if len(output) != len(desiredOutput) {
t.Fatalf("Expected %v (length %d) but received %v (length %d)", desiredOutput, len(desiredOutput), output, len(output))
}
for i, o := range desiredOutput {
if output[i] != o {
t.Fatalf("Expected %v but received %v", desiredOutput, output)
}
}
}
// TestGetLineSetF tests that GetLine + SetF can replace the current input line.
func TestGetLineSetF(t *testing.T)
|
script.
scr := NewScript()
scr.AppendStmt(nil, func(s *Script) {
// Validate the current line.
for i := 1; i <= 3; i++ {
if s.F(i).Int() != (s.NR-1)*3+i {
t.Fatalf("Expected %d but received %d", (s.NR-1)*3+i, s.F(i).Int())
}
}
// Read and validate the next line.
line, err := s.GetLine(nil)
if err != nil {
t.Fatal(err)
}
s.SetF(0, line)
for i := 1; i <= 3; i++ {
if s.F(i).Int() != (s.NR-1)*3+i {
t.Fatalf("Expected %d but received %d", (s.NR-1)*3+i, s.F(i).Int())
}
}
})
// Run the script and validate the output.
input := []string{
" 1 2 3",
" 4 5 6",
" 7 8 9",
"10 11 12",
}
err := scr.Run(strings.NewReader(strings.Join(input, "\n")))
if err != nil {
t.Fatal(err)
}
}
// TestBigLongLine tests splitting a very long record into whitespace-separated
// fields.
func TestBigLongLine(t *testing.T) {
// Specify the word to appear in each field.
word := "pneumonoultramicroscopicsilicovolcanoconiosis"
// Define a script that simply verifies that each field is
// correct.
scr := NewScript()
scr.AppendStmt(nil, func(s *Script) {
// Validate the current line.
for i := 1; i <= s.NF; i++ {
if s.F(i).String() != word {
t.Fatalf("Expected %q but received %q", word, s.F(i).String())
}
}
})
// Define a function to test a record with a given number of fields.
testBigRecord := func(numFields int) error {
// Create a very long string.
recordStr := word
for i := 0; i < numFields-1; i++ {
recordStr += " " + word
}
// Run the script and return its error value.
input := strings.NewReader(recordStr)
return scr.Run(input)
}
// Try increasingly large records until we exhaust the default maximum
// record size.
var err error
var numFields int
for numFields = 100; numFields <= 100000000; numFields *= 10 {
err = testBigRecord(numFields)
if err != nil {
break
}
}
if err == nil {
// We never managed to exhaust the default maximum record size.
// Assume it's big enough for all practical purposes.
return
}
// Set the buffer size and try again. There should be no error this
// time.
scr.MaxRecordSize = (len(word) + 1) * numFields
err = testBigRecord(numFields)
if err != nil {
t.Fatal(err)
}
}
// TestRunPipeline1 tests that RunPipeline can implement a pipeline of a single
// operation.
func TestRunPipeline1(t *testing.T) {
// Define a script that repeats the first word of each line
rep := NewScript()
rep.AppendStmt(nil, func(s *Script) {
s.Println(s.F(1), s.F(1))
})
// Pipe inputs into the pipeline we're about to run and from the
// pipeline into a memory buffer.
pr, pw := io.Pipe()
rep.Output = bytes.NewBuffer(make([]byte, 0, 10000))
// Write numbers into the pipe in the background.
go func() {
for i := 1; i <= 100; i++ {
fmt.Fprintf(pw, "%3d\n", i)
}
pw.Close()
}()
// Execute a pipeline in the foreground.
err := RunPipeline(pr, rep)
if err != nil {
t.Fatal(err)
}
// Ensure we received the expected output.
exp := bytes.NewBuffer(make([]byte, 0, 10000))
for i := 1; i <= 100; i++ {
fmt.Fprintf(exp, "%d %d\n", i, i)
}
got := rep.Output.(*bytes.Buffer).String()
if exp.String() != got {
t.Fatalf("Incorrect output %q", got)
}
}
// TestRunPipeline2 tests that RunPipeline can implement a pipeline of two
// operations.
func TestRunPipeline2(t *testing.T) {
// Define a script that repeats the first word of each line
rep := NewScript()
rep.AppendStmt(nil, func(s *Script) {
s.Println(s.F(1), s.F(1))
})
// Define a script that replaces the second word of each line
// with twice its value.
dbl := NewScript()
dbl.AppendStmt(nil, func(s *Script) {
s.Println(s.F(1), s.F(2).Int()*2)
})
// Pipe inputs into the pipeline we're about to run and from the
// pipeline into a memory buffer.
pr, pw := io.Pipe()
dbl.Output = bytes.NewBuffer(make([]byte, 0, 10000))
// Write numbers into the pipe in the background.
go func() {
for i := 1; i <= 100; i++ {
fmt.Fprintf(pw, "%3d\n", i)
}
pw.Close()
}()
// Execute a pipeline in the foreground.
err := RunPipeline(pr, rep, dbl)
if err != nil {
t.Fatal(err)
}
// Ensure we received the expected output.
exp := bytes.NewBuffer(make([]byte, 0, 10000))
for i := 1; i <= 100; i++ {
fmt.Fprintf(exp, "%d %d\n", i, i*2)
}
got := dbl.Output.(*bytes.Buffer).String()
if exp.String() != got {
t.Fatalf("Incorrect output %q", got)
}
}
// TestRunPipeline5 tests that RunPipeline can implement a pipeline of five
// operations.
func TestRunPipeline5(t *testing.T) {
// Define a script that repeats the first word of each line
rep := NewScript()
rep.AppendStmt(nil, func(s *Script) {
s.Println(s.F(1), s.F(1))
})
// Define a script that replaces the second number in a line with
// "fizz" if the first number is a multiple of 3.
fizz := NewScript()
fizz.AppendStmt(nil, func(s *Script) {
if s.F(1).Int()%3 == 0 {
s.Println(s.F(1), "fizz")
} else {
s.Println()
}
})
// Define a script that replaces the second number in a line with
// "buzz" if the first number is a multiple of 5.
buzz := NewScript()
buzz.AppendStmt(nil, func(s *Script) {
if s.F(1).Int()%5 == 0 {
s.Println(s.F(1), "buzz")
} else {
s.Println()
}
})
// Define a script that replaces the second number in a line with
// "fizzbuzz" if the first number is a multiple of 15.
fizzbuzz := NewScript()
fizzbuzz.AppendStmt(nil, func(s *Script) {
if s.F(1).Int()%15 == 0 {
s.Println(s.F(1), "fizzbuzz")
} else {
s.Println()
}
})
// Define a script that outputs only the second field.
strip := NewScript()
strip.AppendStmt(nil, func(s *Script) {
s.Println(s.F(2))
})
// Pipe inputs into the pipeline we're about to run and from the
// pipeline into a memory buffer.
pr, pw := io.Pipe()
strip.Output = bytes.NewBuffer(make([]byte, 0, 10000))
// Write numbers into the pipe in the background.
go func() {
for i := 1; i <= 100; i++ {
fmt.Fprintf(pw, "%3d\n", i)
}
pw.Close()
}()
// Execute a pipeline in the foreground.
err := RunPipeline(pr, rep, fizz, buzz, fizzbuzz, strip)
if err != nil {
t.Fatal(err)
}
// Ensure we received the expected output.
exp := bytes.NewBuffer(make([]byte, 0, 10000))
for i := 1; i <= 100; i++ {
switch {
case i%15 == 0:
fmt.Fprintln(exp, "fizzbuzz")
case i%5 == 0:
fmt.Fprintln(exp, "buzz")
case i%3 == 0:
fmt.Fprintln(exp, "fizz")
default:
fmt.Fprintf(exp, "%d\n", i)
}
}
got := strip.Output.(*bytes.Buffer).String()
if exp.String() != got {
t.Fatalf("Incorrect output %q", got)
}
}
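// As a minimal sketch of the same idea (assuming only the API exercised by the
// tests above), a one-stage pipeline that upper-cases each record could look
// like:
//
//	up := NewScript()
//	up.AppendStmt(nil, func(s *Script) { s.Println(strings.ToUpper(s.F(0).String())) })
//	err := RunPipeline(strings.NewReader("a\nb\n"), up)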
|
{
// Define a
|
startQiskit_QC456.py
|
# qubit number=3
# total number=84
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
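# Example (sketch): bitwise_xor XORs the strings position by position and then
# reverses the result, so bitwise_xor("110", "000") -> bits [1, 1, 0] -> "011".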
def bitwise_dot(s: str, t: str) -> str:
|
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
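# Example (sketch): for n = 2 and an f that maps only "11" to "1", the loop above
# visits all 2**n inputs, X-conjugates the control qubits that should read 0, and
# adds one multi-controlled Toffoli per satisfying input, so the target qubit
# flips exactly when the controls are |11>.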
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
    # invert the last qubit (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[1]) # number=70
prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
prog.h(input_qubit[1]) # number=33
prog.y(input_qubit[2]) # number=56
prog.cz(input_qubit[2],input_qubit[1]) # number=34
prog.h(input_qubit[1]) # number=35
prog.h(input_qubit[1]) # number=3
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
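# Example (sketch): evaluate("qasm_simulator", prog, 1024, "1") runs prog on the
# local Aer simulator and reports the most frequent measured bitstring (reversed
# to match the register order) as the recovered value "a".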
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit_QC456.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_belem")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
    info = execute(circuit1, backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
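# Example (sketch): bitwise_dot is the inner product mod 2 returned as a string,
# e.g. bitwise_dot("110", "011") -> (1*0 + 1*1 + 0*1) % 2 -> "1".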
|
infer_type.py
|
from typing import Callable, Dict, Any, Union
import numpy as np
from keanu.vartypes import (numpy_types, tensor_arg_types, runtime_numpy_types, runtime_pandas_types,
runtime_primitive_types, runtime_bool_types, runtime_int_types, runtime_float_types,
primitive_types)
from keanu.vertex.base import Vertex
def infer_type_and_execute(value: tensor_arg_types, actions: Dict[type, Callable]) -> Any:
return actions[get_type_of_value(value)](value)
def get_type_of_value(t: Union[tensor_arg_types, Vertex]) -> type:
if isinstance(t, Vertex):
return get_type_of_value(t.get_value())
if isinstance(t, runtime_numpy_types):
return __infer_type_from_ndarray(t)
elif isinstance(t, runtime_pandas_types):
return __infer_type_from_ndarray(t.values)
|
return __infer_type_from_scalar(t)
else:
raise NotImplementedError(
"Argument t must be either an ndarray or an instance of numbers.Number. Was given {} instead".format(
type(t)))
def __infer_type_from_ndarray(ndarray: numpy_types) -> type:
if np.issubdtype(ndarray.dtype, np.bool_):
return bool
elif np.issubdtype(ndarray.dtype, np.integer):
return int
elif np.issubdtype(ndarray.dtype, np.floating):
return float
else:
raise NotImplementedError("Generic types in an ndarray are not supported. Was given {}".format(ndarray.dtype))
def __infer_type_from_scalar(scalar: primitive_types) -> type:
if isinstance(scalar, runtime_bool_types):
return bool
elif isinstance(scalar, runtime_int_types):
return int
elif isinstance(scalar, runtime_float_types):
return float
else:
raise NotImplementedError("Generic types in an ndarray are not supported. Was given {}".format(type(scalar)))
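# Example (sketch, reusing the numpy import above):
# get_type_of_value(np.array([1, 2, 3]))   # -> int
# get_type_of_value(np.array([True]))      # -> bool
# get_type_of_value(1.5)                   # -> float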
|
elif isinstance(t, runtime_primitive_types):
|
bar_3d_chart.rs
|
// c:bar3DChart
use super::BarDirection;
use super::Grouping;
use super::VaryColors;
use super::AreaChartSeries;
use super::AreaChartSeriesList;
use super::DataLabels;
use super::GapWidth;
use super::Shape;
use super::AxisId;
use structs::Spreadsheet;
use writer::driver::*;
use quick_xml::Reader;
use quick_xml::events::{Event, BytesStart};
use quick_xml::Writer;
use std::io::Cursor;
#[derive(Clone, Default, Debug)]
pub struct Bar3DChart {
bar_direction: BarDirection,
grouping: Grouping,
vary_colors: VaryColors,
area_chart_series_list: AreaChartSeriesList,
data_labels: DataLabels,
gap_width: GapWidth,
shape: Shape,
axis_id: Vec<AxisId>,
}
impl Bar3DChart {
pub fn get_bar_direction(&self)-> &BarDirection {
&self.bar_direction
}
pub fn get_bar_direction_mut(&mut self)-> &mut BarDirection {
&mut self.bar_direction
}
pub fn set_bar_direction(&mut self, value:BarDirection)-> &mut Bar3DChart {
self.bar_direction = value;
self
}
pub fn get_grouping(&self)-> &Grouping {
&self.grouping
}
pub fn get_grouping_mut(&mut self)-> &mut Grouping {
&mut self.grouping
}
pub fn set_grouping(&mut self, value:Grouping)-> &mut Bar3DChart {
self.grouping = value;
self
}
pub fn get_vary_colors(&self)-> &VaryColors {
&self.vary_colors
}
pub fn get_vary_colors_mut(&mut self)-> &mut VaryColors {
&mut self.vary_colors
}
pub fn set_vary_colors(&mut self, value:VaryColors)-> &mut Bar3DChart {
self.vary_colors = value;
self
}
pub fn get_area_chart_series_list(&self)-> &AreaChartSeriesList {
&self.area_chart_series_list
}
pub fn get_area_chart_series_list_mut(&mut self)-> &mut AreaChartSeriesList {
&mut self.area_chart_series_list
}
pub fn set_area_chart_series_list(&mut self, value:AreaChartSeriesList)-> &mut Self {
self.area_chart_series_list = value;
self
}
pub fn get_data_labels(&self)-> &DataLabels {
&self.data_labels
}
pub fn get_data_labels_mut(&mut self)-> &mut DataLabels {
&mut self.data_labels
}
pub fn set_data_labels(&mut self, value:DataLabels)-> &mut Bar3DChart {
self.data_labels = value;
self
}
pub fn get_gap_width(&self)-> &GapWidth {
&self.gap_width
}
pub fn get_gap_width_mut(&mut self)-> &mut GapWidth {
&mut self.gap_width
}
pub fn set_gap_width(&mut self, value:GapWidth)-> &mut Bar3DChart {
self.gap_width = value;
self
}
pub fn get_shape(&self)-> &Shape {
&self.shape
}
pub fn get_shape_mut(&mut self)-> &mut Shape {
&mut self.shape
}
pub fn set_shape(&mut self, value:Shape)-> &mut Bar3DChart {
self.shape = value;
self
}
pub fn get_axis_id(&self)-> &Vec<AxisId> {
&self.axis_id
}
pub fn get_axis_id_mut(&mut self)-> &mut Vec<AxisId> {
&mut self.axis_id
}
pub fn set_axis_id(&mut self, value:Vec<AxisId>)-> &mut Bar3DChart {
self.axis_id = value;
self
}
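    // Example (sketch, assuming these component types implement Default): each
    // setter returns &mut Self, so configuration calls chain:
    //
    //     let mut chart = Bar3DChart::default();
    //     chart.set_grouping(Grouping::default())
    //          .set_vary_colors(VaryColors::default());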
pub fn add_axis_id(&mut self, value:AxisId)-> &mut Bar3DChart
|
pub(crate) fn set_attributes<R: std::io::BufRead>(
&mut self,
reader:&mut Reader<R>,
_e:&BytesStart
) {
let mut buf = Vec::new();
loop {
match reader.read_event(&mut buf) {
Ok(Event::Start(ref e)) => {
match e.name() {
b"c:ser" => {
let mut obj = AreaChartSeries::default();
obj.set_attributes(reader, e);
self.get_area_chart_series_list_mut().add_area_chart_series(obj);
},
b"c:dLbls" => {
self.data_labels.set_attributes(reader, e);
},
_ => (),
}
},
Ok(Event::Empty(ref e)) => {
match e.name() {
b"c:barDir" => {
self.bar_direction.set_attributes(reader, e);
},
b"c:grouping" => {
self.grouping.set_attributes(reader, e);
},
b"c:varyColors" => {
self.vary_colors.set_attributes(reader, e);
},
b"c:gapWidth" => {
self.gap_width.set_attributes(reader, e);
},
b"c:shape" => {
self.shape.set_attributes(reader, e);
},
b"c:axId" => {
let mut obj = AxisId::default();
obj.set_attributes(reader, e);
self.add_axis_id(obj);
},
_ => (),
}
},
Ok(Event::End(ref e)) => {
match e.name() {
b"c:bar3DChart" => return,
_ => (),
}
},
            Ok(Event::Eof) => panic!("Error: could not find {} end element", "c:bar3DChart"),
Err(e) => panic!("Error at position {}: {:?}", reader.buffer_position(), e),
_ => (),
}
buf.clear();
}
}
pub(crate) fn write_to(&self, writer: &mut Writer<Cursor<Vec<u8>>>, spreadsheet: &Spreadsheet) {
// c:bar3DChart
write_start_tag(writer, "c:bar3DChart", vec![], false);
        // c:barDir
        self.bar_direction.write_to(writer);
        // c:grouping
        self.grouping.write_to(writer);
        // c:varyColors
        self.vary_colors.write_to(writer);
        // c:ser
        for v in self.area_chart_series_list.get_area_chart_series() {
            v.write_to(writer, spreadsheet);
        }
        // c:dLbls
        self.data_labels.write_to(writer);
        // c:gapWidth
        self.gap_width.write_to(writer);
        // c:shape
        self.shape.write_to(writer);
// c:axId
for v in &self.axis_id {
v.write_to(writer);
}
write_end_tag(writer, "c:bar3DChart");
}
}
|
{
self.axis_id.push(value);
self
}
|
output.rs
|
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UntagResourceOutput {}
impl std::fmt::Debug for UntagResourceOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("UntagResourceOutput");
formatter.finish()
}
}
/// See [`UntagResourceOutput`](crate::output::UntagResourceOutput)
pub mod untag_resource_output {
/// A builder for [`UntagResourceOutput`](crate::output::UntagResourceOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`UntagResourceOutput`](crate::output::UntagResourceOutput)
pub fn build(self) -> crate::output::UntagResourceOutput {
crate::output::UntagResourceOutput {}
}
|
}
}
impl UntagResourceOutput {
/// Creates a new builder-style object to manufacture [`UntagResourceOutput`](crate::output::UntagResourceOutput)
pub fn builder() -> crate::output::untag_resource_output::Builder {
crate::output::untag_resource_output::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct TagResourceOutput {}
impl std::fmt::Debug for TagResourceOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("TagResourceOutput");
formatter.finish()
}
}
/// See [`TagResourceOutput`](crate::output::TagResourceOutput)
pub mod tag_resource_output {
/// A builder for [`TagResourceOutput`](crate::output::TagResourceOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`TagResourceOutput`](crate::output::TagResourceOutput)
pub fn build(self) -> crate::output::TagResourceOutput {
crate::output::TagResourceOutput {}
}
}
}
impl TagResourceOutput {
/// Creates a new builder-style object to manufacture [`TagResourceOutput`](crate::output::TagResourceOutput)
pub fn builder() -> crate::output::tag_resource_output::Builder {
crate::output::tag_resource_output::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListTagsForResourceOutput {
/// <p> The tags of the Elastic Inference Accelerator. </p>
pub tags:
std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
}
impl ListTagsForResourceOutput {
/// <p> The tags of the Elastic Inference Accelerator. </p>
pub fn tags(
&self,
) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
{
self.tags.as_ref()
}
}
impl std::fmt::Debug for ListTagsForResourceOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ListTagsForResourceOutput");
formatter.field("tags", &self.tags);
formatter.finish()
}
}
/// See [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput)
pub mod list_tags_for_resource_output {
/// A builder for [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) tags: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
}
impl Builder {
/// Adds a key-value pair to `tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p> The tags of the Elastic Inference Accelerator. </p>
pub fn tags(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
let mut hash_map = self.tags.unwrap_or_default();
hash_map.insert(k.into(), v.into());
self.tags = Some(hash_map);
self
}
/// <p> The tags of the Elastic Inference Accelerator. </p>
pub fn set_tags(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.tags = input;
self
}
/// Consumes the builder and constructs a [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput)
pub fn build(self) -> crate::output::ListTagsForResourceOutput {
crate::output::ListTagsForResourceOutput { tags: self.tags }
}
}
}
impl ListTagsForResourceOutput {
/// Creates a new builder-style object to manufacture [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput)
pub fn builder() -> crate::output::list_tags_for_resource_output::Builder {
crate::output::list_tags_for_resource_output::Builder::default()
}
}
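// Example (sketch): the generated builders accumulate collection entries one at
// a time, e.g.:
//
//     let out = crate::output::list_tags_for_resource_output::Builder::default()
//         .tags("team", "ml")
//         .build();
//     assert!(out.tags().is_some());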
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeAcceleratorTypesOutput {
/// <p> The available accelerator types. </p>
pub accelerator_types: std::option::Option<std::vec::Vec<crate::model::AcceleratorType>>,
}
impl DescribeAcceleratorTypesOutput {
/// <p> The available accelerator types. </p>
pub fn accelerator_types(&self) -> std::option::Option<&[crate::model::AcceleratorType]> {
self.accelerator_types.as_deref()
}
}
impl std::fmt::Debug for DescribeAcceleratorTypesOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeAcceleratorTypesOutput");
formatter.field("accelerator_types", &self.accelerator_types);
formatter.finish()
}
}
/// See [`DescribeAcceleratorTypesOutput`](crate::output::DescribeAcceleratorTypesOutput)
pub mod describe_accelerator_types_output {
/// A builder for [`DescribeAcceleratorTypesOutput`](crate::output::DescribeAcceleratorTypesOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) accelerator_types:
std::option::Option<std::vec::Vec<crate::model::AcceleratorType>>,
}
impl Builder {
/// Appends an item to `accelerator_types`.
///
/// To override the contents of this collection use [`set_accelerator_types`](Self::set_accelerator_types).
///
/// <p> The available accelerator types. </p>
pub fn accelerator_types(mut self, input: crate::model::AcceleratorType) -> Self {
let mut v = self.accelerator_types.unwrap_or_default();
v.push(input);
self.accelerator_types = Some(v);
self
}
/// <p> The available accelerator types. </p>
pub fn set_accelerator_types(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::AcceleratorType>>,
) -> Self {
self.accelerator_types = input;
self
}
/// Consumes the builder and constructs a [`DescribeAcceleratorTypesOutput`](crate::output::DescribeAcceleratorTypesOutput)
pub fn build(self) -> crate::output::DescribeAcceleratorTypesOutput {
crate::output::DescribeAcceleratorTypesOutput {
accelerator_types: self.accelerator_types,
}
}
}
}
impl DescribeAcceleratorTypesOutput {
/// Creates a new builder-style object to manufacture [`DescribeAcceleratorTypesOutput`](crate::output::DescribeAcceleratorTypesOutput)
pub fn builder() -> crate::output::describe_accelerator_types_output::Builder {
crate::output::describe_accelerator_types_output::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeAcceleratorsOutput {
/// <p> The details of the Elastic Inference Accelerators. </p>
pub accelerator_set:
std::option::Option<std::vec::Vec<crate::model::ElasticInferenceAccelerator>>,
/// <p> A token to specify where to start paginating. This is the NextToken from a previously truncated response. </p>
pub next_token: std::option::Option<std::string::String>,
}
impl DescribeAcceleratorsOutput {
/// <p> The details of the Elastic Inference Accelerators. </p>
pub fn accelerator_set(
&self,
) -> std::option::Option<&[crate::model::ElasticInferenceAccelerator]> {
self.accelerator_set.as_deref()
}
/// <p> A token to specify where to start paginating. This is the NextToken from a previously truncated response. </p>
pub fn next_token(&self) -> std::option::Option<&str> {
self.next_token.as_deref()
}
}
impl std::fmt::Debug for DescribeAcceleratorsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeAcceleratorsOutput");
formatter.field("accelerator_set", &self.accelerator_set);
formatter.field("next_token", &self.next_token);
formatter.finish()
}
}
/// See [`DescribeAcceleratorsOutput`](crate::output::DescribeAcceleratorsOutput)
pub mod describe_accelerators_output {
/// A builder for [`DescribeAcceleratorsOutput`](crate::output::DescribeAcceleratorsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) accelerator_set:
std::option::Option<std::vec::Vec<crate::model::ElasticInferenceAccelerator>>,
pub(crate) next_token: std::option::Option<std::string::String>,
}
impl Builder {
/// Appends an item to `accelerator_set`.
///
/// To override the contents of this collection use [`set_accelerator_set`](Self::set_accelerator_set).
///
/// <p> The details of the Elastic Inference Accelerators. </p>
pub fn accelerator_set(mut self, input: crate::model::ElasticInferenceAccelerator) -> Self {
let mut v = self.accelerator_set.unwrap_or_default();
v.push(input);
self.accelerator_set = Some(v);
self
}
/// <p> The details of the Elastic Inference Accelerators. </p>
pub fn set_accelerator_set(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ElasticInferenceAccelerator>>,
) -> Self {
self.accelerator_set = input;
self
}
/// <p> A token to specify where to start paginating. This is the NextToken from a previously truncated response. </p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
/// <p> A token to specify where to start paginating. This is the NextToken from a previously truncated response. </p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// Consumes the builder and constructs a [`DescribeAcceleratorsOutput`](crate::output::DescribeAcceleratorsOutput)
pub fn build(self) -> crate::output::DescribeAcceleratorsOutput {
crate::output::DescribeAcceleratorsOutput {
accelerator_set: self.accelerator_set,
next_token: self.next_token,
}
}
}
}
impl DescribeAcceleratorsOutput {
/// Creates a new builder-style object to manufacture [`DescribeAcceleratorsOutput`](crate::output::DescribeAcceleratorsOutput)
pub fn builder() -> crate::output::describe_accelerators_output::Builder {
crate::output::describe_accelerators_output::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeAcceleratorOfferingsOutput {
/// <p> The list of accelerator type offerings for a specific location. </p>
pub accelerator_type_offerings:
std::option::Option<std::vec::Vec<crate::model::AcceleratorTypeOffering>>,
}
impl DescribeAcceleratorOfferingsOutput {
/// <p> The list of accelerator type offerings for a specific location. </p>
pub fn accelerator_type_offerings(
&self,
) -> std::option::Option<&[crate::model::AcceleratorTypeOffering]> {
self.accelerator_type_offerings.as_deref()
}
}
impl std::fmt::Debug for DescribeAcceleratorOfferingsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeAcceleratorOfferingsOutput");
formatter.field(
"accelerator_type_offerings",
&self.accelerator_type_offerings,
);
formatter.finish()
}
}
/// See [`DescribeAcceleratorOfferingsOutput`](crate::output::DescribeAcceleratorOfferingsOutput)
pub mod describe_accelerator_offerings_output {
/// A builder for [`DescribeAcceleratorOfferingsOutput`](crate::output::DescribeAcceleratorOfferingsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) accelerator_type_offerings:
std::option::Option<std::vec::Vec<crate::model::AcceleratorTypeOffering>>,
}
impl Builder {
/// Appends an item to `accelerator_type_offerings`.
///
/// To override the contents of this collection use [`set_accelerator_type_offerings`](Self::set_accelerator_type_offerings).
///
/// <p> The list of accelerator type offerings for a specific location. </p>
pub fn accelerator_type_offerings(
mut self,
input: crate::model::AcceleratorTypeOffering,
) -> Self {
let mut v = self.accelerator_type_offerings.unwrap_or_default();
v.push(input);
self.accelerator_type_offerings = Some(v);
self
}
/// <p> The list of accelerator type offerings for a specific location. </p>
pub fn set_accelerator_type_offerings(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::AcceleratorTypeOffering>>,
) -> Self {
self.accelerator_type_offerings = input;
self
}
/// Consumes the builder and constructs a [`DescribeAcceleratorOfferingsOutput`](crate::output::DescribeAcceleratorOfferingsOutput)
pub fn build(self) -> crate::output::DescribeAcceleratorOfferingsOutput {
crate::output::DescribeAcceleratorOfferingsOutput {
accelerator_type_offerings: self.accelerator_type_offerings,
}
}
}
}
impl DescribeAcceleratorOfferingsOutput {
/// Creates a new builder-style object to manufacture [`DescribeAcceleratorOfferingsOutput`](crate::output::DescribeAcceleratorOfferingsOutput)
pub fn builder() -> crate::output::describe_accelerator_offerings_output::Builder {
crate::output::describe_accelerator_offerings_output::Builder::default()
}
}
| |
plotter-offline.py
|
"""
this is to plot graphs based on the db output:
db should be like this:
    task | run | arch | benchmark | settings(fc, wl, sb, ...) | measurements(delay, min_cw, area ...)
take as input:
the user filtered table
prompt user input:
the overlay axis, and the choice of geometric mean
input data from the database:
in the form of [(a,b,c), (a,b,c), ...]: a list of tuples
"""
import numpy as np
import matplotlib.pyplot as plt
import collections
import operator
import interface_db as idb
import os
import re
import sqlite3
"""
to convert the input table (list of tuples) to the high dimensional array
x: 1st attribute name: string
y: attribute name starting at 2nd place: list of string / string(if y is only 1)
rest: filters name: list of string
return the object of data_collection
"""
err_msg = {
"choose axis": "**** wrong name, input again ****",
"choose method": "**** wrong method, input again ****",
"overlay axis": "**** wrong axis selection, input again ****",
"yes or no": "**** wrong input, please select 'y' or 'n' ****",
"choose plot to show": "**** plot_generator: wrong input mode or don't have gmean yet ****",
"choose overlay type": "**** wrong input for overlay type ****",
}
def data_converter(data_list, x_name, y_name_list, filter_name_list):
# just for convenience, support pass in y_name_list as single string or list
if type(y_name_list) == type("str"):
y_name_list = [y_name_list]
if type(filter_name_list) == type("str"):
filter_name_list = [filter_name_list]
xy_name_map = (
[item for sub in [[x_name], y_name_list] for item in sub],
list(set([tup[0] for tup in data_list])),
)
axis_name_supmap = {k: [] for k in filter_name_list}
for i in range(len(filter_name_list)):
axis_name_supmap[filter_name_list[i]] = list(
set([tup[i + 1 + len(y_name_list)] for tup in data_list])
)
# initialize the big y array
axis_len = [len(l) for (k, l) in axis_name_supmap.items()]
axis_len.append(len(xy_name_map[1]))
y_raw_list = []
for i in range(len(y_name_list)):
sub = np.ndarray(shape=axis_len, dtype=float)
sub.fill(-1)
y_raw_list.append(sub)
# fill in the data
for i in range(len(data_list)):
array_index = {k: -1 for k in filter_name_list}
# setup axis index for all filter axis
for filt in filter_name_list:
array_index[filt] = axis_name_supmap[filt].index(
data_list[i][filter_name_list.index(filt) + 1 + len(y_name_list)]
)
# append x index
index_list = array_index.values()
index_list.append(xy_name_map[1].index(data_list[i][0]))
# fill in y data
for y_i in range(len(y_name_list)):
y_raw_list[y_i][tuple(index_list)] = data_list[i][y_i + 1]
return Data_Collection(axis_name_supmap, xy_name_map, y_raw_list)
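# Example (hypothetical names): each input tuple is laid out as
# (x, y_1 ... y_k, filter_1 ... filter_m), so with x="fc", y=["delay"] and
# filters=["arch", "circuit"], one row could be (0.5, 3.2e-9, "k6_n10", "alu4");
# the returned Data_Collection keeps the distinct x values on the last (lowest) axis.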
"""
holds all data and meta info needed for plotting and user interaction
"""
class Data_Collection:
y_raw_list = []
y_gmean_list = []
# a list of lists of axis name
axis_name_supmap = None
# separate the x axis from the axis_name_supmap
xy_name_map = None
# x axis is always the lowest dimension
    # this cost is added to the axis_pos to further order the axes by hierarchy:
    # the axes chosen by the second-level filtering get value 1, all other axes
    # get value 0. After adding the cost (which should be less than 1), we can
    # assign hierarchy separately to the filtered and the unfiltered axes.
axis_raw_cost = None
axis_gmean_cost = None
# to store the transposed axis order --> used by the restore function
axis_cur_raw_order = None
axis_cur_gmean_order = None
# fill in the data into the high dimensional x & y, from the input table
def
|
(self, axis_name_supmap, xy_name_map, y_raw_list):
self.axis_name_supmap = axis_name_supmap
self.xy_name_map = xy_name_map
self.axis_raw_cost = {k: 0 for k in self.axis_name_supmap.keys()}
# self.axis_gmean_cost is set after gmean axis is chosen
self.axis_cur_raw_order = self.axis_name_supmap.keys()
self.y_raw_list = y_raw_list
"""
ask user to choose overlay axes parameters
overlay_axis is a list of string
y_type is to choose between y_raw_list & y_gmean_list
y_type = "gmean" / "raw"
"""
def transpose_overlay_axes(self, overlay_axis, y_type="raw"):
if y_type == "gmean" and self.axis_cur_gmean_order == []:
print "**** CANNOT FILTER ON GMEAN YET. AXIS NOT SET ****"
axis_cost_temp = (y_type == "gmean" and [self.axis_gmean_cost] or [self.axis_raw_cost])[0]
axis_cost_temp = {k: (v + (k in overlay_axis)) for (k, v) in axis_cost_temp.items()}
# NOTE: now axis_raw_cost is converted from dict to list of tuples, so that it is ordered by the cost value.
# now set up y_raw_list
axis_cost_temp = sorted(axis_cost_temp.items(), key=operator.itemgetter(1))
# save the transposed axes order, to facilitate the reverse transpose
trans_order = [
(y_type == "gmean" and self.axis_cur_gmean_order or self.axis_cur_raw_order).index(v[0])
for v in axis_cost_temp
]
trans_order.append(len(axis_cost_temp))
# transpose the axes, based on the order of axis_raw_cost
for i in range(len(self.y_raw_list)):
if y_type == "gmean":
self.y_gmean_list[i] = self.y_gmean_list[i].transpose(trans_order)
elif y_type == "raw":
self.y_raw_list[i] = self.y_raw_list[i].transpose(trans_order)
if y_type == "gmean":
self.axis_cur_gmean_order = [ax[0] for ax in axis_cost_temp]
else:
self.axis_cur_raw_order = [ax[0] for ax in axis_cost_temp]
"""
merge the overlay axis, calculate the gmean.
"""
def merge_on_gmean(self):
self.y_gmean_list = self.y_raw_list
# switch benchmark axis to the last one, easier for gmean
# we cannot simply do mult on axis = n-1, cuz there are invalid points
self.y_gmean_list = [
y.swapaxes(len(y.shape) - 2, len(y.shape) - 1) for y in self.y_gmean_list
]
for i in range(len(self.y_gmean_list)):
# store the shape of the array after compressing the benchmark dimension
product = []
temp = self.y_gmean_list[i].reshape(-1, self.y_gmean_list[i].shape[-1])
for k in range(temp.shape[0]):
cur_root = len(temp[k]) - list(temp[k]).count(-1)
cur_prod = reduce(
lambda x, y: ((x != -1 and x - 1) + 1) * ((y != -1 and y - 1) + 1), temp[k]
)
# if all benchmark values are -1, then append -1, otherwise, append the gmean
product.append((cur_root != 0 and [cur_prod ** (1.0 / cur_root)] or [-1])[0])
self.y_gmean_list[i] = np.asarray(product).reshape(self.y_gmean_list[i][..., 0].shape)
# update the axis_cur_gmean_order & axis_gmean_cost
self.axis_cur_gmean_order = [
ax
for ax in self.axis_cur_raw_order
if ax not in [self.axis_cur_raw_order[len(self.axis_cur_raw_order) - 1]]
]
self.axis_gmean_cost = {k: self.axis_raw_cost[k] for k in self.axis_cur_gmean_order}
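    # Example (sketch): merging benchmark values [2.0, 8.0] stores their geometric
    # mean (2.0 * 8.0) ** 0.5 = 4.0; entries equal to -1 are treated as missing
    # and are excluded from both the product and the root count.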
"""
actual plotter functions: show window, do subplots
"""
class UI:
def subplot_traverser(
self, y_sub_list, namemap, overlay_axis_left, xy_namemap, y_i, legend, plot_type="plot"
):
if overlay_axis_left == []:
x = xy_namemap[1]
y = y_sub_list
# sort x
dic = {x[i]: y[i] for i in range(len(x))}
dic = collections.OrderedDict(sorted(dic.items()))
x = dic.keys()
y = dic.values()
y = [(k == -1 and [None] or [k])[0] for k in y]
y = np.array(y).astype(np.double)
y_mask = np.isfinite(y)
if x != []:
if plot_type == "plot":
if type(x[0]) == type("str"):
plt.plot(np.array(range(len(x)))[y_mask], y[y_mask], "o--", label=legend)
plt.xticks(range(len(x)), x)
else:
plt.plot(np.array(x)[y_mask], y[y_mask], "o--", label=legend)
plt.xlabel(xy_namemap[0][0], fontsize=12)
plt.ylabel(xy_namemap[0][1 + y_i], fontsize=12)
if legend != "":
plt.legend(loc="lower right")
else:
cur_filter_axis = overlay_axis_left[0]
overlay_axis_left = [k for k in overlay_axis_left if k not in [cur_filter_axis]]
for i in range(y_sub_list.shape[0]):
legend_restore = legend
legend += str(cur_filter_axis) + ": "
legend += str(namemap[cur_filter_axis][i]) + " "
argu = overlay_axis_left
self.subplot_traverser(
y_sub_list[i], namemap, argu, xy_namemap, y_i, legend, plot_type
)
legend = legend_restore
"""
plot_type should be universal among all figures created
axis_left = [[plot_axis_left], [overlay_axis_left]]
y_sublist should be just a ndarray, not a list of ndarray,
i.e.: y_sublist = y_raw_list[i]
figure_name
"""
# TODO: if some x,y series are all -1, then we should not create the figure
def figure_traverser(
self, y_sub_list, namemap, axis_left, xy_namemap, y_i, figure_name, plot_type="plot"
):
if axis_left[0] == []:
plt.figure(figure_name)
self.subplot_traverser(
y_sub_list, namemap, axis_left[1], xy_namemap, y_i, "", plot_type
)
else:
cur_axis = axis_left[0][0]
axis_left[0] = [k for k in axis_left[0] if k not in [cur_axis]]
for i in range(y_sub_list.shape[0]):
figure_name_restore = figure_name
figure_name += str(namemap[cur_axis][i])
# have to copy the list of list, or it will be like pass by reference
temp1 = [tt for tt in axis_left[0]]
temp2 = [tt for tt in axis_left[1]]
argu = [temp1, temp2]
self.figure_traverser(
y_sub_list[i], namemap, argu, xy_namemap, y_i, figure_name, plot_type
)
figure_name = figure_name_restore
"""
plot_type can be "plot" or "bar"
y_list: store the values (y_raw_list / y_gmean_list)
namemap: store the values for different settings (axis_name_supmap)
axis_order: to be used with name_map (axis_cur_gmean_order / axis_cur_raw_order)
filter_axis: (self.filter_axis_gmean / self.filter_axis_raw)
mode: is to select whether to plot gmean or raw
"""
def plot_generator(self, data_collection, axis_order, overlay_axis, mode, plot_type="plot"):
namemap = data_collection.axis_name_supmap
xy_namemap = data_collection.xy_name_map
if mode == "raw":
y_list = data_collection.y_raw_list
overlay_axis = [k for k in data_collection.axis_cur_raw_order if k in overlay_axis]
axis_order = [k for k in data_collection.axis_cur_raw_order if k in axis_order]
for i in range(len(y_list)):
self.figure_traverser(
y_list[i],
namemap,
[axis_order, overlay_axis],
xy_namemap,
i,
data_collection.xy_name_map[0][1 + i],
plot_type,
)
elif mode == "gmean" and data_collection.y_gmean_list != []:
y_list = data_collection.y_gmean_list
overlay_axis = [k for k in data_collection.axis_cur_gmean_order if k in overlay_axis]
axis_order = [k for k in data_collection.axis_cur_gmean_order if k in axis_order]
for i in range(len(y_list)):
self.figure_traverser(
y_list[i],
namemap,
[axis_order, overlay_axis],
xy_namemap,
i,
"gmean" + data_collection.xy_name_map[0][1 + i],
plot_type,
)
else:
print err_msg["choose plot to show"]
plt.show()
"""
connect the database to the plotter, setup the data_collection object for plotting
"""
filter_method = {"TEXT": "IN", "INTEGER": "BETWEEN", "REAL": "BETWEEN"}
def db_connector():
tasks = idb.list_tasks()
print "available tasks: "
for i in range(len(tasks)):
print "[" + str(i) + "]: ", tasks[i]
task_num = int(raw_input("which task to choose (input the index): "))
available_choice = idb.describe_tasks([tasks[task_num]])
    available_name = [k.split()[0] for k in available_choice]
    available_type = [k.split()[1] for k in available_choice]
print "==========================================================="
print "available choices:"
print "\n".join(i for i in available_choice)
print "==========================================================="
while 1:
x = raw_input("choose a x axis name: ")
if x in available_name:
break
print err_msg["choose axis"]
while 1:
y = raw_input("choose a y axis name: ")
if y in available_name:
break
print err_msg["choose axis"]
filt_list = []
filt_name_list = []
cur_choice = None
print "==========================================================="
while 1:
while 1:
cur_choice = raw_input("choose filter name (enter empty string to exit): ")
if (cur_choice in available_name) or (cur_choice == ""):
break
print err_msg["choose axis"]
if cur_choice == "":
break
filt_name_list.append(cur_choice)
cur_type = available_type[available_name.index(cur_choice)]
fname = cur_choice
fmethod = filter_method[cur_type]
param_range = idb.describe_param(cur_choice + " " + cur_type, "range", tasks[task_num])
print "available range: ", param_range
frange = None
if len(param_range) == 1:
print "set range to: ", param_range
frange = param_range
else:
cur_range = raw_input(
'choose range: in the form of "attr1 attr2 ... (enter empty string to select all values)" '
)
            choice_fields = cur_range.split()
if fmethod == "BETWEEN" and choice_fields != []:
frange = (float(choice_fields[0]), float(choice_fields[1]))
elif fmethod == "IN" and choice_fields != []:
frange = choice_fields
elif choice_fields == []:
print "set range to: ", param_range
frange = param_range
else:
print err_msg["choose method"]
# filt_list.append(idb.Task_filter(fname, fmethod, frange))
        filt_list.append(frange)
filt_list = [item for sublist in filt_list for item in sublist]
print "------"
print filt_list
data = idb.retrieve_data(x, y, filt_list, [tasks[task_num]])[1]
return {"data": data, "filt_name_list": filt_name_list, "x": x, "y": y}
"""
control board
"""
def main():
ret = db_connector()
data = ret["data"]
filt_name_list = ret["filt_name_list"]
data_collection = data_converter(data, ret["x"], ret["y"], filt_name_list)
print "########################################"
print "---- Description: ----\n" + ">>\n" + "VPR benchmark experiment should have 2 types of data: \n" + "parameter: settings in for the experiment (e.g.: fc, wire length, switch block ...)\n" + "metrics: measurements from the VPR output (e.g.: min chan width, critical path delay ...)\n" + ">>\n" + "Data passed into this plotter should have already been classified into 3 axes: \n" + "one [x] axis (chosen from parameter)\n" + "multiple [y] axis (chosen from metrics)\n" + "multiple [filter] axis (all the unchosen parameters)\n" + ">>\n" + "For example, if the experiment has: \n" + "[arch, circuit, wire length, switch block, fc, min chan width, critical path delay, area, total wire length]\n" + "and you choose fc as x axis, [min chan width, critical path delay, area, total wire length] as y axes,\n" + "then filter axes are the unchosen parameters, i.e.: arch, circuit, wire length, switch block. "
print "#########################################"
print "---- Usage ----\n" + ">>\n" + "1. choose overlay axes among the filter axes (overlay axes will become legend in a single plot)\n" + '2. choose whether to whether to calculate the geo mean over the overlay axis ("merge" function)\n' + " (Notice: you can choose as many overlay axes as you like, but when you choose merge, it will only\n" + " calculate the geo mean over the last overlay axis. So for example, if your overlay axes are [fc, circuit],\n" + " the merge will only get geo mean over all the circuits rather that all the (circuit,fc) combination, and \n" + " fc will still be overlaid in the merged plot.)\n" + '3. the data after geo mean calcultion will be referred to as "gmean", and the data before the geo mean will be \n' + ' referred to as "raw", you can switch the overlay axes for both gmean data and raw data, for as many times \n' + ' as you like. But once you "merge" on a new axis, the old gmean data will be replaced by the new one, and further\n' + " operation will be acted on only the new gmean data."
while 1:
print ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
print "available axis to overlay: "
print "for the raw data", data_collection.axis_cur_raw_order
print "for the gmean data", data_collection.axis_cur_gmean_order
overlay_type = None
overlay_axis = []
if data_collection.y_gmean_list != []:
while 1:
overlay_type = raw_input("which array to overlay (gmean / raw): ")
if overlay_type == "gmean":
break
elif overlay_type == "raw":
break
else:
print err_msg["choose overlay type"]
else:
overlay_type = "raw"
while 1:
overlay_axis = raw_input("which axes to overlay: (input separated by space): ")
if overlay_axis != "":
                overlay_axis = overlay_axis.split()
if (
reduce(
lambda x, y: (x in data_collection.axis_cur_raw_order)
* (y in data_collection.axis_cur_raw_order),
overlay_axis,
)
and overlay_type == "raw"
):
break
elif (
reduce(
lambda x, y: (x in data_collection.axis_cur_gmean_order)
* (y in data_collection.axis_cur_gmean_order),
overlay_axis,
)
and overlay_type == "gmean"
):
break
else:
print err_msg["overlay axis"]
data_collection.transpose_overlay_axes(overlay_axis, overlay_type)
overlay_merge = 0
if overlay_type == "raw":
while 1:
choice = raw_input("merge the lowest axis (y/n)? ")
if choice == "y":
overlay_merge = 1
data_collection.merge_on_gmean()
break
elif choice == "n":
overlay_merge = 0
break
else:
print err_msg["yes or no"]
ui = UI()
if overlay_type == "raw":
axis_left = [k for k in data_collection.axis_cur_raw_order if k not in overlay_axis]
else:
axis_left = [k for k in data_collection.axis_cur_gmean_order if k not in overlay_axis]
while 1:
show_plot_type = raw_input("show plot (gmean / raw) ")
if show_plot_type == "gmean":
if overlay_merge == 1:
overlay_axis = [k for k in overlay_axis if k not in [overlay_axis[-1]]]
ui.plot_generator(data_collection, axis_left, overlay_axis, "gmean")
break
elif show_plot_type == "raw":
ui.plot_generator(data_collection, axis_left, overlay_axis, "raw")
break
elif show_plot_type == "":
break
else:
print err_msg["choose plot to show"]
if __name__ == "__main__":
main()
|
__init__
|
test_accum2.py
|
import unittest
import pandas as pd
import pytest
import riptable as rt
# N.B. TL;DR We have to import the actual implementation module to override the module-global
# variables "tm.N" and "tm.K".
# In pandas 1.0 the code moved from pandas/util/testing.py to pandas/_testing.py.
# "import pandas.util.testing" still works, but because it no longer contains the actual code,
# overriding "tm.N" and "tm.K" through it would not change the values used by
# makeTimeDataFrame, which would then produce data with a different shape and make the
# "test_accum_table" test fail. Maybe we want to reconsider using the pandas internal testing utils.
try:
import pandas._testing as tm
except ImportError:
import pandas.util.testing as tm
from riptable import *
from numpy.testing import (
assert_array_equal,
assert_almost_equal,
assert_array_almost_equal,
)
from riptable.rt_numpy import arange
# To create AccumTable test data
from riptable.Utils.pandas_utils import dataset_from_pandas_df
from riptable.rt_datetime import DateTimeNano
tm.N = 3
tm.K = 5
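# Sketch (an assumption about the pandas testing internals): with tm.N = 3 and
# tm.K = 5, tm.makeTimeDataFrame() yields a 3-row by 5-column frame, keeping the
# AccumTable fixtures below small and deterministic in shape.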
class Accum2_Test(unittest.TestCase):
'''
TODO: add more tests for different types
'''
def test_accum2(self):
c = cut(arange(10), 3)
self.assertTrue(sum(c._np - FA([1, 1, 1, 1, 2, 2, 2, 3, 3, 3])) == 0)
c = cut(arange(10.0), 3)
self.assertTrue(sum(c._np - FA([1, 1, 1, 1, 2, 2, 2, 3, 3, 3])) == 0)
c = cut(arange(11), 3)
self.assertTrue(sum(c._np - FA([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3])) == 0)
c = cut(FA([2, 4, 6, 8, 10]), FA([0, 2, 4, 6, 8, 10]))
self.assertTrue(sum(c._np - FA([1, 2, 3, 4, 5])) == 0)
c = cut(
FA([2, 4, 6, 8, 10]),
FA([0, 2, 4, 6, 8, 10]),
labels=['a', 'b', 'c', 'd', 'e'],
)
self.assertTrue(sum(c._np - FA([1, 2, 3, 4, 5])) == 0)
def test_qcut(self):
c = qcut(arange(10), 3)
self.assertTrue(sum(c._np - FA([2, 2, 2, 2, 3, 3, 3, 4, 4, 4])) == 0)
c = qcut(arange(11), 3)
self.assertTrue(sum(c._np - FA([2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4])) == 0)
c = qcut(range(5), 3, labels=["good", "medium", "bad"])
self.assertTrue(sum(c._np - FA([2, 2, 3, 4, 4])) == 0)
c = cut(
FA([2, 4, 6, 8, 10]),
FA([0, 2, 4, 6, 8, 10]),
labels=['a', 'b', 'c', 'd', 'e'],
)
def test_cut_errors(self):
with self.assertRaises(ValueError):
c = cut(
FA([2, 4, 6, 8, 10]),
FA([0, 2, 4, 6, 8, 10]),
labels=['a', 'b', 'c', 'd', 'e', 'f'],
)
def test_simple_cats(self):
data = arange(1, 6) * 10
colnames = FastArray(['a', 'b', 'c', 'd', 'e'])
c1 = Categorical(colnames)
c2 = Categorical(arange(5))
# no filter
ac = Accum2(c2, c1)
result = ac.sum(data)
self.assertEqual(result._ncols, 7)
for i, colname in enumerate(colnames):
arr = result[colname]
self.assertEqual(arr[i], data[i])
def
|
(self):
data = arange(1, 6) * 10
colnames = FastArray(['a', 'b', 'c', 'd', 'e'])
c1 = Categorical(colnames)
c2 = Categorical(arange(5))
# filtered accum object
ac = Accum2(c2, c1, showfilter=True)
result = ac.sum(data)
self.assertEqual(result._ncols, 8)
for i, colname in enumerate(colnames):
arr = result[colname]
self.assertEqual(arr[i + 1], data[i])
def test_simple_cats_filter_operation(self):
data = arange(1, 6) * 10
colnames = FastArray(['a', 'b', 'c', 'd', 'e'])
c1 = Categorical(colnames)
c2 = Categorical(arange(5))
# filtered operation
ac = Accum2(c2, c1)
result = ac.sum(data, showfilter=True)
self.assertEqual(result._ncols, 8)
for i, colname in enumerate(colnames):
arr = result[colname]
self.assertEqual(arr[i + 1], data[i])
def test_multikey_cats(self):
unsorted_str = FastArray(['c', 'e', 'b', 'd', 'a'])
ints = arange(1, 6) * 10
data = np.random.rand(5) * 10
# unsorted no filter
c1 = Categorical([unsorted_str, ints])
c2 = Categorical([unsorted_str, ints])
ac = Accum2(c2, c1)
result = ac.sum(data)
self.assertEqual(result._ncols, 8)
for i, key1 in enumerate(unsorted_str):
k1 = bytes.decode(key1)
k2 = ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
self.assertEqual(arr[i], data[i])
# sorted no filter
sortidx = np.argsort(unsorted_str)
sorted_str = unsorted_str[sortidx]
sorted_ints = ints[sortidx]
sorted_data = data[sortidx]
c1 = Categorical([unsorted_str, ints], ordered=True)
c2 = Categorical([unsorted_str, ints], ordered=True)
ac = Accum2(c2, c1)
result = ac.sum(data)
self.assertEqual(result._ncols, 8)
for i, key1 in enumerate(sorted_str):
k1 = bytes.decode(key1)
k2 = sorted_ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
self.assertEqual(arr[i], sorted_data[i])
@pytest.mark.xfail(reason='20200416 This test was previously overridden by a later test in the file with the same name. Need to revisit and get back in a working state.')
def test_multikey_cats_filter_accum_sorted(self):
unsorted_str = FastArray(['c', 'e', 'b', 'd', 'a'])
ints = arange(1, 6) * 10
data = np.random.rand(5) * 10
# unsorted filter accum object
c1 = Categorical([unsorted_str, ints])
c2 = Categorical([unsorted_str, ints])
ac = Accum2(c2, c1, showfilter=True)
result = ac.sum(data)
self.assertEqual(result._ncols, 9)
for i, key1 in enumerate(unsorted_str):
k1 = bytes.decode(key1)
k2 = ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
self.assertEqual(arr[i + 1], data[i])
# sorted filter accum object
sortidx = np.argsort(unsorted_str)
sorted_str = unsorted_str[sortidx]
sorted_ints = ints[sortidx]
sorted_data = data[sortidx]
c1 = Categorical([unsorted_str, ints], sort_gb=True)
c2 = Categorical([unsorted_str, ints], sort_gb=True)
ac = Accum2(c2, c1, showfilter=True)
result = ac.sum(data)
self.assertEqual(result._ncols, 9)
for i, key1 in enumerate(sorted_str):
k1 = bytes.decode(key1)
k2 = sorted_ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
# TODO fix this regression that was masked due to duplicate test names
# self.assertAlmostEqual(arr[i + 1], sorted_data[i])
def test_multikey_cats_filter_accum_ordered(self):
unsorted_str = FastArray(['c', 'e', 'b', 'd', 'a'])
ints = arange(1, 6) * 10
data = np.random.rand(5) * 10
# unsorted filter accum object
c1 = Categorical([unsorted_str, ints])
c2 = Categorical([unsorted_str, ints])
ac = Accum2(c2, c1)
result = ac.sum(data, showfilter=True)
self.assertEqual(result._ncols, 9)
for i, key1 in enumerate(unsorted_str):
k1 = bytes.decode(key1)
k2 = ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
self.assertEqual(arr[i + 1], data[i])
# sorted filter accum object
sortidx = np.argsort(unsorted_str)
sorted_str = unsorted_str[sortidx]
sorted_ints = ints[sortidx]
sorted_data = data[sortidx]
c1 = Categorical([unsorted_str, ints], ordered=True)
c2 = Categorical([unsorted_str, ints], ordered=True)
ac = Accum2(c2, c1)
result = ac.sum(data, showfilter=True)
self.assertEqual(result._ncols, 9)
for i, key1 in enumerate(sorted_str):
k1 = bytes.decode(key1)
k2 = sorted_ints[i]
full_colname = "('" + k1 + "', " + str(k2) + ")"
arr = result[full_colname]
self.assertEqual(arr[i + 1], sorted_data[i])
def test_dataset_accum2(self):
# test from accum2 off dataset and with a filter
ds = Dataset({'test': arange(10), 'data': arange(10) // 2})
x = ds.accum2('data', 'test').sum(ds.test, filter=ds.data == 3)
totalcol = x.summary_get_names()[0]
self.assertEqual(x[totalcol][3], 13)
def test_accum2_mean(self):
ds = Dataset({'time': arange(200.0)})
ds.data = np.random.randint(7, size=200)
ds.data2 = np.random.randint(7, size=200)
symbols = ['AAPL', 'AMZN', 'FB', 'GOOG', 'IBM']
ds.symbol = Cat(1 + arange(200) % 5, symbols)
ac = Accum2(ds.data, ds.symbol).mean(ds.time)
totalcol = ac[ac.summary_get_names()[0]]
footer = ac.footer_get_values()['Mean']
for i in range(len(symbols)):
s_mean = ds[ds.symbol == symbols[i], :].time.mean()
self.assertEqual(footer[i + 1], s_mean)
for i in range(7):
s_mean = ds[ds.data == i, :].time.mean()
self.assertEqual(totalcol[i], s_mean)
def test_accum2_median(self):
ds = Dataset({'time': arange(200.0)})
ds.data = np.random.randint(7, size=200)
ds.data2 = np.random.randint(7, size=200)
symbols = ['AAPL', 'AMZN', 'FB', 'GOOG', 'IBM']
ds.symbol = Cat(1 + arange(200) % 5, symbols)
ac = Accum2(ds.data, ds.symbol).median(ds.time)
totalcol = ac[ac.summary_get_names()[0]]
footer = ac.footer_get_values()['Median']
for i in range(len(symbols)):
s_median = ds[ds.symbol == symbols[i], :].time.median()
self.assertEqual(footer[i + 1], s_median)
for i in range(7):
s_median = ds[ds.data == i, :].time.median()
self.assertEqual(totalcol[i], s_median)
def test_accum2_nanmedian_with_filter(self):
ds = Dataset({'time': arange(200.0)})
ds.data = np.random.randint(7, size=200)
ds.data2 = np.random.randint(7, size=200)
symbols = ['AAPL', 'AMZN', 'FB', 'GOOG', 'IBM']
# N.B. make a copy here for testing
symbol_categorical = Cat(1 + arange(200) % 5, symbols)
# N.B. Categorical.copy and Categorical constructor doesn't do deep copy?!
ds.symbol = Cat(1 + arange(200) % 5, symbols)
chosen_symbols = ['AMZN', 'AAPL']
filt = symbol_categorical.isin(chosen_symbols)
ac = Accum2(ds.data, ds.symbol)
stat1 = ac.nanmedian(ds.time, filter=filt)
totalcol = stat1[stat1.summary_get_names()[0]]
footer = stat1.footer_get_values()['Median']
# Make sure we don't change the input data
self.assertTrue(not rt.any(ds.symbol._fa == 0))
for sym in chosen_symbols:
s_median = rt.nanmedian(ds[symbol_categorical == sym, :].time)
i = rt.where(symbol_categorical.category_array == sym)[0].item()
self.assertEqual(footer[i + 1], s_median)
for i in range(7):
s_median = rt.nanmedian(ds[(ds.data == i) & filt, :].time)
self.assertEqual(totalcol[i], s_median)
chosen_symbols = ['IBM', 'FB']
filt = symbol_categorical.isin(chosen_symbols)
stat2 = ac.nanmedian(ds.time, filter=filt)
totalcol = stat2[stat2.summary_get_names()[0]]
footer = stat2.footer_get_values()['Median']
# Make sure we don't change the input data
self.assertTrue(not rt.any(ds.symbol._fa == 0))
for sym in chosen_symbols:
s_median = rt.nanmedian(ds[symbol_categorical == sym, :].time)
i = rt.where(symbol_categorical.category_array == sym)[0].item()
self.assertEqual(footer[i + 1], s_median)
for i in range(7):
s_median = rt.nanmedian(ds[(ds.data == i) & filt, :].time)
self.assertEqual(totalcol[i], s_median)
def test_showfilter_label_subclass(self):
d = Date.range('20190201', '20190210')
c = Categorical(d)
c2 = Categorical(arange(10))
ac = Accum2(c, c2)
result = ac.count(showfilter=True)
self.assertTrue(isinstance(result.YLabel, Date))
self.assertTrue(result.YLabel.isnan()[0])
d = DateTimeNano.random(10)
c = Categorical(d)
c2 = Categorical(arange(10))
ac = Accum2(c, c2)
result = ac.count(showfilter=True)
self.assertTrue(isinstance(result.YLabel, DateTimeNano))
self.assertTrue(result.YLabel.isnan()[0])
d = DateSpan(arange(10, 20))
c = Categorical(d)
c2 = Categorical(arange(10))
ac = Accum2(c, c2)
result = ac.count(showfilter=True)
self.assertTrue(isinstance(result.YLabel, DateSpan))
self.assertTrue(result.YLabel.isnan()[0])
d = TimeSpan(np.random.rand(10) * 10_000_000_000)
c = Categorical(d)
c2 = Categorical(arange(10))
ac = Accum2(c, c2)
result = ac.count(showfilter=True)
self.assertTrue(isinstance(result.YLabel, TimeSpan))
self.assertTrue(result.YLabel.isnan()[0])
def test_apply(self):
arrsize = 200
numrows = 7
ds = Dataset({'time': arange(arrsize * 1.0)})
ds.data = np.random.randint(numrows, size=arrsize)
ds.data2 = np.random.randint(numrows, size=arrsize)
symbols = ['AAPL', 'AMZN', 'FB', 'GOOG', 'IBM']
ds.symbol = Cat(1 + arange(arrsize) % len(symbols), symbols)
ds.accum2('symbol', 'data').sum(ds.data2)
ds.accum2('symbol', 'data').sum(ds.data2, showfilter=True)
ds.accum2('symbol', 'data').median(ds.data2, showfilter=True)
ds.accum2('symbol', 'data').median(ds.data2, showfilter=False)
ds.accum2('symbol', 'data').apply_reduce(np.median, ds.data2, showfilter=True)
ds.accum2('symbol', 'data').apply_reduce(np.median, ds.data2, showfilter=False)
f = logical(arange(200) % 2)
ds.accum2('symbol', 'data').apply_reduce(np.median, ds.data2, filter=f)
ds.accum2('symbol', 'data').apply_reduce(
np.median, ds.data2, filter=f, showfilter=True
)
ds.accum2('symbol', 'data').median(ds.data2, filter=f, showfilter=True)
def test_apply_nonreduce(self):
arrsize = 200
numrows = 7
ds = rt.Dataset({'time': rt.arange(arrsize * 1.0)})
ds.data = arange(arrsize) % numrows
ds.data2 = (arange(arrsize) + 3) % numrows
symbols = [
'AAPL',
'AMZN',
'FB',
'GOOG',
'IBM',
'6',
'7',
'8',
'9',
'10',
'11',
'12',
'13',
'14',
'15',
'16',
'17',
'18',
]
ds.symbol = rt.Cat(1 + rt.arange(arrsize) % len(symbols), symbols)
result = ds.symbol.apply_reduce(
lambda x, y: np.sum(np.minimum(x, y)), (ds.data, ds.data)
)
ac = ds.accum2('symbol', 'data')
newds = ac.apply_nonreduce(np.cumsum)
ds2 = ac.apply_reduce(
lambda x, y: np.sum(np.maximum(x, y)), (newds.data, newds.data2)
)
x = np.maximum(newds.data, newds.data2)
y = ac.apply_nonreduce(
lambda x, y: np.maximum(x, y), (newds.data, newds.data2)
)[0]
self.assertTrue(np.all(x == y))
class AccumTable_Test(unittest.TestCase):
@pytest.mark.skip(reason="Test needs to be re-written to remove the np.random.seed usage -- it's not stable across numpy versions.")
def test_accum_table(self):
# Create the test data
def unpivot(frame):
N, K = frame.shape
data = {
'value': frame.values.ravel('F'),
'variable': np.asarray(frame.columns).repeat(N),
'date': np.tile(np.asarray(frame.index), K),
}
return pd.DataFrame(data, columns=['date', 'variable', 'value'])
np.random.seed(1234)
df = unpivot(pd.concat([tm.makeTimeDataFrame(), tm.makeTimeDataFrame()]))
ds = dataset_from_pandas_df(df)
ds.date = DateTimeNano(ds.date, from_tz='NYC').to_iso()
ds.date = rt.FastArray([d[:10] for d in ds.date])
ds.variable = rt.Categorical(ds.variable)
ds.date = rt.Categorical(ds.date)
at = rt.AccumTable(ds.date, ds.variable)
# Add and view inner tables with totals
at['Sum'] = at.sum(ds.value)
self.assertEqual(at['Sum'].shape, (3, 7))
assert_array_almost_equal(
at['Sum']['A'], np.array([0.47, -0.79, 1.72]), decimal=2
)
vw = at.gen('Sum')
self.assertEqual(vw.shape, (3, 7))
assert_array_almost_equal(vw['A'], np.array([0.47, -0.79, 1.72]), decimal=2)
assert_array_almost_equal(vw['Sum'], np.array([-0.10, -5.02, 5.37]), decimal=2)
assert_array_almost_equal(
vw.footer_get_values(columns=['Sum'])['Sum'], np.array([0.25]), decimal=2
)
at['Mean'] = at.mean(ds.value)
self.assertEqual(at['Mean'].shape, (3, 7))
assert_array_almost_equal(
at['Mean']['A'], np.array([0.24, -0.39, 0.86]), decimal=2
)
at['Half'] = at['Mean'] / at['Sum']
self.assertEqual(at['Half'].shape, (3, 7))
assert_array_almost_equal(at['Half']['A'], np.array([0.5, 0.5, 0.5]), decimal=2)
# Add and view inner tables with blanks
at['Blanks'] = at['Sum'].copy()
at['Blanks']['C'] = 0.0
for col in at['Blanks'][:, 1:]:
at['Blanks'][col][2] = np.nan
vw = at.gen('Blanks')
self.assertEqual(vw.shape, (2, 9))
assert_array_almost_equal(vw['A'], np.array([0.47, -0.79]), decimal=2)
assert_array_almost_equal(vw['Blanks'], np.array([-0.10, -5.02]), decimal=2)
self.assertAlmostEqual(
vw.footer_get_dict()['Blanks']['Blanks'], 0.245, places=2
)
vw = at.gen('Blanks', remove_blanks=False)
self.assertEqual(vw.shape, (3, 10))
assert_array_almost_equal(vw['A'], np.array([0.47, -0.79, np.nan]), decimal=2)
assert_array_almost_equal(
vw['Blanks'], np.array([-0.10, -5.02, np.nan]), decimal=2
)
# Test division with zeros and nans
at['Bad'] = at['Blanks'] / at['Half']
self.assertEqual(at['Blanks'].shape, (3, 7))
vw = at.gen('Bad')
self.assertEqual(vw.shape, (2, 10))
vw = at.gen('Blanks')
self.assertEqual(vw.shape, (2, 10))
vw = at.gen('Half')
self.assertEqual(vw.shape, (3, 11))
# Set margin columns to the right
at.set_margin_columns(['Blanks', 'Mean'])
vw = at.gen('Half')
self.assertEqual(vw.shape, (3, 9))
self.assertEqual(vw.keys()[6], 'Half')
self.assertEqual(vw.keys()[7], 'Blanks')
self.assertEqual(vw.keys()[8], 'Mean')
self.assertEqual(
list(vw.footer_get_dict().keys()), ['Half', 'Sum', 'Mean', 'Blanks', 'Bad']
)
vw = at.gen()
self.assertEqual(vw.keys()[6], 'Half')
vw = at.gen('Sum')
self.assertEqual(vw.keys()[6], 'Sum')
self.assertEqual(vw.keys()[7], 'Blanks')
self.assertEqual(vw.keys()[8], 'Mean')
self.assertEqual(
list(vw.footer_get_dict().keys()), ['Sum', 'Mean', 'Half', 'Blanks', 'Bad']
)
# Set footer rows at the bottom
at.set_footer_rows(['Mean'])
vw = at.gen('Half')
self.assertEqual(vw.shape, (3, 9))
self.assertEqual(vw.keys()[6], 'Half')
self.assertEqual(vw.keys()[7], 'Blanks')
self.assertEqual(vw.keys()[8], 'Mean')
self.assertEqual(list(vw.footer_get_dict().keys()), ['Half', 'Mean'])
vw = at.gen('Sum')
self.assertEqual(vw.keys()[6], 'Sum')
self.assertEqual(vw.keys()[7], 'Blanks')
self.assertEqual(vw.keys()[8], 'Mean')
self.assertEqual(list(vw.footer_get_dict().keys()), ['Sum', 'Mean'])
# Access view Dataset elements
vw = at.gen('Sum')
assert_array_equal(
vw.date, rt.FastArray(['2000-01-03', '2000-01-04', '2000-01-05'])
)
assert_array_almost_equal(vw['Sum'], np.array([-0.10, -5.02, 5.37]), decimal=2)
assert_almost_equal(vw[vw.date == '2000-01-03', 'A'][0], 0.47355353, decimal=2)
assert_almost_equal(
list(vw.footer_get_values('Sum', columns=['A']).values())[0],
1.409830,
decimal=2,
)
if __name__ == "__main__":
tester = unittest.main()
InMemoryRefreshTokensRepository.ts
import { RefreshToken } from '@modules/accounts/domain/refresh_token'
import { IRefreshTokensRepository } from '../IRefreshTokensRepository'
export class InMemoryRefreshTokensRepository
implements IRefreshTokensRepository
{
private refreshTokens: RefreshToken[] = []
async findByUserId(userId: string): Promise<RefreshToken | null> {
const refreshToken = this.refreshTokens.find(
(refreshToken) => refreshToken.user_id === userId
)
return refreshToken || null
}
async findByToken(token: string): Promise<RefreshToken | null> {
const refreshToken = this.refreshTokens.find(
(refreshToken) => refreshToken.token.value === token
)
return refreshToken || null
}
  async create(refreshToken: RefreshToken): Promise<void> {
    this.refreshTokens.push(refreshToken)
}
async deleteById(id: string): Promise<void> {
this.refreshTokens = this.refreshTokens.filter(
(refreshToken) => refreshToken.id !== id
)
}
}
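// Usage sketch (illustrative only, not part of the original file; assumes a
// RefreshToken domain object exposing `id`, `user_id` and `token.value` as the
// methods above do):
//
//   const repo = new InMemoryRefreshTokensRepository()
//   await repo.create(refreshToken)
//   const found = await repo.findByToken(refreshToken.token.value)
//   if (found) await repo.deleteById(found.id)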
appStorage_test.go
// Copyright © 2016 The Things Network
// Use of this source code is governed by the MIT license that can be found in the LICENSE file.
package collector
import (
"testing"
"github.com/TheThingsNetwork/ttn/core/types"
. "github.com/smartystreets/assertions"
)
func createStorage() AppStorage {
storage, err := ConnectRedis("localhost:6379", 0)
if err != nil {
panic(err)
}
return storage
}
func TestConnect(t *testing.T) {
a := New(t)
c, err := ConnectRedis("localhost:6379", 0)
a.So(err, ShouldBeNil)
defer c.Close()
_, err = ConnectRedis("", 0)
a.So(err, ShouldNotBeNil)
}
func TestAccessSetKey(t *testing.T) {
	a := New(t)
eui, _ := types.ParseAppEUI("8000000000000001")
key := "key"
storage := createStorage()
defer storage.Close()
defer storage.Reset()
err := storage.SetAccessKey(eui, key)
a.So(err, ShouldBeNil)
fetchedKey, err := storage.GetAccessKey(eui)
a.So(err, ShouldBeNil)
a.So(fetchedKey, ShouldEqual, key)
}
func TestList(t *testing.T) {
a := New(t)
eui1, _ := types.ParseAppEUI("8000000000000001")
eui2, _ := types.ParseAppEUI("8000000000000002")
storage := createStorage()
defer storage.Close()
defer storage.Reset()
err := storage.Add(eui1)
a.So(err, ShouldBeNil)
err = storage.Add(eui2)
a.So(err, ShouldBeNil)
apps, err := storage.List()
a.So(err, ShouldBeNil)
a.So(apps, ShouldHaveLength, 2)
err = storage.Remove(eui1)
a.So(err, ShouldBeNil)
apps, err = storage.List()
a.So(err, ShouldBeNil)
a.So(apps, ShouldHaveLength, 1)
}
currency.js
import { insert } from '../utils'
export default {
regexp: /\$/,
format (value, formatType, roundingFunction, numerify) {
const symbols = {
before: formatType.match(/^([+|\-|(|\s|$]*)/)[0],
after: formatType.match(/([+|\-|)|\s|$]*)$/)[0]
}
let symbol
formatType = formatType.replace(/\s?\$\s?/, '')
let output = numerify._numberToFormat(value, formatType, roundingFunction)
if (value >= 0) {
symbols.before = symbols.before.replace(/[-(]/, '')
symbols.after = symbols.after.replace(/[-)]/, '')
} else if (value < 0 &&
(!~symbols.before.indexOf('-') && !~symbols.before.indexOf('('))) {
symbols.before = '-' + symbols.before
}
for (let i = 0; i < symbols.before.length; i++) {
symbol = symbols.before[i]
switch (symbol) {
case '$':
output = insert(output, '$', i)
break
case ' ':
output = insert(output, ' ', i)
break
}
}
for (let i = symbols.after.length - 1; i >= 0; i--) {
symbol = symbols.after[i]
switch (symbol) {
case '$':
output = i === symbols.after.length - 1
? output + '$'
: insert(output, '$', -(symbols.after.length - (1 + i)))
break
case ' ':
output = i === symbols.after.length - 1
? output + ' '
: insert(output, ' ', -(symbols.after.length - (1 + i)))
break
      }
    }
    return output
  }
}
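// Usage sketch (hypothetical call through a numerify-style formatter; the host
// API is an assumption — only the '$' placeholder handling above is given):
//
//   numerify(1000.23, '$0,0.00')    // -> '$1,000.23'
//   numerify(-1000.23, '($0,0.00)') // -> '($1,000.23)'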
vigenere-cipher.js
class VigenereCipheringMachine {
constructor(isDirect) {
if (isDirect === true || typeof isDirect === 'undefined') {
this.isDirect = true
} else {
this.isDirect = false
}
this.alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
}
loopKey(message, key) {
let loopedKey = '',
keyIndex = 0;
for (let i = 0; i < message.length; i++) {
if (this.alphabet.indexOf(message[i]) < 0) {
loopedKey += message[i]
} else {
loopedKey += key[keyIndex]
keyIndex++
if (keyIndex > key.length - 1) {
keyIndex = 0
}
}
}
return loopedKey
}
encrypt(message, key) {
let encryptedMessage = ''
    if (typeof message === 'undefined' || typeof key === 'undefined') throw new Error()
message = message.toUpperCase()
key = this.loopKey(message, key.toUpperCase())
for (let i = 0; i < message.length; i++) {
if (this.alphabet.indexOf(message[i]) >= 0) {
const index = (this.alphabet.indexOf(message[i]) + this.alphabet.indexOf(key[i])) % this.alphabet.length
encryptedMessage += this.alphabet[index]
} else {
encryptedMessage += message[i]
}
}
return (this.isDirect) ? encryptedMessage : encryptedMessage.split('').reverse().join('')
}
decrypt(encryptedMessage, key) {
let message = ''
    if (typeof encryptedMessage === 'undefined' || typeof key === 'undefined') throw new Error()
encryptedMessage = encryptedMessage.toUpperCase()
key = this.loopKey(encryptedMessage, key.toUpperCase())
for (let i = 0; i < encryptedMessage.length; i++) {
if (this.alphabet.indexOf(encryptedMessage[i]) >= 0) {
const index = (this.alphabet.indexOf(encryptedMessage[i]) + this.alphabet.length - this.alphabet.indexOf(key[i])) % this.alphabet.length
message += this.alphabet[index]
} else {
message += encryptedMessage[i]
}
}
return (this.isDirect) ? message : message.split('').reverse().join('')
}
}
module.exports = VigenereCipheringMachine;
__init__.py
"""
The :mod:`sklearn.linear_model` module implements a variety of linear models.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from ._base import LinearRegression
from ._bayes import BayesianRidge, ARDRegression
from ._least_angle import (Lars, LassoLars, lars_path, lars_path_gram, LarsCV,
LassoLarsCV, LassoLarsIC)
from ._coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from ._glm import (PoissonRegressor,
GammaRegressor, TweedieRegressor)
from ._huber import HuberRegressor
from ._sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from ._stochastic_gradient import SGDClassifier, SGDRegressor, SGDOneClassSVM
from ._ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from ._logistic import LogisticRegression, LogisticRegressionCV
from ._omp import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit, OrthogonalMatchingPursuitCV)
from ._passive_aggressive import PassiveAggressiveClassifier
from ._passive_aggressive import PassiveAggressiveRegressor
from ._perceptron import Perceptron
from ._ransac import RANSACRegressor
from ._theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'HuberRegressor',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SGDOneClassSVM',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lars_path_gram',
'lasso_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor',
'PoissonRegressor',
'GammaRegressor',
'TweedieRegressor']
device.js
'use strict';
define({
setSlow: setSlow,
isSlow: isSlow,
addListener: addListener,
});
var deviceIsSlow = false;
var listeners = [];
function setSlow() {
deviceIsSlow = true;
$('.hide-when-slow').hide();
$('.show-when-slow').show();
for (var i in listeners) {
listeners[i]();
}
}
function isSlow() {
return deviceIsSlow;
}
function addListener(listener) {
listeners.push(listener);
  if (deviceIsSlow) {
    listener();
}
}
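// Usage sketch (illustrative; assumes the AMD-style define() wiring above):
//
//   device.addListener(function () { console.log('device marked slow'); });
//   device.setSlow();   // hides .hide-when-slow, shows .show-when-slow, fires listeners
//   device.isSlow();    // -> true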
task_step_api.py
import tornado.web
from app.domain.task_step import TaskStep
from app.service import token_service
from app.database import task_step_db
from app.utils import mytime
import json
class TaskStepApi(tornado.web.RequestHandler):
async def post(self, *args, **kwargs):
        token = token_service.get_token(self.request)
        if not token.is_valid:
            self.send_error(403)
            return
        user_login = token.username
        if user_login != 'internal':
            self.send_error(403)
            return
        task_step = TaskStep()
        task_step.__dict__ = json.loads(str(self.request.body, encoding='utf-8'))
        task_step.stepDate = mytime.now()
        await task_step_db.create_task_step(task_step)
        self.set_status(201)
        self.finish()
async def get(self, *args, **kwargs):
id = self.get_argument('id', None)
taskUuid = self.get_argument('taskUuid', None)
stepName = self.get_argument('stepName', None)
where = 'WHERE id > {} and task_uuid = "{}" and step_name = "{}"'.format(id, taskUuid, stepName)
result = await task_step_db.get_task_steps(where)
self.write(json.dumps(result))
async def put(self, *args, **kwargs):
token = token_service.get_token(self.request)
if not token.is_valid:
self.send_error(403)
return
user_login = token.username
        # updateTask may only be called by the async OnBoardingServie task inside the umu microservice, never by front-end users
if user_login != 'internal':
self.send_error(403)
return
body = json.loads(str(self.request.body, encoding='utf-8'))
await task_step_db.delete_task_steps(body.get('task_uuid'), body.get('start_progress'), body.get('end_progress'))
self.set_status(201)
self.finish()
sampled_multi_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import hashlib
import logging
import time
from bisect import bisect_right
from collections import OrderedDict, defaultdict
from enum import Enum
from typing import List
import numpy as np
import torch
from fairseq import distributed_utils
from fairseq.data import FairseqDataset, data_utils
def get_time_gap(s, e):
return (
datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s)
).__str__()
logger = logging.getLogger(__name__)
def default_virtual_size_func(datasets, ratios, max_scale_up=1.5):
sizes = [len(d) for d in datasets]
if ratios is None:
return sum(sizes)
largest_idx = np.argmax(sizes)
largest_r = ratios[largest_idx]
largest_s = sizes[largest_idx]
# set virtual sizes relative to the largest dataset
virtual_sizes = [(r / largest_r) * largest_s for r in ratios]
vsize = sum(virtual_sizes)
max_size = sum(sizes) * max_scale_up
return int(vsize if vsize < max_size else max_size)
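# Worked example (illustrative numbers, not from the original file): with
# sizes = [100, 400] and ratios = [0.5, 0.5], the largest dataset (400) keeps
# its size and the other is scaled to match its ratio, so virtual_sizes =
# [400, 400] and vsize = 800; the cap is sum(sizes) * 1.5 = 750, so the
# function returns min(800, 750) = 750.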
class CollateFormat(Enum):
single = 1
ordered_dict = 2
class SampledMultiDataset(FairseqDataset):
"""Samples from multiple sub-datasets according to given sampling ratios.
Args:
datasets (
List[~torch.utils.data.Dataset]
or OrderedDict[str, ~torch.utils.data.Dataset]
): datasets
        sampling_ratios (List[float]): list of probabilities of each dataset to be sampled
(default: None, which corresponds to concatenating all dataset together).
seed (int): RNG seed to use (default: 2).
epoch (int): starting epoch number (default: 1).
eval_key (str, optional): a key used at evaluation time that causes
this instance to pass-through batches from *datasets[eval_key]*.
collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or
CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures
the collater to output batches of data mixed from all sub-datasets,
and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys
of sub-datasets.
        Note that not all sub-datasets will be present in a single batch in both formats.
virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func).
split (str): the split of the data, e.g. 'train', 'valid' or 'test'.
        shared_collater (bool): whether or not all sub-datasets share the same collater.
shuffle (bool): whether or not to shuffle data (default: True).
"""
def __init__(
self,
datasets,
sampling_ratios=None,
seed=2,
epoch=1,
eval_key=None,
collate_format=CollateFormat.single,
virtual_size=default_virtual_size_func,
split="",
shared_collater=False,
shuffle=True,
):
super().__init__()
self.shared_collater = shared_collater
self.shuffle = shuffle
if isinstance(datasets, OrderedDict):
self.keys = list(datasets.keys())
datasets = list(datasets.values())
elif isinstance(datasets, List):
self.keys = list(range(len(datasets)))
else:
raise AssertionError()
self.datasets = datasets
self.split = split
self.eval_key = eval_key
if self.eval_key is not None:
self.collate_format = CollateFormat.single
else:
self.collate_format = collate_format
self.seed = seed
self._cur_epoch = None
self.cumulated_sizes = None
# self.datasets[k][self._cur_indices[i]] is the data item i in this sampled dataset
# namely, data item i is sampled from the kth sub-dataset self.datasets[k]
# where self.cumulated_sizes[k-1] <= i < self.cumulated_sizes[k]
self._cur_indices = None
self._sizes = None
self.virtual_size_per_dataset = None
# caching properties
self._reset_cached_properties()
self.setup_sampling(sampling_ratios, virtual_size)
self.set_epoch(epoch)
def _clean_if_not_none(self, var_list):
for v in var_list:
if v is not None:
del v
def _reset_cached_properties(self):
self._clean_if_not_none([self._sizes, self._cur_indices])
self._sizes = None
self._cur_indices = None
def setup_sampling(self, sample_ratios, virtual_size):
sizes = [len(d) for d in self.datasets]
if sample_ratios is None:
# default back to concating datasets
self.sample_ratios = None
self.virtual_size = sum(sizes)
else:
if not isinstance(sample_ratios, np.ndarray):
sample_ratios = np.array(sample_ratios)
self.sample_ratios = sample_ratios
virtual_size = (
default_virtual_size_func if virtual_size is None else virtual_size
)
self.virtual_size = (
virtual_size(self.datasets, self.sample_ratios)
if callable(virtual_size)
else virtual_size
)
def adjust_sampling(self, epoch, sampling_ratios, virtual_size):
if sampling_ratios is not None:
sampling_ratios = self._sync_sample_ratios(sampling_ratios)
self.setup_sampling(sampling_ratios, virtual_size)
def _sync_sample_ratios(self, ratios):
# in case the ratios are not precisely the same across processes
        # also to ensure every process updates the ratios at the same pace
ratios = torch.DoubleTensor(ratios)
if torch.distributed.is_initialized():
if torch.cuda.is_available():
distributed_utils.all_reduce(
ratios.cuda(), group=distributed_utils.get_data_parallel_group()
)
else:
distributed_utils.all_reduce(
ratios, group=distributed_utils.get_data_parallel_group()
)
ret = ratios.cpu()
ret = ret.numpy()
return ret
def random_choice_in_dataset(self, rng, dataset, choice_size):
if hasattr(dataset, "random_choice_in_dataset"):
return dataset.random_choice_in_dataset(rng, choice_size)
dataset_size = len(dataset)
return rng.choice(
dataset_size, choice_size, replace=(choice_size > dataset_size)
)
def get_virtual_indices(self, rng, datasets, sample_ratios, virtual_size):
def get_counts(sample_ratios):
counts = np.array([virtual_size * r for r in sample_ratios], dtype=np.int64)
diff = virtual_size - counts.sum()
assert diff >= 0
# due to round-offs, the size might not match the desired sizes
if diff > 0:
dataset_indices = rng.choice(
len(sample_ratios), size=diff, p=sample_ratios
)
for i in dataset_indices:
counts[i] += 1
return counts
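        # Worked example (illustrative): virtual_size=10 and sample_ratios=[0.33, 0.67]
        # truncate to counts=[3, 6], so diff=1 and one extra sample is assigned at
        # random (weighted by the ratios) to bring the total back to 10.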
def get_in_dataset_indices(datasets, sizes, sample_ratios):
counts = get_counts(sample_ratios)
            # uniformly sample desired counts for each dataset
# if the desired counts are large, sample with replacement:
indices = [
self.random_choice_in_dataset(rng, d, c)
for c, d in zip(counts, datasets)
]
return indices
sizes = [len(d) for d in datasets]
if sample_ratios is None:
# default back to concating datasets
in_dataset_indices = [list(range(s)) for s in sizes]
virtual_sizes_per_dataset = sizes
else:
ratios = sample_ratios / sample_ratios.sum()
in_dataset_indices = get_in_dataset_indices(datasets, sizes, ratios)
virtual_sizes_per_dataset = [len(d) for d in in_dataset_indices]
virtual_sizes_per_dataset = np.array(virtual_sizes_per_dataset, np.int64)
cumulative_sizes = np.cumsum(virtual_sizes_per_dataset)
assert sum(virtual_sizes_per_dataset) == virtual_size
assert cumulative_sizes[-1] == virtual_size
if virtual_size < sum(sizes):
logger.warning(
f"virtual data size ({virtual_size}) is less than real data size ({sum(sizes)})."
" If virtual size << real data size, there could be data coverage issue."
)
in_dataset_indices = np.hstack(in_dataset_indices)
return in_dataset_indices, cumulative_sizes, virtual_sizes_per_dataset
def _get_dataset_and_index(self, index):
i = bisect_right(self.cumulated_sizes, index)
return i, self._cur_indices[index]
def __getitem__(self, index):
# self.__getitem__(index) returns self.datasets[k][self._cur_indices[index]]
        # where k satisfies self.cumulated_sizes[k - 1] <= index < self.cumulated_sizes[k]
ds_idx, ds_sample_idx = self._get_dataset_and_index(index)
ret = (ds_idx, self.datasets[ds_idx][ds_sample_idx])
return ret
def num_tokens(self, index):
return self.sizes[index].max()
def num_tokens_vec(self, indices):
sizes_vec = self.sizes[np.array(indices)]
# max across all dimensions but first one
return np.amax(sizes_vec, axis=tuple(range(1, len(sizes_vec.shape))))
def size(self, index):
return self.sizes[index]
def __len__(self):
return self.virtual_size
def collater(self, samples, **extra_args):
"""Merge a list of samples to form a mini-batch."""
if len(samples) == 0:
return None
if self.collate_format == "ordered_dict":
collect_samples = [[] for _ in range(len(self.datasets))]
for (i, sample) in samples:
collect_samples[i].append(sample)
batch = OrderedDict(
[
(self.keys[i], dataset.collater(collect_samples[i]))
for i, (key, dataset) in enumerate(zip(self.keys, self.datasets))
if len(collect_samples[i]) > 0
]
)
elif self.shared_collater:
batch = self.datasets[0].collater([s for _, s in samples])
else:
samples_dict = defaultdict(list)
pad_to_length = (
defaultdict(int)
if "pad_to_length" not in extra_args
else extra_args["pad_to_length"]
)
for ds_idx, s in samples:
pad_to_length["source"] = max(
pad_to_length["source"], s["source"].size(0)
)
if s["target"] is not None:
pad_to_length["target"] = max(
pad_to_length["target"], s["target"].size(0)
)
samples_dict[ds_idx].append(s)
batches = [
self.datasets[i].collater(samples_dict[i], pad_to_length=pad_to_length)
for i in range(len(self.datasets))
if len(samples_dict[i]) > 0
]
def straight_data(tensors):
batch = torch.cat(tensors, dim=0)
return batch
src_lengths = straight_data(
[b["net_input"]["src_lengths"] for b in batches]
)
src_lengths, sort_order = src_lengths.sort(descending=True)
def straight_order(tensors):
batch = straight_data(tensors)
return batch.index_select(0, sort_order)
batch = {
"id": straight_order([b["id"] for b in batches]),
"nsentences": sum(b["nsentences"] for b in batches),
"ntokens": sum(b["ntokens"] for b in batches),
"net_input": {
"src_tokens": straight_order(
[b["net_input"]["src_tokens"] for b in batches]
),
"src_lengths": src_lengths,
},
"target": straight_order([b["target"] for b in batches])
if batches[0]["target"] is not None
else None,
}
if "prev_output_tokens" in batches[0]["net_input"]:
batch["net_input"]["prev_output_tokens"] = straight_order(
[b["net_input"]["prev_output_tokens"] for b in batches]
)
if "src_lang_id" in batches[0]["net_input"]:
batch["net_input"]["src_lang_id"] = straight_order(
[b["net_input"]["src_lang_id"] for b in batches]
)
if "tgt_lang_id" in batches[0]:
                batch["tgt_lang_id"] = straight_order(
                    [b["tgt_lang_id"] for b in batches]
                )
return batch
@property
def sizes(self):
if self._sizes is not None:
return self._sizes
start_time = time.time()
in_sub_dataset_indices = [
self._cur_indices[
0 if i == 0 else self.cumulated_sizes[i - 1] : self.cumulated_sizes[i]
]
for i in range(len(self.datasets))
]
sub_dataset_sizes = [
d.sizes[indices]
for d, indices in zip(self.datasets, in_sub_dataset_indices)
]
self._sizes = np.vstack(sub_dataset_sizes)
logger.info(f"sizes() calling time: {get_time_gap(start_time, time.time())}")
return self._sizes
def ordered_indices(self):
if self.shuffle:
indices = np.random.permutation(len(self))
else:
indices = np.arange(len(self))
sizes = self.sizes
tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None
src_sizes = (
sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes
)
# sort by target length, then source length
if tgt_sizes is not None:
indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")]
sort_indices = indices[np.argsort(src_sizes[indices], kind="mergesort")]
return sort_indices
def prefetch(self, indices):
prefetch_indices = [[] for _ in range(len(self.datasets))]
for i in indices:
ds_idx, ds_sample_idx = self._get_dataset_and_index(i)
prefetch_indices[ds_idx].append(ds_sample_idx)
for i in range(len(prefetch_indices)):
self.datasets[i].prefetch(prefetch_indices[i])
@property
def can_reuse_epoch_itr_across_epochs(self):
return False
def set_epoch(self, epoch):
super().set_epoch(epoch)
if epoch == self._cur_epoch:
# re-enter so return
return
for d in self.datasets:
if hasattr(d, "set_epoch"):
d.set_epoch(epoch)
self._cur_epoch = epoch
self._establish_virtual_datasets()
def _establish_virtual_datasets(self):
if self.sample_ratios is None and self._cur_indices is not None:
            # not a sampling dataset; no need to resample if indices are already established
return
self._reset_cached_properties()
start_time = time.time()
# Generate a weighted sample of indices as a function of the
# random seed and the current epoch.
rng = np.random.RandomState(
[
int(
hashlib.sha1(
str(self.__class__.__name__).encode("utf-8")
).hexdigest(),
16,
)
% (2 ** 32),
self.seed % (2 ** 32), # global seed
self._cur_epoch, # epoch index,
]
)
self._clean_if_not_none(
[self.cumulated_sizes, self.virtual_size_per_dataset, self._sizes]
)
self._sizes = None
indices, cumulated_sizes, virtual_size_per_dataset = self.get_virtual_indices(
rng, self.datasets, self.sample_ratios, self.virtual_size
)
self._cur_indices = indices
self.cumulated_sizes = cumulated_sizes
self.virtual_size_per_dataset = virtual_size_per_dataset
raw_sizes = [len(d) for d in self.datasets]
sampled_sizes = self.virtual_size_per_dataset
logger.info(
f"[{self.split}] Raw sizes: {str(dict(zip(self.keys, raw_sizes)))}; "
f"raw total size: {sum(raw_sizes)}"
)
logger.info(
f"[{self.split}] Resampled sizes: {str(dict(zip(self.keys, sampled_sizes)))}; "
f"resampled total size: {sum(sampled_sizes)}"
)
if self.sample_ratios is not None:
logger.info(
f"[{self.split}] Upsampling ratios: {str(dict(zip(self.keys, self.sample_ratios)))}"
)
else:
logger.info(f"[{self.split}] A concat dataset")
logger.info(
f"[{self.split}] virtual dataset established time: {get_time_gap(start_time, time.time())}"
)
def filter_indices_by_size(self, indices, max_sizes):
"""Filter a list of sample indices. Remove those that are longer
than specified in max_sizes.
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
sizes = self.sizes
tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None
src_sizes = (
sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes
)
return data_utils.filter_paired_dataset_indices_by_size(
src_sizes, tgt_sizes, indices, max_sizes
)
kubernetes_test.go
/*
Copyright 2020 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubernetes
import (
"sort"
"testing"
pb "github.com/gravitational/satellite/agent/proto/agentpb"
"github.com/gravitational/satellite/lib/test"
. "gopkg.in/check.v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
)
func TestNodes(t *testing.T) { TestingT(t) }
type KubernetesSuite struct{}
var _ = Suite(&KubernetesSuite{})
// TestMembers verifies members can be queried.
func (r *KubernetesSuite) TestMembers(c *C) {
comment := Commentf("List all nodes")
nodes := []v1.Node{
r.newNode("satellite-1", "192.168.1.101", "master"),
r.newNode("satellite-2", "192.168.1.102", "master"),
}
expected := []*pb.MemberStatus{
pb.NewMemberStatus("satellite-1", "192.168.1.101",
map[string]string{
"role": "master",
"publicip": "192.168.1.101",
}),
pb.NewMemberStatus("satellite-2", "192.168.1.102",
map[string]string{
"role": "master",
"publicip": "192.168.1.102",
}),
}
factory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(&v1.NodeList{Items: nodes}), 0)
informer := factory.Core().V1().Nodes().Informer()
stop := make(chan struct{})
defer close(stop)
go informer.Run(stop)
c.Assert(cache.WaitForCacheSync(stop, informer.HasSynced), Equals, true, comment)
cluster, err := NewCluster(&Config{
Informer: informer,
})
c.Assert(err, IsNil, comment)
members, err := cluster.Members()
c.Assert(err, IsNil, comment)
sort.Sort(byName(members))
c.Assert(members, test.DeepCompare, expected, comment)
}
// TestMember verifies single member can be queried.
func (r *KubernetesSuite) TestMember(c *C) {
comment := Commentf("Query satellite-1")
nodes := []v1.Node{
r.newNode("satellite-1", "192.168.1.101", "master"),
}
expected := pb.NewMemberStatus("satellite-1", "192.168.1.101",
map[string]string{
"role": "master",
"publicip": "192.168.1.101",
})
factory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(&v1.NodeList{Items: nodes}), 0)
informer := factory.Core().V1().Nodes().Informer()
stop := make(chan struct{})
defer close(stop)
go informer.Run(stop)
c.Assert(cache.WaitForCacheSync(stop, informer.HasSynced), Equals, true, comment)
cluster, err := NewCluster(&Config{
Informer: informer,
})
c.Assert(err, IsNil, comment)
member, err := cluster.Member("satellite-1")
c.Assert(err, IsNil, comment)
c.Assert(member, test.DeepCompare, expected, comment)
}
// newNode constructs a new node.
func (r *KubernetesSuite) newNode(name, addr, role string) v1.Node {
return v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
AdvertiseIPKey: addr,
RoleKey: role,
},
},
}
}
// byName implements sort.Interface.
// Enables MemberStatus to be sorted by name.
type byName []*pb.MemberStatus
func (r byName) Len() int { return len(r) }
func (r byName) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r byName) Less(i, j int) bool { return r[i].NodeName < r[j].NodeName }
backup.py
import os
import re
import tempfile
from argparse import ArgumentParser, RawTextHelpFormatter
from typing import Any
from django.conf import settings
from django.db import connection
from django.utils.timezone import now as timezone_now
from scripts.lib.zulip_tools import TIMESTAMP_FORMAT, parse_os_release, run
from version import ZULIP_VERSION
from zerver.lib.management import ZulipBaseCommand
from zerver.logging_handlers import try_git_describe
class Command(ZulipBaseCommand):
# Fix support for multi-line usage strings
    def create_parser(self, *args: Any, **kwargs: Any) -> ArgumentParser:
parser = super().create_parser(*args, **kwargs)
parser.formatter_class = RawTextHelpFormatter
return parser
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--output", help="Filename of output tarball")
parser.add_argument("--skip-db", action="store_true", help="Skip database backup")
parser.add_argument("--skip-uploads", action="store_true", help="Skip uploads backup")
def handle(self, *args: Any, **options: Any) -> None:
timestamp = timezone_now().strftime(TIMESTAMP_FORMAT)
with tempfile.TemporaryDirectory(
prefix=f"zulip-backup-{timestamp}-",
) as tmp:
os.mkdir(os.path.join(tmp, "zulip-backup"))
members = []
paths = []
with open(os.path.join(tmp, "zulip-backup", "zulip-version"), "w") as f:
print(ZULIP_VERSION, file=f)
git = try_git_describe()
if git:
print(git, file=f)
members.append("zulip-backup/zulip-version")
with open(os.path.join(tmp, "zulip-backup", "os-version"), "w") as f:
print(
"{ID} {VERSION_ID}".format(**parse_os_release()),
file=f,
)
members.append("zulip-backup/os-version")
with open(os.path.join(tmp, "zulip-backup", "postgres-version"), "w") as f:
print(connection.pg_version, file=f)
members.append("zulip-backup/postgres-version")
if settings.DEVELOPMENT:
members.append(
os.path.join(settings.DEPLOY_ROOT, "zproject", "dev-secrets.conf"),
)
paths.append(
("zproject", os.path.join(settings.DEPLOY_ROOT, "zproject")),
)
else:
members.append("/etc/zulip")
paths.append(("settings", "/etc/zulip"))
if not options["skip_db"]:
pg_dump_command = [
"pg_dump",
"--format=directory",
"--file=" + os.path.join(tmp, "zulip-backup", "database"),
"--host=" + settings.DATABASES["default"]["HOST"],
"--port=" + settings.DATABASES["default"]["PORT"],
"--username=" + settings.DATABASES["default"]["USER"],
"--dbname=" + settings.DATABASES["default"]["NAME"],
"--no-password",
]
os.environ["PGPASSWORD"] = settings.DATABASES["default"]["PASSWORD"]
run(
pg_dump_command,
cwd=tmp,
)
members.append("zulip-backup/database")
if (
not options["skip_uploads"]
and settings.LOCAL_UPLOADS_DIR is not None
and os.path.exists(
os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR),
)
):
members.append(
os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR),
)
paths.append(
(
"uploads",
os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR),
),
)
assert not any("|" in name or "|" in path for name, path in paths)
transform_args = [
r"--transform=s|^{}(/.*)?$|zulip-backup/{}\1|x".format(
re.escape(path),
name.replace("\\", r"\\"),
)
for name, path in paths
]
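            # Illustrative expansion (derived from the format string above): for the
            # pair ("settings", "/etc/zulip") the generated argument is
            #   --transform=s|^/etc/zulip(/.*)?$|zulip-backup/settings\1|x
            # so tar rewrites /etc/zulip/... paths to zulip-backup/settings/...
            # inside the archive.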
try:
if options["output"] is None:
tarball_path = tempfile.NamedTemporaryFile(
prefix=f"zulip-backup-{timestamp}-",
suffix=".tar.gz",
delete=False,
).name
else:
tarball_path = options["output"]
run(
[
"tar",
f"--directory={tmp}",
"-cPzf",
tarball_path,
*transform_args,
"--",
*members,
]
)
print(f"Backup tarball written to {tarball_path}")
except BaseException:
if options["output"] is None:
os.unlink(tarball_path)
raise
gapAdvertise.py
#!/usr/bin/python
# SPDX-License-Identifier: LGPL-2.1-or-later
from __future__ import print_function
import argparse
import dbus
import dbus.exceptions
import dbus.mainloop.glib
import dbus.service
import time
import threading
try:
from gi.repository import GObject # python3
except ImportError:
import gobject as GObject # python2
mainloop = None
BLUEZ_SERVICE_NAME = 'org.bluez'
LE_ADVERTISING_MANAGER_IFACE = 'org.bluez.LEAdvertisingManager1'
DBUS_OM_IFACE = 'org.freedesktop.DBus.ObjectManager'
DBUS_PROP_IFACE = 'org.freedesktop.DBus.Properties'
LE_ADVERTISEMENT_IFACE = 'org.bluez.LEAdvertisement1'
class InvalidArgsException(dbus.exceptions.DBusException):
_dbus_error_name = 'org.freedesktop.DBus.Error.InvalidArgs'
class NotSupportedException(dbus.exceptions.DBusException):
_dbus_error_name = 'org.bluez.Error.NotSupported'
class NotPermittedException(dbus.exceptions.DBusException):
_dbus_error_name = 'org.bluez.Error.NotPermitted'
class InvalidValueLengthException(dbus.exceptions.DBusException):
_dbus_error_name = 'org.bluez.Error.InvalidValueLength'
class FailedException(dbus.exceptions.DBusException):
_dbus_error_name = 'org.bluez.Error.Failed'
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
BUS_NAME = 'org.bluez'
AGENT_INTERFACE = 'org.bluez.Agent1'
AGENT_PATH = "/org/bluez/justWorks/agent"
AGENT_CAPABILITY = "NoInputNoOutput"
bus = None
device_obj = None
dev_path = None
def ask(prompt):
try:
return raw_input(prompt)
except:
return input(prompt)
def set_trusted(path):
props = dbus.Interface(bus.get_object("org.bluez", path),
"org.freedesktop.DBus.Properties")
props.Set("org.bluez.Device1", "Trusted", True)
def dev_connect(path):
dev = dbus.Interface(bus.get_object("org.bluez", path),
"org.bluez.Device1")
dev.Connect()
class Rejected(dbus.DBusException):
_dbus_error_name = "org.bluez.Error.Rejected"
def pair_reply():
print("Device paired")
set_trusted(dev_path)
dev_connect(dev_path)
mainloop.quit()
def pair_error(error):
err_name = error.get_dbus_name()
if err_name == "org.freedesktop.DBus.Error.NoReply" and device_obj:
print("Timed out. Cancelling pairing")
device_obj.CancelPairing()
else:
print("Creating device failed: %s" % (error))
def register_ad_cb():
print('Advertisement registered')
def register_ad_error_cb(error):
print('Failed to register advertisement: ' + str(error))
mainloop.quit()
def find_adapter(bus):
remote_om = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, '/'),
DBUS_OM_IFACE)
objects = remote_om.GetManagedObjects()
for o, props in objects.items():
if LE_ADVERTISING_MANAGER_IFACE in props:
return o
return None
def shutdown(timeout):
print('Advertising for {} seconds...'.format(timeout))
time.sleep(timeout)
mainloop.quit()
class Agent(dbus.service.Object):
exit_on_release = True
def set_exit_on_release(self, exit_on_release):
self.exit_on_release = exit_on_release
@dbus.service.method(AGENT_INTERFACE,
in_signature="", out_signature="")
def Release(self):
print("Release")
if self.exit_on_release:
mainloop.quit()
@dbus.service.method(AGENT_INTERFACE,
in_signature="os", out_signature="")
def AuthorizeService(self, device, uuid):
print("AuthorizeService (%s, %s)" % (device, uuid))
authorize = ask("Authorize connection (yes/no): ")
if (authorize == "yes"):
return
raise Rejected("Connection rejected by user")
@dbus.service.method(AGENT_INTERFACE,
in_signature="o", out_signature="s")
def RequestPinCode(self, device):
print("RequestPinCode (%s)" % (device))
set_trusted(device)
return ask("Enter PIN Code: ")
@dbus.service.method(AGENT_INTERFACE,
in_signature="o", out_signature="u")
def RequestPasskey(self, device):
print("RequestPasskey (%s)" % (device))
set_trusted(device)
passkey = ask("Enter passkey: ")
return dbus.UInt32(passkey)
@dbus.service.method(AGENT_INTERFACE,
in_signature="ouq", out_signature="")
def DisplayPasskey(self, device, passkey, entered):
print("DisplayPasskey (%s, %06u entered %u)" %
(device, passkey, entered))
@dbus.service.method(AGENT_INTERFACE,
in_signature="os", out_signature="")
def DisplayPinCode(self, device, pincode):
print("DisplayPinCode (%s, %s)" % (device, pincode))
@dbus.service.method(AGENT_INTERFACE,
in_signature="ou", out_signature="")
def RequestConfirmation(self, device, passkey):
print("RequestConfirmation (%s, %06d)" % (device, passkey))
confirm = ask("Confirm passkey (yes/no): ")
if (confirm == "yes"):
set_trusted(device)
return
raise Rejected("Passkey doesn't match")
@dbus.service.method(AGENT_INTERFACE,
in_signature="o", out_signature="")
def RequestAuthorization(self, device):
print("RequestAuthorization (%s)" % (device))
auth = ask("Authorize? (yes/no): ")
if (auth == "yes"):
return
raise Rejected("Pairing rejected")
@dbus.service.method(AGENT_INTERFACE,
in_signature="", out_signature="")
def Cancel(self):
print("Cancel")
class Advertisement(dbus.service.Object):
PATH_BASE = '/org/bluez/example/advertisement'
def __init__(self, bus, index, advertising_type):
self.path = self.PATH_BASE + str(index)
self.bus = bus
self.ad_type = advertising_type
self.service_uuids = None
self.manufacturer_data = None
self.solicit_uuids = None
self.service_data = None
self.local_name = None
self.include_tx_power = False
self.data = None
dbus.service.Object.__init__(self, bus, self.path)
def get_properties(self):
properties = dict()
properties['Type'] = self.ad_type
if self.service_uuids is not None:
properties['ServiceUUIDs'] = dbus.Array(self.service_uuids,
signature='s')
if self.solicit_uuids is not None:
properties['SolicitUUIDs'] = dbus.Array(self.solicit_uuids,
signature='s')
if self.manufacturer_data is not None:
properties['ManufacturerData'] = dbus.Dictionary(
self.manufacturer_data, signature='qv')
if self.service_data is not None:
properties['ServiceData'] = dbus.Dictionary(self.service_data,
signature='sv')
if self.local_name is not None:
properties['LocalName'] = dbus.String(self.local_name)
properties['Appearance'] = dbus.UInt16(961)
properties['Discoverable'] = dbus.Boolean(True)
properties['DiscoverableTimeout'] = dbus.UInt16(0)
if self.include_tx_power:
properties['Includes'] = dbus.Array(["tx-power"], signature='s')
if self.data is not None:
properties['Data'] = dbus.Dictionary(
self.data, signature='yv')
return {LE_ADVERTISEMENT_IFACE: properties}
def get_path(self):
return dbus.ObjectPath(self.path)
def add_service_uuid(self, uuid):
if not self.service_uuids:
self.service_uuids = []
self.service_uuids.append(uuid)
def add_solicit_uuid(self, uuid):
if not self.solicit_uuids:
self.solicit_uuids = []
self.solicit_uuids.append(uuid)
def add_manufacturer_data(self, manuf_code, data):
if not self.manufacturer_data:
self.manufacturer_data = dbus.Dictionary({}, signature='qv')
self.manufacturer_data[manuf_code] = dbus.Array(data, signature='y')
def add_service_data(self, uuid, data):
if not self.service_data:
self.service_data = dbus.Dictionary({}, signature='sv')
self.service_data[uuid] = dbus.Array(data, signature='y')
def add_local_name(self, name):
if not self.local_name:
self.local_name = ""
self.local_name = dbus.String(name)
def add_data(self, ad_type, data):
if not self.data:
self.data = dbus.Dictionary({}, signature='yv')
self.data[ad_type] = dbus.Array(data, signature='y')
@dbus.service.method(DBUS_PROP_IFACE,
in_signature='s',
out_signature='a{sv}')
def GetAll(self, interface):
print('GetAll')
if interface != LE_ADVERTISEMENT_IFACE:
raise InvalidArgsException()
print('returning props')
return self.get_properties()[LE_ADVERTISEMENT_IFACE]
@dbus.service.method(LE_ADVERTISEMENT_IFACE,
in_signature='',
out_signature='')
def Release(self):
print('%s: Released!' % self.path)
class TestAdvertisement(Advertisement):
def __init__(self, bus, index):
Advertisement.__init__(self, bus, index, 'peripheral')
#self.add_service_uuid('180D')
#self.add_service_uuid('180F')
#self.add_manufacturer_data(0xffff, [0x00, 0x01, 0x02, 0x03])
#self.add_service_data('9999', [0x00, 0x01, 0x02, 0x03, 0x04])
self.add_local_name('TestAdvertisement')
self.include_tx_power = True
#self.add_data(0x26, [0x01, 0x01, 0x00])
def main(timeout=0):
global mainloop
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
adapter = find_adapter(bus)
if not adapter:
print('LEAdvertisingManager1 interface not found')
return
path = AGENT_PATH
capability = AGENT_CAPABILITY
agent = Agent(bus, path)
    obj = bus.get_object(BUS_NAME, "/org/bluez")
manager = dbus.Interface(obj, "org.bluez.AgentManager1")
manager.RegisterAgent(path, capability)
manager.RequestDefaultAgent(path)
print("Agent registered")
adapter_props = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, adapter),
"org.freedesktop.DBus.Properties")
adapter_props.Set("org.bluez.Adapter1", "Powered", dbus.Boolean(1))
ad_manager = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, adapter),
LE_ADVERTISING_MANAGER_IFACE)
test_advertisement = TestAdvertisement(bus, 0)
mainloop = GObject.MainLoop()
ad_manager.RegisterAdvertisement(test_advertisement.get_path(), {},
reply_handler=register_ad_cb,
error_handler=register_ad_error_cb)
if timeout > 0:
threading.Thread(target=shutdown, args=(timeout,)).start()
else:
print('Advertising forever...')
mainloop.run() # blocks until mainloop.quit() is called
ad_manager.UnregisterAdvertisement(test_advertisement)
print('Advertisement unregistered')
dbus.service.Object.remove_from_connection(test_advertisement)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--timeout', default=0, type=int, help="advertise " +
"for this many seconds then stop, 0=run forever " +
"(default: 0)")
args = parser.parse_args()
main(args.timeout)
model_synthetics_delete_tests_response.go
/*
* Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
* This product includes software developed at Datadog (https://www.datadoghq.com/).
* Copyright 2019-Present Datadog, Inc.
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package datadog
import (
"encoding/json"
)
// SyntheticsDeleteTestsResponse Response object for deleting Synthetic tests.
type SyntheticsDeleteTestsResponse struct {
// Array of objects containing a deleted Synthetic test ID with the associated deletion timestamp.
DeletedTests *[]SyntheticsDeletedTest `json:"deleted_tests,omitempty"`
// UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
	UnparsedObject map[string]interface{} `json:"-"`
}
// NewSyntheticsDeleteTestsResponse instantiates a new SyntheticsDeleteTestsResponse object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewSyntheticsDeleteTestsResponse() *SyntheticsDeleteTestsResponse {
this := SyntheticsDeleteTestsResponse{}
return &this
}
// NewSyntheticsDeleteTestsResponseWithDefaults instantiates a new SyntheticsDeleteTestsResponse object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewSyntheticsDeleteTestsResponseWithDefaults() *SyntheticsDeleteTestsResponse {
this := SyntheticsDeleteTestsResponse{}
return &this
}
// GetDeletedTests returns the DeletedTests field value if set, zero value otherwise.
func (o *SyntheticsDeleteTestsResponse) GetDeletedTests() []SyntheticsDeletedTest {
	if o == nil || o.DeletedTests == nil {
		var ret []SyntheticsDeletedTest
		return ret
	}
return *o.DeletedTests
}
// GetDeletedTestsOk returns a tuple with the DeletedTests field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *SyntheticsDeleteTestsResponse) GetDeletedTestsOk() (*[]SyntheticsDeletedTest, bool) {
if o == nil || o.DeletedTests == nil {
return nil, false
}
return o.DeletedTests, true
}
// HasDeletedTests returns a boolean if a field has been set.
func (o *SyntheticsDeleteTestsResponse) HasDeletedTests() bool {
if o != nil && o.DeletedTests != nil {
return true
}
return false
}
// SetDeletedTests gets a reference to the given []SyntheticsDeletedTest and assigns it to the DeletedTests field.
func (o *SyntheticsDeleteTestsResponse) SetDeletedTests(v []SyntheticsDeletedTest) {
o.DeletedTests = &v
}
func (o SyntheticsDeleteTestsResponse) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.UnparsedObject != nil {
return json.Marshal(o.UnparsedObject)
}
if o.DeletedTests != nil {
toSerialize["deleted_tests"] = o.DeletedTests
}
return json.Marshal(toSerialize)
}
func (o *SyntheticsDeleteTestsResponse) UnmarshalJSON(bytes []byte) (err error) {
raw := map[string]interface{}{}
all := struct {
DeletedTests *[]SyntheticsDeletedTest `json:"deleted_tests,omitempty"`
}{}
err = json.Unmarshal(bytes, &all)
if err != nil {
err = json.Unmarshal(bytes, &raw)
if err != nil {
return err
}
o.UnparsedObject = raw
return nil
}
o.DeletedTests = all.DeletedTests
return nil
}
type NullableSyntheticsDeleteTestsResponse struct {
value *SyntheticsDeleteTestsResponse
isSet bool
}
func (v NullableSyntheticsDeleteTestsResponse) Get() *SyntheticsDeleteTestsResponse {
return v.value
}
func (v *NullableSyntheticsDeleteTestsResponse) Set(val *SyntheticsDeleteTestsResponse) {
v.value = val
v.isSet = true
}
func (v NullableSyntheticsDeleteTestsResponse) IsSet() bool {
return v.isSet
}
func (v *NullableSyntheticsDeleteTestsResponse) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableSyntheticsDeleteTestsResponse(val *SyntheticsDeleteTestsResponse) *NullableSyntheticsDeleteTestsResponse {
return &NullableSyntheticsDeleteTestsResponse{value: val, isSet: true}
}
func (v NullableSyntheticsDeleteTestsResponse) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableSyntheticsDeleteTestsResponse) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
}
fix_value.rs
use crate::tagvalue::datatypes::*;
use crate::{Buffer, TagU16};
use std::convert::TryInto;
use std::string::ToString;
/// A trait for (de)serializing data directly into a [`Buffer`].
pub trait FixValue<'a>
where
Self: Sized,
{
type Error;
type SerializeSettings: Default;
/// Flag that is enabled if and only if the byte representation of `Self` is
/// always valid ASCII.
///
/// This flag is currently not used, but it might be once Rust supports
/// fully-fledged `const` generics.
const IS_ASCII: bool;
/// Writes `self` to `buffer` using default settings.
#[inline(always)]
fn serialize<B>(&self, buffer: &mut B) -> usize
where
B: Buffer,
{
self.serialize_with(buffer, Self::SerializeSettings::default())
}
/// Writes `self` to `buffer` using custom serialization `settings`.
fn serialize_with<B>(&self, buffer: &mut B, _settings: Self::SerializeSettings) -> usize
where
B: Buffer;
/// Parses and deserializes from `data`.
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error>;
/// Like [`Self::deserialize`], but it's allowed to skip *some* amount of
/// input checking. Invalid inputs might not trigger errors and instead be
/// deserialized as random values.
fn deserialize_lossy(data: &'a [u8]) -> Result<Self, Self::Error> {
Self::deserialize(data)
}
/// Serializes `self` to a [`Vec`] of bytes, allocated on the fly.
fn to_bytes(&self) -> Vec<u8> {
let mut buffer = Vec::new();
self.serialize(&mut buffer);
buffer
}
/// Allocates a [`String`] representation of `self`.
///
/// # Panics
/// This function will panic if the underlying byte representation is not
/// valid UTF-8. As such, you should only *ever* use this function for
/// [`FixValue`] implementors that are guaranteed to be representable
/// with valid UTF-8
/// (like numbers with ASCII digits).
fn to_string(&self) -> String {
String::from_utf8(self.to_bytes()).expect("Invalid UTF-8 representation of FIX field.")
}
}
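// Editor's note: a minimal sketch of the round-trip this trait is designed
// for, using the `u32` implementor defined later in this file. It assumes the
// crate's `Buffer` impl for `Vec<u8>` (which `to_bytes` above also relies on).
#[cfg(test)]
mod fix_value_round_trip_example {
    use super::*;

    #[test]
    fn u32_round_trip() {
        let mut buffer = Vec::new();
        // `serialize` returns the number of bytes written.
        let len = 42u32.serialize(&mut buffer);
        assert_eq!(&buffer[..len], b"42" as &[u8]);
        // Both the checked and the lossy parsers recover the original value.
        assert_eq!(u32::deserialize(&buffer[..len]).ok(), Some(42));
        assert_eq!(u32::deserialize_lossy(&buffer[..len]).ok(), Some(42));
    }
}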
/// Byte-padding instructions for byte strings.
#[derive(Debug, Copy, Clone)]
pub struct Padding {
pub len: usize,
pub byte: u8,
}
impl Default for Padding {
#[inline(always)]
fn default() -> Self {
Self { len: 0, byte: 0 }
}
}
impl Padding {
#[inline(always)]
pub fn zeros(len: usize) -> Self {
Self { len, byte: b'0' }
}
}
#[derive(Debug, Copy, Clone)]
pub struct WithMilliseconds(pub bool);
impl Default for WithMilliseconds {
fn default() -> Self {
Self(true)
}
}
#[cfg(feature = "utils-chrono")]
impl<'a> FixValue<'a> for chrono::DateTime<chrono::Utc> {
type Error = &'static str;
type SerializeSettings = WithMilliseconds;
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize<B>(&self, buffer: &mut B) -> usize
where
B: Buffer,
{
// Serialize with milliseconds by default.
self.serialize_with(buffer, WithMilliseconds(true))
}
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, settings: Self::SerializeSettings) -> usize
where
B: Buffer,
{
use chrono::{Datelike, Timelike};
(self.year() as u32).serialize_with(buffer, Padding::zeros(4));
(self.month() as u32).serialize_with(buffer, Padding::zeros(2));
(self.day() as u32).serialize_with(buffer, Padding::zeros(2));
buffer.extend_from_slice(b"-");
(self.hour() as u32).serialize_with(buffer, Padding::zeros(2));
buffer.extend_from_slice(b":");
(self.minute() as u32).serialize_with(buffer, Padding::zeros(2));
buffer.extend_from_slice(b":");
(self.second() as u32).serialize_with(buffer, Padding::zeros(2));
if settings.0 {
buffer.extend_from_slice(b".");
            // Milliseconds are nanoseconds divided by 10^6; the previous
            // divisor (`10E6 as u32`, i.e. 10^7) yielded hundredths of a second.
            (self.nanosecond() / 1_000_000).serialize_with(buffer, Padding::zeros(3));
21
} else {
17
}
}
#[inline(always)]
fn deserialize(_data: &'a [u8]) -> Result<Self, Self::Error> {
Err("TODO")
}
}
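// Editor's note: an illustrative sketch of the timestamp wire format produced
// above. It assumes chrono 0.4's `TimeZone::ymd`/`and_hms` constructors.
// Without milliseconds the output is `YYYYMMDD-HH:MM:SS` (17 bytes).
#[cfg(all(test, feature = "utils-chrono"))]
mod chrono_timestamp_example {
    use super::*;
    use chrono::TimeZone;

    #[test]
    fn utc_timestamp_without_millis() {
        let dt = chrono::Utc.ymd(2021, 3, 4).and_hms(12, 5, 9);
        let mut buffer = Vec::new();
        let len = dt.serialize_with(&mut buffer, WithMilliseconds(false));
        assert_eq!(len, 17);
        assert_eq!(&buffer[..], b"20210304-12:05:09" as &[u8]);
    }
}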
#[cfg(feature = "utils-chrono")]
impl<'a> FixValue<'a> for chrono::NaiveDate {
type Error = &'static str;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: Self::SerializeSettings) -> usize
where
B: Buffer,
{
use chrono::Datelike;
(self.year() as u32).serialize_with(buffer, Padding::zeros(4));
(self.month() as u32).serialize_with(buffer, Padding::zeros(2));
(self.day() as u32).serialize_with(buffer, Padding::zeros(2));
8
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
let date = Date::deserialize(data).map_err(|_| "Invalid date format.")?;
date.to_chrono_naive().ok_or("Invalid date range.")
}
#[inline(always)]
fn deserialize_lossy(data: &'a [u8]) -> Result<Self, Self::Error> {
let date = Date::deserialize_lossy(data).map_err(|_| "Invalid date format.")?;
date.to_chrono_naive().ok_or("Invalid date range.")
}
}
#[cfg(feature = "utils-rust-decimal")]
impl<'a> FixValue<'a> for rust_decimal::Decimal {
type Error = error::Decimal;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
// TODO: Remove allocations.
let s = ToString::to_string(self);
buffer.extend_from_slice(s.as_bytes());
s.as_bytes().len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
use std::str::FromStr;
let s = std::str::from_utf8(data).map_err(|_| Self::Error::NotUtf8)?;
rust_decimal::Decimal::from_str(s).map_err(|err| Self::Error::Other(err.to_string()))
}
}
#[cfg(feature = "utils-decimal")]
impl<'a> FixValue<'a> for decimal::d128 {
type Error = decimal::Status;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
// TODO: Remove allocations.
let s = ToString::to_string(self);
buffer.extend_from_slice(s.as_bytes());
s.as_bytes().len()
}
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
use std::str::FromStr;
decimal::d128::set_status(decimal::Status::empty());
let s = std::str::from_utf8(data).unwrap_or("invalid UTF-8");
let number =
decimal::d128::from_str(s).expect("decimal::d128 should always parse without errors");
let status = decimal::d128::get_status();
if status.is_empty() {
Ok(number)
} else {
Err(status)
}
}
}
impl<'a> FixValue<'a> for bool {
type Error = error::Bool;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
let byte = if *self { b'Y' } else { b'N' };
buffer.extend_from_slice(&[byte]);
1
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
if data.len() != 1 {
Err(Self::Error::WrongLength)
} else if data[0] == b'Y' {
Ok(true)
} else if data[0] == b'N' {
Ok(false)
} else {
Err(Self::Error::InvalidCharacter)
}
}
#[inline(always)]
fn deserialize_lossy(data: &'a [u8]) -> Result<Self, Self::Error> {
if data.len() != 1 {
Err(Self::Error::WrongLength)
} else {
Ok(data[0] == b'Y')
}
}
}
impl<'a> FixValue<'a> for &'a str {
type Error = std::str::Utf8Error;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
buffer.extend_from_slice(self.as_bytes());
self.as_bytes().len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
std::str::from_utf8(data)
}
}
impl<'a> FixValue<'a> for u8 {
type Error = error::Int;
type SerializeSettings = ();
const IS_ASCII: bool = false;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
buffer.extend_from_slice(&[*self]);
1
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
        // Guard against empty input rather than panicking on `data[0]`.
        data.first().copied().ok_or(Self::Error::Other)
}
}
impl<'a> FixValue<'a> for &'a [u8] {
type Error = ();
type SerializeSettings = ();
const IS_ASCII: bool = false;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
buffer.extend_from_slice(self);
self.len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
Ok(data)
}
}
impl<'a, const N: usize> FixValue<'a> for [u8; N] {
type Error = ();
type SerializeSettings = ();
const IS_ASCII: bool = false;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, settings: ()) -> usize
where
B: Buffer,
{
(&self).serialize_with(buffer, settings)
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
data.try_into().map_err(|_| ())
}
}
impl<'a, const N: usize> FixValue<'a> for &'a [u8; N] {
type Error = ();
type SerializeSettings = ();
const IS_ASCII: bool = false;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
buffer.extend_from_slice(&self[..]);
self.len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
data.try_into().map_err(|_| ())
}
}
impl<'a> FixValue<'a> for TagU16 {
type Error = error::Int;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
let s = ToString::to_string(self);
buffer.extend_from_slice(s.as_bytes());
s.len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
let s = std::str::from_utf8(data).map_err(|_| Self::Error::InvalidUtf8)?;
s.parse().map_err(|_| Self::Error::Other)
}
#[inline(always)]
fn
|
(data: &'a [u8]) -> Result<Self, Self::Error> {
fn ascii_digit_to_u16(digit: u8) -> u16 {
(digit as u16).wrapping_sub(b'0' as u16)
}
let mut n = 0u16;
for byte in data.iter().copied() {
n = n.wrapping_mul(10).wrapping_add(ascii_digit_to_u16(byte));
}
TagU16::new(n).ok_or(Self::Error::Other)
}
}
impl<'a> FixValue<'a> for u32 {
type Error = error::Int;
type SerializeSettings = Padding;
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, padding: Self::SerializeSettings) -> usize
where
B: Buffer,
{
if padding.len == 0 {
let s = ToString::to_string(self);
buffer.extend_from_slice(s.as_bytes());
return s.len();
}
let initial_len = buffer.len();
buffer.resize(buffer.len() + padding.len, padding.byte);
let bytes = buffer.as_mut_slice();
let mut multiplier = 1;
for i in (0..padding.len).rev() {
bytes[i + initial_len] = ((self / multiplier) % 10).wrapping_add(b'0' as u32) as u8;
multiplier *= 10;
}
padding.len
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
let s = std::str::from_utf8(data).map_err(|_| Self::Error::InvalidUtf8)?;
s.parse().map_err(|_| Self::Error::Other)
}
#[inline(always)]
fn deserialize_lossy(data: &'a [u8]) -> Result<Self, Self::Error> {
fn ascii_digit_to_u32(digit: u8) -> u32 {
(digit as u32).wrapping_sub(b'0' as u32)
}
let mut n = 0u32;
for byte in data.iter().copied() {
n = n.wrapping_mul(10).wrapping_add(ascii_digit_to_u32(byte));
}
Ok(n)
}
}
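// Editor's note: a minimal sketch of zero-padded serialization via the
// `Padding` settings accepted by the `u32` implementation above.
#[cfg(test)]
mod padding_example {
    use super::*;

    #[test]
    fn u32_zero_padded() {
        let mut buffer = Vec::new();
        // `Padding::zeros(4)` left-pads the decimal digits with ASCII zeros.
        let len = 7u32.serialize_with(&mut buffer, Padding::zeros(4));
        assert_eq!(len, 4);
        assert_eq!(&buffer[..], b"0007" as &[u8]);
    }
}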
impl<'a> FixValue<'a> for i32 {
type Error = error::Int;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
let s = ToString::to_string(self);
buffer.extend_from_slice(s.as_bytes());
s.len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
let s = std::str::from_utf8(data).map_err(|_| Self::Error::InvalidUtf8)?;
s.parse().map_err(|_| Self::Error::Other)
}
#[inline(always)]
    fn deserialize_lossy(data: &'a [u8]) -> Result<Self, Self::Error> {
        fn ascii_digit_to_i32(digit: u8) -> i32 {
            digit as i32 - b'0' as i32
        }
        // Strip an optional leading minus sign *before* accumulating digits;
        // feeding b'-' through the digit conversion would corrupt the result.
        let (sign, digits) = if data.first() == Some(&b'-') {
            (-1, &data[1..])
        } else {
            (1, data)
        };
        let mut n = 0;
        for byte in digits.iter().copied() {
            n = n * 10 + ascii_digit_to_i32(byte);
        }
        Ok(n * sign)
}
}
impl<'a> FixValue<'a> for u64 {
type Error = error::Int;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
let s = ToString::to_string(self);
buffer.extend_from_slice(s.as_bytes());
s.len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
let s = std::str::from_utf8(data).map_err(|_| Self::Error::InvalidUtf8)?;
s.parse().map_err(|_| Self::Error::Other)
}
#[inline(always)]
fn deserialize_lossy(data: &'a [u8]) -> Result<Self, Self::Error> {
fn ascii_digit_to_u64(digit: u8) -> u64 {
digit as u64 - b'0' as u64
}
let mut n = 0;
for byte in data.iter().copied() {
n = n * 10 + ascii_digit_to_u64(byte);
}
Ok(n)
}
}
impl<'a> FixValue<'a> for i64 {
type Error = error::Int;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
let s = ToString::to_string(self);
buffer.extend_from_slice(s.as_bytes());
s.len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
let s = std::str::from_utf8(data).map_err(|_| Self::Error::InvalidUtf8)?;
s.parse().map_err(|_| Self::Error::Other)
}
#[inline(always)]
    fn deserialize_lossy(data: &'a [u8]) -> Result<Self, Self::Error> {
        fn ascii_digit_to_i64(digit: u8) -> i64 {
            digit as i64 - b'0' as i64
        }
        // As with `i32`, strip the optional leading minus sign before
        // accumulating digits so negative inputs parse correctly.
        let (sign, digits) = if data.first() == Some(&b'-') {
            (-1, &data[1..])
        } else {
            (1, data)
        };
        let mut n = 0;
        for byte in digits.iter().copied() {
            n = n * 10 + ascii_digit_to_i64(byte);
        }
        Ok(n * sign)
}
}
impl<'a> FixValue<'a> for usize {
type Error = error::Int;
type SerializeSettings = ();
const IS_ASCII: bool = true;
#[inline(always)]
fn serialize_with<B>(&self, buffer: &mut B, _settings: ()) -> usize
where
B: Buffer,
{
let s = ToString::to_string(self);
buffer.extend_from_slice(s.as_bytes());
s.len()
}
#[inline(always)]
fn deserialize(data: &'a [u8]) -> Result<Self, Self::Error> {
let s = std::str::from_utf8(data).map_err(|_| Self::Error::InvalidUtf8)?;
s.parse().map_err(|_| Self::Error::Other)
}
#[inline(always)]
fn deserialize_lossy(data: &'a [u8]) -> Result<Self, Self::Error> {
fn ascii_digit_to_usize(digit: u8) -> usize {
digit as usize - b'0' as usize
}
let mut n = 0;
for byte in data.iter().copied() {
n = n * 10 + ascii_digit_to_usize(byte);
}
Ok(n)
}
}
#[cfg(test)]
mod test {
use super::*;
use quickcheck_macros::quickcheck;
#[test]
fn serialize_bools() {
let mut buffer = Vec::new();
assert_eq!(true.serialize(&mut buffer), 1);
assert_eq!(false.serialize(&mut buffer), 1);
assert_eq!(&buffer[..], b"YN" as &[u8]);
}
#[quickcheck]
fn serialize_bytes(data: Vec<Vec<u8>>) -> bool {
let mut buffer = Vec::new();
for slice in data.iter() {
assert_eq!((&slice[..]).serialize(&mut buffer), slice.len());
}
&buffer[..] == &data.iter().flatten().copied().collect::<Vec<u8>>()[..]
}
#[quickcheck]
fn u32_serialize(n: u32) -> bool {
let buffer = &mut Vec::new();
let s = FixValue::to_string(&n);
let bytes = s.as_bytes();
let len = n.serialize(buffer);
bytes == buffer.as_slice() && len == bytes.len()
}
#[test]
fn serialize_country() {
let mut buffer = Vec::new();
assert_eq!(b"IT".serialize(&mut buffer), 2);
assert_eq!(&buffer[..], b"IT" as &[u8]);
}
#[test]
fn serialize_currency() {
let mut buffer = Vec::new();
assert_eq!(b"USD".serialize(&mut buffer), 3);
assert_eq!(&buffer[..], b"USD" as &[u8]);
}
}
|
deserialize_lossy
|
dpstf2.go
|
// Copyright ©2021 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testlapack
import (
"fmt"
"math"
"testing"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/blas"
"gonum.org/v1/gonum/blas/blas64"
"gonum.org/v1/gonum/lapack"
)
type Dpstf2er interface {
Dpstf2(uplo blas.Uplo, n int, a []float64, lda int, piv []int, tol float64, work []float64) (rank int, ok bool)
}
func Dpstf2Test(t *testing.T, impl Dpstf2er) {
|
func dpstf2Test(t *testing.T, impl Dpstf2er, rnd *rand.Rand, uplo blas.Uplo, n, lda, rankWant int) {
const tol = 1e-14
name := fmt.Sprintf("n=%v,lda=%v", n, lda)
bi := blas64.Implementation()
// Generate a random, symmetric A with the given rank by applying rankWant
// rank-1 updates to the zero matrix.
a := make([]float64, n*lda)
for i := 0; i < rankWant; i++ {
x := randomSlice(n, rnd)
bi.Dsyr(uplo, n, 1, x, 1, a, lda)
}
// Make a copy of A for storing the factorization.
aFac := make([]float64, len(a))
copy(aFac, a)
// Allocate a slice for pivots and fill it with invalid index values.
piv := make([]int, n)
for i := range piv {
piv[i] = -1
}
// Allocate the work slice.
work := make([]float64, 2*n)
// Call Dpstf2 to compute the Cholesky factorization with complete pivoting.
rank, ok := impl.Dpstf2(uplo, n, aFac, lda, piv, -1, work)
if ok != (rank == n) {
t.Errorf("%v: unexpected ok; got %v, want %v", name, ok, rank == n)
}
if rank != rankWant {
t.Errorf("%v: unexpected rank; got %v, want %v", name, rank, rankWant)
}
if n == 0 {
return
}
// Reconstruct the symmetric positive semi-definite matrix A from its L or U
// factors and the permutation matrix P.
perm := zeros(n, n, n)
if uplo == blas.Upper {
// Change notation.
u, ldu := aFac, lda
// Zero out last n-rank rows of the factor U.
for i := rank; i < n; i++ {
for j := i; j < n; j++ {
u[i*ldu+j] = 0
}
}
// Extract U to aRec.
aRec := zeros(n, n, n)
for i := 0; i < n; i++ {
for j := i; j < n; j++ {
aRec.Data[i*aRec.Stride+j] = u[i*ldu+j]
}
}
// Multiply U by Uᵀ from the left.
bi.Dtrmm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, n, n,
1, u, ldu, aRec.Data, aRec.Stride)
// Form P * Uᵀ * U * Pᵀ.
for i := 0; i < n; i++ {
for j := 0; j < n; j++ {
if piv[i] > piv[j] {
// Don't set the lower triangle.
continue
}
if i <= j {
perm.Data[piv[i]*perm.Stride+piv[j]] = aRec.Data[i*aRec.Stride+j]
} else {
perm.Data[piv[i]*perm.Stride+piv[j]] = aRec.Data[j*aRec.Stride+i]
}
}
}
// Compute the difference P*Uᵀ*U*Pᵀ - A.
for i := 0; i < n; i++ {
for j := i; j < n; j++ {
perm.Data[i*perm.Stride+j] -= a[i*lda+j]
}
}
} else {
// Change notation.
l, ldl := aFac, lda
// Zero out last n-rank columns of the factor L.
for i := rank; i < n; i++ {
for j := rank; j <= i; j++ {
l[i*ldl+j] = 0
}
}
// Extract L to aRec.
aRec := zeros(n, n, n)
for i := 0; i < n; i++ {
for j := 0; j <= i; j++ {
aRec.Data[i*aRec.Stride+j] = l[i*ldl+j]
}
}
// Multiply L by Lᵀ from the right.
bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.NonUnit, n, n,
1, l, ldl, aRec.Data, aRec.Stride)
// Form P * L * Lᵀ * Pᵀ.
for i := 0; i < n; i++ {
for j := 0; j < n; j++ {
if piv[i] < piv[j] {
// Don't set the upper triangle.
continue
}
if i >= j {
perm.Data[piv[i]*perm.Stride+piv[j]] = aRec.Data[i*aRec.Stride+j]
} else {
perm.Data[piv[i]*perm.Stride+piv[j]] = aRec.Data[j*aRec.Stride+i]
}
}
}
// Compute the difference P*L*Lᵀ*Pᵀ - A.
for i := 0; i < n; i++ {
for j := 0; j <= i; j++ {
perm.Data[i*perm.Stride+j] -= a[i*lda+j]
}
}
}
// Compute |P*Uᵀ*U*Pᵀ - A| / n or |P*L*Lᵀ*Pᵀ - A| / n.
resid := dlansy(lapack.MaxColumnSum, uplo, n, perm.Data, perm.Stride) / float64(n)
if resid > tol || math.IsNaN(resid) {
t.Errorf("%v: residual too large; got %v, want<=%v", name, resid, tol)
}
}
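// applyPivots is an editor's illustrative sketch (not used by the test above):
// given an n×n matrix b and the pivot vector piv returned by Dpstf2, it forms
// the permuted product P*B*Pᵀ explicitly, i.e. out[piv[i], piv[j]] = b[i, j].
// This is the operation the loops over perm.Data above perform triangle-by-triangle.
func applyPivots(n int, b []float64, ldb int, piv []int, out []float64, ldo int) {
	for i := 0; i < n; i++ {
		for j := 0; j < n; j++ {
			out[piv[i]*ldo+piv[j]] = b[i*ldb+j]
		}
	}
}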
|
rnd := rand.New(rand.NewSource(1))
for _, uplo := range []blas.Uplo{blas.Upper, blas.Lower} {
t.Run(uploToString(uplo), func(t *testing.T) {
for _, n := range []int{0, 1, 2, 3, 4, 5, 10, 20, 50} {
for _, lda := range []int{max(1, n), n + 5} {
for _, rank := range []int{int(0.7 * float64(n)), n} {
dpstf2Test(t, impl, rnd, uplo, n, lda, rank)
}
}
}
})
}
}
|