file_name | prefix | suffix | middle
---|---|---|---
stringlengths 3-137 | stringlengths 0-918k | stringlengths 0-962k | stringlengths 0-812k
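Each row below lists a file_name followed by its prefix, suffix, and middle columns, separated by `|` markers, i.e. a fill-in-the-middle (FIM) split where the original file is prefix + middle + suffix. A minimal sketch of reassembling one such row, assuming rows are available as plain dicts (the loading step and the example values are illustrative, not taken from this dump):

```python
# Sketch: stitch a FIM-style row (file_name, prefix, suffix, middle) back together.
# The dict-based row access and the example content are assumptions for illustration.
def reassemble(row: dict) -> str:
    # The original file is the prefix, then the held-out middle, then the suffix.
    return row["prefix"] + row["middle"] + row["suffix"]

example = {
    "file_name": "example.py",
    "prefix": "def add(a, b):\n    ",
    "middle": "return a + b",
    "suffix": "\n",
}
print(reassemble(example))  # def add(a, b): / return a + b
```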
test_runtime.py
|
"""Test DeltaPySimulator functionality pre-execution."""
import unittest
import deltalanguage as dl
from deltalanguage.test._graph_lib import (getg_const_chain,
getg_optional_queues)
class DeltaQueueCreationTest(unittest.TestCase):
"""Test that the simulator creates queues properly."""
def test_queue_types(self):
"""Test that queues of correct types are created (or not) depending on
the type of the source and destination nodes.
"""
graph = getg_const_chain()
dl.DeltaPySimulator(graph)
self.assertEqual(len(graph.nodes[0].out_queues), 0)
self.assertEqual(len(graph.nodes[1].out_queues), 1)
self.assertEqual(len(graph.nodes[2].out_queues), 1)
self.assertEqual(type(graph.nodes[1].out_queues['output']),
dl.runtime.ConstQueue)
self.assertEqual(type(graph.nodes[2].out_queues['output']),
dl.runtime.DeltaQueue)
def test_queue_optionality(self):
|
if __name__ == "__main__":
unittest.main()
|
"""Test that queues inhere correct optionality depending on the type of
the destination node.
"""
graph = getg_optional_queues()
dl.DeltaPySimulator(graph)
self.assertEqual(graph.nodes[0].out_queues['output'].optional, True)
self.assertEqual(graph.nodes[1].out_queues['output'].optional, False)
|
Decode_Ways_Solution_1.py
|
# Space: O(n)
# Time: O(n)
class Solution:
def numDecodings(self, s: str) -> int:
if len(s) == 0: return 0
self.cache = {}
self.cache[''] = 1
def recursive(string):
|
if len(string) == 1: return 1
temp_res = recursive(string[1:])
prefix = int(string[:2])
if 0 < prefix <= 26:
temp_res += recursive(string[2:])
self.cache[string] = temp_res
return temp_res
return recursive(s)
|
if string in self.cache: return self.cache[string]
if string[0] == '0': return 0
|
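Read back in prefix, middle, suffix order, the Decode_Ways_Solution_1.py row above is a memoized top-down count of decodings. A minimal sketch with the three segments restored to source order, plus an assumed example call (the input "226" is illustrative):

```python
# Sketch: the same memoized recursion with the FIM segments in source order.
class Solution:
    def numDecodings(self, s: str) -> int:
        if len(s) == 0:
            return 0
        self.cache = {'': 1}  # the empty suffix decodes in exactly one way

        def recursive(string):
            if string in self.cache:
                return self.cache[string]
            if string[0] == '0':
                return 0  # a leading zero cannot start a valid letter code
            if len(string) == 1:
                return 1
            temp_res = recursive(string[1:])       # consume one digit (1-9)
            prefix = int(string[:2])
            if 0 < prefix <= 26:
                temp_res += recursive(string[2:])  # consume two digits (10-26)
            self.cache[string] = temp_res
            return temp_res

        return recursive(s)

print(Solution().numDecodings("226"))  # 3: "2 2 6", "22 6", "2 26"
```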
input_value.rs
|
use std::sync::Arc;
use async_graphql::*;
#[tokio::test]
pub async fn test_input_value_custom_error() {
struct Query;
#[Object]
impl Query {
async fn parse_int(&self, _n: i8) -> bool {
true
}
}
let schema = Schema::new(Query, EmptyMutation, EmptySubscription);
let query = r#"{ parseInt(n:289) }"#;
assert_eq!(
schema.execute(query).await.into_result().unwrap_err(),
vec![ServerError {
message: "Failed to parse \"Int\": Only integers from -128 to 127 are accepted."
.to_owned(),
source: None,
locations: vec![Pos {
line: 1,
column: 14,
}],
path: vec![PathSegment::Field("parseInt".to_owned())],
extensions: None,
}],
);
}
#[tokio::test]
pub async fn test_input_box_str() {
struct Query;
#[Object]
impl Query {
async fn box_str(&self, s: Box<str>) -> String {
s.to_string()
}
async fn arc_str(&self, s: Arc<str>) -> String {
s.to_string()
}
}
let schema = Schema::new(Query, EmptyMutation, EmptySubscription);
let query = r#"{ boxStr(s: "abc") arcStr(s: "def") }"#;
assert_eq!(
schema.execute(query).await.into_result().unwrap().data,
value!({
"boxStr": "abc",
"arcStr": "def",
})
);
}
#[tokio::test]
pub async fn test_input_box_slice() {
struct Query;
#[Object]
impl Query {
async fn box_slice(&self, s: Box<[i32]>) -> Box<[i32]> {
s
}
async fn arc_slice(&self, s: Arc<[i32]>) -> Arc<[i32]> {
s
}
}
let schema = Schema::new(Query, EmptyMutation, EmptySubscription);
let query = r#"{ boxSlice(s: [1, 2, 3]) arcSlice(s: [4, 5, 6]) }"#;
assert_eq!(
schema.execute(query).await.into_result().unwrap().data,
value!({
"boxSlice": [1, 2, 3],
"arcSlice": [4, 5, 6],
})
|
);
}
|
|
bubble_order_test.go
|
package order
import (
"reflect"
"testing"
)
func
|
(t *testing.T) {
type args struct {
array []int
}
tests := []struct {
name string
args args
want []int
}{
// TODO: Add test cases.
{name: "test_1", args: args{array: []int{1, 2, 3}}, want: []int{1, 2, 3}},
{name: "test_2", args: args{array: []int{3, 2, 1}}, want: []int{1, 2, 3}},
{name: "test_3", args: args{array: []int{3, 1, 2}}, want: []int{1, 2, 3}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := Bubble(tt.args.array); !reflect.DeepEqual(got, tt.want) {
t.Errorf("Bubble() = %v, want %v", got, tt.want)
}
})
}
}
|
TestBubble
|
integration_two.rs
|
use actix_web::{web, HttpResponse, Responder};
use serde::{Deserialize, Serialize};
#[derive(Deserialize, Serialize, Debug)]
struct AppState {
count: i32,
}
#[allow(dead_code)]
async fn index(data: web::Data<AppState>) -> impl Responder {
HttpResponse::Ok().json(data.get_ref())
}
// <integration-two>
#[cfg(test)]
mod tests {
use super::*;
use actix_web::{test, web, App};
#[actix_web::test]
async fn test_index_get()
|
}
// </integration-two>
|
{
let app = test::init_service(
App::new()
.app_data(web::Data::new(AppState { count: 4 }))
.route("/", web::get().to(index)),
)
.await;
let req = test::TestRequest::get().uri("/").to_request();
let resp: AppState = test::call_and_read_body_json(&app, req).await;
assert_eq!(resp.count, 4);
}
|
app.module.ts
|
angular
.module("app", [
"ngRoute"
]);
})();
|
/// <reference path="../typings/tsd.d.ts" />
(():void => {
|
|
counts.rs
|
use serde::{Deserialize, Serialize};
use abstio::MapName;
use abstutil::{prettyprint_usize, Counter, Timer};
use geom::Distance;
use map_model::{IntersectionID, Map, PathRequest, PathStepV2, PathV2, Pathfinder, RoadID};
/// This represents the number of vehicles (or trips, or something else) crossing roads and
/// intersections over some span of time. The data could represent real observations or something
/// from a simulation.
///
/// There are some nice UIs in other crates for comparing counts.
#[derive(Clone, Serialize, Deserialize)]
pub struct TrafficCounts {
pub map: MapName,
// TODO For now, squeeze everything into this -- mode, weekday/weekend, time of day, data
// source, etc
pub description: String,
// TODO Maybe per direction, movement
pub per_road: Counter<RoadID>,
pub per_intersection: Counter<IntersectionID>,
}
impl Default for TrafficCounts {
fn default() -> Self {
Self {
map: MapName::new("zz", "place", "holder"),
description: String::new(),
per_road: Counter::new(),
per_intersection: Counter::new(),
}
}
}
impl TrafficCounts {
/// Run pathfinding on all of the requests, then count the throughput on every road and
/// intersection. Each request has the count it should contribute -- use
/// `PathRequest::deduplicate` to easily generate this.
pub fn from_path_requests(
map: &Map,
description: String,
requests: &[(PathRequest, usize)],
pathfinder: &Pathfinder,
timer: &mut Timer,
) -> Self {
let mut counts = Self {
map: map.get_name().clone(),
description,
per_road: Counter::new(),
per_intersection: Counter::new(),
};
// Statistic::Min will be wrong later for roads that are 0, so explicitly start with 0 for every
// road/intersection.
for r in map.all_roads() {
counts.per_road.add(r.id, 0);
}
for i in map.all_intersections() {
counts.per_intersection.add(i.id, 0);
}
// It's very memory intensive to calculate all of the paths in one chunk, then process them to
// get counts. Increment the counters as we go.
//
// TODO But that makes it hard to use timer.parallelize for this. We could make a thread-local
// Counter and aggregate them at the end, but the way timer.parallelize uses scoped_threadpool
// right now won't let that work. Stick to single-threaded for now.
timer.start_iter("calculate routes", requests.len());
for (req, count) in requests {
timer.next();
if let Some(path) = pathfinder.pathfind_v2(req.clone(), map) {
counts.update_with_path(path, *count, map);
}
}
counts
}
pub fn update_with_path(&mut self, path: PathV2, count: usize, map: &Map) {
for step in path.get_steps() {
match step {
PathStepV2::Along(dr) | PathStepV2::Contraflow(dr) => {
self.per_road.add(dr.road, count);
}
PathStepV2::Movement(m) | PathStepV2::ContraflowMovement(m) => {
self.per_intersection.add(m.parent, count);
}
}
}
// If we're starting or ending at a border, count it
let req = path.get_req();
if req.start.dist_along() == Distance::ZERO {
// TODO src_i and dst_i may not work for pedestrians on contraflow sidewalks
let i = map.get_l(req.start.lane()).src_i;
if map.get_i(i).is_border() {
self.per_intersection.add(i, count);
}
} else
|
}
/// Print a comparison of counts. Only look at roads/intersections in `self`.
pub fn quickly_compare(&self, other: &TrafficCounts) {
// TODO Easy ASCII art table without huge dependencies?
println!("{} vs {}", self.description, other.description);
let mut sum = 0.0;
let mut n = 0;
for (r, cnt1) in self.per_road.borrow() {
let cnt1 = *cnt1;
let cnt2 = other.per_road.get(*r);
println!(
"{}: {} vs {}",
r,
prettyprint_usize(cnt1),
prettyprint_usize(cnt2)
);
sum += (cnt1 as f64 - cnt2 as f64).powi(2);
n += 1;
}
for (i, cnt1) in self.per_intersection.borrow() {
let cnt1 = *cnt1;
let cnt2 = other.per_intersection.get(*i);
println!(
"{}: {} vs {}",
i,
prettyprint_usize(cnt1),
prettyprint_usize(cnt2)
);
sum += (cnt1 as f64 - cnt2 as f64).powi(2);
n += 1;
}
println!("RMSE = {:.2}", (sum / n as f64).sqrt());
}
}
|
{
let i = map.get_l(req.end.lane()).dst_i;
if map.get_i(i).is_border() {
self.per_intersection.add(i, count);
}
}
|
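For reference, the value printed at the end of quickly_compare above is a root-mean-square error over every road and intersection visited:

\[ \mathrm{RMSE} = \sqrt{\frac{1}{n}\sum_{i=1}^{n}\bigl(c_i^{\mathrm{self}} - c_i^{\mathrm{other}}\bigr)^2} \]

where \(c_i^{\mathrm{self}}\) and \(c_i^{\mathrm{other}}\) are the per-road (or per-intersection) counts from the two TrafficCounts being compared and \(n\) is the number of entries iterated.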
image_versions.pb.go
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.18.1
// source: google/cloud/orchestration/airflow/service/v1/image_versions.proto
package service
import (
context "context"
proto "github.com/golang/protobuf/proto"
_ "google.golang.org/genproto/googleapis/api/annotations"
date "google.golang.org/genproto/googleapis/type/date"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// List ImageVersions in a project and location.
type ListImageVersionsRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// List ImageVersions in the given project and location, in the form:
// "projects/{projectId}/locations/{locationId}"
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// The maximum number of image_versions to return.
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// The next_page_token value returned from a previous List request, if any.
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
// Whether or not image versions from old releases should be included.
IncludePastReleases bool `protobuf:"varint,4,opt,name=include_past_releases,json=includePastReleases,proto3" json:"include_past_releases,omitempty"`
}
func (x *ListImageVersionsRequest) Reset() {
*x = ListImageVersionsRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ListImageVersionsRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListImageVersionsRequest) ProtoMessage() {}
func (x *ListImageVersionsRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListImageVersionsRequest.ProtoReflect.Descriptor instead.
func (*ListImageVersionsRequest) Descriptor() ([]byte, []int) {
return file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_rawDescGZIP(), []int{0}
}
func (x *ListImageVersionsRequest) GetParent() string {
if x != nil {
return x.Parent
}
return ""
}
func (x *ListImageVersionsRequest) GetPageSize() int32 {
if x != nil {
return x.PageSize
}
return 0
}
func (x *ListImageVersionsRequest) GetPageToken() string {
if x != nil {
return x.PageToken
}
return ""
}
func (x *ListImageVersionsRequest) GetIncludePastReleases() bool {
if x != nil {
return x.IncludePastReleases
}
return false
}
// The ImageVersions in a project and location.
type ListImageVersionsResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The list of supported ImageVersions in a location.
ImageVersions []*ImageVersion `protobuf:"bytes,1,rep,name=image_versions,json=imageVersions,proto3" json:"image_versions,omitempty"`
// The page token used to query for the next page if one exists.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
}
func (x *ListImageVersionsResponse) Reset() {
*x = ListImageVersionsResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ListImageVersionsResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListImageVersionsResponse) ProtoMessage() {}
func (x *ListImageVersionsResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListImageVersionsResponse.ProtoReflect.Descriptor instead.
func (*ListImageVersionsResponse) Descriptor() ([]byte, []int) {
return file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_rawDescGZIP(), []int{1}
}
func (x *ListImageVersionsResponse) GetImageVersions() []*ImageVersion {
if x != nil {
return x.ImageVersions
}
return nil
}
func (x *ListImageVersionsResponse) GetNextPageToken() string {
if x != nil {
return x.NextPageToken
}
return ""
}
// ImageVersion information
type ImageVersion struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The string identifier of the ImageVersion, in the form:
// "composer-x.y.z-airflow-a.b(.c)"
ImageVersionId string `protobuf:"bytes,1,opt,name=image_version_id,json=imageVersionId,proto3" json:"image_version_id,omitempty"`
// Whether this is the default ImageVersion used by Composer during
// environment creation if no input ImageVersion is specified.
IsDefault bool `protobuf:"varint,2,opt,name=is_default,json=isDefault,proto3" json:"is_default,omitempty"`
// supported python versions
SupportedPythonVersions []string `protobuf:"bytes,3,rep,name=supported_python_versions,json=supportedPythonVersions,proto3" json:"supported_python_versions,omitempty"`
// The date of the version release.
ReleaseDate *date.Date `protobuf:"bytes,4,opt,name=release_date,json=releaseDate,proto3" json:"release_date,omitempty"`
// Whether it is impossible to create an environment with the image version.
CreationDisabled bool `protobuf:"varint,5,opt,name=creation_disabled,json=creationDisabled,proto3" json:"creation_disabled,omitempty"`
// Whether it is impossible to upgrade an environment running with the image
// version.
UpgradeDisabled bool `protobuf:"varint,6,opt,name=upgrade_disabled,json=upgradeDisabled,proto3" json:"upgrade_disabled,omitempty"`
}
func (x *ImageVersion) Reset() {
*x = ImageVersion{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ImageVersion) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ImageVersion) ProtoMessage() {}
func (x *ImageVersion) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ImageVersion.ProtoReflect.Descriptor instead.
func (*ImageVersion) Descriptor() ([]byte, []int) {
return file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_rawDescGZIP(), []int{2}
}
func (x *ImageVersion) GetImageVersionId() string {
if x != nil {
return x.ImageVersionId
}
return ""
}
func (x *ImageVersion) GetIsDefault() bool {
if x != nil {
return x.IsDefault
}
return false
}
func (x *ImageVersion) GetSupportedPythonVersions() []string {
if x != nil {
return x.SupportedPythonVersions
}
return nil
}
func (x *ImageVersion) GetReleaseDate() *date.Date {
if x != nil {
return x.ReleaseDate
}
return nil
}
func (x *ImageVersion) GetCreationDisabled() bool {
if x != nil {
return x.CreationDisabled
}
return false
}
func (x *ImageVersion) GetUpgradeDisabled() bool {
if x != nil {
return x.UpgradeDisabled
}
return false
}
var File_google_cloud_orchestration_airflow_service_v1_image_versions_proto protoreflect.FileDescriptor
var file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_rawDesc = []byte{
0x0a, 0x42, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6f,
0x72, 0x63, 0x68, 0x65, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x61, 0x69, 0x72,
0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f,
0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
0x75, 0x64, 0x2e, 0x6f, 0x72, 0x63, 0x68, 0x65, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x2e, 0x61, 0x69, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f,
0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c,
0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x22, 0xa2, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65,
0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f,
0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65,
0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f,
0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70,
0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01,
0x28, 0x08, 0x52, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x61, 0x73, 0x74, 0x52,
0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74,
0x49, 0x6d, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x0e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x76,
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6f, 0x72, 0x63,
0x68, 0x65, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x61, 0x69, 0x72, 0x66, 0x6c,
0x6f, 0x77, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d,
0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x69, 0x6d, 0x61, 0x67,
0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78,
0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
0x6e, 0x22, 0xa1, 0x02, 0x0a, 0x0c, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69,
0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73,
0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6d,
0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a,
0x69, 0x73, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
0x52, 0x09, 0x69, 0x73, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x3a, 0x0a, 0x19, 0x73,
0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f,
0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17,
0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x56,
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x34, 0x0a, 0x0c, 0x72, 0x65, 0x6c, 0x65, 0x61,
0x73, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65,
0x52, 0x0b, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x44, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a,
0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c,
0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x75, 0x70,
0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06,
0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x44, 0x69, 0x73,
0x61, 0x62, 0x6c, 0x65, 0x64, 0x32, 0xc9, 0x02, 0x0a, 0x0d, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x56,
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xea, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74,
0x49, 0x6d, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x47, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6f, 0x72, 0x63,
0x68, 0x65, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x61, 0x69, 0x72, 0x66, 0x6c,
0x6f, 0x77, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69,
0x73, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6f, 0x72, 0x63, 0x68, 0x65, 0x73, 0x74, 0x72, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x2e, 0x61, 0x69, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x73, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65,
0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x42, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33, 0x12, 0x31, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70,
0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6d,
0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61,
0x72, 0x65, 0x6e, 0x74, 0x1a, 0x4b, 0xca, 0x41, 0x17, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65,
0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
0xd2, 0x41, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75,
0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72,
0x6d, 0x42, 0x8b, 0x01, 0x0a, 0x31, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6f, 0x72, 0x63, 0x68, 0x65, 0x73, 0x74, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x61, 0x69, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x73, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x50, 0x01, 0x5a, 0x54, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6f, 0x72, 0x63, 0x68, 0x65, 0x73, 0x74, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x61, 0x69, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x73, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_rawDescOnce sync.Once
file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_rawDescData = file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_rawDesc
)
func file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_rawDescGZIP() []byte {
file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_rawDescOnce.Do(func() {
file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_rawDescData)
})
return file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_rawDescData
}
var file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_goTypes = []interface{}{
(*ListImageVersionsRequest)(nil), // 0: google.cloud.orchestration.airflow.service.v1.ListImageVersionsRequest
(*ListImageVersionsResponse)(nil), // 1: google.cloud.orchestration.airflow.service.v1.ListImageVersionsResponse
(*ImageVersion)(nil), // 2: google.cloud.orchestration.airflow.service.v1.ImageVersion
(*date.Date)(nil), // 3: google.type.Date
}
var file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_depIdxs = []int32{
2, // 0: google.cloud.orchestration.airflow.service.v1.ListImageVersionsResponse.image_versions:type_name -> google.cloud.orchestration.airflow.service.v1.ImageVersion
3, // 1: google.cloud.orchestration.airflow.service.v1.ImageVersion.release_date:type_name -> google.type.Date
0, // 2: google.cloud.orchestration.airflow.service.v1.ImageVersions.ListImageVersions:input_type -> google.cloud.orchestration.airflow.service.v1.ListImageVersionsRequest
1, // 3: google.cloud.orchestration.airflow.service.v1.ImageVersions.ListImageVersions:output_type -> google.cloud.orchestration.airflow.service.v1.ListImageVersionsResponse
3, // [3:4] is the sub-list for method output_type
2, // [2:3] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_init() }
func file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_init()
|
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// ImageVersionsClient is the client API for ImageVersions service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ImageVersionsClient interface {
// List ImageVersions for provided location.
ListImageVersions(ctx context.Context, in *ListImageVersionsRequest, opts ...grpc.CallOption) (*ListImageVersionsResponse, error)
}
type imageVersionsClient struct {
cc grpc.ClientConnInterface
}
func NewImageVersionsClient(cc grpc.ClientConnInterface) ImageVersionsClient {
return &imageVersionsClient{cc}
}
func (c *imageVersionsClient) ListImageVersions(ctx context.Context, in *ListImageVersionsRequest, opts ...grpc.CallOption) (*ListImageVersionsResponse, error) {
out := new(ListImageVersionsResponse)
err := c.cc.Invoke(ctx, "/google.cloud.orchestration.airflow.service.v1.ImageVersions/ListImageVersions", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ImageVersionsServer is the server API for ImageVersions service.
type ImageVersionsServer interface {
// List ImageVersions for provided location.
ListImageVersions(context.Context, *ListImageVersionsRequest) (*ListImageVersionsResponse, error)
}
// UnimplementedImageVersionsServer can be embedded to have forward compatible implementations.
type UnimplementedImageVersionsServer struct {
}
func (*UnimplementedImageVersionsServer) ListImageVersions(context.Context, *ListImageVersionsRequest) (*ListImageVersionsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListImageVersions not implemented")
}
func RegisterImageVersionsServer(s *grpc.Server, srv ImageVersionsServer) {
s.RegisterService(&_ImageVersions_serviceDesc, srv)
}
func _ImageVersions_ListImageVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListImageVersionsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ImageVersionsServer).ListImageVersions(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.orchestration.airflow.service.v1.ImageVersions/ListImageVersions",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ImageVersionsServer).ListImageVersions(ctx, req.(*ListImageVersionsRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ImageVersions_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.cloud.orchestration.airflow.service.v1.ImageVersions",
HandlerType: (*ImageVersionsServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ListImageVersions",
Handler: _ImageVersions_ListImageVersions_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/cloud/orchestration/airflow/service/v1/image_versions.proto",
}
|
{
if File_google_cloud_orchestration_airflow_service_v1_image_versions_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListImageVersionsRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListImageVersionsResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ImageVersion); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_rawDesc,
NumEnums: 0,
NumMessages: 3,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_goTypes,
DependencyIndexes: file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_depIdxs,
MessageInfos: file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_msgTypes,
}.Build()
File_google_cloud_orchestration_airflow_service_v1_image_versions_proto = out.File
file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_rawDesc = nil
file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_goTypes = nil
file_google_cloud_orchestration_airflow_service_v1_image_versions_proto_depIdxs = nil
}
|
auth_test.go
|
package cmd
import (
"bytes"
"fmt"
"os"
"testing"
"github.com/keptn/keptn/cli/pkg/logging"
"github.com/keptn/keptn/cli/utils/credentialmanager"
)
func init() {
logging.InitLoggers(os.Stdout, os.Stdout, os.Stderr)
}
// TestAuthCmd tests the auth command. To do so, this test assumes a file "~/keptn/.keptnmock" containing
// the endpoint and the api-token.
func TestAuthCmd(t *testing.T)
|
{
credentialmanager.MockAuthCreds = true
endPoint, apiToken, err := credentialmanager.GetCreds()
if err != nil {
t.Error(err)
return
}
buf := new(bytes.Buffer)
rootCmd.SetOutput(buf)
args := []string{
"auth",
fmt.Sprintf("--endpoint=%s", endPoint.String()),
fmt.Sprintf("--api-token=%s", apiToken),
"--mock",
}
rootCmd.SetArgs(args)
err = rootCmd.Execute()
if err != nil {
t.Errorf("An error occured: %v", err)
}
}
|
|
android_xing.py
|
#Pluginname="Xing (Android)"
#Filename="conversations.db"
#Type=App
import struct
def convertdata(db):
#ctx.gui_clearData()
waconn=ctx.sqlite_run_cmd(db,"SELECT _id, im_skype, company_name, bussiness_province, birthdate, display_name, page_name, bussiness_city, occupation_title from users.users_table;")
if (waconn==-1):
print ("Error: "+ctx.sqlite_last_error(db))
return
contacts={}
if waconn!=-1:
rows=ctx.sqlite_get_data_size(waconn)[0]
for i in range(0,rows):
id=str(ctx.sqlite_get_data(waconn,i,0))
skype=str(ctx.sqlite_get_data(waconn,i,1))
company_name=str(ctx.sqlite_get_data(waconn,i,2))
bussiness_province=str(ctx.sqlite_get_data(waconn,i,3))
birthdate=str(ctx.sqlite_get_data(waconn,i,4))
display_name=str(ctx.sqlite_get_data(waconn,i,5))
page_name=str(ctx.sqlite_get_data(waconn,i,6))
bussiness_city=str(ctx.sqlite_get_data(waconn,i,7))
occupation_title=str(ctx.sqlite_get_data(waconn,i,8))
if (id not in contacts) or (contacts[id]==id):
if display_name != None:
contacts[id]=display_name
elif page_name != None:
contacts[id]=page_name
else:
contacts[id]=id
attconn=ctx.sqlite_run_cmd(db,"select msg_id, file_name from attachments_table;")
attachments={}
if attconn!=-1:
attrows=ctx.sqlite_get_data_size(attconn)[0]
for i in range(0,attrows):
id=str(ctx.sqlite_get_data(attconn,i,0))
filename=str(ctx.sqlite_get_data(attconn,i,1))
if (id not in attachments):
attachments[id]=filename
else:
attachments[id]+=";"+filename
conn=ctx.sqlite_run_cmd(db,"select messages_table.rowid, date, _id, body, sender, has_attachments from messages_table;")
rows=ctx.sqlite_get_data_size(conn)[0]
oldpos=0
r=0
for i in range(0,rows):
newpos=int(i/rows*100)
if (oldpos<newpos):
oldpos=newpos
ctx.gui_setMainProgressBar(oldpos)
rowid=ctx.sqlite_get_data(conn,i,0)
timestamp=ctx.sqlite_get_data(conn,i,1)
id=ctx.sqlite_get_data(conn,i,2)
body=ctx.sqlite_get_data(conn,i,3)
sender_id=ctx.sqlite_get_data(conn,i,4)
has_attachments=ctx.sqlite_get_data(conn,i,5)
name=""
sender_name=""
attaches=""
if id in attachments:
attaches=attachments[id]
if sender_id in contacts:
sender_name=contacts[sender_id]
ctx.gui_set_data(r,0,rowid)
ctx.gui_set_data(r,1,timestamp)
ctx.gui_set_data(r,2,sender_id)
ctx.gui_set_data(r,3,sender_name)
ctx.gui_set_data(r,4,body)
ctx.gui_set_data(r,5,attaches)
r+=1
ctx.sqlite_cmd_close(attconn)
ctx.sqlite_cmd_close(waconn)
ctx.sqlite_cmd_close(conn)
def main():
headers=["rowid (int)","timestamp (int)","_sender (QString)","_sender_alias (QString)","body (QString)","Attachments (QString)"]
ctx.gui_set_headers(headers)
ctx.gui_setMainLabel("Xing: Parsing Strings");
ctx.gui_setMainProgressBar(0)
db=ctx.sqlite_open("gui",True)
convertdata(db)
ctx.gui_update()
ctx.gui_setMainLabel("Status: Idle.")
ctx.gui_setMainProgressBar(0)
ctx.sqlite_close(db)
|
return "Finished running plugin."
|
|
index.ts
|
export { default } from './Logo';
export * from './Logo';
|
||
config.rs
|
// Copyright 2021 UINB Technologies Pte. Ltd.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use argparse::{ArgumentParser, Store};
use cfg_if::cfg_if;
use lazy_static::lazy_static;
use log4rs::config::{Logger, RawConfig as LogConfig};
use serde::Deserialize;
#[derive(Debug, Deserialize)]
pub struct Config {
pub server: ServerConfig,
pub sequence: SequenceConfig,
pub mysql: MysqlConfig,
pub redis: RedisConfig,
pub log: LogConfig,
pub fusotao: Option<FusotaoConfig>,
}
#[cfg(feature = "enc-conf")]
pub trait EncryptedConfig {
fn decrypt(&mut self, key: &str) -> anyhow::Result<()>;
}
#[derive(Debug, Deserialize)]
pub struct ServerConfig {
pub bind_addr: String,
}
#[derive(Debug, Deserialize)]
pub struct
|
{
pub coredump_dir: String,
pub checkpoint: usize,
pub batch_size: usize,
pub dump_mode: String,
pub fetch_intervel_ms: u64,
pub enable_from_genesis: bool,
}
#[derive(Debug, Deserialize)]
pub struct MysqlConfig {
pub url: String,
}
#[cfg(feature = "enc-conf")]
impl EncryptedConfig for MysqlConfig {
fn decrypt(&mut self, key: &str) -> anyhow::Result<()> {
use magic_crypt::MagicCryptTrait;
let mc = magic_crypt::new_magic_crypt!(key, 64);
let dec = mc.decrypt_base64_to_string(&self.url)?;
self.url.replace_range(.., &dec);
Ok(())
}
}
#[derive(Debug, Deserialize)]
pub struct FusotaoConfig {
pub node_url: String,
pub key_seed: String,
pub claim_block: u32,
pub fee_adjust_threshold: u64,
}
impl Default for FusotaoConfig {
fn default() -> Self {
Self {
node_url: String::from(""),
key_seed: String::from(""),
claim_block: 1,
fee_adjust_threshold: 10,
}
}
}
#[cfg(feature = "enc-conf")]
impl EncryptedConfig for FusotaoConfig {
fn decrypt(&mut self, key: &str) -> anyhow::Result<()> {
use magic_crypt::MagicCryptTrait;
let mc = magic_crypt::new_magic_crypt!(key, 64);
let dec = mc.decrypt_base64_to_string(&self.key_seed)?;
self.key_seed.replace_range(.., &dec);
Ok(())
}
}
#[derive(Debug, Deserialize)]
pub struct RedisConfig {
pub url: String,
}
lazy_static! {
pub static ref C: Config = init_config_file().unwrap();
}
fn init_config_file() -> anyhow::Result<Config> {
let mut file = String::new();
{
let mut args = ArgumentParser::new();
args.refer(&mut file)
.add_option(&["-c"], Store, "toml config file");
args.parse_args_or_exit();
}
init_config(&std::fs::read_to_string(file)?)
}
fn init_config(toml: &str) -> anyhow::Result<Config> {
cfg_if! {
if #[cfg(feature = "enc-conf")] {
let mut cfg: Config = toml::from_str(toml)?;
let key = std::env::var_os("MAGIC_KEY")
.ok_or(anyhow::anyhow!("env MAGIC_KEY not set"))?;
let key = key.to_str().ok_or_else(||anyhow::anyhow!("env MAGIC_KEY not set"))?;
cfg.mysql.decrypt(&key)?;
if let Some(ref mut fuso) = cfg.fusotao {
fuso.decrypt(&key)?;
}
} else {
let cfg: Config = toml::from_str(toml)?;
}
}
let mut loggers = cfg
.log
.loggers()
.iter()
.map(|l| (l.name().to_string(), l.clone()))
.collect::<std::collections::HashMap<String, _>>();
loggers
.entry("ws".to_string())
.or_insert_with(|| Logger::builder().build("ws".to_string(), log::LevelFilter::Error));
loggers
.entry("fusotao_client".to_string())
.or_insert_with(|| {
Logger::builder().build("fusotao_client".to_string(), log::LevelFilter::Error)
});
let log = log4rs::Config::builder()
.loggers::<Vec<_>>(loggers.into_values().collect())
.appenders(cfg.log.appenders_lossy(&Default::default()).0)
.build(cfg.log.root())?;
log4rs::init_config(log)?;
Ok(cfg)
}
#[test]
#[cfg(not(feature = "fusotao"))]
pub fn test_default() {
let toml = r#"
[server]
bind_addr = "127.0.0.1:8097"
[mysql]
url = "mysql://username:password@localhost:3306/galois"
[redis]
url = "redis://localhost:6379/0"
[sequence]
checkpoint = 100000
coredump_dir = "/tmp/snapshot"
batch_size = 1000
dump_mode = "disk"
fetch_intervel_ms = 5
enable_from_genesis = true
[log]
[log.appenders.console]
kind = "console"
[log.root]
level = "info"
appenders = ["console"]
"#;
let config = init_config(&toml).unwrap();
let mysql_opts = mysql::Opts::from_url(&config.mysql.url).unwrap();
assert_eq!("password", mysql_opts.get_pass().unwrap());
}
|
SequenceConfig
|
clevr.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Grzegorz Jacenków.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Training and evaluation pipeline for the networks."""
import csv
import os
import tensorflow as tf
from tensorflow.keras.metrics import Mean
from inside import config
from inside.callbacks import setup_callbacks
from inside.constructor import setup_comet_ml, setup_model
from inside.loaders import CLEVR
from inside.metrics import DiceScore
def _write_results(logs):
"""Write final logs to a CSV file."""
w = csv.writer(open(os.path.join(
config.EXPERIMENT_FOLDER, "results.csv"), "w"))
for key, val in logs.items():
w.writerow([key, val])
class Pipeline:
def __init__(self):
# Model.
self.model = setup_model()
# Comet.ml experiment.
self.comet_ml = setup_comet_ml()
# Testing metrics.
self.test_dice = DiceScore(name="testing_dice")
self.test_loss = Mean(name="testing_loss")
# Training metrics.
self.training_dice = DiceScore(name="training_dice")
self.training_loss = Mean(name="training_loss")
# Callbacks.
self.cl, self.es, self.mc, self.pp = setup_callbacks()
self.cl.model, self.es.model, self.mc.model = \
self.model, self.model, self.model
self.pp.model = self.model
self.pp.comet_ml = self.comet_ml
def fit(self):
"""Train the model."""
# Toy dataset.
loader = CLEVR()
train_ds, valid_ds, test_ds = loader.load()
with self.comet_ml.train():
self.cl.on_train_begin()
self.es.on_train_begin()
self.mc.on_train_begin()
self.pp.on_train_begin()
for epoch in range(config.EXPERIMENT_EPOCHS):
self.comet_ml.set_epoch(epoch)
for images, labels in train_ds:
self.train_step(images, labels)
for batch, (images, labels) in enumerate(valid_ds):
self.test_step(images, labels)
if not batch: # Log only first mini-batch from an epoch.
self.pp.on_epoch_end(epoch, images, labels)
# Get results.
logs = {
"dice": self.training_dice.result().numpy(),
"loss": self.training_loss.result().numpy(),
"validation_dice": self.test_dice.result().numpy(),
"validation_loss": self.test_loss.result().numpy(),
}
template = ("Epoch {}. Training Loss: {}. Training Dice: {}. "
"Validation Loss: {}. Validation Dice: {}.")
print(template.format(epoch + 1,
logs['loss'],
logs['dice'],
logs['validation_loss'],
logs['validation_dice']))
# Log metrics.
self.comet_ml.log_metrics(logs, epoch=epoch)
self.cl.on_epoch_end(epoch, logs)
self.es.on_epoch_end(epoch, logs)
self.mc.on_epoch_end(epoch, logs)
# Reset the metrics for the next epoch.
self.training_dice.reset_states()
self.training_loss.reset_states()
self.test_dice.reset_states()
self.test_loss.reset_states()
# Early stopping criterion.
if self.es.model.stop_training:
self.cl.on_train_end()
self.es.on_train_end()
self.mc.on_train_end()
break
with self.comet_ml.test():
for batch, (images, labels) in enumerate(test_ds):
self.test_step(images, labels)
if not batch:
s
|
# Get results.
logs = {
"dice": self.test_dice.result().numpy(),
"loss": self.test_loss.result().numpy(),
}
print("Test Loss: {}. Test Dice: {}.".format(
logs['loss'], logs['dice']))
# Log metrics.
self.comet_ml.log_metrics(logs)
_write_results(logs)
@tf.function
def train_step(self, images, labels):
with tf.GradientTape() as tape:
predictions = self.model.inference(images)
loss = self.model.loss(labels, predictions)
gradients = tape.gradient(loss, self.model.trainable_variables)
self.model.optimiser.apply_gradients(
zip(gradients, self.model.trainable_variables))
self.training_loss(loss)
self.training_dice(labels, predictions)
@tf.function
def test_step(self, images, labels):
predictions = self.model.inference(images)
t_loss = self.model.loss(labels, predictions)
self.test_loss(t_loss)
self.test_dice(labels, predictions)
|
elf.pp.on_test_end(images, labels)
|
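The Pipeline class in clevr.py wires the model, data loader, metrics, and callbacks together itself, so a driver only needs to construct it and call fit(). A minimal sketch, assuming the file is importable as a module named clevr (the import path and the __main__ guard are assumptions):

```python
# Hypothetical driver; the module name `clevr` is an assumption.
from clevr import Pipeline

if __name__ == "__main__":
    pipeline = Pipeline()
    pipeline.fit()  # trains, validates each epoch, then evaluates on the test split
```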
termbox.go
|
// +build !windows
package termbox
import "unicode/utf8"
import "bytes"
import "syscall"
import "unsafe"
import "strings"
import "strconv"
import "os"
import "io"
// private API
const (
t_enter_ca = iota
t_exit_ca
t_show_cursor
t_hide_cursor
t_clear_screen
t_sgr0
t_underline
t_bold
t_blink
t_reverse
t_enter_keypad
t_exit_keypad
t_enter_mouse
t_exit_mouse
t_max_funcs
)
const (
coord_invalid = -2
attr_invalid = Attribute(0xFFFF)
)
type input_event struct {
data []byte
err error
}
var (
// term specific sequences
keys []string
funcs []string
// termbox inner state
orig_tios syscall_Termios
back_buffer cellbuf
front_buffer cellbuf
termw int
termh int
input_mode = InputEsc
output_mode = OutputNormal
out *os.File
in int
lastfg = attr_invalid
lastbg = attr_invalid
lastx = coord_invalid
lasty = coord_invalid
cursor_x = cursor_hidden
cursor_y = cursor_hidden
foreground = ColorDefault
background = ColorDefault
inbuf = make([]byte, 0, 64)
outbuf bytes.Buffer
sigwinch = make(chan os.Signal, 1)
sigio = make(chan os.Signal, 1)
quit = make(chan int)
input_comm = make(chan input_event)
interrupt_comm = make(chan struct{})
intbuf = make([]byte, 0, 16)
// grayscale indexes
grayscale = []Attribute{
0, 17, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 232,
}
)
func write_cursor(x, y int) {
outbuf.WriteString("\033[")
outbuf.Write(strconv.AppendUint(intbuf, uint64(y+1), 10))
outbuf.WriteString(";")
outbuf.Write(strconv.AppendUint(intbuf, uint64(x+1), 10))
outbuf.WriteString("H")
}
func
|
(a Attribute) {
switch output_mode {
case Output256, Output216, OutputGrayscale:
outbuf.WriteString("\033[38;5;")
outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10))
outbuf.WriteString("m")
default:
outbuf.WriteString("\033[3")
outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10))
outbuf.WriteString("m")
}
}
func write_sgr_bg(a Attribute) {
switch output_mode {
case Output256, Output216, OutputGrayscale:
outbuf.WriteString("\033[48;5;")
outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10))
outbuf.WriteString("m")
default:
outbuf.WriteString("\033[4")
outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10))
outbuf.WriteString("m")
}
}
func write_sgr(fg, bg Attribute) {
switch output_mode {
case Output256, Output216, OutputGrayscale:
outbuf.WriteString("\033[38;5;")
outbuf.Write(strconv.AppendUint(intbuf, uint64(fg-1), 10))
outbuf.WriteString("m")
outbuf.WriteString("\033[48;5;")
outbuf.Write(strconv.AppendUint(intbuf, uint64(bg-1), 10))
outbuf.WriteString("m")
default:
outbuf.WriteString("\033[3")
outbuf.Write(strconv.AppendUint(intbuf, uint64(fg-1), 10))
outbuf.WriteString(";4")
outbuf.Write(strconv.AppendUint(intbuf, uint64(bg-1), 10))
outbuf.WriteString("m")
}
}
type winsize struct {
rows uint16
cols uint16
xpixels uint16
ypixels uint16
}
func get_term_size(fd uintptr) (int, int) {
var sz winsize
_, _, _ = syscall.Syscall(syscall.SYS_IOCTL,
fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&sz)))
return int(sz.cols), int(sz.rows)
}
func send_attr(fg, bg Attribute) {
if fg == lastfg && bg == lastbg {
return
}
outbuf.WriteString(funcs[t_sgr0])
var fgcol, bgcol Attribute
switch output_mode {
case Output256:
fgcol = fg & 0x1FF
bgcol = bg & 0x1FF
case Output216:
fgcol = fg & 0xFF
bgcol = bg & 0xFF
if fgcol > 216 {
fgcol = ColorDefault
}
if bgcol > 216 {
bgcol = ColorDefault
}
if fgcol != ColorDefault {
fgcol += 0x10
}
if bgcol != ColorDefault {
bgcol += 0x10
}
case OutputGrayscale:
fgcol = fg & 0x1F
bgcol = bg & 0x1F
if fgcol > 26 {
fgcol = ColorDefault
}
if bgcol > 26 {
bgcol = ColorDefault
}
if fgcol != ColorDefault {
fgcol = grayscale[fgcol]
}
if bgcol != ColorDefault {
bgcol = grayscale[bgcol]
}
default:
fgcol = fg & 0x0F
bgcol = bg & 0x0F
}
if fgcol != ColorDefault {
if bgcol != ColorDefault {
write_sgr(fgcol, bgcol)
} else {
write_sgr_fg(fgcol)
}
} else if bgcol != ColorDefault {
write_sgr_bg(bgcol)
}
if fg&AttrBold != 0 {
outbuf.WriteString(funcs[t_bold])
}
if bg&AttrBold != 0 {
outbuf.WriteString(funcs[t_blink])
}
if fg&AttrUnderline != 0 {
outbuf.WriteString(funcs[t_underline])
}
if fg&AttrReverse|bg&AttrReverse != 0 {
outbuf.WriteString(funcs[t_reverse])
}
lastfg, lastbg = fg, bg
}
func send_char(x, y int, ch rune) {
var buf [8]byte
n := utf8.EncodeRune(buf[:], ch)
if x-1 != lastx || y != lasty {
write_cursor(x, y)
}
lastx, lasty = x, y
outbuf.Write(buf[:n])
}
func flush() error {
_, err := io.Copy(out, &outbuf)
outbuf.Reset()
if err != nil {
return err
}
return nil
}
func send_clear() error {
send_attr(foreground, background)
outbuf.WriteString(funcs[t_clear_screen])
if !is_cursor_hidden(cursor_x, cursor_y) {
write_cursor(cursor_x, cursor_y)
}
// We need to invalidate the cursor position too. These two vars are used
// only for a simple cursor-positioning optimization; the cursor may
// actually be in the correct place, but discarding the optimization once
// gives us a simple solution for the case when the cursor has moved.
lastx = coord_invalid
lasty = coord_invalid
return flush()
}
func update_size_maybe() error {
w, h := get_term_size(out.Fd())
if w != termw || h != termh {
termw, termh = w, h
back_buffer.resize(termw, termh)
front_buffer.resize(termw, termh)
front_buffer.clear()
return send_clear()
}
return nil
}
func tcsetattr(fd uintptr, termios *syscall_Termios) error {
r, _, e := syscall.Syscall(syscall.SYS_IOCTL,
fd, uintptr(syscall_TCSETS), uintptr(unsafe.Pointer(termios)))
if r != 0 {
return os.NewSyscallError("SYS_IOCTL", e)
}
return nil
}
func tcgetattr(fd uintptr, termios *syscall_Termios) error {
r, _, e := syscall.Syscall(syscall.SYS_IOCTL,
fd, uintptr(syscall_TCGETS), uintptr(unsafe.Pointer(termios)))
if r != 0 {
return os.NewSyscallError("SYS_IOCTL", e)
}
return nil
}
func parse_escape_sequence(event *Event, buf []byte) (int, bool) {
bufstr := string(buf)
// mouse
if len(bufstr) >= 6 && strings.HasPrefix(bufstr, "\033[M") {
switch buf[3] & 3 {
case 0:
event.Key = MouseLeft
case 1:
event.Key = MouseMiddle
case 2:
event.Key = MouseRight
case 3:
return 6, false
}
event.Type = EventMouse // KeyEvent by default
// wheel up outputs MouseLeft
if buf[3] == 0x60 || buf[3] == 0x70 {
event.Key = MouseMiddle
}
// the coord is 1,1 for upper left
event.MouseX = int(buf[4]) - 1 - 32
event.MouseY = int(buf[5]) - 1 - 32
return 6, true
}
for i, key := range keys {
if strings.HasPrefix(bufstr, key) {
event.Ch = 0
event.Key = Key(0xFFFF - i)
return len(key), true
}
}
return 0, true
}
func extract_raw_event(data []byte, event *Event) bool {
if len(inbuf) == 0 {
return false
}
n := len(data)
if n == 0 {
return false
}
n = copy(data, inbuf)
copy(inbuf, inbuf[n:])
inbuf = inbuf[:len(inbuf)-n]
event.N = n
event.Type = EventRaw
return true
}
func extract_event(inbuf []byte, event *Event) bool {
if len(inbuf) == 0 {
event.N = 0
return false
}
if inbuf[0] == '\033' {
// possible escape sequence
n, ok := parse_escape_sequence(event, inbuf)
if n != 0 {
event.N = n
return ok
}
// it's not an escape sequence, so it's Alt or Esc; check input_mode
switch {
case input_mode&InputEsc != 0:
// if we're in escape mode, fill Esc event, pop buffer, return success
event.Ch = 0
event.Key = KeyEsc
event.Mod = 0
event.N = 1
return true
case input_mode&InputAlt != 0:
// if we're in alt mode, set Alt modifier to event and redo parsing
event.Mod = ModAlt
ok := extract_event(inbuf[1:], event)
if ok {
event.N++
} else {
event.N = 0
}
return ok
default:
panic("unreachable")
}
}
// if we're here, this is not an escape sequence and not an alt sequence
// so, it's a FUNCTIONAL KEY or a UNICODE character
// first of all check if it's a functional key
if Key(inbuf[0]) <= KeySpace || Key(inbuf[0]) == KeyBackspace2 {
// fill event, pop buffer, return success
event.Ch = 0
event.Key = Key(inbuf[0])
event.N = 1
return true
}
// the only possible option is utf8 rune
if r, n := utf8.DecodeRune(inbuf); r != utf8.RuneError {
event.Ch = r
event.Key = 0
event.N = n
return true
}
return false
}
func fcntl(fd int, cmd int, arg int) (val int, err error) {
r, _, e := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), uintptr(cmd),
uintptr(arg))
val = int(r)
if e != 0 {
err = e
}
return
}
|
write_sgr_fg
|
text.js
|
import {
constrain,
copy,
distance,
draw_brackets,
draw_fn,
draw_matrix,
draw_network,
draw_simple,
enter_select,
format_matrix,
guid,
guidIndex,
interpolate,
matrix_size,
pretty_round,
rgbToHex,
save_state,
transform_props,
} from '../index';
import {
math,
parser,
rtv,
BORDER_OPACITY,
BRACKETS,
BRACKET_COLOR,
CHAR,
GRID_SIZE,
MAT_NUM_WIDTH,
PI2,
} from '../resources';
export default function Text(text, pos) {
this.type = 'Text';
this.guid = guid();
this.properties = {};
this.properties[rtv.frame] = {
t: text,
p: pos,
c: [0, 0, 0, 1],
w: 1,
h: 1,
r: 0,
};
// ephemeral
this.new = true; // loaded or just created
this.selected = false;
this.dragged = false;
this.cursor = 0;
this.cursor_selection = 0;
this.command = '';
this.args = [];
this.cargs = []; // compiled arguments
this.text_val = '';
this.matrix_vals = [];
this.near_mouse = false;
this.size = { w: 0, h: 0 }; // pixel width and height
this.image = null;
this.select = () => {
this.selected = true;
rtv.formula_text.value = this.properties[rtv.frame].t;
};
this.is_selected = () => this.selected;
this.selection_indices = () => {
const s = Math.min(this.cursor, this.cursor_selection);
const e = Math.max(this.cursor, this.cursor_selection);
return { s, e };
};
this.text_selected = () => {
if (!this.is_text_selected()) {
return;
}
const props = this.properties[rtv.frame];
if (!props) {
return;
}
const s = this.selection_indices();
return props.t.slice(s.s, s.e);
};
this.is_text_selected = () => this.cursor !== this.cursor_selection;
this.replace_selected_text = (replace) => {
const props = this.properties[rtv.frame];
if (!props) {
return;
}
const { t } = props;
const s = this.selection_indices();
const newText = t.slice(0, s.s) + replace + t.slice(s.e, t.length);
this.cursor = s.s + replace.length;
this.cursor_selection = this.cursor;
return newText;
};
this.paste_text = (copiedText) => {
if (this.is_text_selected()) {
// wipe out some text in between
this.change_text(this.replace_selected_text(copiedText));
} else {
const { t } = this.properties[rtv.frame];
this.properties[rtv.frame].t = t.slice(0, this.cursor)
+ copiedText
+ t.slice(this.cursor, t.length);
this.cursor += copiedText.length;
this.cursor_selection = this.cursor;
}
};
this.constrain_cursors = () => {
const props = this.properties[rtv.frame];
if (!props) {
return;
}
const { t } = props;
this.cursor = Math.max(0, Math.min(this.cursor, t.length));
this.cursor_selection = Math.max(
0,
Math.min(this.cursor_selection, t.length),
);
};
this.char_index_at_x = (x) => {
const props = this.properties[rtv.frame];
if (!props) {
return 0;
}
const idx = Math.round((x - props.p.x) / CHAR.SIZE);
return Math.max(0, Math.min(idx, props.t.length));
};
this.duplicate = () => {
if (!this.selected) {
return;
}
const newc = new Text(this.text, null);
newc.properties[rtv.frame] = copy(this.properties[rtv.frame]);
newc.selected = true;
this.selected = false;
rtv.objs.push(newc);
};
this.copy_properties = (f, n) => {
this.properties[n] = copy(this.properties[f]);
};
this.set_color = (rgba) => {
if (this.selected) {
Object.assign(this.properties[rtv.frame].c, rgba.slice(0, 3));
}
};
this.hide = () => {
if (this.selected) {
if (this.properties[rtv.frame].c[3] === 1) {
this.properties[rtv.frame].c[3] = 0;
} else {
this.properties[rtv.frame].c[3] = 1;
}
this.selected = false;
}
};
this.clear_props = (f) => {
delete this.properties[f];
};
this.clear_all_props = () => {
if (!this.selected) {
return;
}
Object.keys(this.properties).forEach((key) => {
if (key !== rtv.frame) {
delete this.properties[key];
}
});
};
this.del_props_before = () => {
if (!this.selected) {
return;
}
if (this.properties && this.properties[rtv.frame - 1]) {
delete this.properties[rtv.frame - 1];
}
};
this.hidden = () => {
if (!this.properties[rtv.frame]) {
return true;
}
if (rtv.transition.transitioning) {
return (
this.properties[rtv.frame].c[3] === 0
&& this.properties[rtv.next_frame].c[3] === 0
);
}
return this.properties[rtv.frame].c[3] === 0;
};
this.in_rect = (x, y, x2, y2) => {
if (this.hidden()) {
return false;
}
const props = this.properties[rtv.frame];
let p;
if (props.ge) {
p = {
x: props.p.x + rtv.cam.props.p.x,
y: props.p.y + rtv.cam.props.p.y,
};
} else {
p = props.p;
}
if (p.x > x && p.y > y && p.x < x2 && p.y < y2) {
this.select();
return true;
}
return false;
};
this.split = () => {
if (!this.is_selected()) {
return;
}
const { t } = this.properties[rtv.frame];
if (t.includes('visnet')) {
// very hacky but it works.. :-)
const { p } = this.properties[rtv.frame];
const l = math.evaluate(t.substring(t.indexOf('['), t.indexOf(']') + 1));
draw_network(l._data, [p.x, p.y]);
// hide
this.properties[rtv.frame].c[3] = 0;
this.selected = false;
return;
}
    // for each character, make it its own text obj
if (!t) {
return;
}
const { p } = this.properties[rtv.frame];
// if its a matrix split that up too
if (this.matrix_vals.length !== 0) {
// create a bunch of matrix numbers
const pad = 24;
const matrix = format_matrix(this.matrix_vals);
for (let i = 0; i < matrix.length; i++) {
for (let j = 0; j < matrix[i].length; j++) {
const newT = new Text(matrix[i][j], {
x: p.x + j * (MAT_NUM_WIDTH + pad) + 110,
y: p.y + i * GRID_SIZE,
});
rtv.objs.push(newT);
}
}
const size = matrix_size(matrix);
draw_brackets(0, 0, size[0], size[1]);
return;
}
const N = t.length;
let xoff = 0;
for (let i = 0; i < N; i++) {
const c = t[i];
if (c === ' ') {
xoff += GRID_SIZE / 2;
continue;
}
const newT = new Text(c, { x: p.x + xoff, y: p.y });
rtv.objs.push(newT);
xoff += GRID_SIZE / 2;
}
this.deleted = true;
};
this.onkeydown = (evt) => {
if (!this.selected) {
return false;
}
const { key } = evt;
let { t } = this.properties[rtv.frame];
if (rtv.keys.ctrl) {
this.properties[rtv.frame] = transform_props(
key,
this.properties[rtv.frame],
);
return true;
}
if (rtv.keys.meta || rtv.keys.ctrl) {
if (this.is_selected()) {
if (key === 'c') {
// copy
rtv.text_copied = this.text_selected();
// hacky but works
const el = document.createElement('textarea');
el.value = this.text_selected();
document.body.appendChild(el);
el.select();
document.execCommand('copy');
document.body.removeChild(el);
return true;
}
if (key === 'v') {
// paste, let event take over
return false;
}
if (key === 'a') {
// select all
this.cursor = this.properties[rtv.frame].t.length;
this.cursor_selection = 0;
return true;
}
}
return true;
}
if (rtv.keys.tab) {
// auto complete
const fn = t.split(/[^A-Za-z]/).pop();
if (fn.length !== 0) {
const keys = Object.keys(math);
for (let i = 0; i < keys.length; i++) {
const funcName = keys[i];
if (funcName.startsWith(fn)) {
let newText = t.split(fn)[0] + keys[i];
if (`${math[funcName]}`.split('\n')[0].includes('(')) {
newText += '(';
}
this.change_text(newText);
this.cursor = newText.length;
this.cursor_selection = this.cursor;
break;
}
}
}
return true;
}
if (key === 'Escape') {
this.selected = false;
return false;
}
if (key === 'Enter') {
this.selected = false;
this.eval();
if (rtv.keys.shift) {
// create a new text below this one
const { p } = this.properties[rtv.frame];
const newT = new Text('', { x: p.x, y: p.y + CHAR.SIZE * 2 });
rtv.objs.push(newT);
newT.select();
save_state();
} else {
enter_select();
}
return false;
}
if (!rtv.keys.shift && this.is_text_selected()) {
const s = this.selection_indices();
if (key === 'ArrowRight') {
this.cursor = s.e;
} else if (key === 'ArrowLeft') {
this.cursor = s.s;
}
} else if (key === 'ArrowRight') {
this.cursor += 1;
} else if (key === 'ArrowLeft') {
this.cursor -= 1;
}
if (key === 'ArrowUp') {
// find text above
const texts = rtv.objs.filter((o) => o.type === 'Text');
texts.sort((a, b) => {
const ap = a.properties[rtv.frame].p;
const bp = b.properties[rtv.frame].p;
        return ap.y - bp.y;
});
const i = guidIndex(texts, this);
if (i === 0) {
return true;
}
const newObj = texts[i - 1];
newObj.selected = true;
this.selected = false;
return true;
}
if (key === 'ArrowDown') {
// find text below
const texts = rtv.objs.filter((o) => o.type === 'Text');
texts.sort((a, b) => {
const ap = a.properties[rtv.frame].p;
const bp = b.properties[rtv.frame].p;
        return ap.y - bp.y;
});
const i = guidIndex(texts, this);
if (i === texts.length - 1) {
return true;
}
const newObj = texts[i + 1];
newObj.selected = true;
this.selected = false;
return true;
}
if (key === 'Backspace') {
if (!this.is_text_selected()) {
this.cursor_selection = this.cursor - 1;
this.constrain_cursors();
t = this.replace_selected_text('');
} else {
t = this.replace_selected_text('');
}
} else if (key === 'Delete') {
if (!this.is_text_selected()) {
this.cursor_selection = this.cursor + 1;
this.constrain_cursors();
t = this.replace_selected_text('');
} else {
t = this.replace_selected_text('');
}
} else if (key.length === 1) {
// type character
if (this.is_text_selected()) {
t = this.replace_selected_text(key);
} else {
t = t.slice(0, this.cursor)
+ key
+ t.slice(this.cursor, t.length);
this.cursor += 1;
}
}
if (!rtv.keys.shift || (key !== 'ArrowRight' && key !== 'ArrowLeft')) {
this.cursor_selection = this.cursor;
}
this.change_text(t);
return true;
};
this.eval = () => {
if ((!rtv.presenting && this.is_selected()) || this.hidden()) {
return;
}
this.text_val = '';
this.matrix_vals = [];
if (this.new) {
this.new = false;
this.parse_text(this.properties[rtv.frame].t);
}
if (!this.cargs[0]) {
return;
}
rtv.ctx.save();
const a = this.properties[rtv.frame];
const b = this.properties[rtv.next_frame];
let i;
if (rtv.transition.transitioning) {
i = interpolate(a, b);
} else {
i = a;
}
const color = rgbToHex(i.c);
rtv.ctx.strokeStyle = color;
rtv.ctx.fillStyle = color;
rtv.ctx.globalAlpha = i.c[3];
if (rtv.transition.transitioning) {
if (a.t !== b.t) {
// text is diff, cross fade result
// ctx.globalAlpha = -math.cos(t_percent*2*math.PI-math.PI)/2 + .5;
/*
if (t_percent > .5) {
this.parse_text(this.properties[next_frame].t);
} */
}
}
try {
parser.set('text_props', i);
const val = this.cargs[0].evaluate(parser.scope);
// only display the value if its not an assignment or constant
const opType = math.parse(this.args[0]).type;
if (!opType.includes('Assignment') && opType !== 'ConstantNode') {
const type = typeof val;
// set display text
if (type === 'number') {
if (rtv.keys.ctrl) {
            // show the full-precision value
this.text_val = `=${val}`;
} else {
this.text_val = `=${pretty_round(val)}`;
}
} else if (type === 'boolean') {
this.text_val = ` = ${val}`;
} else if (type === 'object' && val._data && val._data.length !== 0) {
// prob a matrix, render entries
this.matrix_vals = val._data;
this.text_val = null;
} else if (val && 're' in val && val.im) {
if (val) {
if (rtv.keys.ctrl) {
              // show the full-precision value
this.text_val = `=${val}`;
} else {
this.text_val = `=${pretty_round(
val.re,
).toString()} + ${pretty_round(val.im).toString()}i`;
}
}
} else if (val) {
this.text_val = `=${val.toString()}`;
}
}
} catch (e) {
console.error('eval error: ', e);
}
rtv.ctx.restore();
};
this.change_text = (newText) => {
const changed = this.properties[rtv.frame].t !== newText;
this.properties[rtv.frame].t = newText;
this.constrain_cursors();
if (changed) {
this.parse_text(newText);
}
};
this.mouse_down = () => {
if (this.hidden()) {
return false;
}
this.near_mouse = this.point_in_text_rect(rtv.mouse.pos);
if (this.near_mouse) {
return true;
}
return false;
};
this.point_in_text_rect = (point) => {
const props = this.properties[rtv.frame];
if (!props) {
return false;
}
const { p } = props;
if (this.image) {
const w = this.image.width * props.w;
const h = this.image.height * props.h;
if (
point.x > p.x - w / 2
&& point.x < p.x + w / 2
&& point.y > p.y - h / 2
&& point.y < p.y + h / 2
) {
return true;
}
} else if (
point.x > p.x
&& point.x < p.x + this.size.w
&& point.y > p.y - this.size.h / 2
&& point.y < p.y + this.size.h / 2
) {
return true;
}
return false;
};
this.mouse_move = () => {
const props = this.properties[rtv.frame];
if (!props) {
return;
}
this.near_mouse = this.point_in_text_rect(rtv.mouse.pos);
};
this.var_name = () => {
let varName = this.args[0].split('=')[0];
varName = varName.replace(/\s+/g, '');
return varName;
};
this.mouse_drag = () => {
if (rtv.tool === 'camera') {
return false;
}
const props = this.properties[rtv.frame];
if (!props) {
return false;
}
if (
Math.abs(rtv.mouse.pos.x - rtv.mouse.start.x) > CHAR.SIZE
|| Math.abs(rtv.mouse.pos.y - rtv.mouse.start.y) > CHAR.SIZE
) {
this.dragged = true;
}
if (rtv.presenting) {
if (
!(this.args && this.args[0] && this.args[0]._data)
&& this.command === 'slide'
&& this.point_in_text_rect(rtv.mouse.start)
) {
// change the value of the variable
const varName = this.var_name();
let oldVal;
try {
oldVal = parser.evaluate(varName);
if (Number.isNaN(oldVal)) {
oldVal = 0;
}
} catch {
oldVal = 0;
}
let delta = (rtv.mouse.pos.x - rtv.mouse.last.x) / GRID_SIZE;
if (rtv.keys.meta || rtv.keys.ctrl) {
delta *= 0.01;
}
const newVal = oldVal + delta;
this.text_val = `=${pretty_round(newVal)}`;
try {
parser.set(varName, newVal);
} catch (e) {
console.error('slide error: ', e);
}
return true;
}
} else if (this.is_selected() && this.near_mouse && this.image == null) {
this.cursor = this.char_index_at_x(rtv.mouse.pos.x);
this.cursor_selection = this.char_index_at_x(rtv.mouse.start.x);
this.constrain_cursors();
this.dragged = true;
} else if (
rtv.tool === 'select'
&& (this.near_mouse || this.is_selected())
) {
// shift it
const { p } = props;
const offset = {
x: rtv.mouse.grid.x - rtv.mouse.gridLast.x,
y: rtv.mouse.grid.y - rtv.mouse.gridLast.y,
};
props.p = { x: p.x + offset.x, y: p.y + offset.y };
return true;
}
return false;
};
this.mouse_up = () => {
if (this.hidden()) {
return false;
}
if (this.near_mouse) {
if (!this.dragged) {
this.select();
// move cursor
this.cursor = this.char_index_at_x(rtv.mouse.pos.x);
this.cursor_selection = this.cursor;
this.constrain_cursors();
return true;
}
} else if (!rtv.keys.shift && this.is_selected()) {
this.selected = false;
}
this.dragged = false;
return false;
};
this.draw_text = (ctx, t) => {
let size;
if (this.command === 'f' && !this.is_selected()) {
const fn = t.slice(this.command.length + 1); // +1 for semicolon
size = draw_fn(fn);
} else {
      const N = t.length;
      size = { w: draw_simple(t), h: CHAR.SIZE * 2 };
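      // plevel is the bracket nesting depth at the cursor; when it is non-zero,
      // the enclosing bracket pair is redrawn in BRACKET_COLOR below.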
let plevel = 0;
for (let i = 0; i < N; i++) {
if (i < this.cursor) {
if (t[i] in BRACKETS) plevel += BRACKETS[t[i]];
}
}
// draw red brackets
ctx.save();
if (this.is_selected() && plevel !== 0) {
ctx.fillStyle = BRACKET_COLOR;
let p2 = plevel;
for (let i = this.cursor; i < N; i++) {
if (t[i] in BRACKETS) p2 += BRACKETS[t[i]];
|
}
}
p2 = plevel;
for (let i = this.cursor - 1; i >= 0; i--) {
if (t[i] in BRACKETS) p2 += BRACKETS[t[i]];
if (p2 === plevel + 1) {
ctx.fillText(t[i], i * CHAR.SIZE, 0);
break;
}
}
}
ctx.restore();
}
if (this.matrix_vals.length !== 0) {
ctx.save();
ctx.translate(size.w, 0);
ctx.fillText('=', 0, 0);
ctx.translate(135, 0);
ctx.translate(-100, -20);
const formatted = format_matrix(this.matrix_vals);
draw_matrix(formatted);
ctx.restore();
} else if (!this.selected && this.text_val && this.text_val.length) {
ctx.save();
ctx.translate(size.w, 0);
size.w += draw_simple(this.text_val);
ctx.restore();
}
return size;
};
this.parse_text = (unparsedText) => {
this.command = '';
this.args = [];
this.cargs = [];
let parsedText = unparsedText;
// replace @ with anonymous fn name
if (parsedText && parsedText.length) {
const split = parsedText.split('@');
let newT = '';
const N = split.length;
for (let i = 0; i < N - 1; i++) {
newT += `${split[i]}anon${guid().slice(0, 8)}`;
}
newT += split[N - 1];
parsedText = newT;
}
if (parsedText && parsedText.includes(':')) {
const split = parsedText.split(':');
this.command = split[0];
this.args = [split[1]];
try {
this.cargs = math.compile(this.args);
} catch (e) {
// report_error(e);
}
} else {
this.args = [parsedText];
try {
this.cargs = math.compile(this.args);
} catch (e) {
console.log('compile2 error: ', e);
}
}
};
this.draw_tree = (ctx, props) => {
ctx.save();
if (this.args.length !== 1) {
return;
}
let stuff;
try {
stuff = [math.parse(this.args[0])];
} catch {
return;
}
const yoff = GRID_SIZE * 3;
const xoff = GRID_SIZE * 3;
const opSize = GRID_SIZE;
const p = { x: props.p.x, y: props.p.y + GRID_SIZE };
if (!stuff) {
return;
}
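    // Level-order walk of the parsed expression: each pass of the loop draws one
    // row of nodes and gathers their children into nextStuff for the next row.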
while (true) {
let nextStuff = [];
let addedAllSpaces = true;
for (let i = 0; i < stuff.length; i++) {
const o = stuff[i];
if (o.args) {
nextStuff = nextStuff.concat(o.args);
addedAllSpaces = false;
} else {
nextStuff.push(' ');
}
}
let lx = (-(nextStuff.length - 1) / 2) * xoff;
let li = 0;
for (let i = 0; i < stuff.length; i++) {
const o = stuff[i];
if (o === ' ') {
continue;
}
let t;
const np = {
x: p.x + i * xoff - ((stuff.length - 1) / 2) * xoff,
y: p.y,
};
if (o.args) {
// draw the op name
if (o.name && o.name.length) {
t = o.name;
} else if (o.op && o.op.length) {
t = o.op;
}
if (distance(rtv.mouse.pos, np) < GRID_SIZE) {
t = o.toString();
}
/* ctx.beginPath();
ctx.arc(np.x, np.y, op_size, 0, pi2);
ctx.stroke(); */
ctx.fillText(t, np.x, np.y);
for (let j = 0; j < o.args.length; j++) {
while (nextStuff[li] === ' ') {
lx += xoff;
li += 1;
}
const argp = { x: p.x + lx, y: np.y + yoff };
let diff = { x: argp.x - np.x, y: argp.y - np.y };
const n = math.norm([diff.x, diff.y]);
diff = { x: diff.x / n, y: diff.y / n };
ctx.beginPath();
ctx.moveTo(np.x + diff.x * opSize, np.y + diff.y * opSize);
ctx.lineTo(argp.x - diff.x * opSize, argp.y - diff.y * opSize);
ctx.stroke();
lx += xoff;
li += 1;
}
} else {
if (o.name && o.name.length) {
t = o.name;
} else if (o.items) {
t = 'A'; // array
} else if (o.value) {
t = o.value;
} else if (o.content) {
t = o.content;
} else {
t = '?';
}
ctx.fillText(t, np.x, np.y);
}
}
if (nextStuff.length === 0) {
break;
}
if (addedAllSpaces) {
break;
}
stuff = nextStuff;
p.y += yoff;
}
ctx.restore();
};
this.draw_border = (ctx) => {
ctx.save();
ctx.globalAlpha = BORDER_OPACITY;
if (this.image) {
ctx.strokeRect(
-this.image.width / 2,
-this.image.height / 2,
this.image.width,
this.image.height,
);
} else {
ctx.strokeRect(0, -this.size.h / 2, this.size.w, this.size.h);
}
ctx.restore();
};
this.render = (ctx) => {
const a = this.properties[rtv.frame];
if (!a) {
return;
}
let b = this.properties[rtv.next_frame];
const itn = rtv.transition.transitioning
? interpolate(a, b)
: a;
if (itn.c[3] === 0) {
return;
}
let { p } = itn;
if (b && b.c[3] > a.c[3]) {
// fade in, use final position always
p = b.p;
} else if (b && b.c[3] < a.c[3]) {
// fade out, use initial position
p = a.p;
}
ctx.save();
ctx.globalAlpha = itn.c[3];
ctx.fillStyle = rgbToHex(itn.c);
ctx.strokeStyle = rgbToHex(itn.c);
let shouldDrawText = true;
const c = this.command;
if (c === 'tree') {
this.draw_tree(ctx, itn);
if (rtv.presenting) {
shouldDrawText = false;
}
}
if (rtv.presenting && (a.ph || (b && b.ph))) {
shouldDrawText = false;
}
// text
this.size = { w: 0, h: 0 };
ctx.translate(p.x, p.y);
ctx.rotate(itn.r);
ctx.scale(itn.w, itn.h);
// image display
if (
itn.t.includes('http')
&& (itn.t.includes('png')
|| itn.t.includes('jpg')
|| itn.t.includes('gif')
|| itn.t.includes('jpeg'))
) {
if (this.image == null || this.image.src !== itn.t) {
this.image = new Image();
this.image.src = itn.t;
} else {
ctx.drawImage(
this.image,
-this.image.width / 2,
-this.image.height / 2,
);
this.size = { w: this.image.width * itn.w, h: this.image.height * itn.h };
}
} else if (shouldDrawText) {
if (!b) {
b = a;
}
const fadingIn = a.c[3] === 0 && b.c[3] === 1;
const fadingOut = a.c[3] === 1 && b.c[3] === 0;
let at = a.t;
let bt = b.t;
if (rtv.transition.transitioning) {
if (fadingIn) {
at = b.t;
bt = b.t;
} else if (fadingOut) {
at = a.t;
bt = a.t;
}
}
const textDifferent = at !== bt;
if (textDifferent && rtv.transition.transitioning) {
// changing text
const constrained = constrain(rtv.t_ease);
ctx.globalAlpha = 1 - constrained;
this.draw_text(ctx, a.t);
ctx.globalAlpha = constrained;
this.draw_text(ctx, b.t);
} else {
ctx.globalAlpha = itn.c[3];
this.size = this.draw_text(ctx, at);
}
}
if (c === 'slide' && rtv.presenting && this.near_mouse && !this.hidden()) {
// draw slider rect
this.draw_border(ctx);
}
if (!rtv.presenting && !this.hidden() && this.near_mouse) {
// draw border
this.draw_border(ctx);
}
if (!rtv.presenting && this.is_selected()) {
// draw cursor
ctx.fillRect(this.cursor * CHAR.SIZE, -GRID_SIZE / 2, 2, GRID_SIZE);
if (this.is_text_selected()) {
// draw selection
const s = this.selection_indices();
const xstart = s.s * CHAR.SIZE;
const xend = s.e * CHAR.SIZE;
ctx.save();
ctx.globalAlpha = 0.1;
ctx.fillRect(xstart, -GRID_SIZE / 2, xend - xstart, GRID_SIZE);
ctx.restore();
}
// draw function information
if (itn.t) {
const t = itn.t.slice(0, this.cursor);
const fn = t.split(/[^A-Za-z]/).pop();
if (fn.length !== 0) {
const keys = Object.keys(math);
let yoff = 0;
for (let i = 0; i < keys.length; i++) {
const funcName = keys[i];
if (funcName.startsWith(fn)) {
ctx.save();
ctx.translate(0, CHAR.SIZE * 2 + yoff);
ctx.scale(0.5, 0.5);
ctx.globalAlpha = 0.5;
draw_simple(`${funcName}: ${`${math[funcName]}`.split('\n')[0]}`);
ctx.restore();
yoff += GRID_SIZE;
}
}
}
}
}
ctx.restore();
};
this.generate_javascript = () => {
const props = this.properties[rtv.frame];
const { p } = props;
const cp = rtv.cam.properties[rtv.frame].p;
const { t } = props;
let js = '';
js += 'ctx.save();\n';
js += `ctx.translate(x + ${p.x - cp.x}, y + ${p.y - cp.y});\n`;
js += `ctx.rotate(${props.r});\n`;
js += `ctx.scale(${props.w}, ${props.h});\n`;
js += `ctx.fillStyle = "${rgbToHex(props.c)}";\n`;
for (let i = 0; i < t.length; i++) {
if (t[i] === '*') {
js += 'ctx.beginPath();\n';
js += `ctx.arc(${i * CHAR.SIZE + CHAR.SIZE / 2}, 0, 3, 0, ${PI2});\n`;
js += 'ctx.fill();\n';
} else {
js += `ctx.fillText("${t[i]}", ${i * CHAR.SIZE}, 0);\n`;
}
}
js += 'ctx.restore();\n';
return js;
};
this.parse_text(text);
}
|
if (p2 === plevel - 1) {
ctx.fillText(t[i], i * CHAR.SIZE, 0);
break;
|
runtime~main.0021e2e3.iframe.bundle.js
|
(()=>{"use strict";var deferred,inProgress,__webpack_modules__={},__webpack_module_cache__={};function
|
(moduleId){var cachedModule=__webpack_module_cache__[moduleId];if(void 0!==cachedModule)return cachedModule.exports;var module=__webpack_module_cache__[moduleId]={id:moduleId,loaded:!1,exports:{}};return __webpack_modules__[moduleId].call(module.exports,module,module.exports,__webpack_require__),module.loaded=!0,module.exports}__webpack_require__.m=__webpack_modules__,deferred=[],__webpack_require__.O=(result,chunkIds,fn,priority)=>{if(!chunkIds){var notFulfilled=1/0;for(i=0;i<deferred.length;i++){for(var[chunkIds,fn,priority]=deferred[i],fulfilled=!0,j=0;j<chunkIds.length;j++)(!1&priority||notFulfilled>=priority)&&Object.keys(__webpack_require__.O).every((key=>__webpack_require__.O[key](chunkIds[j])))?chunkIds.splice(j--,1):(fulfilled=!1,priority<notFulfilled&&(notFulfilled=priority));fulfilled&&(deferred.splice(i--,1),result=fn())}return result}priority=priority||0;for(var i=deferred.length;i>0&&deferred[i-1][2]>priority;i--)deferred[i]=deferred[i-1];deferred[i]=[chunkIds,fn,priority]},__webpack_require__.n=module=>{var getter=module&&module.__esModule?()=>module.default:()=>module;return __webpack_require__.d(getter,{a:getter}),getter},__webpack_require__.d=(exports,definition)=>{for(var key in definition)__webpack_require__.o(definition,key)&&!__webpack_require__.o(exports,key)&&Object.defineProperty(exports,key,{enumerable:!0,get:definition[key]})},__webpack_require__.f={},__webpack_require__.e=chunkId=>Promise.all(Object.keys(__webpack_require__.f).reduce(((promises,key)=>(__webpack_require__.f[key](chunkId,promises),promises)),[])),__webpack_require__.u=chunkId=>chunkId+"."+{90:"a29094a1",331:"a4d69bca",459:"5a886fb5",881:"61c8f29e",913:"a1de9192"}[chunkId]+".iframe.bundle.js",__webpack_require__.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),__webpack_require__.hmd=module=>((module=Object.create(module)).children||(module.children=[]),Object.defineProperty(module,"exports",{enumerable:!0,set:()=>{throw new Error("ES Modules may not assign module.exports or exports.*, Use ESM export syntax, instead: "+module.id)}}),module),__webpack_require__.o=(obj,prop)=>Object.prototype.hasOwnProperty.call(obj,prop),inProgress={},__webpack_require__.l=(url,done,key,chunkId)=>{if(inProgress[url])inProgress[url].push(done);else{var script,needAttach;if(void 0!==key)for(var scripts=document.getElementsByTagName("script"),i=0;i<scripts.length;i++){var s=scripts[i];if(s.getAttribute("src")==url||s.getAttribute("data-webpack")=="cubism-react:"+key){script=s;break}}script||(needAttach=!0,(script=document.createElement("script")).charset="utf-8",script.timeout=120,__webpack_require__.nc&&script.setAttribute("nonce",__webpack_require__.nc),script.setAttribute("data-webpack","cubism-react:"+key),script.src=url),inProgress[url]=[done];var onScriptComplete=(prev,event)=>{script.onerror=script.onload=null,clearTimeout(timeout);var doneFns=inProgress[url];if(delete inProgress[url],script.parentNode&&script.parentNode.removeChild(script),doneFns&&doneFns.forEach((fn=>fn(event))),prev)return prev(event)},timeout=setTimeout(onScriptComplete.bind(null,void 0,{type:"timeout",target:script}),12e4);script.onerror=onScriptComplete.bind(null,script.onerror),script.onload=onScriptComplete.bind(null,script.onload),needAttach&&document.head.appendChild(script)}},__webpack_require__.r=exports=>{"undefined"!=typeof 
Symbol&&Symbol.toStringTag&&Object.defineProperty(exports,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(exports,"__esModule",{value:!0})},__webpack_require__.nmd=module=>(module.paths=[],module.children||(module.children=[]),module),__webpack_require__.p="",(()=>{var installedChunks={303:0};__webpack_require__.f.j=(chunkId,promises)=>{var installedChunkData=__webpack_require__.o(installedChunks,chunkId)?installedChunks[chunkId]:void 0;if(0!==installedChunkData)if(installedChunkData)promises.push(installedChunkData[2]);else if(303!=chunkId){var promise=new Promise(((resolve,reject)=>installedChunkData=installedChunks[chunkId]=[resolve,reject]));promises.push(installedChunkData[2]=promise);var url=__webpack_require__.p+__webpack_require__.u(chunkId),error=new Error;__webpack_require__.l(url,(event=>{if(__webpack_require__.o(installedChunks,chunkId)&&(0!==(installedChunkData=installedChunks[chunkId])&&(installedChunks[chunkId]=void 0),installedChunkData)){var errorType=event&&("load"===event.type?"missing":event.type),realSrc=event&&event.target&&event.target.src;error.message="Loading chunk "+chunkId+" failed.\n("+errorType+": "+realSrc+")",error.name="ChunkLoadError",error.type=errorType,error.request=realSrc,installedChunkData[1](error)}}),"chunk-"+chunkId,chunkId)}else installedChunks[chunkId]=0},__webpack_require__.O.j=chunkId=>0===installedChunks[chunkId];var webpackJsonpCallback=(parentChunkLoadingFunction,data)=>{var moduleId,chunkId,[chunkIds,moreModules,runtime]=data,i=0;for(moduleId in moreModules)__webpack_require__.o(moreModules,moduleId)&&(__webpack_require__.m[moduleId]=moreModules[moduleId]);if(runtime)var result=runtime(__webpack_require__);for(parentChunkLoadingFunction&&parentChunkLoadingFunction(data);i<chunkIds.length;i++)chunkId=chunkIds[i],__webpack_require__.o(installedChunks,chunkId)&&installedChunks[chunkId]&&installedChunks[chunkId][0](),installedChunks[chunkIds[i]]=0;return __webpack_require__.O(result)},chunkLoadingGlobal=self.webpackChunkcubism_react=self.webpackChunkcubism_react||[];chunkLoadingGlobal.forEach(webpackJsonpCallback.bind(null,0)),chunkLoadingGlobal.push=webpackJsonpCallback.bind(null,chunkLoadingGlobal.push.bind(chunkLoadingGlobal))})()})();
//# sourceMappingURL=runtime~main.0021e2e3.iframe.bundle.js.map
|
__webpack_require__
|
common.py
|
from __future__ import print_function
import argparse
|
import json
import datetime
import pyjq
import yaml
import sys
from netaddr import IPNetwork
from shared.nodes import Account, Region
from shared.query import query_aws, get_parameter_file
class Severity:
# For logging
DEBUG = 0
INFO = 1
WARN = 2
ERROR = 3
@classmethod
def str_to_int(cls, level):
if level == "DEBUG":
return cls.DEBUG
elif level == "INFO":
return cls.INFO
elif level == "WARN":
return cls.WARN
elif level == "ERROR":
return cls.ERROR
else:
raise Exception("Unknown log level {}".format(level))
@staticmethod
def string(severity_level):
if severity_level == Severity.DEBUG:
return "DEBUG"
elif severity_level == Severity.INFO:
return "INFO"
elif severity_level == Severity.WARN:
return "WARN"
elif severity_level == Severity.ERROR:
return "ERROR"
else:
raise Exception("Unknown severity level")
LOG_LEVEL = Severity.INFO
def log_debug(msg, location=None, reasons=[]):
log_issue(Severity.DEBUG, msg, location, reasons)
def log_info(msg, location=None, reasons=[]):
log_issue(Severity.INFO, msg, location, reasons)
def log_warning(msg, location=None, reasons=[]):
log_issue(Severity.WARN, msg, location, reasons)
def log_error(msg, location=None, reasons=[]):
log_issue(Severity.ERROR, msg, location, reasons)
def log_issue(severity, msg, location=None, reasons=[]):
if severity >= LOG_LEVEL:
json_issue = {
"Severity": Severity.string(severity),
"Issue": msg,
"Location": location,
"Reasons": reasons,
}
print(json.dumps(json_issue, sort_keys=True), file=sys.stderr)
class Finding(object):
"""Used for auditing"""
region = None
issue_id = None
resource_id = None
resource_details = None
def __init__(self, region, issue_id, resource_id, resource_details=None):
self.region = region
self.issue_id = issue_id
self.resource_id = resource_id
self.resource_details = resource_details
def __str__(self):
return json.dumps(
{
"account_id": self.region.account.local_id,
"account_name": self.region.account.name,
"region": self.region.name,
"issue": self.issue_id,
"resource": self.resource_id,
"details": self.resource_details,
}
)
@property
def account_name(self):
return self.region.account.name
def custom_serializer(x):
if isinstance(x, datetime.datetime):
return x.isoformat()
elif isinstance(x, bytes):
return x.decode()
raise TypeError("Unknown type")
def make_list(v):
if not isinstance(v, list):
return [v]
return v
def is_external_cidr(cidr):
ipnetwork = IPNetwork(cidr)
if (
ipnetwork in IPNetwork("10.0.0.0/8")
or ipnetwork in IPNetwork("172.16.0.0/12")
or ipnetwork in IPNetwork("192.168.0.0/16")
):
return False
return True
def is_unblockable_cidr(cidr):
ipnetwork = IPNetwork(cidr)
if (
        ipnetwork in IPNetwork("169.254.0.0/16")  # link local
        or ipnetwork in IPNetwork("127.0.0.0/8")  # loopback
        or ipnetwork in IPNetwork("192.0.2.0/24")  # TEST-NET-1 from RFC 5737
        or ipnetwork in IPNetwork("198.51.100.0/24")  # TEST-NET-2
        or ipnetwork in IPNetwork("203.0.113.0/24")  # TEST-NET-3
        or ipnetwork in IPNetwork("224.0.0.0/4")  # class D multicast
        or ipnetwork in IPNetwork("240.0.0.0/5")  # class E reserved
        or ipnetwork in IPNetwork("248.0.0.0/5")  # reserved
        or ipnetwork in IPNetwork("255.255.255.255/32")  # broadcast
):
return True
return False
def get_regions(account, outputfilter={}):
# aws ec2 describe-regions
region_data = query_aws(account, "describe-regions")
region_filter = ""
if "regions" in outputfilter:
region_filter = "| select(.RegionName | contains({}))".format(
outputfilter["regions"]
)
regions = pyjq.all(".Regions[]{}".format(region_filter), region_data)
return regions
def get_account(account_name, config=None, config_filename="config.json.demo"):
if config is None:
config = json.load(open(config_filename))
for account in config["accounts"]:
if account["name"] == account_name:
return account
if account_name is None and account.get("default", False):
return account
# Else could not find account
if account_name is None:
exit(
"ERROR: Must specify an account, or set one in {} as a default".format(
config_filename
)
)
exit(
'ERROR: Account named "{}" not found in {}'.format(
account_name, config_filename
)
)
def parse_arguments(arguments, parser=None):
"""Returns (args, accounts, config)"""
if parser is None:
parser = argparse.ArgumentParser()
parser.add_argument(
"--config", help="Config file name", default="config.json", type=str
)
parser.add_argument(
"--accounts", help="Accounts to collect from", required=True, type=str
)
parser.add_argument(
"--log_level",
help="Log level to record (DEBUG, INFO, WARN, ERROR)",
default="INFO",
required=False,
type=str,
)
args = parser.parse_args(arguments)
global LOG_LEVEL
LOG_LEVEL = Severity.str_to_int(args.log_level)
# Read accounts file
try:
config = json.load(open(args.config))
except IOError:
exit('ERROR: Unable to load config file "{}"'.format(args.config))
except ValueError as e:
exit(
'ERROR: Config file "{}" could not be loaded ({}), see config.json.demo for an example'.format(
args.config, e
)
)
# Get accounts
account_names = args.accounts.split(",")
accounts = []
# TODO Need to be able to tag accounts into sets (ex. Prod, or by business unit) so the tag can be referenced
# as opposed to the individual account names.
for account_name in account_names:
if account_name == "all":
for account in config["accounts"]:
accounts.append(account)
break
accounts.append(get_account(account_name, config, args.config))
return (args, accounts, config)
def get_account_stats(account, all_resources=False):
"""Returns stats for an account"""
with open("stats_config.yaml", "r") as f:
resources = yaml.safe_load(f)
account = Account(None, account)
log_debug(
"Collecting stats in account {} ({})".format(account.name, account.local_id)
)
stats = {}
stats["keys"] = []
for resource in resources:
# If the resource is marked as verbose, and we're not showing all resources, skip it.
if resource.get("verbose", False) and not all_resources:
continue
stats["keys"].append(resource["name"])
stats[resource["name"]] = {}
for region_json in get_regions(account):
region = Region(account, region_json)
for resource in resources:
if resource.get("verbose", False) and not all_resources:
continue
# Skip global services (just CloudFront)
if ("region" in resource) and (resource["region"] != region.name):
continue
# S3 buckets require special code to identify their location
if resource["name"] == "S3 buckets":
if region.name == "us-east-1":
buckets = pyjq.all(
".Buckets[].Name",
query_aws(region.account, "s3-list-buckets", region),
)
for bucket in buckets:
# Get the bucket's location
bucket_region = get_parameter_file(
region, "s3", "get-bucket-location", bucket
)["LocationConstraint"]
# Convert the value to a name.
# See https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
if bucket_region is None:
bucket_region = "us-east-1"
elif bucket_region == "EU":
bucket_region = "eu-west-1"
# Increment the count
tmp = stats[resource["name"]].get(bucket_region, 0)
stats[resource["name"]][bucket_region] = tmp + 1
else:
if region.name != 'ap-east-1':
# Normal path
stats[resource["name"]][region.name] = sum(
pyjq.all(
resource["query"],
query_aws(region.account, resource["source"], region),
)
)
return stats
def get_us_east_1(account):
for region_json in get_regions(account):
region = Region(account, region_json)
if region.name == "us-east-1":
return region
raise Exception("us-east-1 not found")
def iso_date(d):
""" Convert ISO format date string such as 2018-04-08T23:33:20+00:00"""
time_format = "%Y-%m-%dT%H:%M:%S"
return datetime.datetime.strptime(d.split("+")[0], time_format)
def days_between(s1, s2):
"""s1 and s2 are date strings"""
d1 = iso_date(s1)
d2 = iso_date(s2)
return abs((d1 - d2).days)
def get_collection_date(account):
if type(account) is not Account:
account = Account(None, account)
account_struct = account
json_blob = query_aws(
account_struct, "iam-get-credential-report", get_us_east_1(account_struct)
)
if not json_blob:
raise Exception(
"File iam-get-credential-report.json does not exist or is not well-formed. Likely cause is you did not run the collect command for this account."
)
# GeneratedTime looks like "2019-01-30T15:43:24+00:00"
return json_blob["GeneratedTime"]
def get_access_advisor_active_counts(account, max_age=90):
region = get_us_east_1(account)
json_account_auth_details = query_aws(
region.account, "iam-get-account-authorization-details", region
)
account_stats = {
"users": {"active": 0, "inactive": 0},
"roles": {"active": 0, "inactive": 0},
}
for principal_auth in [
*json_account_auth_details["UserDetailList"],
*json_account_auth_details["RoleDetailList"],
]:
stats = {}
stats["auth"] = principal_auth
principal_type = "roles"
if "UserName" in principal_auth:
principal_type = "users"
job_id = get_parameter_file(
region,
"iam",
"generate-service-last-accessed-details",
principal_auth["Arn"],
)["JobId"]
json_last_access_details = get_parameter_file(
region, "iam", "get-service-last-accessed-details", job_id
)
stats["last_access"] = json_last_access_details
stats["is_inactive"] = True
job_completion_date = datetime.datetime.strptime(
json_last_access_details["JobCompletionDate"][0:10], "%Y-%m-%d"
)
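        # A principal counts as active if any service shows a LastAuthenticated
        # within max_age days of the job completion date.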
for service in json_last_access_details["ServicesLastAccessed"]:
if "LastAuthenticated" in service:
last_access_date = datetime.datetime.strptime(
service["LastAuthenticated"][0:10], "%Y-%m-%d"
)
if (job_completion_date - last_access_date).days < max_age:
stats["is_inactive"] = False
break
if stats["is_inactive"]:
account_stats[principal_type]["inactive"] += 1
else:
account_stats[principal_type]["active"] += 1
return account_stats
def get_current_policy_doc(policy):
for doc in policy["PolicyVersionList"]:
if doc["IsDefaultVersion"]:
return doc["Document"]
raise Exception("No default document version in policy {}".format(policy["Arn"]))
| |
file.rs
|
use std;
use std::io::{Write, Read};
use std::fs::OpenOptions;
use client::Key;
pub trait KeyStore {
fn write_keys(&self, Vec<Key>) -> std::io::Result<usize>;
}
// TODO: figure out how to resolve ~/ or get the user's home directory path.
const AUTHORIZED_KEYS_PATH: &'static str = "/Users/dpetersen/.ssh/authorized_keys";
pub struct AuthorizedKeyFileStore;
impl KeyStore for AuthorizedKeyFileStore {
// TODO look at making this not a &self method? Can you impl a trait for a struct in that way?
fn
|
(&self, keys: Vec<Key>) -> std::io::Result<usize> {
let file = OpenOptions::new()
.read(true)
.write(true)
.append(true)
.create(true)
.open(AUTHORIZED_KEYS_PATH);
match file {
Ok(mut f) => {
let mut written_count = 0;
let mut existing_keys = String::new();
f.read_to_string(&mut existing_keys).ok().expect("Failed reading authorized_keys!");
for key in &keys {
if existing_keys.contains(&key.key) {
debug!("Skipping key '{}', already exists", key.id);
continue
}
info!("Writing key '{}'", key.id);
match f.write_all(&key.to_authorized_keys_line().as_bytes()) {
Ok(_) => written_count += 1,
Err(e) => return Err(e),
}
}
Ok(written_count)
},
Err(e) => Err(e),
}
}
}
|
write_keys
|
ipaddress.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of my-weather-indicator
#
# Copyright (c) 2012 Lorenzo Carbonell Cerezo <a.k.a. atareao>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import dbus
import comun
import re
import json
from functools import partial
from collections import namedtuple
from geocodeapi import get_inv_direction
def convert(dbus_obj):
"""Converts dbus_obj from dbus type to python type.
:param dbus_obj: dbus object.
:returns: dbus_obj in python type.
"""
_isinstance = partial(isinstance, dbus_obj)
ConvertType = namedtuple('ConvertType', 'pytype dbustypes')
pyint = ConvertType(int, (dbus.Byte, dbus.Int16, dbus.Int32, dbus.Int64,
dbus.UInt16, dbus.UInt32, dbus.UInt64))
pybool = ConvertType(bool, (dbus.Boolean, ))
pyfloat = ConvertType(float, (dbus.Double, ))
pylist = ConvertType(lambda _obj: list(map(convert, dbus_obj)),
(dbus.Array, ))
pytuple = ConvertType(lambda _obj: tuple(map(convert, dbus_obj)),
(dbus.Struct, ))
types_str = (dbus.ObjectPath, dbus.Signature, dbus.String)
pystr = ConvertType(str, types_str)
pydict = ConvertType(
lambda _obj: dict(list(zip(list(map(convert, dbus_obj.keys())),
list(map(convert, dbus_obj.values()))
))
),
(dbus.Dictionary, )
)
for conv in (pyint, pybool, pyfloat, pylist, pytuple, pystr, pydict):
if any(map(_isinstance, conv.dbustypes)):
return conv.pytype(dbus_obj)
else:
return dbus_obj
def get_current_location():
latitude, longitude = get_current_location_option1()
if latitude == 0 and longitude == 0:
latitude, longitude = get_current_location_option2()
return latitude, longitude
def get_current_location_option1():
'''Gets the current location from geolocation via IP (only method
currently supported)
'''
latitude = 0
longitude = 0
bus = dbus.SessionBus()
# For now we default to the UbuntuGeoIP provider and we fall back to
# Hostip. We should probably be cleverer about provider detection, but
# this solution works for now and does not rely solely on UbuntuGeoIP,
# which means qreator can run on other distros
try:
geoclue = bus.get_object(
'org.freedesktop.Geoclue.Providers.UbuntuGeoIP',
'/org/freedesktop/Geoclue/Providers/UbuntuGeoIP')
position_info = geoclue.GetPosition(
dbus_interface='org.freedesktop.Geoclue.Position')
latitude = convert(position_info[2])
longitude = convert(position_info[3])
except dbus.exceptions.DBusException as e:
print('Error 1', e)
try:
geoclue = bus.get_object(
'org.freedesktop.Geoclue.Providers.Hostip',
'/org/freedesktop/Geoclue/Providers/Hostip')
position_info = geoclue.GetPosition(
dbus_interface='org.freedesktop.Geoclue.Position')
latitude = convert(position_info[2])
longitude = convert(position_info[3])
except dbus.exceptions.DBusException as e:
print('Error 2', e)
return latitude, longitude
def get_ip():
url = 'http://whatismyip.org'
ans = comun.read_from_url(url)
# print(ans)
return re.compile(r'(\d+\.\d+\.\d+\.\d+)').search(ans).group(1)
def
|
():
try:
url = 'http://ip-api.com/json'
ans = json.loads(comun.read_from_url(url))
return ans['lat'], ans['lon']
except Exception as e:
print(e)
return 0, 0
def get_address_from_ip():
lat, lon = get_current_location()
ans = get_inv_direction(lat, lon)
return ans
if __name__ == "__main__":
# import requests
# r = requests.get("https://stackoverflow.com")
print(get_current_location_option2())
print('======')
print(get_current_location())
# print(get_address_from_ip())
|
get_current_location_option2
|
windows.rs
|
use error::InstallError;
use std::error::Error;
use std::fmt::Debug;
use std::path::{Path, PathBuf};
use Args;
use platform::common::*;
// fs::remove_dir_all has issues in windows, so need to shell out
pub fn remove_tree<P: AsRef<Path> + Debug>(dir: P) -> Result<(), Box<Error>> {
//rmdir c:\test /s /q
println!("removing {:?}", dir);
let command: String = "rmdir".to_string();
let mut args: Vec<String> = Vec::new();
args.push(dir.as_ref().to_str().unwrap().to_string());
args.push("/s".to_string());
args.push("/q".to_string());
let result_string = run_command(&protoc_hack(command), args)?;
if result_string != "" {
return Err(Box::new(InstallError::new(format!(
"ERROR trying to delete files {}",
result_string
))));
}
Ok(())
|
// not being found)
pub fn get_core(install_dir: PathBuf, command_args: &Args) -> Result<(), Box<Error>> {
use platform::common;
use std::env;
println!("installing core...");
let orig_dir_pathbuf = env::current_dir()?;
println!("{:?}", orig_dir_pathbuf);
env::set_current_dir(&install_dir)?;
let command: String = "git".to_string();
let mut args: Vec<String> = Vec::new();
args.push("clone".to_string());
args.push("https://github.com/SCAII/SCAII.git".to_string());
let result_string = run_command(&command, args)?;
verify_git_clone_success(&result_string)?;
let mut scaii_dir = install_dir;
scaii_dir.push("SCAII".to_string());
if command_args.flag_branch {
println!("{:?}", scaii_dir);
env::set_current_dir(scaii_dir.clone())?;
checkout(command_args.arg_branch_name.clone())?;
}
ensure_google_closure_lib_installed(scaii_dir.clone())?;
common::install_protobuf_javascript_lib(scaii_dir)?;
env::set_current_dir(orig_dir_pathbuf)?;
Ok(())
}
pub fn copy_built_core(source_dir: PathBuf, target: PathBuf) -> Result<(), Box<Error>> {
//cp target/release/scaii_core.dll ~/.scaii/bin/
copy_source_named(
source_dir,
target,
"scaii_core.dll".to_string(),
"scaii_core.dll".to_string(),
)
}
pub fn copy_recursive<P: AsRef<Path> + Debug>(source: PathBuf, dest: P) -> Result<(), Box<Error>> {
println!("copy {:?} to {:?}", source, dest);
let command: String = "xcopy".to_string();
let mut args: Vec<String> = Vec::new();
args.push(source.as_path().to_str().unwrap().to_string());
args.push("/i".to_string());
args.push("/s".to_string());
args.push("/Y".to_string());
args.push(dest.as_ref().to_str().unwrap().to_string());
let _result_string = run_command(&protoc_hack(command), args)?;
Ok(())
}
pub fn run_command(command: &str, args: Vec<String>) -> Result<String, Box<Error>> {
use error::InstallError;
use platform::common;
use std::process::{Command, Stdio};
let mut c = Command::new("cmd");
let c = c.arg("/C");
let c = c.arg(command);
for arg in args.iter() {
c.arg(arg);
}
println!("running {:?}", c);
let output = c
.stdout(Stdio::inherit())
.output()
.expect(&String::as_str(&format!(
"failed to launch command {}",
command
)));
common::emit_error_output(&output);
if output.status.success() {
let result = String::from_utf8(output.stdout);
match result {
Ok(output_string) => Ok(output_string),
Err(_utf8_convert_error) => Err(Box::new(InstallError::new(
"problem converting command result from utf8".to_string(),
))),
}
} else {
Err(Box::new(InstallError::new(
String::from_utf8_lossy(&output.stderr).to_string(),
)))
}
}
|
}
//shelling out to git on windows due to build error on Jed's windows laptop trying to build git2
// (cmake invocation of cl.exe uses forward slashes for path - likely explanation for dll adjacent to cl.exe
|
multiply-strings.rs
|
fn main()
|
struct Solution;
impl Solution {
pub fn multiply(num1: String, num2: String) -> String {
if &num1 == "0" || &num2 == "0" {
return "0".to_string();
}
let mut res = vec![0i32; num1.len() + num2.len()];
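        // Grade-school multiplication: each digit product contributes its tens to
        // res[i1 + i2] and its ones to res[i1 + i2 + 1]; carries are resolved below.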
for (i1, &v1) in num1.as_bytes().iter().enumerate() {
for (i2, &v2) in num2.as_bytes().iter().enumerate() {
let s = (v1 - b'0') as i32 * (v2 - b'0') as i32;
res[i1 + i2 + 1] += s % 10;
res[i1 + i2] += s / 10;
}
}
let mut s = vec!["-".to_string(); res.len()];
let (mut i, len) = (0, res.len() - 1);
while i <= len {
if res[len - i] < 10 {
s[len - i] = res[len - i].to_string();
} else {
s[len - i] = (res[len - i] % 10).to_string();
res[len - i - 1] += res[len - i] / 10;
}
i += 1;
}
s.join("").trim_start_matches('0').to_string()
}
}
|
{
assert_eq!(
"6".to_string(),
Solution::multiply("2".to_string(), "3".to_string())
);
assert_eq!(
"56088".to_string(),
Solution::multiply("123".to_string(), "456".to_string())
);
assert_eq!(
"998001".to_string(),
Solution::multiply("999".to_string(), "999".to_string())
);
assert_eq!("30501687172287445993560048081057096686019986405658336826483685740920318317486606305094807387278589614".to_string(), Solution::multiply("60974249908865105026646412538664653190280198809433017".to_string(), "500238825698990292381312765074025160144624723742".to_string()));
}
|
timezone.d.ts
|
export declare function initTimezone(): void;
export declare function getZoneAbbr(isUTC: boolean): "" | "UTC";
|
export declare function getZoneName(isUTC: boolean): "" | "Coordinated Universal Time";
|
|
replica.go
|
// Copyright 2014 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvserver
import (
"context"
"fmt"
"sync/atomic"
"time"
"unsafe"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/config/zonepb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/abortspan"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts/ctpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/gc"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/rangefeed"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/split"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/stateloader"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/storage/cloud"
enginepb "github.com/cockroachdb/cockroach/pkg/storage/enginepb"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/quotapool"
"github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/redact"
"github.com/google/btree"
"github.com/kr/pretty"
"go.etcd.io/etcd/raft"
)
const (
// configGossipTTL is the time-to-live for configuration maps.
// optimizePutThreshold is the minimum length of a contiguous run
// of batched puts or conditional puts, after which the constituent
// put operations will possibly be optimized by determining whether
// the key space being written is starting out empty.
optimizePutThreshold = 10
replicaChangeTxnName = "change-replica"
splitTxnName = "split"
mergeTxnName = "merge"
defaultReplicaRaftMuWarnThreshold = 500 * time.Millisecond
)
var testingDisableQuiescence = envutil.EnvOrDefaultBool("COCKROACH_DISABLE_QUIESCENCE", false)
var disableSyncRaftLog = settings.RegisterBoolSetting(
"kv.raft_log.disable_synchronization_unsafe",
"set to true to disable synchronization on Raft log writes to persistent storage. "+
"Setting to true risks data loss or data corruption on server crashes. "+
"The setting is meant for internal testing only and SHOULD NOT be used in production.",
false,
)
// UseAtomicReplicationChanges determines whether to issue atomic replication changes.
// This has no effect until the cluster version is 19.2 or higher.
var UseAtomicReplicationChanges = settings.RegisterBoolSetting(
"kv.atomic_replication_changes.enabled",
"use atomic replication changes",
true,
)
// MaxCommandSizeFloor is the minimum allowed value for the MaxCommandSize
// cluster setting.
const MaxCommandSizeFloor = 4 << 20 // 4MB
// MaxCommandSize wraps "kv.raft.command.max_size".
var MaxCommandSize = settings.RegisterValidatedByteSizeSetting(
"kv.raft.command.max_size",
"maximum size of a raft command",
64<<20,
func(size int64) error {
if size < MaxCommandSizeFloor {
return fmt.Errorf("max_size must be greater than %s", humanizeutil.IBytes(MaxCommandSizeFloor))
}
return nil
},
)
// StrictGCEnforcement controls whether requests are rejected based on the GC
// threshold and the current GC TTL (true) or just based on the GC threshold
// (false).
var StrictGCEnforcement = settings.RegisterBoolSetting(
"kv.gc_ttl.strict_enforcement.enabled",
"if true, fail to serve requests at timestamps below the TTL even if the data still exists",
true,
)
type proposalReevaluationReason int
const (
proposalNoReevaluation proposalReevaluationReason = iota
// proposalIllegalLeaseIndex indicates the proposal failed to apply at
// a Lease index it was not legal for. The command should be re-evaluated.
proposalIllegalLeaseIndex
)
type atomicDescString struct {
strPtr unsafe.Pointer
}
// store atomically updates d.strPtr with the string representation of desc.
func (d *atomicDescString) store(replicaID roachpb.ReplicaID, desc *roachpb.RangeDescriptor) {
str := redact.Sprintfn(func(w redact.SafePrinter) {
w.Printf("%d/", desc.RangeID)
if replicaID == 0 {
w.SafeString("?:")
} else {
w.Printf("%d:", replicaID)
}
if !desc.IsInitialized() {
w.SafeString("{-}")
} else {
const maxRangeChars = 30
rngStr := keys.PrettyPrintRange(roachpb.Key(desc.StartKey), roachpb.Key(desc.EndKey), maxRangeChars)
w.UnsafeString(rngStr)
}
})
atomic.StorePointer(&d.strPtr, unsafe.Pointer(&str))
}
// String returns the string representation of the range; since we are not
// using a lock, the copy might be inconsistent.
func (d *atomicDescString) String() string {
return d.get().StripMarkers()
}
// SafeFormat renders the string safely.
func (d *atomicDescString) SafeFormat(w redact.SafePrinter, _ rune) {
w.Print(d.get())
}
// Get returns the string representation of the range; since we are not
// using a lock, the copy might be inconsistent.
func (d *atomicDescString) get() redact.RedactableString {
return *(*redact.RedactableString)(atomic.LoadPointer(&d.strPtr))
}
// atomicConnectionClass stores an rpc.ConnectionClass atomically.
type atomicConnectionClass uint32
// get reads the current value of the ConnectionClass.
func (c *atomicConnectionClass) get() rpc.ConnectionClass {
return rpc.ConnectionClass(atomic.LoadUint32((*uint32)(c)))
}
// set updates the current value of the ConnectionClass.
func (c *atomicConnectionClass) set(cc rpc.ConnectionClass) {
atomic.StoreUint32((*uint32)(c), uint32(cc))
}
// A Replica is a contiguous keyspace with writes managed via an
// instance of the Raft consensus algorithm. Many ranges may exist
// in a store and they are unlikely to be contiguous. Ranges are
// independent units and are responsible for maintaining their own
// integrity by replacing failed replicas, splitting and merging
// as appropriate.
type Replica struct {
log.AmbientContext
// TODO(tschottdorf): Duplicates r.mu.state.desc.RangeID; revisit that.
RangeID roachpb.RangeID // Only set by the constructor
store *Store
abortSpan *abortspan.AbortSpan // Avoids anomalous reads after abort
// leaseholderStats tracks all incoming BatchRequests to the replica and which
// localities they come from in order to aid in lease rebalancing decisions.
leaseholderStats *replicaStats
// writeStats tracks the number of keys written by applied raft commands
// in order to aid in replica rebalancing decisions.
writeStats *replicaStats
// creatingReplica is set when a replica is created as uninitialized
// via a raft message.
creatingReplica *roachpb.ReplicaDescriptor
// Held in read mode during read-only commands. Held in exclusive mode to
// prevent read-only commands from executing. Acquired before the embedded
// RWMutex.
readOnlyCmdMu syncutil.RWMutex
// rangeStr is a string representation of a RangeDescriptor that can be
// atomically read and updated without needing to acquire the replica.mu lock.
// All updates to state.Desc should be duplicated here.
rangeStr atomicDescString
// connectionClass controls the ConnectionClass used to send raft messages.
connectionClass atomicConnectionClass
// raftMu protects Raft processing the replica.
//
// Locking notes: Replica.raftMu < Replica.mu
raftMu struct {
syncutil.Mutex
// Note that there are two StateLoaders, in raftMu and mu,
// depending on which lock is being held.
stateLoader stateloader.StateLoader
// on-disk storage for sideloaded SSTables. nil when there's no ReplicaID.
sideloaded SideloadStorage
// stateMachine is used to apply committed raft entries.
stateMachine replicaStateMachine
// decoder is used to decode committed raft entries.
decoder replicaDecoder
}
// Contains the lease history when enabled.
leaseHistory *leaseHistory
// concMgr sequences incoming requests and provides isolation between
// requests that intend to perform conflicting operations. It is the
// centerpiece of transaction contention handling.
concMgr concurrency.Manager
mu struct {
// Protects all fields in the mu struct.
syncutil.RWMutex
// The destroyed status of a replica indicating if it's alive, corrupt,
// scheduled for destruction or has been GCed.
// destroyStatus should only be set while also holding the raftMu.
destroyStatus
// Is the range quiescent? Quiescent ranges are not Tick()'d and unquiesce
// whenever a Raft operation is performed.
quiescent bool
// mergeComplete is non-nil if a merge is in-progress, in which case any
// requests should be held until the completion of the merge is signaled by
// the closing of the channel.
mergeComplete chan struct{}
// The state of the Raft state machine.
state kvserverpb.ReplicaState
// Last index/term persisted to the raft log (not necessarily
// committed). Note that lastTerm may be 0 (and thus invalid) even when
// lastIndex is known, in which case the term will have to be retrieved
// from the Raft log entry. Use the invalidLastTerm constant for this
// case.
lastIndex, lastTerm uint64
// A map of raft log index of pending snapshots to deadlines.
// Used to prohibit raft log truncations that would leave a gap between
// the snapshot and the new first index. The map entry has a zero
// deadline while the snapshot is being sent and turns nonzero when the
// snapshot has completed, preventing truncation for a grace period
// (since there is a race between the snapshot completing and its being
// reflected in the raft status used to make truncation decisions).
//
// NB: If we kept only one value, we could end up in situations in which
// we're either giving some snapshots no grace period, or keep an
// already finished snapshot "pending" for extended periods of time
// (preventing log truncation).
snapshotLogTruncationConstraints map[uuid.UUID]snapTruncationInfo
// raftLogSize is the approximate size in bytes of the persisted raft
// log, including sideloaded entries' payloads. The value itself is not
// persisted and is computed lazily, paced by the raft log truncation
// queue which will recompute the log size when it finds it
// uninitialized. This recomputation mechanism isn't relevant for ranges
// which see regular write activity (for those the log size will deviate
// from zero quickly, and so it won't be recomputed but will undercount
// until the first truncation is carried out), but it prevents a large
// dormant Raft log from sitting around forever, which has caused problems
// in the past.
raftLogSize int64
// If raftLogSizeTrusted is false, don't trust the above raftLogSize until
// it has been recomputed.
raftLogSizeTrusted bool
// raftLogLastCheckSize is the value of raftLogSize the last time the Raft
// log was checked for truncation or at the time of the last Raft log
// truncation.
raftLogLastCheckSize int64
// pendingLeaseRequest is used to coalesce RequestLease requests.
pendingLeaseRequest pendingLeaseRequest
// minLeaseProposedTS is the minimum acceptable lease.ProposedTS; only
// leases proposed after this timestamp can be used for proposing commands.
// This is used to protect against several hazards:
// - leases held (or even proposed) before a restart cannot be used after a
// restart. This is because:
// a) the spanlatch manager is wiped during the restart; there might be
// writes in flight that do not have the latches they held reflected. So,
// we need to synchronize all new reads with those old in-flight writes.
// Forcing acquisition of a new lease essentially flushes all the
// previous raft commands.
// b) a lease transfer might have been in progress at the time of the
// restart. Using the existing lease after the restart would break the
// transfer proposer's promise to not use the existing lease.
// - a lease cannot be used after a transfer is initiated. Moreover, even
// lease extensions that were in flight at the time of the transfer cannot be
// used if they eventually apply.
minLeaseProposedTS hlc.Timestamp
// A pointer to the zone config for this replica.
zone *zonepb.ZoneConfig
// proposalBuf buffers Raft commands as they are passed to the Raft
// replication subsystem. The buffer is populated by requests after
// evaluation and is consumed by the Raft processing thread. Once
// consumed, commands are proposed through Raft and moved to the
// proposals map.
//
// Access to proposalBuf must occur *without* holding the mutex.
// Instead, the buffer internally holds a reference to mu and will use
// it appropriately.
proposalBuf propBuf
// proposals stores the Raft in-flight commands which originated at
// this Replica, i.e. all commands for which propose has been called,
// but which have not yet applied.
//
// The *ProposalData in the map are "owned" by it. Elements from the
// map must only be referenced while the Replica.mu is held, except
// if the element is removed from the map first. Modifying the proposal
// itself may require holding the raftMu as fields can be accessed
// underneath raft. See comments on ProposalData fields for synchronization
// requirements.
//
// Due to Raft reproposals, multiple in-flight Raft entries can have
// the same CmdIDKey, all corresponding to the same KV request. However,
// not all Raft entries with a given command ID will correspond directly
// to the *RaftCommand contained in its associated *ProposalData. This
// is because the *RaftCommand can be mutated during reproposals by
// Replica.tryReproposeWithNewLeaseIndex.
//
// TODO(ajwerner): move the proposal map and ProposalData entirely under
// the raftMu.
proposals map[kvserverbase.CmdIDKey]*ProposalData
internalRaftGroup *raft.RawNode
// The ID of the replica within the Raft group. This value may never be 0.
replicaID roachpb.ReplicaID
// The minimum allowed ID for this replica. Initialized from
// RangeTombstone.NextReplicaID.
tombstoneMinReplicaID roachpb.ReplicaID
// The ID of the leader replica within the Raft group. Used to determine
// when the leadership changes.
leaderID roachpb.ReplicaID
// The most recently added replica for the range and when it was added.
// Used to determine whether a replica is new enough that we shouldn't
// penalize it for being slightly behind. These fields get cleared out once
// we know that the replica has caught up.
lastReplicaAdded roachpb.ReplicaID
lastReplicaAddedTime time.Time
// initialMaxClosed is the initial maxClosed timestamp for the replica as known
// from its left-hand-side upon creation.
initialMaxClosed hlc.Timestamp
// The most recently updated time for each follower of this range. This is updated
// every time a Raft message is received from a peer.
// Note that superficially it seems that similar information is contained in the
// Progress of a RaftStatus, which has a RecentActive field. However, that field
// is always true unless CheckQuorum is active, which at the time of writing in
// CockroachDB is not the case.
//
// The lastUpdateTimes map is also updated when a leaseholder steps up
// (making the assumption that all followers are live at that point),
// and when the range unquiesces (marking all replicating followers as
// live).
//
// TODO(tschottdorf): keeping a map on each replica seems to be
// overdoing it. We should map the replicaID to a NodeID and then use
// node liveness (or any sensible measure of the peer being around).
// The danger in doing so is that a single stuck replica on an otherwise
// functioning node could fill up the quota pool. We are already taking
// this kind of risk though: a replica that gets stuck on an otherwise
// live node will not lose leaseholdership.
lastUpdateTimes lastUpdateTimesMap
// The last seen replica descriptors from incoming Raft messages. These are
// stored so that the replica still knows the replica descriptors for itself
// and for its message recipients in the circumstances when its RangeDescriptor
// is out of date.
//
// Normally, a replica knows about the other replica descriptors for a
// range via the RangeDescriptor stored in Replica.mu.state.Desc. But that
// descriptor is only updated during a Split or ChangeReplicas operation.
// There are periods during a Replica's lifetime when that information is
// out of date:
//
// 1. When a replica is being newly created as the result of an incoming
// Raft message for it. This is the common case for ChangeReplicas and an
// uncommon case for Splits. The leader will be sending the replica
// messages and the replica needs to be able to respond before it can
// receive an updated range descriptor (via a snapshot,
// changeReplicasTrigger, or splitTrigger).
//
// 2. If the node containing a replica is partitioned or down while the
// replicas for the range are updated. When the node comes back up, other
// replicas may begin communicating with it and it needs to be able to
// respond. Unlike 1 where there is no range descriptor, in this situation
// the replica has a range descriptor but it is out of date. Note that a
// replica being removed from a node and then quickly re-added before the
// replica has been GC'd will also use the last seen descriptors. In
// effect, this is another path for which the replica's local range
// descriptor is out of date.
//
// The last seen replica descriptors are updated on receipt of every raft
// message via Replica.setLastReplicaDescriptors (see
// Store.HandleRaftRequest). These last seen descriptors are used when
// the replica's RangeDescriptor contains missing or out of date descriptors
// for a replica (see Replica.sendRaftMessage).
//
// Removing a replica from Store.mu.replicas is not a problem because
// when a replica is completely removed, it won't be recreated until
// there is another event that will repopulate the replicas map in the
// range descriptor. When it is temporarily dropped and recreated, the
// newly recreated replica will have a complete range descriptor.
lastToReplica, lastFromReplica roachpb.ReplicaDescriptor
// Computed checksum at a snapshot UUID.
checksums map[uuid.UUID]ReplicaChecksum
// proposalQuota is the quota pool maintained by the lease holder where
// incoming writes acquire quota from a fixed quota pool before going
// through. If there is no quota available, the write is throttled
// until quota is made available to the pool.
// Acquired quota for a given command is only released when all the
// replicas have persisted the corresponding entry into their logs.
proposalQuota *quotapool.IntPool
// The base index is the index up to (including) which quota was already
// released. That is, the first element in quotaReleaseQueue below is
// released as the base index moves up by one, etc.
proposalQuotaBaseIndex uint64
// Once the leader observes a proposal come 'out of Raft', we add the size
// of the associated command to a queue of quotas we have yet to release
// back to the quota pool. At that point ownership of the quota is
// transferred from r.mu.proposals to this queue.
// We'll release the respective quota once all replicas have persisted the
// corresponding entry into their logs (or once we give up waiting on some
// replica because it looks like it's dead).
quotaReleaseQueue []*quotapool.IntAlloc
// Counts calls to Replica.tick()
ticks int
// Counts Raft messages refused due to queue congestion.
droppedMessages int
// Note that there are two replicaStateLoaders, in raftMu and mu,
// depending on which lock is being held.
stateLoader stateloader.StateLoader
// draining specifies whether this replica is draining. Raft leadership
// transfers due to a lease change will be attempted even if the target does
// not have all the log entries.
draining bool
// cachedProtectedTS provides the state of the protected timestamp
// subsystem as used on the request serving path to determine the effective
// gc threshold given the current TTL when using strict GC enforcement.
//
// It would be too expensive to go read from the protected timestamp cache
// for every request. Instead, if clients want to ensure that their request
// will see the effect of a protected timestamp record, they need to verify
// the request. See the comment on the struct for more details.
cachedProtectedTS cachedProtectedTimestampState
// largestPreviousMaxRangeSizeBytes tracks a previous zone.RangeMaxBytes
// which exceeded the current zone.RangeMaxBytes to help defeat the range
// backpressure mechanism in cases where a user reduces the configured range
// size. It is set when the zone config changes to a smaller value and the
// current range size exceeds the new value. It is cleared after the range's
// size drops below its current zone.MaxRangeBytes or if the
// zone.MaxRangeBytes increases to surpass the current value.
largestPreviousMaxRangeSizeBytes int64
// failureToGossipSystemConfig is set to true when the leaseholder of the
// range containing the system config span fails to gossip due to an
// outstanding intent (see MaybeGossipSystemConfig). It is reset when the
// system config is successfully gossiped or when the Replica loses the
// lease. It is read when handling a MaybeGossipSystemConfigIfHaveFailure
// local result trigger. That trigger is set when an EndTransaction with an
// ABORTED status is evaluated on a range containing the system config span.
//
// While the gossiping of the system config span is best-effort, the sql
// schema leasing mechanism degrades dramatically if changes are not
// gossiped. This degradation is due to the fact that schema changes, after
// writing intents, often need to ensure that there aren't outstanding
// leases on old versions and if there are, roll back and wait until there
// are not. The problem is that this waiting may take a long time if the
// current leaseholders are not notified. We deal with this by detecting the
// abort of a transaction which might have blocked the system config from
// being gossiped and attempting to gossip again.
failureToGossipSystemConfig bool
}
rangefeedMu struct {
syncutil.RWMutex
// proc is an instance of a rangefeed Processor that is capable of
// routing rangefeed events to a set of subscribers. Will be nil if no
// subscribers are registered.
//
// Requires Replica.rangefeedMu be held when mutating the pointer.
// Requires Replica.raftMu be held when providing logical ops and
// informing the processor of closed timestamp updates. This properly
// synchronizes updates that are linearized and driven by the Raft log.
proc *rangefeed.Processor
// opFilter is a best-effort filter that informs the raft processing
// goroutine of which logical operations the rangefeed processor is
// interested in based on the processor's current registrations.
//
// The filter is allowed to return false positives, but not false
// negatives. False negatives are avoided by updating (expanding) the
// filter while holding the Replica.raftMu when adding new registrations
// after flushing the rangefeed.Processor event channel. This ensures
// that no events that were filtered before the new registration was
// added will be observed by the new registration and all events after
// the new registration will respect the updated filter.
//
// Requires Replica.rangefeedMu be held when mutating the pointer.
opFilter *rangefeed.Filter
}
// Throttle how often we offer this Replica to the split and merge queues.
// We have triggers downstream of Raft that do so based on limited
// information and, without explicit throttling, some replicas will offer
// themselves once per applied Raft command, which is silly and also clogs
// up the queues' semaphores.
splitQueueThrottle, mergeQueueThrottle util.EveryN
// loadBasedSplitter keeps information about load-based splitting.
loadBasedSplitter split.Decider
unreachablesMu struct {
syncutil.Mutex
remotes map[roachpb.ReplicaID]struct{}
}
// r.mu < r.protectedTimestampMu
protectedTimestampMu struct {
syncutil.Mutex
// minStateReadTimestamp is a lower bound on the timestamp of the cached
// protected timestamp state which may be used when updating
// pendingGCThreshold. This field acts to eliminate races between
// verification of protected timestamp records and the setting of a new
// GC threshold.
minStateReadTimestamp hlc.Timestamp
// pendingGCThreshold holds a timestamp which is being proposed as a new
// GC threshold for the range.
pendingGCThreshold hlc.Timestamp
}
}
var _ batcheval.EvalContext = &Replica{}
// KeyRange is an interface type for the replicasByKey BTree, to compare
// Replica and ReplicaPlaceholder.
type KeyRange interface {
Desc() *roachpb.RangeDescriptor
rangeKeyItem
btree.Item
fmt.Stringer
}
var _ KeyRange = &Replica{}
var _ kv.Sender = &Replica{}
// String returns the string representation of the replica using an
// inconsistent copy of the range descriptor. Therefore, String does not
// require a lock and its output may not be atomic with other ongoing work in
// the replica. This is done to prevent deadlocks in logging sites.
func (r *Replica) String() string {
return redact.StringWithoutMarkers(r)
}
// SafeFormat implements the redact.SafeFormatter interface.
func (r *Replica) SafeFormat(w redact.SafePrinter, _ rune) {
w.Printf("[n%d,s%d,r%s]",
r.store.Ident.NodeID, r.store.Ident.StoreID, r.rangeStr.get())
}
// ReplicaID returns the ID for the Replica. It may be zero if the replica does
// not know its ID. Once a Replica has a non-zero ReplicaID it will never change.
func (r *Replica) ReplicaID() roachpb.ReplicaID {
r.mu.RLock()
defer r.mu.RUnlock()
return r.mu.replicaID
}
// cleanupFailedProposalLocked cleans up after a proposal that has failed. It
// clears any references to the proposal and releases associated quota.
// It requires that both Replica.mu and Replica.raftMu are exclusively held.
func (r *Replica) cleanupFailedProposalLocked(p *ProposalData) {
r.raftMu.AssertHeld()
r.mu.AssertHeld()
delete(r.mu.proposals, p.idKey)
p.releaseQuota()
}
// GetMinBytes gets the replica's minimum byte threshold.
func (r *Replica) GetMinBytes() int64 {
r.mu.RLock()
defer r.mu.RUnlock()
return *r.mu.zone.RangeMinBytes
}
// GetMaxBytes gets the replica's maximum byte threshold.
func (r *Replica) GetMaxBytes() int64 {
r.mu.RLock()
defer r.mu.RUnlock()
return *r.mu.zone.RangeMaxBytes
}
// SetZoneConfig sets the replica's zone config.
func (r *Replica) SetZoneConfig(zone *zonepb.ZoneConfig) {
r.mu.Lock()
defer r.mu.Unlock()
if r.isInitializedRLocked() &&
r.mu.zone != nil &&
zone != nil {
total := r.mu.state.Stats.Total()
// Set largestPreviousMaxRangeSizeBytes if the current range size is above
// the new limit and we don't already have a larger value. Reset it if
// the new limit is larger than the current largest we're aware of.
if total > *zone.RangeMaxBytes &&
*zone.RangeMaxBytes < *r.mu.zone.RangeMaxBytes &&
r.mu.largestPreviousMaxRangeSizeBytes < *r.mu.zone.RangeMaxBytes &&
// Check to make sure that we're replacing a real zone config. Otherwise
// the default value would prevent backpressure until the range was
// larger than the default value. When the store starts up it sets the
// zone for the replica to this default value; later on it overwrites it
// with a new instance even if the value is the same as the default.
r.mu.zone != r.store.cfg.DefaultZoneConfig &&
r.mu.zone != r.store.cfg.DefaultSystemZoneConfig {
r.mu.largestPreviousMaxRangeSizeBytes = *r.mu.zone.RangeMaxBytes
} else if r.mu.largestPreviousMaxRangeSizeBytes > 0 &&
r.mu.largestPreviousMaxRangeSizeBytes < *zone.RangeMaxBytes {
r.mu.largestPreviousMaxRangeSizeBytes = 0
}
}
r.mu.zone = zone
}
// IsFirstRange returns true if this is the first range.
func (r *Replica) IsFirstRange() bool {
return r.RangeID == 1
}
// IsDestroyed returns a non-nil error if the replica has been destroyed
// and the reason if it has.
func (r *Replica) IsDestroyed() (DestroyReason, error) {
r.mu.RLock()
defer r.mu.RUnlock()
return r.isDestroyedRLocked()
}
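// isDestroyedRLocked is like IsDestroyed, but assumes r.mu is already held
// (for reading or writing).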
func (r *Replica) isDestroyedRLocked() (DestroyReason, error) {
return r.mu.destroyStatus.reason, r.mu.destroyStatus.err
}
// DescAndZone returns the authoritative range descriptor as well
// as the zone config for the replica.
func (r *Replica) DescAndZone() (*roachpb.RangeDescriptor, *zonepb.ZoneConfig) {
r.mu.RLock()
defer r.mu.RUnlock()
return r.mu.state.Desc, r.mu.zone
}
// Desc returns the authoritative range descriptor, acquiring a replica lock in
// the process.
func (r *Replica) Desc() *roachpb.RangeDescriptor {
r.mu.RLock()
defer r.mu.RUnlock()
return r.mu.state.Desc
}
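// descRLocked returns the authoritative range descriptor, assuming r.mu is
// already held for reading.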
func (r *Replica) descRLocked() *roachpb.RangeDescriptor {
r.mu.AssertRHeld()
return r.mu.state.Desc
}
// NodeID returns the ID of the node this replica belongs to.
func (r *Replica) NodeID() roachpb.NodeID {
return r.store.nodeDesc.NodeID
}
// GetNodeLocality returns the locality of the node this replica belongs to.
func (r *Replica) GetNodeLocality() roachpb.Locality {
return r.store.nodeDesc.Locality
}
// ClusterSettings returns the node's ClusterSettings.
func (r *Replica) ClusterSettings() *cluster.Settings {
return r.store.cfg.Settings
}
// StoreID returns the Replica's StoreID.
func (r *Replica) StoreID() roachpb.StoreID {
return r.store.StoreID()
}
// EvalKnobs returns the EvalContext's Knobs.
func (r *Replica) EvalKnobs() kvserverbase.BatchEvalTestingKnobs {
return r.store.cfg.TestingKnobs.EvalKnobs
}
// Clock returns the hlc clock shared by this replica.
func (r *Replica) Clock() *hlc.Clock {
return r.store.Clock()
}
// DB returns the Replica's client DB.
func (r *Replica) DB() *kv.DB {
return r.store.DB()
}
// Engine returns the Replica's underlying Engine. In most cases the
// evaluation Batch should be used instead.
func (r *Replica) Engine() storage.Engine {
return r.store.Engine()
}
// AbortSpan returns the Replica's AbortSpan.
func (r *Replica) AbortSpan() *abortspan.AbortSpan {
// Despite its name, the AbortSpan doesn't hold on-disk data in
// memory. It just provides methods that take a Batch, so SpanSet
// declarations are enforced there.
return r.abortSpan
}
// GetLimiters returns the Replica's limiters.
func (r *Replica) GetLimiters() *batcheval.Limiters {
return &r.store.limiters
}
// GetConcurrencyManager returns the Replica's concurrency.Manager.
func (r *Replica) GetConcurrencyManager() concurrency.Manager {
return r.concMgr
}
// GetTerm returns the term of the given index in the raft log.
func (r *Replica) GetTerm(i uint64) (uint64, error) {
r.mu.RLock()
defer r.mu.RUnlock()
return r.raftTermRLocked(i)
}
// GetRangeID returns the Range ID.
func (r *Replica) GetRangeID() roachpb.RangeID {
return r.RangeID
}
// GetGCThreshold returns the GC threshold.
func (r *Replica) GetGCThreshold() hlc.Timestamp {
r.mu.RLock()
defer r.mu.RUnlock()
return *r.mu.state.GCThreshold
}
// getImpliedGCThresholdRLocked returns the gc threshold of the replica which
// should be used to determine the validity of commands. The returned timestamp
// may be newer than the replica's true GC threshold if strict enforcement
// is enabled and the TTL has passed. If this is an admin command or this range
// contains data outside of the user keyspace, we return the true GC threshold.
func (r *Replica) getImpliedGCThresholdRLocked(
st *kvserverpb.LeaseStatus, isAdmin bool,
) hlc.Timestamp {
threshold := *r.mu.state.GCThreshold
// The GC threshold is the oldest value we can return here.
if isAdmin || !StrictGCEnforcement.Get(&r.store.ClusterSettings().SV) ||
r.isSystemRangeRLocked() {
return threshold
}
// In order to make this check inexpensive, we keep a copy of the reading of
// protected timestamp state in the replica. This state may be stale, may not
// exist, or may be unusable given the current lease status. In those cases we
// must return the GC threshold. On the one hand this seems like a big deal:
// after a lease transfer, for minutes, users will be able to read data that
// has technically expired. Fortunately this strict enforcement is merely a
// user experience win; it's always safe to allow reads to continue so long
// as they are after the GC threshold.
c := r.mu.cachedProtectedTS
if st.State != kvserverpb.LeaseState_VALID || c.readAt.Less(st.Lease.Start) {
return threshold
}
impliedThreshold := gc.CalculateThreshold(st.Timestamp, *r.mu.zone.GC)
threshold.Forward(impliedThreshold)
// If we have a protected timestamp record which precedes the implied
// threshold, use the threshold it implies instead.
if c.earliestRecord != nil && c.earliestRecord.Timestamp.Less(threshold) {
threshold = c.earliestRecord.Timestamp.Prev()
}
return threshold
}
// isSystemRange returns true if r's key range precedes the start of user
// structured data (SQL keys) for the range's tenant keyspace.
func (r *Replica) isSystemRange() bool {
r.mu.RLock()
defer r.mu.RUnlock()
return r.isSystemRangeRLocked()
}
func (r *Replica) isSystemRangeRLocked() bool {
rem, _, err := keys.DecodeTenantPrefix(r.mu.state.Desc.StartKey.AsRawKey())
return err == nil && roachpb.Key(rem).Compare(keys.UserTableDataMin) < 0
}
// maxReplicaIDOfAny returns the maximum ReplicaID of any replica, including
// voters and learners.
func maxReplicaIDOfAny(desc *roachpb.RangeDescriptor) roachpb.ReplicaID {
if desc == nil || !desc.IsInitialized() {
return 0
}
var maxID roachpb.ReplicaID
for _, repl := range desc.Replicas().All() {
if repl.ReplicaID > maxID {
maxID = repl.ReplicaID
}
}
return maxID
}
// LastReplicaAdded returns the ID of the most recently added replica and the
// time at which it was added.
func (r *Replica) LastReplicaAdded() (roachpb.ReplicaID, time.Time) {
r.mu.RLock()
defer r.mu.RUnlock()
return r.mu.lastReplicaAdded, r.mu.lastReplicaAddedTime
}
// GetReplicaDescriptor returns the replica for this range from the range
// descriptor. Returns a *RangeNotFoundError if the replica is not found.
// No other errors are returned.
func (r *Replica) GetReplicaDescriptor() (roachpb.ReplicaDescriptor, error) {
r.mu.RLock()
defer r.mu.RUnlock()
return r.getReplicaDescriptorRLocked()
}
// getReplicaDescriptorRLocked is like GetReplicaDescriptor, but assumes that
// r.mu is held for either reading or writing.
func (r *Replica) getReplicaDescriptorRLocked() (roachpb.ReplicaDescriptor, error) {
repDesc, ok := r.mu.state.Desc.GetReplicaDescriptor(r.store.StoreID())
if ok {
return repDesc, nil
}
return roachpb.ReplicaDescriptor{}, roachpb.NewRangeNotFoundError(r.RangeID, r.store.StoreID())
}
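// getMergeCompleteCh returns the merge-complete channel, which is non-nil
// while a merge into the left-hand neighbor is in progress. It acquires r.mu
// to do so.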
func (r *Replica) getMergeCompleteCh() chan struct{} {
r.mu.RLock()
defer r.mu.RUnlock()
return r.getMergeCompleteChRLocked()
}
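// getMergeCompleteChRLocked is like getMergeCompleteCh, but assumes r.mu is
// already held.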
func (r *Replica) getMergeCompleteChRLocked() chan struct{} {
return r.mu.mergeComplete
}
// setLastReplicaDescriptors sets the most recently seen replica
// descriptors to those contained in the *RaftMessageRequest, acquiring r.mu
// to do so.
func (r *Replica) setLastReplicaDescriptors(req *RaftMessageRequest) {
r.mu.Lock()
r.mu.lastFromReplica = req.FromReplica
r.mu.lastToReplica = req.ToReplica
r.mu.Unlock()
}
// GetMVCCStats returns a copy of the MVCC stats object for this range.
// This accessor is thread-safe, but provides no guarantees about its
// synchronization with any concurrent writes.
func (r *Replica) GetMVCCStats() enginepb.MVCCStats {
r.mu.RLock()
defer r.mu.RUnlock()
return *r.mu.state.Stats
}
// GetSplitQPS returns the Replica's queries/s request rate.
//
// NOTE: This should only be used for load-based splitting, and it only
// works when the load-based splitting cluster setting is enabled.
//
// Use QueriesPerSecond() for current QPS stats for all other purposes.
func (r *Replica) GetSplitQPS() float64 {
return r.loadBasedSplitter.LastQPS(timeutil.Now())
}
// ContainsKey returns whether this range contains the specified key.
//
// TODO(bdarnell): This is not the same as RangeDescriptor.ContainsKey.
func (r *Replica) ContainsKey(key roachpb.Key) bool {
return kvserverbase.ContainsKey(r.Desc(), key)
}
// ContainsKeyRange returns whether this range contains the specified
// key range from start to end.
func (r *Replica) ContainsKeyRange(start, end roachpb.Key) bool {
return kvserverbase.ContainsKeyRange(r.Desc(), start, end)
}
// GetLastReplicaGCTimestamp reads the timestamp at which the replica was
// last checked for removal by the replica gc queue.
func (r *Replica) GetLastReplicaGCTimestamp(ctx context.Context) (hlc.Timestamp, error) {
key := keys.RangeLastReplicaGCTimestampKey(r.RangeID)
var timestamp hlc.Timestamp
_, err := storage.MVCCGetProto(ctx, r.store.Engine(), key, hlc.Timestamp{}, &timestamp,
storage.MVCCGetOptions{})
if err != nil {
return hlc.Timestamp{}, err
}
return timestamp, nil
}
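// setLastReplicaGCTimestamp persists the timestamp at which the replica was
// last checked for removal by the replica gc queue.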
func (r *Replica) setLastReplicaGCTimestamp(ctx context.Context, timestamp hlc.Timestamp) error {
key := keys.RangeLastReplicaGCTimestampKey(r.RangeID)
return storage.MVCCPutProto(ctx, r.store.Engine(), nil, key, hlc.Timestamp{}, nil, &timestamp)
}
// getQueueLastProcessed returns the last processed timestamp for the
// specified queue, or the zero timestamp if not available.
func (r *Replica) getQueueLastProcessed(ctx context.Context, queue string) (hlc.Timestamp, error) {
key := keys.QueueLastProcessedKey(r.Desc().StartKey, queue)
var timestamp hlc.Timestamp
if r.store != nil {
_, err := storage.MVCCGetProto(ctx, r.store.Engine(), key, hlc.Timestamp{}, &timestamp,
storage.MVCCGetOptions{})
if err != nil {
log.VErrEventf(ctx, 2, "last processed timestamp unavailable: %s", err)
return hlc.Timestamp{}, err
}
}
log.VEventf(ctx, 2, "last processed timestamp: %s", timestamp)
return timestamp, nil
}
// setQueueLastProcessed writes the last processed timestamp for the
// specified queue.
func (r *Replica) setQueueLastProcessed(
ctx context.Context, queue string, timestamp hlc.Timestamp,
) error {
key := keys.QueueLastProcessedKey(r.Desc().StartKey, queue)
return r.store.DB().PutInline(ctx, key, &timestamp)
}
// RaftStatus returns the current raft status of the replica. It returns nil
// if the Raft group has not been initialized yet.
func (r *Replica) RaftStatus() *raft.Status {
r.mu.RLock()
defer r.mu.RUnlock()
return r.raftStatusRLocked()
}
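// raftStatusRLocked returns the current raft status, or nil if the Raft
// group has not been initialized. It assumes r.mu is held.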
func (r *Replica) raftStatusRLocked() *raft.Status {
if rg := r.mu.internalRaftGroup; rg != nil {
s := rg.Status()
return &s
}
return nil
}
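// raftBasicStatusRLocked returns the raft.BasicStatus, or a zero value if
// the Raft group has not been initialized. It assumes r.mu is held.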
func (r *Replica) raftBasicStatusRLocked() raft.BasicStatus {
if rg := r.mu.internalRaftGroup; rg != nil {
return rg.BasicStatus()
}
return raft.BasicStatus{}
}
// State returns a copy of the internal state of the Replica, along with some
// auxiliary information.
func (r *Replica) State() kvserverpb.RangeInfo {
var ri kvserverpb.RangeInfo
// NB: this acquires an RLock(). Reentrant RLocks are deadlock prone, so do
// this first before RLocking below. Performance of this extra lock
// acquisition is not a concern.
ri.ActiveClosedTimestamp, _ = r.maxClosed(context.Background())
// NB: numRangefeedRegistrations doesn't require Replica.mu to be locked.
// However, it does require coordination between multiple goroutines, so
// it's best to keep it out of the Replica.mu critical section.
ri.RangefeedRegistrations = int64(r.numRangefeedRegistrations())
r.mu.RLock()
defer r.mu.RUnlock()
ri.ReplicaState = *(protoutil.Clone(&r.mu.state)).(*kvserverpb.ReplicaState)
ri.LastIndex = r.mu.lastIndex
ri.NumPending = uint64(r.numPendingProposalsRLocked())
ri.RaftLogSize = r.mu.raftLogSize
ri.RaftLogSizeTrusted = r.mu.raftLogSizeTrusted
ri.NumDropped = uint64(r.mu.droppedMessages)
if r.mu.proposalQuota != nil {
ri.ApproximateProposalQuota = int64(r.mu.proposalQuota.ApproximateQuota())
ri.ProposalQuotaBaseIndex = int64(r.mu.proposalQuotaBaseIndex)
ri.ProposalQuotaReleaseQueue = make([]int64, len(r.mu.quotaReleaseQueue))
for i, a := range r.mu.quotaReleaseQueue {
if a != nil {
ri.ProposalQuotaReleaseQueue[i] = int64(a.Acquired())
}
}
}
ri.RangeMaxBytes = *r.mu.zone.RangeMaxBytes
if desc := ri.ReplicaState.Desc; desc != nil {
// Learner replicas don't serve follower reads, but they still receive
// closed timestamp updates, so include them here.
allReplicas := desc.Replicas().All()
for i := range allReplicas {
replDesc := &allReplicas[i]
r.store.cfg.ClosedTimestamp.Storage.VisitDescending(replDesc.NodeID, func(e ctpb.Entry) (done bool) {
mlai, found := e.MLAI[r.RangeID]
if !found {
return false // not done
}
if ri.NewestClosedTimestamp.ClosedTimestamp.Less(e.ClosedTimestamp) {
ri.NewestClosedTimestamp.NodeID = replDesc.NodeID
ri.NewestClosedTimestamp.ClosedTimestamp = e.ClosedTimestamp
ri.NewestClosedTimestamp.MLAI = int64(mlai)
ri.NewestClosedTimestamp.Epoch = int64(e.Epoch)
}
return true // done
})
}
}
return ri
}
// assertStateLocked can be called from the Raft goroutine to check that the
// in-memory and on-disk states of the Replica are congruent.
// Requires that both r.raftMu and r.mu are held.
//
// TODO(tschottdorf): Consider future removal (for example, when #7224 is resolved).
func (r *Replica) assertStateLocked(ctx context.Context, reader storage.Reader) {
diskState, err := r.mu.stateLoader.Load(ctx, reader, r.mu.state.Desc)
if err != nil {
log.Fatalf(ctx, "%v", err)
}
if !diskState.Equal(r.mu.state) {
// The roundabout way of printing here is to expose this information in sentry.io.
//
// TODO(dt): expose properly once #15892 is addressed.
log.Errorf(ctx, "on-disk and in-memory state diverged:\n%s",
pretty.Diff(diskState, r.mu.state))
r.mu.state.Desc, diskState.Desc = nil, nil
log.Fatalf(ctx, "on-disk and in-memory state diverged: %s",
log.Safe(pretty.Diff(diskState, r.mu.state)))
}
}
// checkExecutionCanProceed returns an error if a batch request cannot be
// executed by the Replica. An error indicates that the Replica is not live and
// able to serve traffic or that the request is not compatible with the state of
// the Range.
//
// The method accepts a concurrency Guard and a LeaseStatus parameter. These are
// used to indicate whether the caller has acquired latches and checked the
// Range lease. The method will only check for a pending merge if both of these
// conditions are true. If either !g.HoldingLatches() or st == nil then the
// method will not check for a pending merge. Callers might be ok with this if
// they know that they will end up checking for a pending merge at some later
// time.
func (r *Replica) checkExecutionCanProceed(
ctx context.Context, ba *roachpb.BatchRequest, g *concurrency.Guard, st *kvserverpb.LeaseStatus,
) error {
rSpan, err := keys.Range(ba.Requests)
if err != nil {
return err
}
r.mu.RLock()
defer r.mu.RUnlock()
if _, err := r.isDestroyedRLocked(); err != nil {
return err
} else if err := r.checkSpanInRangeRLocked(ctx, rSpan); err != nil {
return err
} else if err := r.checkTSAboveGCThresholdRLocked(
ba.EarliestActiveTimestamp(), st, ba.IsAdmin(),
); err != nil {
return err
} else if g.HoldingLatches() && st != nil {
// Only check for a pending merge if latches are held and the Range
// lease is held by this Replica. Without both of these conditions,
// checkForPendingMergeRLocked could return false negatives.
//
// In practice, this means that follower reads or any request where
// concurrency.shouldAcquireLatches() == false (e.g. lease requests)
// will not check for a pending merge before executing and, as such,
// can execute while a range is in a merge's critical phase.
return r.checkForPendingMergeRLocked(ba)
}
return nil
}
// checkExecutionCanProceedForRangeFeed returns an error if a rangefeed request
// cannot be executed by the Replica.
func (r *Replica) checkExecutionCanProceedForRangeFeed(
ctx context.Context, rSpan roachpb.RSpan, ts hlc.Timestamp,
) error {
now := r.Clock().Now()
r.mu.RLock()
defer r.mu.RUnlock()
status := r.leaseStatus(ctx, *r.mu.state.Lease, now, r.mu.minLeaseProposedTS)
if _, err := r.isDestroyedRLocked(); err != nil {
return err
} else if err := r.checkSpanInRangeRLocked(ctx, rSpan); err != nil {
return err
} else if err := r.checkTSAboveGCThresholdRLocked(ts, &status, false /* isAdmin */); err != nil {
return err
} else if r.requiresExpiringLeaseRLocked() {
// Ensure that the range does not require an expiration-based lease. If it
// does, it will never get closed timestamp updates and the rangefeed will
// never be able to advance its resolved timestamp.
return errors.New("expiration-based leases are incompatible with rangefeeds")
}
return nil
}
// checkSpanInRangeRLocked returns an error if a request (identified by its
// key span) cannot be run on the replica.
func (r *Replica) checkSpanInRangeRLocked(ctx context.Context, rspan roachpb.RSpan) error {
desc := r.mu.state.Desc
if desc.ContainsKeyRange(rspan.Key, rspan.EndKey) {
return nil
}
return roachpb.NewRangeKeyMismatchError(
ctx, rspan.Key.AsRawKey(), rspan.EndKey.AsRawKey(), desc, r.mu.state.Lease,
)
}
// checkTSAboveGCThresholdRLocked returns an error if a request (identified
// by its MVCC timestamp) cannot be run on the replica because its timestamp
// is not above the GC threshold.
func (r *Replica) checkTSAboveGCThresholdRLocked(
ts hlc.Timestamp, st *kvserverpb.LeaseStatus, isAdmin bool,
) error {
threshold := r.getImpliedGCThresholdRLocked(st, isAdmin)
if threshold.Less(ts) {
return nil
}
return &roachpb.BatchTimestampBeforeGCError{
Timestamp: ts,
Threshold: threshold,
}
}
// checkForPendingMergeRLocked determines whether the replica is being merged
// into its left-hand neighbor. If so, an error is returned to prevent the
// request from proceeding until the merge completes.
func (r *Replica) checkForPendingMergeRLocked(ba *roachpb.BatchRequest) error {
if r.getMergeCompleteChRLocked() == nil {
return nil
}
if ba.IsSingleSubsumeRequest() {
return nil
}
// The replica is being merged into its left-hand neighbor. This request
// cannot proceed until the merge completes, signaled by the closing of the
// channel.
//
// It is very important that this check occur after we have acquired latches
// from the spanlatch manager. Only after we release these latches are we
// guaranteed that we're not racing with a Subsume command. (Subsume
// commands declare a conflict with all other commands.) It is also
// important that this check occur after we have verified that this replica
// is the leaseholder. Only the leaseholder will have its merge complete
// channel set.
//
// Note that Subsume commands are exempt from waiting on the mergeComplete
// channel. This is necessary to avoid deadlock. While normally a Subsume
// request will trigger the installation of a mergeComplete channel after it
// is executed, it may sometimes execute after the mergeComplete channel has
// been installed. Consider the case where the RHS replica acquires a new
// lease after the merge transaction deletes its local range descriptor but
// before the Subsume command is sent. The lease acquisition request will
// notice the intent on the local range descriptor and install a
// mergeComplete channel. If the forthcoming Subsume blocked on that
// channel, the merge transaction would deadlock.
//
// This exclusion admits a small race condition. If a Subsume request is
// sent to the right-hand side of a merge, outside of a merge transaction,
// after the merge has committed but before the RHS has noticed that the
// merge has committed, the request may return stale data. Since the merge
// has committed, the LHS may have processed writes to the keyspace
// previously owned by the RHS that the RHS is unaware of. This window
// closes quickly, as the RHS will soon notice the merge transaction has
// committed and mark itself as destroyed, which prevents it from serving
// all traffic, including Subsume requests.
//
// In our current, careful usage of Subsume, this race condition is
// irrelevant. Subsume is only sent from within a merge transaction, and
// merge transactions read the RHS descriptor at the beginning of the
// transaction to verify that it has not already been merged away.
//
// We can't wait for the merge to complete here, though. The replica might
// need to respond to a Subsume request in order for the merge to complete,
// and blocking here would force that Subsume request to sit holding its
// latches forever, deadlocking the merge. Instead, we release the latches
// we acquired above and return a MergeInProgressError. The store will catch
// that error and resubmit the request after mergeCompleteCh closes. See
// #27442 for the full context.
return &roachpb.MergeInProgressError{}
}
// isNewerThanSplit is a helper used in split(Pre|Post)Apply to
// determine whether the Replica on the right hand side of the split must
// have been removed from this store after the split. There is one
// false negative, outlined below, where false will be returned even though
// the hard state may be due to a newer replica. It should be safe.
//
// TODO(ajwerner): Ideally if this store had ever learned that the replica
// created by the split were removed it would not forget that fact.
// There exists one edge case where the store may learn that it should house
// a replica of the same range with a higher replica ID and then forget.
// If the first raft message this store ever receives for this range
// contains a replica ID higher than the replica ID in the split trigger
// then an in-memory replica at that higher replica ID will be created and
// no tombstone at a lower replica ID will be written. If the server then
// crashes it will forget that it had ever been the higher replica ID. The
// server may then proceed to process the split and initialize a replica at
// the replica ID implied by the split. This is potentially problematic as
// the replica may have voted as this higher replica ID and when it rediscovers
// the higher replica ID it will delete all of the state corresponding to the
// older replica ID including its hard state which may have been synthesized
// with votes as the newer replica ID. This case tends to be handled safely
// in practice because the replica should only be receiving messages as the
// newer replica ID after it has been added to the range. Prior to learner
// replicas we would only add a store to a range after we've successfully
// applied a preemptive snapshot. If the store were to split between the
// preemptive snapshot and the addition then the addition would fail due to
// the conditional put logic. If the store were then to enable learners,
// we're still okay because we won't promote a learner unless we succeed in
// sending a learner snapshot. If we fail to send that snapshot, the replica
// never becomes a voter, so its votes don't matter and are safe to discard.
//
// Despite the safety due to the change replicas protocol explained above
// it'd be good to know for sure that a replica ID for a range on a store
// is always monotonically increasing, even across restarts.
//
// See TestProcessSplitAfterRightHandSideHasBeenRemoved.
func (r *Replica) isNewerThanSplit(split *roachpb.SplitTrigger) bool {
r.mu.RLock()
defer r.mu.RUnlock()
return r.isNewerThanSplitRLocked(split)
}
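// isNewerThanSplitRLocked is like isNewerThanSplit, but assumes r.mu is
// already held for reading.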
func (r *Replica) isNewerThanSplitRLocked(split *roachpb.SplitTrigger) bool {
rightDesc, _ := split.RightDesc.GetReplicaDescriptor(r.StoreID())
// If we have written a tombstone for this range then we know that the RHS
// must have already been removed at the split replica ID.
return r.mu.tombstoneMinReplicaID != 0 ||
// If the first raft message we received for the RHS range was for a replica
// ID which is above the replica ID of the split then we would not have
// written a tombstone but we will have a replica ID that will exceed the
// split replica ID.
r.mu.replicaID > rightDesc.ReplicaID
}
// endCmds holds necessary information to end a batch after Raft
// command processing.
type endCmds struct {
repl *Replica
g *concurrency.Guard
}
// move moves the endCmds into the return value, clearing the receiver and
// making a subsequent call to done on it a no-op.
func (ec *endCmds) move() endCmds {
res := *ec
*ec = endCmds{}
return res
}
// done releases the latches acquired by the command and updates
// the timestamp cache using the final timestamp of each command.
//
// No-op if the receiver has been zeroed out by a call to move.
// It is idempotent and safe to call more than once.
func (ec *endCmds) done(
ctx context.Context, ba *roachpb.BatchRequest, br *roachpb.BatchResponse, pErr *roachpb.Error,
) {
if ec.repl == nil {
// The endCmds were cleared.
return
}
defer ec.move() // clear
// Update the timestamp cache if the request is not being re-evaluated. Each
// request is considered in turn; only those marked as affecting the cache are
// processed.
ec.repl.updateTimestampCache(ctx, ba, br, pErr)
// Release the latches acquired by the request and exit lock wait-queues.
// Must be done AFTER the timestamp cache is updated. ec.g is only set when
// the Raft proposal has assumed responsibility for the request.
if ec.g != nil {
ec.repl.concMgr.FinishReq(ec.g)
}
}
// maybeWatchForMerge checks whether a merge of this replica into its left
// neighbor is in its critical phase and, if so, arranges to block all requests
// until the merge completes.
func (r *Replica) maybeWatchForMerge(ctx context.Context) error {
desc := r.Desc()
descKey := keys.RangeDescriptorKey(desc.StartKey)
_, intent, err := storage.MVCCGet(ctx, r.Engine(), descKey, r.Clock().Now(),
storage.MVCCGetOptions{Inconsistent: true})
if err != nil {
return err
} else if intent == nil {
return nil
}
val, _, err := storage.MVCCGetAsTxn(
ctx, r.Engine(), descKey, intent.Txn.WriteTimestamp, intent.Txn)
if err != nil {
return err
} else if val != nil {
return nil
}
// At this point, we know we have a deletion intent on our range descriptor.
// That means a merge is in progress. Block all commands until we can
// retrieve an updated range descriptor from meta2, which will indicate
// whether the merge succeeded or not.
mergeCompleteCh := make(chan struct{})
r.mu.Lock()
if r.mu.mergeComplete != nil {
// Another request already noticed the merge, installed a mergeComplete
// channel, and launched a goroutine to watch for the merge's completion.
// Nothing more to do.
r.mu.Unlock()
return nil
}
r.mu.mergeComplete = mergeCompleteCh
// The RHS of a merge is not permitted to quiesce while a mergeComplete
// channel is installed. (If the RHS is quiescent when the merge commits, any
// orphaned followers would fail to queue themselves for GC.) Unquiesce the
// range in case it managed to quiesce between when the Subsume request
// arrived and now, which is rare but entirely legal.
r.unquiesceLocked()
r.mu.Unlock()
taskCtx := r.AnnotateCtx(context.Background())
err = r.store.stopper.RunAsyncTask(taskCtx, "wait-for-merge", func(ctx context.Context) {
var pushTxnRes *roachpb.PushTxnResponse
for retry := retry.Start(base.DefaultRetryOptions()); retry.Next(); {
// Wait for the merge transaction to complete by attempting to push it. We
// don't want to accidentally abort the merge transaction, so we use the
// minimum transaction priority. Note that a push type of
// roachpb.PUSH_TOUCH, though it might appear more semantically correct,
// returns immediately and causes us to spin hot, whereas
// roachpb.PUSH_ABORT efficiently blocks until the transaction completes.
b := &kv.Batch{}
b.Header.Timestamp = r.Clock().Now()
b.AddRawRequest(&roachpb.PushTxnRequest{
RequestHeader: roachpb.RequestHeader{Key: intent.Txn.Key},
PusherTxn: roachpb.Transaction{
TxnMeta: enginepb.TxnMeta{Priority: enginepb.MinTxnPriority},
},
PusheeTxn: intent.Txn,
PushType: roachpb.PUSH_ABORT,
})
if err := r.DB().Run(ctx, b); err != nil {
select {
case <-r.store.stopper.ShouldQuiesce():
// The server is shutting down. The error while pushing the
// transaction was probably caused by the shutdown, so ignore it.
return
default:
log.Warningf(ctx, "error while watching for merge to complete: PushTxn: %+v", err)
// We can't safely unblock traffic until we can prove that the merge
// transaction is committed or aborted. Nothing to do but try again.
continue
}
}
pushTxnRes = b.RawResponse().Responses[0].GetInner().(*roachpb.PushTxnResponse)
break
}
var mergeCommitted bool
switch pushTxnRes.PusheeTxn.Status {
case roachpb.PENDING, roachpb.STAGING:
log.Fatalf(ctx, "PushTxn returned while merge transaction %s was still %s",
intent.Txn.ID.Short(), pushTxnRes.PusheeTxn.Status)
case roachpb.COMMITTED:
// If PushTxn claims that the transaction committed, then the transaction
// definitely committed.
mergeCommitted = true
case roachpb.ABORTED:
// If PushTxn claims that the transaction aborted, it's not a guarantee
// that the transaction actually aborted. It could also mean that the
// transaction completed, resolved its intents, and GC'd its transaction
// record before our PushTxn arrived. To figure out what happened, we
// need to look in meta2.
var getRes *roachpb.GetResponse
for retry := retry.Start(base.DefaultRetryOptions()); retry.Next(); {
metaKey := keys.RangeMetaKey(desc.EndKey)
res, pErr := kv.SendWrappedWith(ctx, r.DB().NonTransactionalSender(), roachpb.Header{
// Use READ_UNCOMMITTED to avoid trying to resolve intents, since
// resolving those intents might involve sending requests to this
// range, and that could deadlock. See the comment on
// TestStoreRangeMergeConcurrentSplit for details.
ReadConsistency: roachpb.READ_UNCOMMITTED,
}, &roachpb.GetRequest{
RequestHeader: roachpb.RequestHeader{Key: metaKey.AsRawKey()},
})
if pErr != nil {
select {
case <-r.store.stopper.ShouldQuiesce():
// The server is shutting down. The error while fetching the range
// descriptor was probably caused by the shutdown, so ignore it.
return
default:
log.Warningf(ctx, "error while watching for merge to complete: Get %s: %s", metaKey, pErr)
// We can't safely unblock traffic until we can prove that the merge
// transaction is committed or aborted. Nothing to do but try again.
continue
}
}
getRes = res.(*roachpb.GetResponse)
break
}
if getRes.Value == nil {
// A range descriptor with our end key is no longer present in meta2, so
// the merge must have committed.
mergeCommitted = true
} else {
// A range descriptor with our end key is still present in meta2. The
// merge committed iff that range descriptor has a different range ID.
var meta2Desc roachpb.RangeDescriptor
if err := getRes.Value.GetProto(&meta2Desc); err != nil {
log.Fatalf(ctx, "error while watching for merge to complete: "+
"unmarshaling meta2 range descriptor: %s", err)
}
if meta2Desc.RangeID != r.RangeID {
mergeCommitted = true
}
}
}
r.raftMu.Lock()
r.mu.Lock()
if mergeCommitted && r.mu.destroyStatus.IsAlive() {
// The merge committed but the left-hand replica on this store hasn't
// subsumed this replica yet. Mark this replica as destroyed so it
// doesn't serve requests when we close the mergeCompleteCh below.
r.mu.destroyStatus.Set(roachpb.NewRangeNotFoundError(r.RangeID, r.store.StoreID()), destroyReasonMergePending)
}
// Unblock pending requests. If the merge committed, the requests will
// notice that the replica has been destroyed and return an appropriate
// error. If the merge aborted, the requests will be handled normally.
r.mu.mergeComplete = nil
close(mergeCompleteCh)
r.mu.Unlock()
r.raftMu.Unlock()
})
if errors.Is(err, stop.ErrUnavailable) {
// We weren't able to launch a goroutine to watch for the merge's completion
// because the server is shutting down. Normally failing to launch the
// watcher goroutine would wedge pending requests on the replica's
// mergeComplete channel forever, but since we're shutting down those
// requests will get dropped and retried on another node. Suppress the error.
err = nil
}
return err
}
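// maybeTransferRaftLeadership acquires r.mu and delegates to
// maybeTransferRaftLeadershipLocked; see the comment on that method.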
func (r *Replica) maybeTransferRaftLeadership(ctx context.Context) {
r.mu.Lock()
r.maybeTransferRaftLeadershipLocked(ctx)
r.mu.Unlock()
}
// maybeTransferRaftLeadershipLocked attempts to transfer the leadership away
// from this node to the leaseholder, if this node is the current raft leader
// but not the leaseholder. We don't attempt to transfer leadership if the
// leaseholder is behind on applying the log.
//
// We like it when leases and raft leadership are collocated because that
// facilitates quick command application (requests generally need to make it to
// both the lease holder and the raft leader before being applied by other
// replicas).
func (r *Replica) maybeTransferRaftLeadershipLocked(ctx context.Context) {
if r.store.TestingKnobs().DisableLeaderFollowsLeaseholder {
return
}
lease := *r.mu.state.Lease
if lease.OwnedBy(r.StoreID()) || !r.isLeaseValidRLocked(ctx, lease, r.Clock().Now()) {
return
}
raftStatus := r.raftStatusRLocked()
if raftStatus == nil || raftStatus.RaftState != raft.StateLeader {
return
}
lhReplicaID := uint64(lease.Replica.ReplicaID)
lhProgress, ok := raftStatus.Progress[lhReplicaID]
if (ok && lhProgress.Match >= raftStatus.Commit) || r.mu.draining {
log.VEventf(ctx, 1, "transferring raft leadership to replica ID %v", lhReplicaID)
r.store.metrics.RangeRaftLeaderTransfers.Inc(1)
r.mu.internalRaftGroup.TransferLeader(lhReplicaID)
}
}
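// mergeInProgressRLocked returns true while a merge into the left-hand
// neighbor is in progress, i.e. while the mergeComplete channel is installed.
// It assumes r.mu is held.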
func (r *Replica) mergeInProgressRLocked() bool {
return r.mu.mergeComplete != nil
}
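// getReplicaDescriptorByIDRLocked returns the descriptor for the given
// replica ID, consulting the range descriptor first and falling back to the
// provided descriptor if its ID matches. It assumes r.mu is held.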
func (r *Replica) getReplicaDescriptorByIDRLocked(
replicaID roachpb.ReplicaID, fallback roachpb.ReplicaDescriptor,
) (roachpb.ReplicaDescriptor, error) {
if repDesc, ok := r.mu.state.Desc.GetReplicaDescriptorByID(replicaID); ok {
return repDesc, nil
}
if fallback.ReplicaID == replicaID {
return fallback, nil
}
return roachpb.ReplicaDescriptor{},
errors.Errorf("replica %d not present in %v, %v",
replicaID, fallback, r.mu.state.Desc.Replicas())
}
// checkIfTxnAborted checks the txn AbortSpan for the given
// transaction. If the transaction has been aborted, a transaction abort
// error is returned.
func
|
(
ctx context.Context, rec batcheval.EvalContext, reader storage.Reader, txn roachpb.Transaction,
) *roachpb.Error {
var entry roachpb.AbortSpanEntry
aborted, err := rec.AbortSpan().Get(ctx, reader, txn.ID, &entry)
if err != nil {
return roachpb.NewError(roachpb.NewReplicaCorruptionError(
errors.Wrap(err, "could not read from AbortSpan")))
}
if aborted {
// We hit the cache, so let the transaction restart.
log.VEventf(ctx, 1, "found AbortSpan entry for %s with priority %d",
txn.ID.Short(), entry.Priority)
newTxn := txn.Clone()
if entry.Priority > newTxn.Priority {
newTxn.Priority = entry.Priority
}
newTxn.Status = roachpb.ABORTED
return roachpb.NewErrorWithTxn(
roachpb.NewTransactionAbortedError(roachpb.ABORT_REASON_ABORT_SPAN), newTxn)
}
return nil
}
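// startKey returns the range's start key, implementing the rangeKeyItem
// interface.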
func (r *Replica) startKey() roachpb.RKey {
return r.Desc().StartKey
}
// Less implements the btree.Item interface.
func (r *Replica) Less(i btree.Item) bool {
return r.startKey().Less(i.(rangeKeyItem).startKey())
}
// GetLeaseHistory returns the lease history stored on this replica.
func (r *Replica) GetLeaseHistory() []roachpb.Lease {
if r.leaseHistory == nil {
return nil
}
return r.leaseHistory.get()
}
// EnableLeaseHistory turns on the lease history for testing purposes. Returns
// a function to return it to its original state that can be deferred.
func EnableLeaseHistory(maxEntries int) func() {
originalValue := leaseHistoryMaxEntries
leaseHistoryMaxEntries = maxEntries
return func() {
leaseHistoryMaxEntries = originalValue
}
}
// GetExternalStorage returns an ExternalStorage object based on the
// information in `dest`, which was parsed from a URI.
func (r *Replica) GetExternalStorage(
ctx context.Context, dest roachpb.ExternalStorage,
) (cloud.ExternalStorage, error) {
return r.store.cfg.ExternalStorage(ctx, dest)
}
// GetExternalStorageFromURI returns an ExternalStorage object, based on the given URI.
func (r *Replica) GetExternalStorageFromURI(
ctx context.Context, uri string, user string,
) (cloud.ExternalStorage, error) {
return r.store.cfg.ExternalStorageFromURI(ctx, uri, user)
}
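// markSystemConfigGossipSuccess clears the failureToGossipSystemConfig flag.
// See the comment on that field for details.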
func (r *Replica) markSystemConfigGossipSuccess() {
r.mu.Lock()
defer r.mu.Unlock()
r.mu.failureToGossipSystemConfig = false
}
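// markSystemConfigGossipFailed records that gossiping the system config
// failed; see the comment on failureToGossipSystemConfig.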
func (r *Replica) markSystemConfigGossipFailed() {
r.mu.Lock()
defer r.mu.Unlock()
r.mu.failureToGossipSystemConfig = true
}
func init() {
tracing.RegisterTagRemapping("r", "range")
}
|
checkIfTxnAborted
|
scanners.rs
|
// Copyright 2015 Google Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//! Scanners for fragments of CommonMark syntax
use std::char;
use std::convert::TryInto;
use crate::entities;
use crate::parse::HtmlScanGuard;
pub(crate) use crate::puncttable::{is_ascii_punctuation, is_punctuation};
use crate::strings::CowStr;
use crate::{Alignment, LinkType};
use memchr::memchr;
// sorted for binary search
const HTML_TAGS: [&str; 62] = [
"address",
"article",
"aside",
"base",
"basefont",
"blockquote",
"body",
"caption",
"center",
"col",
"colgroup",
"dd",
"details",
"dialog",
"dir",
"div",
"dl",
"dt",
"fieldset",
"figcaption",
"figure",
"footer",
"form",
"frame",
"frameset",
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"head",
"header",
"hr",
"html",
"iframe",
"legend",
"li",
"link",
"main",
"menu",
"menuitem",
"nav",
"noframes",
"ol",
"optgroup",
"option",
"p",
"param",
"section",
"source",
"summary",
"table",
"tbody",
"td",
"tfoot",
"th",
"thead",
"title",
"tr",
"track",
"ul",
];
/// Analysis of the beginning of a line, including indentation and container
/// markers.
#[derive(Clone)]
pub(crate) struct LineStart<'a> {
bytes: &'a [u8],
tab_start: usize,
ix: usize,
spaces_remaining: usize,
// no thematic breaks can occur before this offset.
// this prevents scanning over and over up to a certain point
min_hrule_offset: usize,
}
impl<'a> LineStart<'a> {
pub(crate) fn new(bytes: &[u8]) -> LineStart
|
/// Try to scan a number of spaces.
///
/// Returns true if all spaces were consumed.
///
/// Note: consumes some spaces even if not successful.
pub(crate) fn scan_space(&mut self, n_space: usize) -> bool {
self.scan_space_inner(n_space) == 0
}
/// Scan a number of spaces up to a maximum.
///
/// Returns number of spaces scanned.
pub(crate) fn scan_space_upto(&mut self, n_space: usize) -> usize {
n_space - self.scan_space_inner(n_space)
}
/// Returns unused remainder of spaces.
fn scan_space_inner(&mut self, mut n_space: usize) -> usize {
let n_from_remaining = self.spaces_remaining.min(n_space);
self.spaces_remaining -= n_from_remaining;
n_space -= n_from_remaining;
while n_space > 0 && self.ix < self.bytes.len() {
match self.bytes[self.ix] {
b' ' => {
self.ix += 1;
n_space -= 1;
}
b'\t' => {
let spaces = 4 - (self.ix - self.tab_start) % 4;
self.ix += 1;
self.tab_start = self.ix;
let n = spaces.min(n_space);
n_space -= n;
self.spaces_remaining = spaces - n;
}
_ => break,
}
}
n_space
}
/// Scan all available ASCII whitespace (not including eol).
pub(crate) fn scan_all_space(&mut self) {
self.spaces_remaining = 0;
self.ix += self.bytes[self.ix..]
.iter()
.take_while(|&&b| b == b' ' || b == b'\t')
.count();
}
/// Determine whether we're at end of line (includes end of file).
pub(crate) fn is_at_eol(&self) -> bool {
self.bytes
.get(self.ix)
.map(|&c| c == b'\r' || c == b'\n')
.unwrap_or(true)
}
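// Consumes the byte `c` if it is next in the input; returns whether it was
// consumed.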
fn scan_ch(&mut self, c: u8) -> bool {
if self.ix < self.bytes.len() && self.bytes[self.ix] == c {
self.ix += 1;
true
} else {
false
}
}
pub(crate) fn scan_blockquote_marker(&mut self) -> bool {
let save = self.clone();
let _ = self.scan_space(3);
if self.scan_ch(b'>') {
let _ = self.scan_space(1);
true
} else {
*self = save;
false
}
}
/// Scan a list marker.
///
/// Return value is the character, the start index, and the indent in spaces.
/// For ordered list markers, the character will be one of b'.' or b')'. For
/// bullet list markers, it will be one of b'-', b'+', or b'*'.
pub(crate) fn scan_list_marker(&mut self) -> Option<(u8, u64, usize)> {
let save = self.clone();
let indent = self.scan_space_upto(3);
if self.ix < self.bytes.len() {
let c = self.bytes[self.ix];
if c == b'-' || c == b'+' || c == b'*' {
if self.ix >= self.min_hrule_offset {
// there could be an hrule here
if let Err(min_offset) = scan_hrule(&self.bytes[self.ix..]) {
self.min_hrule_offset = min_offset;
} else {
*self = save;
return None;
}
}
self.ix += 1;
if self.scan_space(1) || self.is_at_eol() {
return self.finish_list_marker(c, 0, indent + 2);
}
} else if c >= b'0' && c <= b'9' {
let start_ix = self.ix;
let mut ix = self.ix + 1;
let mut val = u64::from(c - b'0');
while ix < self.bytes.len() && ix - start_ix < 10 {
let c = self.bytes[ix];
ix += 1;
if c >= b'0' && c <= b'9' {
val = val * 10 + u64::from(c - b'0');
} else if c == b')' || c == b'.' {
self.ix = ix;
if self.scan_space(1) || self.is_at_eol() {
return self.finish_list_marker(c, val, indent + self.ix - start_ix);
} else {
break;
}
} else {
break;
}
}
}
}
*self = save;
None
}
fn finish_list_marker(
&mut self,
c: u8,
start: u64,
mut indent: usize,
) -> Option<(u8, u64, usize)> {
let save = self.clone();
// skip the rest of the line if it's blank
if scan_blank_line(&self.bytes[self.ix..]).is_some() {
return Some((c, start, indent));
}
let post_indent = self.scan_space_upto(4);
if post_indent < 4 {
indent += post_indent;
} else {
*self = save;
}
Some((c, start, indent))
}
/// Returns Some(is_checked) when a task list marker was found. Resets itself
/// to original state otherwise.
pub(crate) fn scan_task_list_marker(&mut self) -> Option<bool> {
let save = self.clone();
self.scan_space_upto(3);
if !self.scan_ch(b'[') {
*self = save;
return None;
}
let is_checked = match self.bytes.get(self.ix) {
Some(&c) if is_ascii_whitespace_no_nl(c) => {
self.ix += 1;
false
}
Some(b'x') | Some(b'X') => {
self.ix += 1;
true
}
_ => {
*self = save;
return None;
}
};
if !self.scan_ch(b']') {
*self = save;
return None;
}
if !self
.bytes
.get(self.ix)
.map(|&b| is_ascii_whitespace_no_nl(b))
.unwrap_or(false)
{
*self = save;
return None;
}
Some(is_checked)
}
pub(crate) fn bytes_scanned(&self) -> usize {
self.ix
}
pub(crate) fn remaining_space(&self) -> usize {
self.spaces_remaining
}
}
pub(crate) fn is_ascii_whitespace(c: u8) -> bool {
(c >= 0x09 && c <= 0x0d) || c == b' '
}
pub(crate) fn is_ascii_whitespace_no_nl(c: u8) -> bool {
c == b'\t' || c == 0x0b || c == 0x0c || c == b' '
}
fn is_ascii_alpha(c: u8) -> bool {
match c {
b'a'..=b'z' | b'A'..=b'Z' => true,
_ => false,
}
}
fn is_ascii_alphanumeric(c: u8) -> bool {
match c {
b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' => true,
_ => false,
}
}
fn is_ascii_letterdigitdash(c: u8) -> bool {
c == b'-' || is_ascii_alphanumeric(c)
}
fn is_digit(c: u8) -> bool {
b'0' <= c && c <= b'9'
}
fn is_valid_unquoted_attr_value_char(c: u8) -> bool {
match c {
b'\'' | b'"' | b' ' | b'=' | b'>' | b'<' | b'`' | b'\n' | b'\r' => false,
_ => true,
}
}
// scan a single character
pub(crate) fn scan_ch(data: &[u8], c: u8) -> usize {
if !data.is_empty() && data[0] == c {
1
} else {
0
}
}
pub(crate) fn scan_while<F>(data: &[u8], mut f: F) -> usize
where
F: FnMut(u8) -> bool,
{
data.iter().take_while(|&&c| f(c)).count()
}
pub(crate) fn scan_rev_while<F>(data: &[u8], mut f: F) -> usize
where
F: FnMut(u8) -> bool,
{
data.iter().rev().take_while(|&&c| f(c)).count()
}
pub(crate) fn scan_ch_repeat(data: &[u8], c: u8) -> usize {
scan_while(data, |x| x == c)
}
// Note: this scans ASCII whitespace only, for Unicode whitespace use
// a different function.
pub(crate) fn scan_whitespace_no_nl(data: &[u8]) -> usize {
scan_while(data, is_ascii_whitespace_no_nl)
}
fn scan_attr_value_chars(data: &[u8]) -> usize {
scan_while(data, is_valid_unquoted_attr_value_char)
}
pub(crate) fn scan_eol(bytes: &[u8]) -> Option<usize> {
if bytes.is_empty() {
return Some(0);
}
match bytes[0] {
b'\n' => Some(1),
b'\r' => Some(if bytes.get(1) == Some(&b'\n') { 2 } else { 1 }),
_ => None,
}
}
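// Illustrative examples of the behaviour above (not part of the original
// source, values derived from the logic in scan_eol): scan_eol(b"\r\nrest")
// returns Some(2), scan_eol(b"abc") returns None, and scan_eol(b"") returns
// Some(0) since an empty slice counts as end of line.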
pub(crate) fn scan_blank_line(bytes: &[u8]) -> Option<usize> {
let i = scan_whitespace_no_nl(bytes);
scan_eol(&bytes[i..]).map(|n| i + n)
}
pub(crate) fn scan_nextline(bytes: &[u8]) -> usize {
memchr(b'\n', bytes).map_or(bytes.len(), |x| x + 1)
}
// return: end byte for closing code fence, or None
// if the line is not a closing code fence
pub(crate) fn scan_closing_code_fence(
bytes: &[u8],
fence_char: u8,
n_fence_char: usize,
) -> Option<usize> {
if bytes.is_empty() {
return Some(0);
}
let mut i = 0;
let num_fence_chars_found = scan_ch_repeat(&bytes[i..], fence_char);
if num_fence_chars_found < n_fence_char {
return None;
}
i += num_fence_chars_found;
let num_trailing_spaces = scan_ch_repeat(&bytes[i..], b' ');
i += num_trailing_spaces;
scan_eol(&bytes[i..]).map(|_| i)
}
// returned pair is (number of bytes, number of spaces)
fn calc_indent(text: &[u8], max: usize) -> (usize, usize) {
let mut spaces = 0;
let mut offset = 0;
for (i, &b) in text.iter().enumerate() {
match b {
b' ' => {
spaces += 1;
if spaces == max {
break;
}
}
b'\t' => {
let new_spaces = spaces + 4 - (spaces & 3);
if new_spaces > max {
break;
}
spaces = new_spaces;
}
_ => break,
}
offset = i;
}
(offset, spaces)
}
/// Scan hrule opening sequence.
///
/// Returns Ok(x) when it finds an hrule, where x is the
/// size of line containing the hrule, including the trailing newline.
///
/// Returns Err(x) when it does not find an hrule and x is
/// the offset in data before no hrule can appear.
pub(crate) fn scan_hrule(bytes: &[u8]) -> Result<usize, usize> {
if bytes.len() < 3 {
return Err(0);
}
let c = bytes[0];
if !(c == b'*' || c == b'-' || c == b'_') {
return Err(0);
}
let mut n = 0;
let mut i = 0;
while i < bytes.len() {
match bytes[i] {
b'\n' | b'\r' => {
i += scan_eol(&bytes[i..]).unwrap_or(0);
break;
}
c2 if c2 == c => {
n += 1;
}
b' ' | b'\t' => (),
_ => return Err(i),
}
i += 1;
}
if n >= 3 {
Ok(i)
} else {
Err(i)
}
}
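// Worked example for the contract documented above (illustrative only):
// scan_hrule(b"---\n") returns Ok(4) (three dashes plus the trailing newline),
// while scan_hrule(b"--\n") returns Err(_) because fewer than three marker
// characters were found before the end of the line.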
/// Scan an ATX heading opening sequence.
///
/// Returns number of bytes in prefix and level.
pub(crate) fn scan_atx_heading(data: &[u8]) -> Option<usize> {
let level = scan_ch_repeat(data, b'#');
if level >= 1 && level <= 6 && data.get(level).cloned().map_or(true, is_ascii_whitespace) {
Some(level)
} else {
None
}
}
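// Illustrative sketch of the scanner above: scan_atx_heading(b"### Title")
// returns Some(3), while scan_atx_heading(b"####### x") returns None because
// ATX headings only go up to level 6.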
/// Scan a setext heading underline.
///
/// Returns number of bytes in line (including trailing newline) and level.
pub(crate) fn scan_setext_heading(data: &[u8]) -> Option<(usize, u32)> {
let c = *data.get(0)?;
if !(c == b'-' || c == b'=') {
return None;
}
let mut i = 1 + scan_ch_repeat(&data[1..], c);
i += scan_blank_line(&data[i..])?;
let level = if c == b'=' { 1 } else { 2 };
Some((i, level))
}
// returns number of bytes in line (including trailing
// newline) and column alignments
pub(crate) fn scan_table_head(data: &[u8]) -> (usize, Vec<Alignment>) {
let (mut i, spaces) = calc_indent(data, 4);
if spaces > 3 || i == data.len() {
return (0, vec![]);
}
let mut cols = vec![];
let mut active_col = Alignment::None;
let mut start_col = true;
if data[i] == b'|' {
i += 1;
}
for c in &data[i..] {
if let Some(n) = scan_eol(&data[i..]) {
i += n;
break;
}
match *c {
b' ' => (),
b':' => {
active_col = match (start_col, active_col) {
(true, Alignment::None) => Alignment::Left,
(false, Alignment::Left) => Alignment::Center,
(false, Alignment::None) => Alignment::Right,
_ => active_col,
};
start_col = false;
}
b'-' => {
start_col = false;
}
b'|' => {
start_col = true;
cols.push(active_col);
active_col = Alignment::None;
}
_ => {
cols = vec![];
start_col = true;
break;
}
}
i += 1;
}
if !start_col {
cols.push(active_col);
}
(i, cols)
}
/// Scan code fence.
///
/// Returns number of bytes scanned and the char that is repeated to make the code fence.
pub(crate) fn scan_code_fence(data: &[u8]) -> Option<(usize, u8)> {
let c = *data.get(0)?;
if !(c == b'`' || c == b'~') {
return None;
}
let i = 1 + scan_ch_repeat(&data[1..], c);
if i >= 3 {
if c == b'`' {
let suffix = &data[i..];
let next_line = i + scan_nextline(suffix);
// FIXME: make sure this is correct
if suffix[..(next_line - i)].iter().any(|&b| b == b'`') {
return None;
}
}
Some((i, c))
} else {
None
}
}
pub(crate) fn scan_blockquote_start(data: &[u8]) -> Option<usize> {
if data.starts_with(b"> ") {
Some(2)
} else {
None
}
}
/// This already assumes the list item has been scanned.
pub(crate) fn scan_empty_list(data: &[u8]) -> bool {
let mut ix = 0;
for _ in 0..2 {
if let Some(bytes) = scan_blank_line(&data[ix..]) {
ix += bytes;
} else {
return false;
}
}
true
}
// return number of bytes scanned, delimiter, start index, and indent
pub(crate) fn scan_listitem(bytes: &[u8]) -> Option<(usize, u8, usize, usize)> {
let mut c = *bytes.get(0)?;
let (w, start) = match c {
b'-' | b'+' | b'*' => (1, 0),
b'0'..=b'9' => {
let (length, start) = parse_decimal(bytes);
c = *bytes.get(length)?;
if !(c == b'.' || c == b')') {
return None;
}
(length + 1, start)
}
_ => {
return None;
}
};
// TODO: replace calc_indent with scan_leading_whitespace, for tab correctness
let (mut postn, mut postindent) = calc_indent(&bytes[w..], 5);
if postindent == 0 {
scan_eol(&bytes[w..])?;
postindent += 1;
} else if postindent > 4 {
postn = 1;
postindent = 1;
}
if scan_blank_line(&bytes[w..]).is_some() {
postn = 0;
postindent = 1;
}
Some((w + postn, c, start, w + postindent))
}
// returns (number of bytes, parsed decimal)
fn parse_decimal(bytes: &[u8]) -> (usize, usize) {
match bytes
.iter()
.take_while(|&&b| is_digit(b))
.try_fold((0, 0usize), |(count, acc), c| {
let digit = usize::from(c - b'0');
match acc
.checked_mul(10)
.and_then(|ten_acc| ten_acc.checked_add(digit))
{
Some(number) => Ok((count + 1, number)),
// stop early on overflow
None => Err((count, acc)),
}
}) {
Ok(p) | Err(p) => p,
}
}
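// Example of the overflow-safe parse above (illustrative): parse_decimal(b"123abc")
// yields (3, 123); a digit run that would overflow usize stops early and
// returns the count and value accumulated so far.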
// returns (number of bytes, parsed hex)
fn parse_hex(bytes: &[u8]) -> (usize, usize) {
match bytes.iter().try_fold((0, 0usize), |(count, acc), c| {
let mut c = *c;
let digit = if c >= b'0' && c <= b'9' {
usize::from(c - b'0')
} else {
// make lower case
c |= 0x20;
if c >= b'a' && c <= b'f' {
usize::from(c - b'a' + 10)
} else {
return Err((count, acc));
}
};
match acc
.checked_mul(16)
.and_then(|sixteen_acc| sixteen_acc.checked_add(digit))
{
Some(number) => Ok((count + 1, number)),
// stop early on overflow
None => Err((count, acc)),
}
}) {
Ok(p) | Err(p) => p,
}
}
fn char_from_codepoint(input: usize) -> Option<char> {
let mut codepoint = input.try_into().ok()?;
if codepoint == 0 {
codepoint = 0xFFFD;
}
char::from_u32(codepoint)
}
// doesn't bother to check data[0] == '&'
pub(crate) fn scan_entity(bytes: &[u8]) -> (usize, Option<CowStr<'static>>) {
let mut end = 1;
if scan_ch(&bytes[end..], b'#') == 1 {
end += 1;
let (bytecount, codepoint) = if end < bytes.len() && bytes[end] | 0x20 == b'x' {
end += 1;
parse_hex(&bytes[end..])
} else {
parse_decimal(&bytes[end..])
};
end += bytecount;
return if bytecount == 0 || scan_ch(&bytes[end..], b';') == 0 {
(0, None)
} else if let Some(c) = char_from_codepoint(codepoint) {
(end + 1, Some(c.into()))
} else {
(0, None)
};
}
end += scan_while(&bytes[end..], is_ascii_alphanumeric);
if scan_ch(&bytes[end..], b';') == 1 {
if let Some(value) = entities::get_entity(&bytes[1..end]) {
return (end + 1, Some(value.into()));
}
}
(0, None)
}
// FIXME: we can most likely re-use other scanners
// returns (bytelength, title_str)
pub(crate) fn scan_refdef_title(text: &str) -> Option<(usize, &str)> {
let mut chars = text.chars().peekable();
let closing_delim = match chars.next()? {
'\'' => '\'',
'"' => '"',
'(' => ')',
_ => return None,
};
let mut bytecount = 1;
while let Some(c) = chars.next() {
match c {
'\n' => {
bytecount += 1;
let mut next = *chars.peek()?;
while is_ascii_whitespace_no_nl(next as u8) {
bytecount += chars.next()?.len_utf8();
next = *chars.peek()?;
}
if *chars.peek()? == '\n' {
// blank line - not allowed
return None;
}
}
'\\' => {
let next_char = chars.next()?;
bytecount += 1 + next_char.len_utf8();
}
c if c == closing_delim => {
return Some((bytecount + 1, &text[1..bytecount]));
}
c => {
bytecount += c.len_utf8();
}
}
}
None
}
// note: dest returned is raw, still needs to be unescaped
// TODO: check that nested parens are really not allowed for refdefs
// TODO(performance): this func should probably do its own unescaping
pub(crate) fn scan_link_dest(
data: &str,
start_ix: usize,
max_next: usize,
) -> Option<(usize, &str)> {
let bytes = &data.as_bytes()[start_ix..];
let mut i = scan_ch(bytes, b'<');
if i != 0 {
// pointy links
while i < bytes.len() {
match bytes[i] {
b'\n' | b'\r' | b'<' => return None,
b'>' => return Some((i + 1, &data[(start_ix + 1)..(start_ix + i)])),
b'\\' if i + 1 < bytes.len() && is_ascii_punctuation(bytes[i + 1]) => {
i += 1;
}
_ => {}
}
i += 1;
}
None
} else {
// non-pointy links
let mut nest = 0;
while i < bytes.len() {
match bytes[i] {
0x0..=0x20 => {
break;
}
b'(' => {
if nest > max_next {
return None;
}
nest += 1;
}
b')' => {
if nest == 0 {
break;
}
nest -= 1;
}
b'\\' if i + 1 < bytes.len() && is_ascii_punctuation(bytes[i + 1]) => {
i += 1;
}
_ => {}
}
i += 1;
}
Some((i, &data[start_ix..(start_ix + i)]))
}
}
/// Returns bytes scanned
fn scan_attribute_name(data: &[u8]) -> Option<usize> {
let (&c, tail) = data.split_first()?;
if is_ascii_alpha(c) || c == b'_' || c == b':' {
Some(
1 + scan_while(tail, |c| {
is_ascii_alphanumeric(c) || c == b'_' || c == b'.' || c == b':' || c == b'-'
}),
)
} else {
None
}
}
/// Returns the index immediately following the attribute on success.
/// The argument `buffer_ix` refers to the index into `data` from which we
/// should copy into `buffer` when we find bytes to skip.
fn scan_attribute(
data: &[u8],
mut ix: usize,
newline_handler: Option<&dyn Fn(&[u8]) -> usize>,
buffer: &mut Vec<u8>,
buffer_ix: &mut usize,
) -> Option<usize> {
ix += scan_attribute_name(&data[ix..])?;
let n_whitespace =
scan_whitespace_with_newline_handler(data, ix, newline_handler, buffer, buffer_ix)? - ix;
ix += n_whitespace;
if scan_ch(&data[ix..], b'=') == 1 {
ix += 1;
ix = scan_whitespace_with_newline_handler(data, ix, newline_handler, buffer, buffer_ix)?;
ix = scan_attribute_value(&data, ix, newline_handler, buffer, buffer_ix)?;
} else if n_whitespace > 0 {
// Leave whitespace for next attribute.
ix -= 1;
}
Some(ix)
}
/// Scans whitespace and possibly newlines according to the
/// behavior defined by the newline handler. When bytes are skipped,
/// all preceding non-skipped bytes are pushed to the buffer.
fn scan_whitespace_with_newline_handler(
data: &[u8],
mut i: usize,
newline_handler: Option<&dyn Fn(&[u8]) -> usize>,
buffer: &mut Vec<u8>,
buffer_ix: &mut usize,
) -> Option<usize> {
while i < data.len() {
if !is_ascii_whitespace(data[i]) {
return Some(i);
}
if let Some(eol_bytes) = scan_eol(&data[i..]) {
let handler = newline_handler?;
i += eol_bytes;
let skipped_bytes = handler(&data[i..]);
if skipped_bytes > 0 {
buffer.extend(&data[*buffer_ix..i]);
*buffer_ix = i + skipped_bytes;
}
i += skipped_bytes;
} else {
i += 1;
}
}
Some(i)
}
/// Returns the index immediately following the attribute value on success.
fn scan_attribute_value(
data: &[u8],
mut i: usize,
newline_handler: Option<&dyn Fn(&[u8]) -> usize>,
buffer: &mut Vec<u8>,
buffer_ix: &mut usize,
) -> Option<usize> {
match *data.get(i)? {
b @ b'"' | b @ b'\'' => {
i += 1;
while i < data.len() {
if data[i] == b {
return Some(i + 1);
}
if let Some(eol_bytes) = scan_eol(&data[i..]) {
let handler = newline_handler?;
i += eol_bytes;
let skipped_bytes = handler(&data[i..]);
if skipped_bytes > 0 {
buffer.extend(&data[*buffer_ix..i]);
*buffer_ix = i + skipped_bytes;
}
i += skipped_bytes;
} else {
i += 1;
}
}
return None;
}
b' ' | b'=' | b'>' | b'<' | b'`' | b'\n' | b'\r' => {
return None;
}
_ => {
// unquoted attribute value
i += scan_attr_value_chars(&data[i..]);
}
}
Some(i)
}
// Remove backslash escapes and resolve entities
pub(crate) fn unescape(input: &str) -> CowStr<'_> {
let mut result = String::new();
let mut mark = 0;
let mut i = 0;
let bytes = input.as_bytes();
while i < bytes.len() {
match bytes[i] {
b'\\' if i + 1 < bytes.len() && is_ascii_punctuation(bytes[i + 1]) => {
result.push_str(&input[mark..i]);
mark = i + 1;
i += 2;
}
b'&' => match scan_entity(&bytes[i..]) {
(n, Some(value)) => {
result.push_str(&input[mark..i]);
result.push_str(&value);
i += n;
mark = i;
}
_ => i += 1,
},
b'\r' => {
result.push_str(&input[mark..i]);
i += 1;
mark = i;
}
_ => i += 1,
}
}
if mark == 0 {
input.into()
} else {
result.push_str(&input[mark..]);
result.into()
}
}
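// Illustrative behaviour of unescape (values checked against the logic above,
// not part of the original source): unescape("a\\*b") yields "a*b" (the
// backslash escape is removed) and unescape("x&amp;y") yields "x&y" (the
// entity is resolved).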
/// Assumes `data` is preceded by `<`.
pub(crate) fn scan_html_block_tag(data: &[u8]) -> (usize, &[u8]) {
let i = scan_ch(data, b'/');
let n = scan_while(&data[i..], is_ascii_alphanumeric);
// TODO: scan attributes and >
(i + n, &data[i..i + n])
}
pub(crate) fn is_html_tag(tag: &[u8]) -> bool {
HTML_TAGS
.binary_search_by(|probe| {
let probe_bytes_iter = probe.as_bytes().iter();
let tag_bytes_iter = tag.iter();
probe_bytes_iter
.zip(tag_bytes_iter)
.find_map(|(&a, &b)| {
// We can compare case insensitively because the probes are
// all lower case alpha strings.
match a.cmp(&(b | 0x20)) {
std::cmp::Ordering::Equal => None,
inequality => Some(inequality),
}
})
.unwrap_or_else(|| probe.len().cmp(&tag.len()))
})
.is_ok()
}
/// Assumes that `data` starts with `<`.
/// Returns the index into data directly after the html tag on success.
pub(crate) fn scan_html_type_7(data: &[u8]) -> Option<usize> {
// Block type html does not allow for newlines, so we
// do not pass a newline handler.
let (_span, i) = scan_html_block_inner(data, None)?;
scan_blank_line(&data[i..])?;
Some(i)
}
/// Assumes that `data` starts with `<`.
/// Returns the number of bytes scanned and the html in case of
/// success.
/// When some bytes were skipped, because the html was split over
/// multiple leafs (e.g. over multiple lines in a blockquote),
/// the html is returned as a vector of bytes.
/// If no bytes were skipped, the buffer will be empty.
pub(crate) fn scan_html_block_inner(
data: &[u8],
newline_handler: Option<&dyn Fn(&[u8]) -> usize>,
) -> Option<(Vec<u8>, usize)> {
let mut buffer = Vec::new();
let mut last_buf_index = 0;
let close_tag_bytes = scan_ch(&data[1..], b'/');
let l = scan_while(&data[(1 + close_tag_bytes)..], is_ascii_alpha);
if l == 0 {
return None;
}
let mut i = 1 + close_tag_bytes + l;
i += scan_while(&data[i..], is_ascii_letterdigitdash);
if close_tag_bytes == 0 {
loop {
let old_i = i;
loop {
i += scan_whitespace_no_nl(&data[i..]);
if let Some(eol_bytes) = scan_eol(&data[i..]) {
if eol_bytes == 0 {
return None;
}
let handler = newline_handler?;
i += eol_bytes;
let skipped_bytes = handler(&data[i..]);
if skipped_bytes > 0 {
buffer.extend(&data[last_buf_index..i]);
i += skipped_bytes;
last_buf_index = i;
}
} else {
break;
}
}
if let Some(b'/') | Some(b'>') = data.get(i) {
break;
}
if old_i == i {
// No whitespace, which is mandatory.
return None;
}
i = scan_attribute(&data, i, newline_handler, &mut buffer, &mut last_buf_index)?;
}
}
i += scan_whitespace_no_nl(&data[i..]);
if close_tag_bytes == 0 {
i += scan_ch(&data[i..], b'/');
}
if scan_ch(&data[i..], b'>') == 0 {
None
} else {
i += 1;
if !buffer.is_empty() {
buffer.extend(&data[last_buf_index..i]);
}
Some((buffer, i))
}
}
/// Returns (next_byte_offset, uri, type)
pub(crate) fn scan_autolink(text: &str, start_ix: usize) -> Option<(usize, CowStr<'_>, LinkType)> {
scan_uri(text, start_ix)
.map(|(bytes, uri)| (bytes, uri, LinkType::Autolink))
.or_else(|| scan_email(text, start_ix).map(|(bytes, uri)| (bytes, uri, LinkType::Email)))
}
/// Returns (next_byte_offset, uri)
fn scan_uri(text: &str, start_ix: usize) -> Option<(usize, CowStr<'_>)> {
let bytes = &text.as_bytes()[start_ix..];
// scheme's first byte must be an ascii letter
if bytes.is_empty() || !is_ascii_alpha(bytes[0]) {
return None;
}
let mut i = 1;
while i < bytes.len() {
let c = bytes[i];
i += 1;
match c {
c if is_ascii_alphanumeric(c) => (),
b'.' | b'-' | b'+' => (),
b':' => break,
_ => return None,
}
}
    // the scheme must be between 2 and 32 characters long and must be
    // followed by a colon
if i < 3 || i > 33 {
return None;
}
while i < bytes.len() {
match bytes[i] {
b'>' => return Some((start_ix + i + 1, text[start_ix..(start_ix + i)].into())),
b'\0'..=b' ' | b'<' => return None,
_ => (),
}
i += 1;
}
None
}
/// Returns (next_byte_offset, email)
fn scan_email(text: &str, start_ix: usize) -> Option<(usize, CowStr<'_>)> {
// using a regex library would be convenient, but doing it by hand is not too bad
let bytes = &text.as_bytes()[start_ix..];
let mut i = 0;
while i < bytes.len() {
let c = bytes[i];
i += 1;
match c {
c if is_ascii_alphanumeric(c) => (),
b'.' | b'!' | b'#' | b'$' | b'%' | b'&' | b'\'' | b'*' | b'+' | b'/' | b'=' | b'?'
| b'^' | b'_' | b'`' | b'{' | b'|' | b'}' | b'~' | b'-' => (),
b'@' => break,
_ => return None,
}
}
loop {
let label_start_ix = i;
let mut fresh_label = true;
while i < bytes.len() {
match bytes[i] {
c if is_ascii_alphanumeric(c) => (),
b'-' if fresh_label => {
return None;
}
b'-' => (),
_ => break,
}
fresh_label = false;
i += 1;
}
if i == label_start_ix || i - label_start_ix > 63 || bytes[i - 1] == b'-' {
return None;
}
if scan_ch(&bytes[i..], b'.') == 0 {
break;
}
i += 1;
}
if scan_ch(&bytes[i..], b'>') == 0 {
return None;
}
Some((start_ix + i + 1, text[start_ix..(start_ix + i)].into()))
}
/// Scan comment, declaration, or CDATA section, with initial "<!" already consumed.
/// Returns byte offset on match.
pub(crate) fn scan_inline_html_comment(
bytes: &[u8],
mut ix: usize,
scan_guard: &mut HtmlScanGuard,
) -> Option<usize> {
let c = *bytes.get(ix)?;
ix += 1;
match c {
b'-' => {
let dashes = scan_ch_repeat(&bytes[ix..], b'-');
if dashes < 1 {
return None;
}
// Saw "<!--", scan comment.
ix += dashes;
if scan_ch(&bytes[ix..], b'>') == 1 {
return None;
}
while let Some(x) = memchr(b'-', &bytes[ix..]) {
ix += x + 1;
if scan_ch(&bytes[ix..], b'-') == 1 {
ix += 1;
return if scan_ch(&bytes[ix..], b'>') == 1 {
Some(ix + 1)
} else {
None
};
}
}
None
}
b'[' if bytes[ix..].starts_with(b"CDATA[") && ix > scan_guard.cdata => {
ix += b"CDATA[".len();
ix = memchr(b']', &bytes[ix..]).map_or(bytes.len(), |x| ix + x);
let close_brackets = scan_ch_repeat(&bytes[ix..], b']');
ix += close_brackets;
if close_brackets == 0 || scan_ch(&bytes[ix..], b'>') == 0 {
scan_guard.cdata = ix;
None
} else {
Some(ix + 1)
}
}
b'A'..=b'Z' if ix > scan_guard.declaration => {
// Scan declaration.
ix += scan_while(&bytes[ix..], |c| c >= b'A' && c <= b'Z');
let whitespace = scan_while(&bytes[ix..], is_ascii_whitespace);
if whitespace == 0 {
return None;
}
ix += whitespace;
ix = memchr(b'>', &bytes[ix..]).map_or(bytes.len(), |x| ix + x);
if scan_ch(&bytes[ix..], b'>') == 0 {
scan_guard.declaration = ix;
None
} else {
Some(ix + 1)
}
}
_ => None,
}
}
/// Scan processing directive, with initial "<?" already consumed.
/// Returns the next byte offset on success.
pub(crate) fn scan_inline_html_processing(
bytes: &[u8],
mut ix: usize,
scan_guard: &mut HtmlScanGuard,
) -> Option<usize> {
if ix <= scan_guard.processing {
return None;
}
while let Some(offset) = memchr(b'?', &bytes[ix..]) {
ix += offset + 1;
if scan_ch(&bytes[ix..], b'>') == 1 {
return Some(ix + 1);
}
}
scan_guard.processing = ix;
None
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn overflow_list() {
assert!(
scan_listitem(b"4444444444444444444444444444444444444444444444444444444444!").is_none()
);
}
#[test]
fn overflow_by_addition() {
assert!(scan_listitem(b"1844674407370955161615!").is_none());
}
}
|
{
LineStart {
bytes,
tab_start: 0,
ix: 0,
spaces_remaining: 0,
min_hrule_offset: 0,
}
}
|
api.xpack.autoscaling.get_autoscaling_policy.go
|
// Licensed to Elasticsearch B.V under one or more agreements.
// Elasticsearch B.V. licenses this file to you under the Apache 2.0 License.
// See the LICENSE file in the project root for more information.
//
// Code generated from specification version 7.11.0: DO NOT EDIT
package esapi
import (
"context"
"net/http"
"strings"
)
func newAutoscalingGetAutoscalingPolicyFunc(t Transport) AutoscalingGetAutoscalingPolicy
|
// ----- API Definition -------------------------------------------------------
// AutoscalingGetAutoscalingPolicy - Retrieves an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.
//
// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-policy.html.
//
type AutoscalingGetAutoscalingPolicy func(name string, o ...func(*AutoscalingGetAutoscalingPolicyRequest)) (*Response, error)
// AutoscalingGetAutoscalingPolicyRequest configures the Autoscaling Get Autoscaling Policy API request.
//
type AutoscalingGetAutoscalingPolicyRequest struct {
Name string
Pretty bool
Human bool
ErrorTrace bool
FilterPath []string
Header http.Header
ctx context.Context
}
// Do executes the request and returns response or error.
//
func (r AutoscalingGetAutoscalingPolicyRequest) Do(ctx context.Context, transport Transport) (*Response, error) {
var (
method string
path strings.Builder
params map[string]string
)
method = "GET"
path.Grow(1 + len("_autoscaling") + 1 + len("policy") + 1 + len(r.Name))
path.WriteString("/")
path.WriteString("_autoscaling")
path.WriteString("/")
path.WriteString("policy")
path.WriteString("/")
path.WriteString(r.Name)
params = make(map[string]string)
if r.Pretty {
params["pretty"] = "true"
}
if r.Human {
params["human"] = "true"
}
if r.ErrorTrace {
params["error_trace"] = "true"
}
if len(r.FilterPath) > 0 {
params["filter_path"] = strings.Join(r.FilterPath, ",")
}
req, err := newRequest(method, path.String(), nil)
if err != nil {
return nil, err
}
if len(params) > 0 {
q := req.URL.Query()
for k, v := range params {
q.Set(k, v)
}
req.URL.RawQuery = q.Encode()
}
if len(r.Header) > 0 {
if len(req.Header) == 0 {
req.Header = r.Header
} else {
for k, vv := range r.Header {
for _, v := range vv {
req.Header.Add(k, v)
}
}
}
}
if ctx != nil {
req = req.WithContext(ctx)
}
res, err := transport.Perform(req)
if err != nil {
return nil, err
}
response := Response{
StatusCode: res.StatusCode,
Body: res.Body,
Header: res.Header,
}
return &response, nil
}
// WithContext sets the request context.
//
func (f AutoscalingGetAutoscalingPolicy) WithContext(v context.Context) func(*AutoscalingGetAutoscalingPolicyRequest) {
return func(r *AutoscalingGetAutoscalingPolicyRequest) {
r.ctx = v
}
}
// WithPretty makes the response body pretty-printed.
//
func (f AutoscalingGetAutoscalingPolicy) WithPretty() func(*AutoscalingGetAutoscalingPolicyRequest) {
return func(r *AutoscalingGetAutoscalingPolicyRequest) {
r.Pretty = true
}
}
// WithHuman makes statistical values human-readable.
//
func (f AutoscalingGetAutoscalingPolicy) WithHuman() func(*AutoscalingGetAutoscalingPolicyRequest) {
return func(r *AutoscalingGetAutoscalingPolicyRequest) {
r.Human = true
}
}
// WithErrorTrace includes the stack trace for errors in the response body.
//
func (f AutoscalingGetAutoscalingPolicy) WithErrorTrace() func(*AutoscalingGetAutoscalingPolicyRequest) {
return func(r *AutoscalingGetAutoscalingPolicyRequest) {
r.ErrorTrace = true
}
}
// WithFilterPath filters the properties of the response body.
//
func (f AutoscalingGetAutoscalingPolicy) WithFilterPath(v ...string) func(*AutoscalingGetAutoscalingPolicyRequest) {
return func(r *AutoscalingGetAutoscalingPolicyRequest) {
r.FilterPath = v
}
}
// WithHeader adds the headers to the HTTP request.
//
func (f AutoscalingGetAutoscalingPolicy) WithHeader(h map[string]string) func(*AutoscalingGetAutoscalingPolicyRequest) {
return func(r *AutoscalingGetAutoscalingPolicyRequest) {
if r.Header == nil {
r.Header = make(http.Header)
}
for k, v := range h {
r.Header.Add(k, v)
}
}
}
// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
//
func (f AutoscalingGetAutoscalingPolicy) WithOpaqueID(s string) func(*AutoscalingGetAutoscalingPolicyRequest) {
return func(r *AutoscalingGetAutoscalingPolicyRequest) {
if r.Header == nil {
r.Header = make(http.Header)
}
r.Header.Set("X-Opaque-Id", s)
}
}
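// Usage sketch (illustrative only; assumes a client value `es` that exposes
// this API as es.AutoscalingGetAutoscalingPolicy, following the functional
// options pattern defined above):
//
//	res, err := es.AutoscalingGetAutoscalingPolicy(
//		"my_policy",
//		es.AutoscalingGetAutoscalingPolicy.WithPretty(),
//	)
//	if err == nil {
//		defer res.Body.Close()
//	}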
|
{
return func(name string, o ...func(*AutoscalingGetAutoscalingPolicyRequest)) (*Response, error) {
var r = AutoscalingGetAutoscalingPolicyRequest{Name: name}
for _, f := range o {
f(&r)
}
return r.Do(r.ctx, t)
}
}
|
lockfile.py
|
# Copyright 2017 Robert Csordas. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import os
import fcntl
class LockFile:
def __init__(self, fname):
self._fname = fname
self._fd = None
def acquire(self):
|
def release(self):
fcntl.lockf(self._fd, fcntl.LOCK_UN)
self._fd.close()
self._fd = None
def __enter__(self):
self.acquire()
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
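# Usage sketch (illustrative; the lock file path below is hypothetical):
#
#   with LockFile("/tmp/my_app.lock"):
#       ...  # critical section, held exclusively across processes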
|
        self._fd = open(self._fname, "w")
os.chmod(self._fname, 0o777)
fcntl.lockf(self._fd, fcntl.LOCK_EX)
|
body-grupos.component.ts
|
import { Component } from '@angular/core';
/**
* Generated class for the BodyGrupos component.
*
* See https://angular.io/docs/ts/latest/api/core/index/ComponentMetadata-class.html
* for more info on Angular Components.
*/
@Component({
selector: 'body-grupos',
templateUrl: 'body-grupos.component.html'
})
export class BodyGrupos {
|
constructor() {
console.log('Hello BodyGrupos Component');
this.text = 'Hello World';
}
}
|
text: string;
|
node.go
|
//
// Last.Backend LLC CONFIDENTIAL
// __________________
//
// [2014] - [2020] Last.Backend LLC
// All Rights Reserved.
//
// NOTICE: All information contained herein is, and remains
// the property of Last.Backend LLC and its suppliers,
// if any. The intellectual and technical concepts contained
// herein are proprietary to Last.Backend LLC
// and its suppliers and may be covered by Russian Federation and Foreign Patents,
// patents in process, and are protected by trade secret or copyright law.
// Dissemination of this information or reproduction of this material
// is strictly forbidden unless prior written permission is obtained
// from Last.Backend LLC.
//
package cache
import (
"context"
"fmt"
"github.com/lastbackend/lastbackend/tools/logger"
"sync"
"github.com/lastbackend/lastbackend/internal/pkg/models"
)
const logCacheNode = "api:cache:node"
type CacheNodeManifest struct {
lock sync.RWMutex
nodes map[string]*models.Node
ingress map[string]*models.Ingress
exporter map[string]*models.Exporter
discovery map[string]*models.Discovery
configs map[string]*models.ConfigManifest
manifests map[string]*models.NodeManifest
}
func (c *CacheNodeManifest) checkNode(node string) {
if _, ok := c.manifests[node]; !ok {
c.manifests[node] = new(models.NodeManifest)
}
}
func (c *CacheNodeManifest) SetPodManifest(node, pod string, s *models.PodManifest) {
ctx := logger.NewContext(context.Background(), nil)
log := logger.WithContext(ctx)
log.Infof("%s:PodManifestSet:> %s, %s, %#v", logCacheNode, node, pod, s)
c.lock.Lock()
defer c.lock.Unlock()
c.checkNode(node)
if c.manifests[node].Pods == nil {
sp := c.manifests[node]
sp.Pods = make(map[string]*models.PodManifest, 0)
}
c.manifests[node].Pods[pod] = s
}
func (c *CacheNodeManifest) DelPodManifest(node, pod string) {
ctx := logger.NewContext(context.Background(), nil)
log := logger.WithContext(ctx)
log.Infof("%s:PodManifestDel:> %s, %s", logCacheNode, node, pod)
c.lock.Lock()
defer c.lock.Unlock()
if _, ok := c.manifests[node]; !ok {
return
}
delete(c.manifests[node].Pods, pod)
}
func (c *CacheNodeManifest) SetVolumeManifest(node, volume string, s *models.VolumeManifest) {
ctx := logger.NewContext(context.Background(), nil)
log := logger.WithContext(ctx)
log.Infof("%s:SetVolumeManifest:> %s, %s", logCacheNode, node, volume)
c.lock.Lock()
defer c.lock.Unlock()
c.checkNode(node)
if c.manifests[node].Volumes == nil {
sp := c.manifests[node]
sp.Volumes = make(map[string]*models.VolumeManifest, 0)
}
c.manifests[node].Volumes[volume] = s
}
func (c *CacheNodeManifest) DelVolumeManifest(node, volume string) {
ctx := logger.NewContext(context.Background(), nil)
log := logger.WithContext(ctx)
log.Infof("%s:DelVolumeManifest:> %s, %s", logCacheNode, node, volume)
c.lock.Lock()
defer c.lock.Unlock()
if _, ok := c.manifests[node]; !ok {
return
}
delete(c.manifests[node].Volumes, volume)
}
func (c *CacheNodeManifest) SetSubnetManifest(cidr string, s *models.SubnetManifest) {
c.lock.Lock()
defer c.lock.Unlock()
for n := range c.manifests {
if _, ok := c.manifests[n].Network[cidr]; !ok {
c.manifests[n].Network = make(map[string]*models.SubnetManifest)
}
c.manifests[n].Network[cidr] = s
}
}
func (c *CacheNodeManifest) SetSecretManifest(name string, s *models.SecretManifest) {
c.lock.Lock()
defer c.lock.Unlock()
for n := range c.manifests {
if _, ok := c.manifests[n].Secrets[name]; !ok {
c.manifests[n].Secrets = make(map[string]*models.SecretManifest)
}
c.manifests[n].Secrets[name] = s
}
}
func (c *CacheNodeManifest) SetConfigManifest(name string, s *models.ConfigManifest) {
c.lock.Lock()
defer c.lock.Unlock()
c.configs[name] = s
for n := range c.manifests {
if _, ok := c.manifests[n].Configs[name]; !ok {
c.manifests[n].Configs = make(map[string]*models.ConfigManifest)
}
c.manifests[n].Configs[name] = s
}
}
func (c *CacheNodeManifest) SetEndpointManifest(addr string, s *models.EndpointManifest) {
c.lock.Lock()
defer c.lock.Unlock()
ctx := logger.NewContext(context.Background(), nil)
log := logger.WithContext(ctx)
log.Debugf("%s set endpoint manifest: %s > %s", logCacheNode, addr, s.IP)
for _, n := range c.manifests {
if n.Endpoints == nil {
n.Endpoints = make(map[string]*models.EndpointManifest, 0)
}
n.Endpoints[addr] = s
}
}
func (c *CacheNodeManifest) SetIngress(ingress *models.Ingress) {
c.lock.Lock()
defer c.lock.Unlock()
c.ingress[ingress.SelfLink().String()] = ingress
}
func (c *CacheNodeManifest) DelIngress(selflink string) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.ingress, selflink)
}
func (c *CacheNodeManifest) SetDiscovery(discovery *models.Discovery) {
c.lock.Lock()
defer c.lock.Unlock()
dvc, ok := c.discovery[discovery.SelfLink().String()]
if !ok {
c.discovery[discovery.SelfLink().String()] = discovery
c.SetResolvers()
return
}
var update = false
switch true {
case dvc.Status.IP != discovery.Status.IP:
update = true
break
case dvc.Status.Port != discovery.Status.Port:
update = true
break
case dvc.Status.Ready != discovery.Status.Ready:
update = true
break
}
if update {
c.discovery[discovery.SelfLink().String()] = discovery
c.SetResolvers()
}
return
}
func (c *CacheNodeManifest) DelDiscovery(selflink string) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.discovery, selflink)
resolvers := make(map[string]*models.ResolverManifest, 0)
for _, d := range c.discovery {
if d.Status.Ready {
resolvers[d.Status.IP] = &models.ResolverManifest{
IP: d.Status.IP,
Port: d.Status.Port,
}
}
}
for _, n := range c.manifests {
n.Resolvers = resolvers
}
}
func (c *CacheNodeManifest) SetExporter(exporter *models.Exporter) {
c.lock.Lock()
defer c.lock.Unlock()
dvc, ok := c.exporter[exporter.SelfLink().String()]
if !ok {
c.exporter[exporter.SelfLink().String()] = exporter
c.SetExporterEndpoint()
return
}
var update = false
switch true {
case dvc.Status.Listener.IP != exporter.Status.Listener.IP:
update = true
break
case dvc.Status.Listener.Port != exporter.Status.Listener.Port:
update = true
break
case dvc.Status.Ready != exporter.Status.Ready:
update = true
break
}
if update {
c.exporter[exporter.SelfLink().String()] = exporter
c.SetExporterEndpoint()
}
return
}
func (c *CacheNodeManifest) DelExporter(selflink string) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.exporter, selflink)
for _, n := range c.manifests {
n.Exporter = nil
}
for _, d := range c.exporter {
if d.Status.Ready {
exporter := &models.ExporterManifest{
Endpoint: fmt.Sprintf("%s:%d", d.Status.Listener.IP, d.Status.Listener.Port),
}
for _, n := range c.manifests {
n.Exporter = exporter
}
break
}
}
}
func (c *CacheNodeManifest) SetExporterEndpoint() {
for _, n := range c.manifests {
n.Exporter = nil
}
for _, d := range c.exporter {
if d.Status.Ready {
exporter := &models.ExporterManifest{
Endpoint: fmt.Sprintf("%s:%d", d.Status.Listener.IP, d.Status.Listener.Port),
}
for _, n := range c.manifests {
n.Exporter = exporter
}
break
}
}
}
func (c *CacheNodeManifest) GetExporterEndpoint() *models.ExporterManifest {
c.lock.Lock()
defer c.lock.Unlock()
exporter := new(models.ExporterManifest)
for _, d := range c.exporter {
if d.Status.Ready {
exporter.Endpoint = fmt.Sprintf("%s:%d", d.Status.Listener.IP, d.Status.Listener.Port)
}
}
return exporter
}
func (c *CacheNodeManifest) SetResolvers() {
resolvers := make(map[string]*models.ResolverManifest, 0)
for _, d := range c.discovery {
if d.Status.Ready {
resolvers[d.Status.IP] = &models.ResolverManifest{
IP: d.Status.IP,
Port: d.Status.Port,
}
}
}
for _, n := range c.manifests {
n.Resolvers = resolvers
}
}
func (c *CacheNodeManifest) GetResolvers() map[string]*models.ResolverManifest {
resolvers := make(map[string]*models.ResolverManifest, 0)
for _, d := range c.discovery {
if d.Status.Ready {
resolvers[d.Status.IP] = &models.ResolverManifest{
IP: d.Status.IP,
Port: d.Status.Port,
}
}
}
return resolvers
}
func (c *CacheNodeManifest) GetConfigs() map[string]*models.ConfigManifest {
return c.configs
}
func (c *CacheNodeManifest) SetNode(node *models.Node) {
c.lock.Lock()
defer c.lock.Unlock()
c.nodes[node.SelfLink().String()] = node
}
func (c *CacheNodeManifest) DelNode(node *models.Node) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.nodes, node.SelfLink().String())
delete(c.manifests, node.SelfLink().String())
}
func (c *CacheNodeManifest) Get(node string) *models.NodeManifest {
c.lock.Lock()
defer c.lock.Unlock()
if s, ok := c.manifests[node]; !ok {
return nil
} else {
return s
}
}
func (c *CacheNodeManifest) Flush(node string) {
c.lock.Lock()
defer c.lock.Unlock()
c.manifests[node] = new(models.NodeManifest)
}
func (c *CacheNodeManifest) Clear(node string) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.manifests, node)
}
func
|
() *CacheNodeManifest {
c := new(CacheNodeManifest)
c.exporter = make(map[string]*models.Exporter, 0)
c.manifests = make(map[string]*models.NodeManifest, 0)
c.ingress = make(map[string]*models.Ingress, 0)
c.discovery = make(map[string]*models.Discovery, 0)
c.configs = make(map[string]*models.ConfigManifest, 0)
return c
}
|
NewCacheNodeManifest
|
package.py
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Chrony(AutotoolsPackage):
"""chrony is a versatile implementation of the Network Time
Protocol (NTP). It can synchronise the system clock with NTP
    servers, reference clocks (e.g. GPS receiver), and manual
input using wristwatch and keyboard."""
homepage = "https://chrony.tuxfamily.org/"
url = "https://github.com/mlichvar/chrony/archive/3.5.1.tar.gz"
version('3.5.1', sha256='881085b944a14853402e1c5cff4de5d815ff104ec6e12eea51c12e42f32f71bd')
version('3.5', sha256='145a270fe4df42931f175e37dd3771a7e714122ae361921a4b93082e648a08c5')
version('3.4', sha256='85fbe433f5a3ee961a20c47a72367760b074448587a9e2d3a6788a95750ed77e')
version('3.3', sha256='0dd7323b5ed9e3208236c1b39fcabf2ad03469fa07ac516ba9c682206133f66d')
depends_on('ruby-asciidoctor')
def setup_run_environment(self, env):
|
env.prepend_path('PATH', self.prefix.sbin)
|
|
smalliscute.go
|
package main
import "fmt"
func
|
() {
	var n, a, b int
	var divisibles []int
	fmt.Scanf("%d %d %d", &n, &a, &b)
	for i := 0; i < n; i++ {
		if i%a == 0 {
			divisibles = append(divisibles, i)
		} else if i%b == 0 {
			divisibles = append(divisibles, i)
		}
	}
	var sum uint64
	for i := 0; i < len(divisibles); i++ {
		sum += uint64(divisibles[i])
	}
	fmt.Println(sum)
}
|
main
|
agent_run_thread.js
|
const path = require('path')
// const flags = require('flags')
const agents = require(path.resolve(__dirname, '..', 'agents'))
const maps = require(path.resolve(__dirname, '..', 'maps'))
const run_loop = require(path.resolve(__dirname, '..', 'env', 'run_loop.js'))
const available_actions_printer = require(path.resolve(__dirname, '..', 'env', 'available_actions_printer.js'))
const sc2_env = require(path.resolve(__dirname, '..', 'env', 'sc2_env.js'))
// const point_flag = require(path.resolve(__dirname, '..', 'lib', 'point_flag.js'))
const stopwatch = require(path.resolve(__dirname, '..', 'lib', 'stopwatch.js'))
const pythonUtils = require(path.resolve(__dirname, '..', 'lib', 'pythonUtils.js'))
const { withPythonAsync } = pythonUtils
// mock flags npm module to keep function signatures the same
const flags = {
get: function(key) {
return flags[key]
},
set: function(kwargs) {
Object.keys(kwargs).forEach((key) => {
flags[key] = kwargs[key]
})
}
}
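// Illustrative example of the mock above (hypothetical values):
//   flags.set({ map: 'Simple64', step_mul: 8 })
//   flags.get('map')      // -> 'Simple64'
//   flags.get('step_mul') // -> 8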
// method is duplicated in bin/agent.js
async function run_thread({
agent_classes,
players,
map_name,
visualize,
}) {
// Run one thread worth of the environment with agents.
const kwargs = {
map_name,
battle_net_map: flags.get('battle_net_map'),
players,
agent_interface_format: sc2_env.parse_agent_interface_format({
feature_screen: flags.get('feature_screen_size'),
feature_minimap: flags.get('feature_minimap_size'),
rgb_screen: flags.get('rgb_screen_size'),
rgb_minimap: flags.get('rgb_minimap_size'),
action_space: flags.get('action_space'),
use_feature_units: flags.get('use_feature_units'),
use_raw_units: flags.get('use_raw_units'),
}),
step_mul: flags.get('step_mul'),
game_steps_per_episode: flags.get('game_steps_per_episode'),
disable_fog: flags.get('disable_fog'),
visualize,
}
await withPythonAsync(sc2_env.SC2EnvFactory(kwargs), async (env) => {
env = available_actions_printer.AvailableActionsPrinter(env)
const usedAgents = []
agent_classes.forEach((Agent_cls) => {
usedAgents.push(new Agent_cls())
})
await run_loop.run_loop(usedAgents, env, flags.get('max_agent_steps'), flags.get('max_episodes'))
if (flags.get('save_replay')) {
await env.save_replay(agent_classes[0].name)
}
})
}
async function
|
() {
if (flags.get('trace')) {
stopwatch.sw.trace()
} else if (flags.get('profile')) {
stopwatch.sw.enable()
}
const map_inst = maps.get(flags.get('map'))
const agent_classes = []
const players = []
// default string value "jssc2.agents.random_agent.RandomAgent"
let temp = flags.get('agent').split('.')
let agent_name = temp.pop()
let agent_module = temp.pop()
let agent_cls = agents[agent_module][agent_name]
agent_classes.push(agent_cls)
players.push(new sc2_env.Agent(sc2_env.Race[flags.get('agent_race')], flags.get('agent_name') || agent_name))
if (map_inst.players >= 2) {
if (flags.get('agent2') === 'Bot') {
players.push(sc2_env.Bot(
sc2_env.Race[flags.get('agent2_race')],
sc2_env.Difficulty[flags.get('difficulty')],
sc2_env.BotBuild[flags.get('bot_build')]
))
}
} else {
temp = flags.get('agent2').split('.')
agent_name = temp.pop()
agent_module = temp.pop()
agent_cls = agents[agent_module][agent_name]
players.push(sc2_env.Agent(
sc2_env.Race[flags.get('agent2_race')],
flags.get('agent2_name') || agent_name
))
}
  await run_thread({ agent_classes, players, map_name: flags.get('map'), visualize: flags.get('render') })
if (flags.get('profile')) {
console.log(stopwatch.sw.toString())
}
}
process.send('ready')
let startProm
function processMessage(msgStr) {
const msg = JSON.parse(msgStr)
const { message, data } = msg
// all necessary routing follows:
/** start **/
if (message === 'start') {
flags.set(data)
startProm = start()
process.send('started')
startProm.then(() => {
process.send('finished')
})
}
}
process.on('message', processMessage)
|
start
|
setup.py
|
# Copyright 2017 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import print_function
import os
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
from distutils.errors import CompileError, DistutilsError, DistutilsPlatformError, LinkError
import shlex
import subprocess
import sys
import textwrap
import traceback
from horovod import __version__
common_mpi_lib = Extension('horovod.common.mpi_lib', [])
tensorflow_mpi_lib = Extension('horovod.tensorflow.mpi_lib', [])
torch_mpi_lib = Extension('horovod.torch.mpi_lib', [])
torch_mpi_lib_impl = Extension('horovod.torch.mpi_lib_impl', [])
def is_build_action():
if len(sys.argv) <= 1:
return False
if sys.argv[1].startswith('build'):
return True
if sys.argv[1].startswith('bdist'):
return True
if sys.argv[1].startswith('install'):
return True
def check_tf_version():
try:
import tensorflow as tf
if tf.__version__ < '1.1.0':
raise DistutilsPlatformError(
'Your TensorFlow version %s is outdated. '
'Horovod requires tensorflow>=1.1.0' % tf.__version__)
except ImportError:
raise DistutilsPlatformError(
'import tensorflow failed, is it installed?\n\n%s' % traceback.format_exc())
except AttributeError:
# This means that tf.__version__ was not exposed, which makes it *REALLY* old.
raise DistutilsPlatformError(
'Your TensorFlow version is outdated. Horovod requires tensorflow>=1.1.0')
def get_cpp_flags(build_ext):
last_err = None
default_flags = ['-std=c++11', '-fPIC', '-O2']
if sys.platform == 'darwin':
# Darwin most likely will have Clang, which has libc++.
flags_to_try = [default_flags + ['-stdlib=libc++'], default_flags]
else:
flags_to_try = [default_flags, default_flags + ['-stdlib=libc++']]
for cpp_flags in flags_to_try:
try:
test_compile(build_ext, 'test_cpp_flags', extra_preargs=cpp_flags,
code=textwrap.dedent('''\
#include <unordered_map>
void test() {
}
'''))
return cpp_flags
except (CompileError, LinkError):
last_err = 'Unable to determine C++ compilation flags (see error above).'
except Exception:
last_err = 'Unable to determine C++ compilation flags. ' \
'Last error:\n\n%s' % traceback.format_exc()
raise DistutilsPlatformError(last_err)
def get_tf_include_dirs():
import tensorflow as tf
tf_inc = tf.sysconfig.get_include()
return [tf_inc, '%s/external/nsync/public' % tf_inc]
def get_tf_lib_dirs():
import tensorflow as tf
tf_lib = tf.sysconfig.get_lib()
return [tf_lib]
def get_tf_libs(build_ext, lib_dirs, cpp_flags):
last_err = None
for tf_libs in [['tensorflow_framework'], []]:
try:
lib_file = test_compile(build_ext, 'test_tensorflow_libs',
library_dirs=lib_dirs, libraries=tf_libs,
extra_preargs=cpp_flags,
code=textwrap.dedent('''\
void test() {
}
'''))
from tensorflow.python.framework import load_library
load_library.load_op_library(lib_file)
return tf_libs
except (CompileError, LinkError):
last_err = 'Unable to determine -l link flags to use with TensorFlow (see error above).'
except Exception:
last_err = 'Unable to determine -l link flags to use with TensorFlow. ' \
'Last error:\n\n%s' % traceback.format_exc()
raise DistutilsPlatformError(last_err)
def get_tf_abi(build_ext, include_dirs, lib_dirs, libs, cpp_flags):
last_err = None
cxx11_abi_macro = '_GLIBCXX_USE_CXX11_ABI'
for cxx11_abi in ['0', '1']:
try:
lib_file = test_compile(build_ext, 'test_tensorflow_abi',
macros=[(cxx11_abi_macro, cxx11_abi)],
include_dirs=include_dirs, library_dirs=lib_dirs,
libraries=libs, extra_preargs=cpp_flags,
code=textwrap.dedent('''\
#include <string>
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
void test() {
auto ignore = tensorflow::strings::StrCat("a", "b");
}
'''))
from tensorflow.python.framework import load_library
load_library.load_op_library(lib_file)
return cxx11_abi_macro, cxx11_abi
except (CompileError, LinkError):
last_err = 'Unable to determine CXX11 ABI to use with TensorFlow (see error above).'
except Exception:
last_err = 'Unable to determine CXX11 ABI to use with TensorFlow. ' \
'Last error:\n\n%s' % traceback.format_exc()
raise DistutilsPlatformError(last_err)
def get_tf_flags(build_ext, cpp_flags):
import tensorflow as tf
try:
return tf.sysconfig.get_compile_flags(), tf.sysconfig.get_link_flags()
except AttributeError:
# fallback to the previous logic
tf_include_dirs = get_tf_include_dirs()
tf_lib_dirs = get_tf_lib_dirs()
tf_libs = get_tf_libs(build_ext, tf_lib_dirs, cpp_flags)
tf_abi = get_tf_abi(build_ext, tf_include_dirs,
tf_lib_dirs, tf_libs, cpp_flags)
compile_flags = []
for include_dir in tf_include_dirs:
compile_flags.append('-I%s' % include_dir)
if tf_abi:
compile_flags.append('-D%s=%s' % tf_abi)
link_flags = []
for lib_dir in tf_lib_dirs:
link_flags.append('-L%s' % lib_dir)
for lib in tf_libs:
link_flags.append('-l%s' % lib)
return compile_flags, link_flags
def get_mpi_flags():
show_command = os.environ.get('HOROVOD_MPICXX_SHOW', 'mpicxx -show')
try:
mpi_show_output = subprocess.check_output(
shlex.split(show_command), universal_newlines=True).strip()
mpi_show_args = shlex.split(mpi_show_output)
if not mpi_show_args[0].startswith('-'):
# Open MPI and MPICH print compiler name as a first word, skip it
mpi_show_args = mpi_show_args[1:]
# strip off compiler call portion and always escape each arg
return ' '.join(['"' + arg.replace('"', '"\'"\'"') + '"'
for arg in mpi_show_args])
except Exception:
raise DistutilsPlatformError(
'%s failed (see error below), is MPI in $PATH?\n'
'Note: If your version of MPI has a custom command to show compilation flags, '
'please specify it with the HOROVOD_MPICXX_SHOW environment variable.\n\n'
'%s' % (show_command, traceback.format_exc()))
def test_compile(build_ext, name, code, libraries=None, include_dirs=None, library_dirs=None, macros=None,
extra_preargs=None):
test_compile_dir = os.path.join(build_ext.build_temp, 'test_compile')
if not os.path.exists(test_compile_dir):
os.makedirs(test_compile_dir)
source_file = os.path.join(test_compile_dir, '%s.cc' % name)
with open(source_file, 'w') as f:
f.write(code)
compiler = build_ext.compiler
[object_file] = compiler.object_filenames([source_file])
shared_object_file = compiler.shared_object_filename(
name, output_dir=test_compile_dir)
compiler.compile([source_file], extra_preargs=extra_preargs,
include_dirs=include_dirs, macros=macros)
compiler.link_shared_object(
[object_file], shared_object_file, libraries=libraries, library_dirs=library_dirs)
return shared_object_file
def get_cuda_dirs(build_ext, cpp_flags):
cuda_include_dirs = []
cuda_lib_dirs = []
cuda_home = os.environ.get('HOROVOD_CUDA_HOME')
if cuda_home:
cuda_include_dirs += ['%s/include' % cuda_home]
cuda_lib_dirs += ['%s/lib' % cuda_home, '%s/lib64' % cuda_home]
cuda_include = os.environ.get('HOROVOD_CUDA_INCLUDE')
if cuda_include:
cuda_include_dirs += [cuda_include]
cuda_lib = os.environ.get('HOROVOD_CUDA_LIB')
if cuda_lib:
cuda_lib_dirs += [cuda_lib]
if not cuda_include_dirs and not cuda_lib_dirs:
# default to /usr/local/cuda
cuda_include_dirs += ['/usr/local/cuda/include']
cuda_lib_dirs += ['/usr/local/cuda/lib', '/usr/local/cuda/lib64']
try:
test_compile(build_ext, 'test_cuda', libraries=['cudart'], include_dirs=cuda_include_dirs,
library_dirs=cuda_lib_dirs, extra_preargs=cpp_flags, code=textwrap.dedent('''\
#include <cuda_runtime.h>
void test() {
cudaSetDevice(0);
}
'''))
except (CompileError, LinkError):
raise DistutilsPlatformError(
'CUDA library was not found (see error above).\n'
'Please specify correct CUDA location with the HOROVOD_CUDA_HOME '
'environment variable or combination of HOROVOD_CUDA_INCLUDE and '
'HOROVOD_CUDA_LIB environment variables.\n\n'
'HOROVOD_CUDA_HOME - path where CUDA include and lib directories can be found\n'
'HOROVOD_CUDA_INCLUDE - path to CUDA include directory\n'
'HOROVOD_CUDA_LIB - path to CUDA lib directory')
return cuda_include_dirs, cuda_lib_dirs
def get_nccl_vals(build_ext, cuda_include_dirs, cuda_lib_dirs, cpp_flags):
nccl_include_dirs = []
nccl_lib_dirs = []
nccl_libs = []
nccl_home = os.environ.get('HOROVOD_NCCL_HOME')
if nccl_home:
nccl_include_dirs += ['%s/include' % nccl_home]
nccl_lib_dirs += ['%s/lib' % nccl_home, '%s/lib64' % nccl_home]
nccl_include_dir = os.environ.get('HOROVOD_NCCL_INCLUDE')
if nccl_include_dir:
nccl_include_dirs += [nccl_include_dir]
nccl_lib_dir = os.environ.get('HOROVOD_NCCL_LIB')
if nccl_lib_dir:
nccl_lib_dirs += [nccl_lib_dir]
nccl_link_mode = os.environ.get('HOROVOD_NCCL_LINK', 'STATIC')
if nccl_link_mode.upper() == 'SHARED':
nccl_libs += ['nccl']
else:
nccl_libs += ['nccl_static']
try:
test_compile(build_ext, 'test_nccl', libraries=nccl_libs, include_dirs=nccl_include_dirs + cuda_include_dirs,
library_dirs=nccl_lib_dirs + cuda_lib_dirs, extra_preargs=cpp_flags, code=textwrap.dedent('''\
#include <nccl.h>
#if NCCL_MAJOR < 2
#error Horovod requires NCCL 2.0 or later version, please upgrade.
#endif
void test() {
ncclUniqueId nccl_id;
ncclGetUniqueId(&nccl_id);
}
'''))
except (CompileError, LinkError):
raise DistutilsPlatformError(
'NCCL 2.0 library or its later version was not found (see error above).\n'
'Please specify correct NCCL location with the HOROVOD_NCCL_HOME '
'environment variable or combination of HOROVOD_NCCL_INCLUDE and '
'HOROVOD_NCCL_LIB environment variables.\n\n'
'HOROVOD_NCCL_HOME - path where NCCL include and lib directories can be found\n'
'HOROVOD_NCCL_INCLUDE - path to NCCL include directory\n'
'HOROVOD_NCCL_LIB - path to NCCL lib directory')
return nccl_include_dirs, nccl_lib_dirs, nccl_libs
def get_ddl_dirs():
# Default DDL home
ddl_home = '/opt/DL/ddl'
ddl_include_dir = '%s/include' % ddl_home
ddl_lib_dir = '%s/lib' % ddl_home
if not os.path.exists(ddl_lib_dir):
raise DistutilsPlatformError('DDL lib was not found. Please, make sure \'ddl\' package is installed.')
if not os.path.exists(ddl_include_dir):
raise DistutilsPlatformError('DDL include was not found. Please, make sure \'ddl-dev\' package is installed.')
return [ddl_include_dir], [ddl_lib_dir]
def get_common_options(build_ext):
cpp_flags = get_cpp_flags(build_ext)
mpi_flags = get_mpi_flags()
gpu_allreduce = os.environ.get('HOROVOD_GPU_ALLREDUCE')
if gpu_allreduce and gpu_allreduce != 'MPI' and gpu_allreduce != 'NCCL' and \
gpu_allreduce != 'DDL':
raise DistutilsError('HOROVOD_GPU_ALLREDUCE=%s is invalid, supported '
'values are "", "MPI", "NCCL", "DDL".' % gpu_allreduce)
gpu_allgather = os.environ.get('HOROVOD_GPU_ALLGATHER')
if gpu_allgather and gpu_allgather != 'MPI':
raise DistutilsError('HOROVOD_GPU_ALLGATHER=%s is invalid, supported '
'values are "", "MPI".' % gpu_allgather)
gpu_broadcast = os.environ.get('HOROVOD_GPU_BROADCAST')
if gpu_broadcast and gpu_broadcast != 'MPI':
raise DistutilsError('HOROVOD_GPU_BROADCAST=%s is invalid, supported '
'values are "", "MPI".' % gpu_broadcast)
if gpu_allreduce or gpu_allgather or gpu_broadcast:
have_cuda = True
cuda_include_dirs, cuda_lib_dirs = get_cuda_dirs(build_ext, cpp_flags)
else:
have_cuda = False
cuda_include_dirs = cuda_lib_dirs = []
if gpu_allreduce == 'NCCL':
have_nccl = True
nccl_include_dirs, nccl_lib_dirs, nccl_libs = get_nccl_vals(
build_ext, cuda_include_dirs, cuda_lib_dirs, cpp_flags)
else:
have_nccl = False
nccl_include_dirs = nccl_lib_dirs = nccl_libs = []
if gpu_allreduce == 'DDL':
have_ddl = True
ddl_include_dirs, ddl_lib_dirs = get_ddl_dirs()
else:
have_ddl = False
ddl_include_dirs = ddl_lib_dirs = []
MACROS = []
INCLUDES = []
SOURCES = []
COMPILE_FLAGS = cpp_flags + shlex.split(mpi_flags)
LINK_FLAGS = shlex.split(mpi_flags)
LIBRARY_DIRS = []
LIBRARIES = []
if have_cuda:
MACROS += [('HAVE_CUDA', '1')]
INCLUDES += cuda_include_dirs
LIBRARY_DIRS += cuda_lib_dirs
LIBRARIES += ['cudart']
if have_nccl:
MACROS += [('HAVE_NCCL', '1')]
INCLUDES += nccl_include_dirs
LINK_FLAGS += ['-Wl,--version-script=hide_nccl.lds']
LIBRARY_DIRS += nccl_lib_dirs
LIBRARIES += nccl_libs
if have_ddl:
MACROS += [('HAVE_DDL', '1')]
INCLUDES += ddl_include_dirs
LIBRARY_DIRS += ddl_lib_dirs
LIBRARIES += ['ddl', 'ddl_pack']
if gpu_allreduce:
MACROS += [('HOROVOD_GPU_ALLREDUCE', "'%s'" % gpu_allreduce[0])]
if gpu_allgather:
MACROS += [('HOROVOD_GPU_ALLGATHER', "'%s'" % gpu_allgather[0])]
if gpu_broadcast:
MACROS += [('HOROVOD_GPU_BROADCAST', "'%s'" % gpu_broadcast[0])]
return dict(MACROS=MACROS,
INCLUDES=INCLUDES,
SOURCES=SOURCES,
COMPILE_FLAGS=COMPILE_FLAGS,
LINK_FLAGS=LINK_FLAGS,
LIBRARY_DIRS=LIBRARY_DIRS,
LIBRARIES=LIBRARIES)
def build_common_extension(build_ext, options, abi_compile_flags):
common_mpi_lib.define_macros = options['MACROS']
common_mpi_lib.include_dirs = options['INCLUDES']
common_mpi_lib.sources = options['SOURCES'] + ['horovod/common/common.cc',
'horovod/common/mpi_message.cc',
'horovod/common/operations.cc',
'horovod/common/timeline.cc']
common_mpi_lib.extra_compile_args = options['COMPILE_FLAGS'] + \
abi_compile_flags
common_mpi_lib.extra_link_args = options['LINK_FLAGS']
common_mpi_lib.library_dirs = options['LIBRARY_DIRS']
common_mpi_lib.libraries = options['LIBRARIES']
build_ext.build_extension(common_mpi_lib)
def build_tf_extension(build_ext, options):
check_tf_version()
tf_compile_flags, tf_link_flags = get_tf_flags(
build_ext, options['COMPILE_FLAGS'])
tensorflow_mpi_lib.define_macros = options['MACROS']
tensorflow_mpi_lib.include_dirs = options['INCLUDES']
tensorflow_mpi_lib.sources = options['SOURCES'] + \
['horovod/tensorflow/mpi_ops.cc']
tensorflow_mpi_lib.extra_compile_args = options['COMPILE_FLAGS'] + \
tf_compile_flags
tensorflow_mpi_lib.extra_link_args = options['LINK_FLAGS'] + tf_link_flags
tensorflow_mpi_lib.library_dirs = options['LIBRARY_DIRS']
tensorflow_mpi_lib.libraries = options['LIBRARIES']
build_ext.build_extension(tensorflow_mpi_lib)
# Return ABI flags used for TensorFlow compilation. We will use this flag
# to compile all the libraries.
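    # For illustration: the surviving flag typically looks like '-D_GLIBCXX_USE_CXX11_ABI=0'
    # (or '=1'), depending on which C++ ABI the installed TensorFlow was built with; the
    # exact value always comes from TensorFlow's own reported compile flags, not from here.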
return [flag for flag in tf_compile_flags if '_GLIBCXX_USE_CXX11_ABI' in flag]
def dummy_import_torch():
try:
import torch
except:
pass
def check_torch_import():
try:
import torch
except ImportError:
raise DistutilsPlatformError(
'import torch failed, is it installed?\n\n%s' % traceback.format_exc())
def is_torch_cuda():
try:
from torch.utils.ffi import create_extension
cuda_test_ext = create_extension(
name='horovod.torch.test_cuda',
headers=['horovod/torch/dummy.h'],
sources=[],
with_cuda=True,
extra_compile_args=['-std=c11', '-fPIC', '-O2']
)
cuda_test_ext.build()
return True
except:
print('INFO: Above error indicates that this PyTorch installation does not support CUDA.')
return False
def check_macro(macros, key):
return any(k == key and v for k, v in macros)
def set_macro(macros, key, new_value):
if any(k == key for k, _ in macros):
return [(k, new_value if k == key else v) for k, v in macros]
else:
return macros + [(key, new_value)]
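# For illustration, the two helpers above behave as follows (values derived from the code,
# not from any particular build):
#   set_macro([], 'HAVE_CUDA', '1')                 -> [('HAVE_CUDA', '1')]  (returns a new list)
#   check_macro([('HAVE_CUDA', '1')], 'HAVE_CUDA')  -> True
#   check_macro([], 'HAVE_CUDA')                    -> False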
class protect_files(object):
def __init__(self, *files):
self.files = files
def
|
(self):
for file in self.files:
os.rename(file, file + '.protected')
def __exit__(self, type, value, traceback):
for file in self.files:
os.rename(file + '.protected', file)
def build_torch_extension(build_ext, options, abi_compile_flags):
check_torch_import()
have_cuda = is_torch_cuda()
if not have_cuda and check_macro(options['MACROS'], 'HAVE_CUDA'):
raise DistutilsPlatformError(
'Horovod build with GPU support was requested, but this PyTorch '
'installation does not support CUDA.')
# Update HAVE_CUDA to mean that PyTorch supports CUDA. Internally, we will be checking
# HOROVOD_GPU_(ALLREDUCE|ALLGATHER|BROADCAST) to decide whether we should use GPU
# version or transfer tensors to CPU memory for those operations.
updated_macros = set_macro(
options['MACROS'], 'HAVE_CUDA', str(int(have_cuda)))
    # create_extension overwrites these customized files, so we need to protect them.
with protect_files('horovod/torch/mpi_lib/__init__.py',
'horovod/torch/mpi_lib_impl/__init__.py'):
from torch.utils.ffi import create_extension
ffi_iface = create_extension(
name='horovod.torch.mpi_lib',
headers=['horovod/torch/interface.h'] +
(['horovod/torch/interface_cuda.h'] if have_cuda else []),
with_cuda=have_cuda,
language='c',
package=True,
sources=[],
extra_compile_args=['-std=c11', '-fPIC', '-O2']
)
ffi_impl = create_extension(
name='horovod.torch.mpi_lib_impl',
headers=[],
with_cuda=have_cuda,
language='c++',
package=True,
source_extension='.cc',
define_macros=updated_macros,
include_dirs=options['INCLUDES'],
sources=options['SOURCES'] + ['horovod/torch/mpi_ops.cc',
'horovod/torch/handle_manager.cc',
'horovod/torch/ready_event.cc',
'horovod/torch/tensor_util.cc',
'horovod/torch/cuda_util.cc',
'horovod/torch/adapter.cc'],
extra_compile_args=options['COMPILE_FLAGS'] + abi_compile_flags,
extra_link_args=options['LINK_FLAGS'],
library_dirs=options['LIBRARY_DIRS'],
libraries=options['LIBRARIES']
)
for ffi, setuptools_ext in [(ffi_iface, torch_mpi_lib),
(ffi_impl, torch_mpi_lib_impl)]:
ffi_ext = ffi.distutils_extension()
# ffi_ext is distutils Extension, not setuptools Extension
for k, v in ffi_ext.__dict__.items():
setuptools_ext.__dict__[k] = v
build_ext.build_extension(setuptools_ext)
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
options = get_common_options(self)
abi_compile_flags = []
built_plugins = []
# If PyTorch is installed, it must be imported before TensorFlow, otherwise
# we may get an error: dlopen: cannot load any more object with static TLS
dummy_import_torch()
if not os.environ.get('HOROVOD_WITHOUT_TENSORFLOW'):
try:
abi_compile_flags = build_tf_extension(self, options)
built_plugins.append(True)
except:
if not os.environ.get('HOROVOD_WITH_TENSORFLOW'):
print('INFO: Unable to build TensorFlow plugin, will skip it.\n\n'
'%s' % traceback.format_exc(), file=sys.stderr)
built_plugins.append(False)
else:
raise
if not os.environ.get('HOROVOD_WITHOUT_PYTORCH'):
try:
build_torch_extension(self, options, abi_compile_flags)
built_plugins.append(True)
except:
if not os.environ.get('HOROVOD_WITH_PYTORCH'):
print('INFO: Unable to build PyTorch plugin, will skip it.\n\n'
'%s' % traceback.format_exc(), file=sys.stderr)
built_plugins.append(False)
else:
raise
if not built_plugins:
raise DistutilsError(
'Both TensorFlow and PyTorch plugins were excluded from build. Aborting.')
if not any(built_plugins):
raise DistutilsError(
'Neither TensorFlow nor PyTorch plugins were built. See errors above.')
build_common_extension(self, options, abi_compile_flags)
setup(name='horovod',
version=__version__,
packages=find_packages(),
description='Distributed training framework for TensorFlow, Keras, and PyTorch.',
author='Uber Technologies, Inc.',
long_description=textwrap.dedent('''\
Horovod is a distributed training framework for TensorFlow, Keras, and PyTorch.
The goal of Horovod is to make distributed Deep Learning fast and easy to use.'''),
url='https://github.com/uber/horovod',
classifiers=[
'License :: OSI Approved :: Apache Software License'
],
ext_modules=[common_mpi_lib, tensorflow_mpi_lib,
torch_mpi_lib, torch_mpi_lib_impl],
cmdclass={'build_ext': custom_build_ext},
# cffi is required for PyTorch
# If cffi is specified in setup_requires, it will need libffi to be installed on the machine,
# which is undesirable. Luckily, `install` action will install cffi before executing build,
# so it's only necessary for `build*` or `bdist*` actions.
setup_requires=['cffi>=1.4.0'] if is_build_action() else [],
install_requires=['cffi>=1.4.0'],
zip_safe=False)
|
__enter__
|
_zone.ts
|
import { IDuration } from "../core/duration";
import { IMap } from "../core/map";
import { IStackTrace } from "../core/stackTrace";
import { IAsyncError } from "./asyncError";
import { ITimer } from "./timer";
import { IZone } from "./zone";
import { IZoneSpecification } from "./zoneSpecification";
declare const dart: {
async: {
_zone: (this: void, _zone: I_Zone) => I_Zone;
};
};
export interface I_Zone {
getParent: () => I_Zone | undefined;
inSameErrorZone: (otherZone: IZone) => boolean;
handleUncaughtError: (error: Object, stackTrace: IStackTrace) => void;
fork: (props?: {
specification?: IZoneSpecification | undefined;
zoneValues?: IMap<Object | undefined, Object | undefined> | undefined;
}) => IZone;
run: <R>(action: () => R) => R;
runUnary: <R, T>(action: (argument: T) => R, argument: T) => R;
runBinary: <R, T1, T2>(
action: (argument1: T1, argument2: T2) => R,
argument1: T1,
argument2: T2
) => R;
runGuarded: (action: () => void) => void;
runUnaryGuarded: <T>(action: (argument: T) => void, argument: T) => void;
runBinaryGuarded: <T1, T2>(
action: (argument1: T1, argument2: T2) => void,
argument1: T1,
argument2: T2
) => void;
registerCallback: <R>(callback: () => R) => () => R;
registerUnaryCallback: <R, T>(callback: (arg: T) => R) => (arg: T) => R;
registerBinaryCallback: <R, T1, T2>(
callback: (arg1: T1, arg2: T2) => R
) => (arg1: T1, arg2: T2) => R;
bindCallback: <R>(callback: () => R) => () => R;
bindUnaryCallback: <R, T>(callback: (argument: T) => R) => (arg: T) => R;
bindBinaryCallback: <R, T1, T2>(
callback: (argument1: T1, argument2: T2) => R
) => (arg1: T1, arg2: T2) => R;
bindCallbackGuarded: (callback: () => void) => () => void;
bindUnaryCallbackGuarded: <T>(
callback: (argument: T) => void
) => (__: T) => void;
bindBinaryCallbackGuarded: <T1, T2>(
callback: (argument1: T1, argument2: T2) => void
) => (__: T2) => void;
errorCallback: (
error: Object,
stackTrace?: IStackTrace | undefined
) => IAsyncError | undefined;
scheduleMicrotask: (callback: () => void) => void;
createTimer: (duration: IDuration, callback: () => void) => ITimer;
createPeriodicTimer: (
period: IDuration,
callback: (timer: ITimer) => void
) => ITimer;
print: (line: string) => void;
getErrorZone: () => IZone;
toString: () => string;
getHashCode: () => number;
}
export class
|
implements IZone {
public constructor() {
dart.async._zone(this);
}
private readonly _dart_getParent: () => I_Zone | undefined =
undefined as any;
private readonly _dart_inSameErrorZone: (otherZone: IZone) => boolean =
undefined as any;
private readonly _dart_handleUncaughtError: (
error: Object,
stackTrace: IStackTrace
) => void = undefined as any;
private readonly _dart_fork: (props?: {
specification?: IZoneSpecification | undefined;
zoneValues?: IMap<Object | undefined, Object | undefined> | undefined;
}) => IZone = undefined as any;
private readonly _dart_run: <R>(action: () => R) => R = undefined as any;
private readonly _dart_runUnary: <R, T>(
action: (argument: T) => R,
argument: T
) => R = undefined as any;
private readonly _dart_runBinary: <R, T1, T2>(
action: (argument1: T1, argument2: T2) => R,
argument1: T1,
argument2: T2
) => R = undefined as any;
private readonly _dart_runGuarded: (action: () => void) => void =
undefined as any;
private readonly _dart_runUnaryGuarded: <T>(
action: (argument: T) => void,
argument: T
) => void = undefined as any;
private readonly _dart_runBinaryGuarded: <T1, T2>(
action: (argument1: T1, argument2: T2) => void,
argument1: T1,
argument2: T2
) => void = undefined as any;
private readonly _dart_registerCallback: <R>(callback: () => R) => () => R =
undefined as any;
private readonly _dart_registerUnaryCallback: <R, T>(
callback: (arg: T) => R
) => (arg: T) => R = undefined as any;
private readonly _dart_registerBinaryCallback: <R, T1, T2>(
callback: (arg1: T1, arg2: T2) => R
) => (arg1: T1, arg2: T2) => R = undefined as any;
private readonly _dart_bindCallback: <R>(callback: () => R) => () => R =
undefined as any;
private readonly _dart_bindUnaryCallback: <R, T>(
callback: (argument: T) => R
) => (arg: T) => R = undefined as any;
private readonly _dart_bindBinaryCallback: <R, T1, T2>(
callback: (argument1: T1, argument2: T2) => R
) => (arg1: T1, arg2: T2) => R = undefined as any;
private readonly _dart_bindCallbackGuarded: (
callback: () => void
) => () => void = undefined as any;
private readonly _dart_bindUnaryCallbackGuarded: <T>(
callback: (argument: T) => void
) => (__: T) => void = undefined as any;
private readonly _dart_bindBinaryCallbackGuarded: <T1, T2>(
callback: (argument1: T1, argument2: T2) => void
) => (__: T2) => void = undefined as any;
private readonly _dart_errorCallback: (
error: Object,
stackTrace?: IStackTrace | undefined
) => IAsyncError | undefined = undefined as any;
private readonly _dart_scheduleMicrotask: (callback: () => void) => void =
undefined as any;
private readonly _dart_createTimer: (
duration: IDuration,
callback: () => void
) => ITimer = undefined as any;
private readonly _dart_createPeriodicTimer: (
period: IDuration,
callback: (timer: ITimer) => void
) => ITimer = undefined as any;
private readonly _dart_print: (line: string) => void = undefined as any;
private readonly _dart_getErrorZone: () => IZone = undefined as any;
private readonly _dart_toString: () => string = undefined as any;
private readonly _dart_getHashCode: () => number = undefined as any;
public getParent(): I_Zone | undefined {
return this._dart_getParent();
}
public inSameErrorZone(otherZone: IZone): boolean {
return this._dart_inSameErrorZone(otherZone);
}
public handleUncaughtError(error: Object, stackTrace: IStackTrace): void {
return this._dart_handleUncaughtError(error, stackTrace);
}
public fork(props?: {
specification?: IZoneSpecification | undefined;
zoneValues?: IMap<Object | undefined, Object | undefined> | undefined;
}): IZone {
return this._dart_fork(props);
}
public run<R>(action: () => R): R {
return this._dart_run(action);
}
public runUnary<R, T>(action: (argument: T) => R, argument: T): R {
return this._dart_runUnary(action, argument);
}
public runBinary<R, T1, T2>(
action: (argument1: T1, argument2: T2) => R,
argument1: T1,
argument2: T2
): R {
return this._dart_runBinary(action, argument1, argument2);
}
public runGuarded(action: () => void): void {
return this._dart_runGuarded(action);
}
public runUnaryGuarded<T>(
action: (argument: T) => void,
argument: T
): void {
return this._dart_runUnaryGuarded(action, argument);
}
public runBinaryGuarded<T1, T2>(
action: (argument1: T1, argument2: T2) => void,
argument1: T1,
argument2: T2
): void {
return this._dart_runBinaryGuarded(action, argument1, argument2);
}
public registerCallback<R>(callback: () => R): () => R {
return this._dart_registerCallback(callback);
}
public registerUnaryCallback<R, T>(callback: (arg: T) => R): (arg: T) => R {
return this._dart_registerUnaryCallback(callback);
}
public registerBinaryCallback<R, T1, T2>(
callback: (arg1: T1, arg2: T2) => R
): (arg1: T1, arg2: T2) => R {
return this._dart_registerBinaryCallback(callback);
}
public bindCallback<R>(callback: () => R): () => R {
return this._dart_bindCallback(callback);
}
public bindUnaryCallback<R, T>(
callback: (argument: T) => R
): (arg: T) => R {
return this._dart_bindUnaryCallback(callback);
}
public bindBinaryCallback<R, T1, T2>(
callback: (argument1: T1, argument2: T2) => R
): (arg1: T1, arg2: T2) => R {
return this._dart_bindBinaryCallback(callback);
}
public bindCallbackGuarded(callback: () => void): () => void {
return this._dart_bindCallbackGuarded(callback);
}
public bindUnaryCallbackGuarded<T>(
callback: (argument: T) => void
): (__: T) => void {
return this._dart_bindUnaryCallbackGuarded(callback);
}
public bindBinaryCallbackGuarded<T1, T2>(
callback: (argument1: T1, argument2: T2) => void
): (__: T2) => void {
return this._dart_bindBinaryCallbackGuarded(callback);
}
public errorCallback(
error: Object,
stackTrace?: IStackTrace | undefined
): IAsyncError | undefined {
return this._dart_errorCallback(error, stackTrace);
}
public scheduleMicrotask(callback: () => void): void {
return this._dart_scheduleMicrotask(callback);
}
public createTimer(duration: IDuration, callback: () => void): ITimer {
return this._dart_createTimer(duration, callback);
}
public createPeriodicTimer(
period: IDuration,
callback: (timer: ITimer) => void
): ITimer {
return this._dart_createPeriodicTimer(period, callback);
}
public print(line: string): void {
return this._dart_print(line);
}
public getErrorZone(): IZone {
return this._dart_getErrorZone();
}
public toString(): string {
return this._dart_toString();
}
public getHashCode(): number {
return this._dart_getHashCode();
}
}
|
_Zone
|
files.py
|
#! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.in_interfaces.file import File
from pycatia.system_interfaces.collection import Collection
class Files(Collection):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.Collection
| Files
|
| A collection of all the file objects in a folder.
| It lists all the files contained in the folder. It allows to retrieve File
| objects.
"""
def __init__(self, com_object):
|
def item(self, i_number: int) -> File:
"""
.. note::
:class: toggle
            CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Func Item(long iNumber) As File
|
| Returns a file using its index or its name from the file
| collection.
|
| Parameters:
|
| iIndex
| The index or the name of the file to retrieve from the collection
| of files. As a numerics, this index is the rank of the file in the collection.
| The index of the first file in the collection is 1, and the index of the last
| file is Count. As a string, it is the name you assigned to the file using the
|
|
| AnyObject.Name property.
| Returns:
| The retrieved file
| Example:
| This example retrieves in ThisFile the third file, and it ThatFile the
| file named MyFile. in the TestFiles file collection.
|
| Dim ThisFile As File
| Set ThisFile = TestFiles.Item(3)
| Dim ThatFile As File
| Set ThatFile = TestFiles.Item("MyFile")
:param int i_number:
:return: File
:rtype: File
"""
return File(self.files.Item(i_number))
def __getitem__(self, n: int) -> File:
if (n + 1) > self.count:
raise StopIteration
return File(self.files.item(n + 1))
def __repr__(self):
return f'Files(name="{self.name}")'
|
super().__init__(com_object, child_object=File)
self.files = com_object
|
assert_storage.rs
|
// Copyright 2017 TiKV Project Authors. Licensed under Apache-2.0.
use kvproto::kvrpcpb::{Context, LockInfo};
use test_raftstore::{Cluster, ServerCluster, SimulateEngine};
use tikv::storage::kv::{Error as KvError, ErrorInner as KvErrorInner, RocksEngine};
use tikv::storage::mvcc::{Error as MvccError, ErrorInner as MvccErrorInner, MAX_TXN_WRITE_SIZE};
use tikv::storage::txn::{Error as TxnError, ErrorInner as TxnErrorInner};
use tikv::storage::{
self, Engine, Error as StorageError, ErrorInner as StorageErrorInner, TxnStatus,
};
use tikv_util::HandyRwLock;
use txn_types::{Key, KvPair, Mutation, TimeStamp, Value};
use super::*;
#[derive(Clone)]
pub struct AssertionStorage<E: Engine> {
pub store: SyncTestStorage<E>,
pub ctx: Context,
}
impl Default for AssertionStorage<RocksEngine> {
fn default() -> Self {
AssertionStorage {
ctx: Context::default(),
store: SyncTestStorageBuilder::new().build().unwrap(),
}
}
}
impl AssertionStorage<SimulateEngine> {
pub fn new_raft_storage_with_store_count(
count: usize,
key: &str,
) -> (Cluster<ServerCluster>, Self) {
let (cluster, store, ctx) = new_raft_storage_with_store_count(count, key);
let storage = Self { ctx, store };
(cluster, storage)
}
pub fn update_with_key_byte(&mut self, cluster: &mut Cluster<ServerCluster>, key: &[u8]) {
        // ensure the leader of the range containing the current key has been elected
cluster.must_get(key);
let region = cluster.get_region(key);
let leader = cluster.leader_of_region(region.get_id()).unwrap();
if leader.get_store_id() == self.ctx.get_peer().get_store_id() {
return;
}
let engine = cluster.sim.rl().storages[&leader.get_id()].clone();
self.ctx.set_region_id(region.get_id());
self.ctx.set_region_epoch(region.get_region_epoch().clone());
self.ctx.set_peer(leader);
self.store = SyncTestStorageBuilder::from_engine(engine).build().unwrap();
}
pub fn delete_ok_for_cluster(
&mut self,
cluster: &mut Cluster<ServerCluster>,
key: &[u8],
start_ts: impl Into<TimeStamp>,
commit_ts: impl Into<TimeStamp>,
) {
let mutations = vec![Mutation::Delete(Key::from_raw(key))];
let commit_keys = vec![Key::from_raw(key)];
self.two_pc_ok_for_cluster(
cluster,
mutations,
key,
commit_keys,
start_ts.into(),
commit_ts.into(),
);
}
    fn get_from_cluster(
&mut self,
cluster: &mut Cluster<ServerCluster>,
key: &[u8],
ts: impl Into<TimeStamp>,
) -> Option<Value> {
let ts = ts.into();
for _ in 0..3 {
let res = self.store.get(self.ctx.clone(), &Key::from_raw(key), ts);
if let Ok((data, ..)) = res {
return data;
}
self.expect_not_leader_or_stale_command(res.unwrap_err());
self.update_with_key_byte(cluster, key);
}
panic!("failed with 3 try");
}
pub fn get_none_from_cluster(
&mut self,
cluster: &mut Cluster<ServerCluster>,
key: &[u8],
ts: impl Into<TimeStamp>,
) {
        assert_eq!(self.get_from_cluster(cluster, key, ts), None);
}
pub fn put_ok_for_cluster(
&mut self,
cluster: &mut Cluster<ServerCluster>,
key: &[u8],
value: &[u8],
start_ts: impl Into<TimeStamp>,
commit_ts: impl Into<TimeStamp>,
) {
let mutations = vec![Mutation::Put((Key::from_raw(key), value.to_vec()))];
let commit_keys = vec![Key::from_raw(key)];
self.two_pc_ok_for_cluster(cluster, mutations, key, commit_keys, start_ts, commit_ts);
}
fn two_pc_ok_for_cluster(
&mut self,
cluster: &mut Cluster<ServerCluster>,
prewrite_mutations: Vec<Mutation>,
key: &[u8],
commit_keys: Vec<Key>,
start_ts: impl Into<TimeStamp>,
commit_ts: impl Into<TimeStamp>,
) {
let retry_time = 3;
let mut success = false;
let start_ts = start_ts.into();
for _ in 0..retry_time {
let res = self.store.prewrite(
self.ctx.clone(),
prewrite_mutations.clone(),
key.to_vec(),
start_ts,
);
if res.is_ok() {
success = true;
break;
}
self.expect_not_leader_or_stale_command(res.unwrap_err());
self.update_with_key_byte(cluster, key)
}
assert!(success);
success = false;
let commit_ts = commit_ts.into();
for _ in 0..retry_time {
let res = self
.store
.commit(self.ctx.clone(), commit_keys.clone(), start_ts, commit_ts);
if res.is_ok() {
success = true;
break;
}
self.expect_not_leader_or_stale_command(res.unwrap_err());
self.update_with_key_byte(cluster, key)
}
assert!(success);
}
pub fn gc_ok_for_cluster(
&mut self,
cluster: &mut Cluster<ServerCluster>,
region_key: &[u8],
safe_point: impl Into<TimeStamp>,
) {
let safe_point = safe_point.into();
for _ in 0..3 {
let ret = self.store.gc(self.ctx.clone(), safe_point);
if ret.is_ok() {
return;
}
self.expect_not_leader_or_stale_command(ret.unwrap_err());
self.update_with_key_byte(cluster, region_key);
}
panic!("failed with 3 retry!");
}
pub fn test_txn_store_gc3_for_cluster(
&mut self,
cluster: &mut Cluster<ServerCluster>,
key_prefix: u8,
) {
let key_len = 10_000;
let key = vec![key_prefix; 1024];
for k in 1u64..(MAX_TXN_WRITE_SIZE / key_len * 2) as u64 {
self.put_ok_for_cluster(cluster, &key, b"", k * 10, k * 10 + 5);
}
self.delete_ok_for_cluster(cluster, &key, 1000, 1050);
self.get_none_from_cluster(cluster, &key, 2000);
self.gc_ok_for_cluster(cluster, &key, 2000);
self.get_none_from_cluster(cluster, &key, 3000);
}
}
impl<E: Engine> AssertionStorage<E> {
pub fn get_none(&self, key: &[u8], ts: impl Into<TimeStamp>) {
let key = Key::from_raw(key);
assert_eq!(
self.store.get(self.ctx.clone(), &key, ts.into()).unwrap().0,
None
);
}
pub fn get_err(&self, key: &[u8], ts: impl Into<TimeStamp>) {
let key = Key::from_raw(key);
assert!(self.store.get(self.ctx.clone(), &key, ts.into()).is_err());
}
pub fn get_ok(&self, key: &[u8], ts: impl Into<TimeStamp>, expect: &[u8]) {
let key = Key::from_raw(key);
assert_eq!(
self.store
.get(self.ctx.clone(), &key, ts.into())
.unwrap()
.0
.unwrap(),
expect
);
}
pub fn batch_get_ok(&self, keys: &[&[u8]], ts: impl Into<TimeStamp>, expect: Vec<&[u8]>) {
let keys: Vec<Key> = keys.iter().map(|x| Key::from_raw(x)).collect();
let result: Vec<Vec<u8>> = self
.store
.batch_get(self.ctx.clone(), &keys, ts.into())
.unwrap()
.0
.into_iter()
.map(|x| x.unwrap().1)
.collect();
let expect: Vec<Vec<u8>> = expect.into_iter().map(|x| x.to_vec()).collect();
assert_eq!(result, expect);
}
pub fn batch_get_command_ok(&self, keys: &[&[u8]], ts: u64, expect: Vec<&[u8]>) {
let result: Vec<Option<Vec<u8>>> = self
.store
.batch_get_command(self.ctx.clone(), &keys, ts)
.unwrap()
.into_iter()
.map(|(x, ..)| x)
.collect();
let expect: Vec<Option<Vec<u8>>> = expect
.into_iter()
.map(|x| if x.is_empty() { None } else { Some(x.to_vec()) })
.collect();
assert_eq!(result, expect);
}
fn expect_not_leader_or_stale_command(&self, err: storage::Error) {
match err {
StorageError(box StorageErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(
MvccError(box MvccErrorInner::Engine(KvError(box KvErrorInner::Request(ref e)))),
))))
| StorageError(box StorageErrorInner::Txn(TxnError(box TxnErrorInner::Engine(
KvError(box KvErrorInner::Request(ref e)),
))))
| StorageError(box StorageErrorInner::Engine(KvError(box KvErrorInner::Request(
ref e,
)))) => {
assert!(
e.has_not_leader() | e.has_stale_command(),
"invalid error {:?}",
e
);
}
_ => {
panic!(
"expect not leader error or stale command, but got {:?}",
err
);
}
}
}
fn expect_invalid_tso_err<T>(
&self,
resp: Result<T, storage::Error>,
sts: impl Into<TimeStamp>,
cmt_ts: impl Into<TimeStamp>,
) where
T: std::fmt::Debug,
{
assert!(resp.is_err());
let err = resp.unwrap_err();
match err {
StorageError(box StorageErrorInner::Txn(TxnError(
box TxnErrorInner::InvalidTxnTso {
start_ts,
commit_ts,
},
))) => {
assert_eq!(sts.into(), start_ts);
assert_eq!(cmt_ts.into(), commit_ts);
}
_ => {
panic!("expect invalid tso error, but got {:?}", err);
}
}
}
pub fn put_ok(
&self,
key: &[u8],
value: &[u8],
start_ts: impl Into<TimeStamp>,
commit_ts: impl Into<TimeStamp>,
) {
let start_ts = start_ts.into();
self.store
.prewrite(
self.ctx.clone(),
vec![Mutation::Put((Key::from_raw(key), value.to_vec()))],
key.to_vec(),
start_ts,
)
.unwrap();
self.store
.commit(
self.ctx.clone(),
vec![Key::from_raw(key)],
start_ts,
commit_ts.into(),
)
.unwrap();
}
pub fn delete_ok(
&self,
key: &[u8],
start_ts: impl Into<TimeStamp>,
commit_ts: impl Into<TimeStamp>,
) {
let start_ts = start_ts.into();
self.store
.prewrite(
self.ctx.clone(),
vec![Mutation::Delete(Key::from_raw(key))],
key.to_vec(),
start_ts,
)
.unwrap();
self.store
.commit(
self.ctx.clone(),
vec![Key::from_raw(key)],
start_ts,
commit_ts.into(),
)
.unwrap();
}
pub fn scan_ok(
&self,
start_key: &[u8],
limit: usize,
ts: impl Into<TimeStamp>,
expect: Vec<Option<(&[u8], &[u8])>>,
) {
let key_address = Key::from_raw(start_key);
let result = self
.store
.scan(self.ctx.clone(), key_address, None, limit, false, ts.into())
.unwrap();
let result: Vec<Option<KvPair>> = result.into_iter().map(Result::ok).collect();
let expect: Vec<Option<KvPair>> = expect
.into_iter()
.map(|x| x.map(|(k, v)| (k.to_vec(), v.to_vec())))
.collect();
assert_eq!(result, expect);
}
pub fn reverse_scan_ok(
&self,
start_key: &[u8],
limit: usize,
ts: impl Into<TimeStamp>,
expect: Vec<Option<(&[u8], &[u8])>>,
) {
let key_address = Key::from_raw(start_key);
let result = self
.store
.reverse_scan(self.ctx.clone(), key_address, None, limit, false, ts.into())
.unwrap();
let result: Vec<Option<KvPair>> = result.into_iter().map(Result::ok).collect();
let expect: Vec<Option<KvPair>> = expect
.into_iter()
.map(|x| x.map(|(k, v)| (k.to_vec(), v.to_vec())))
.collect();
assert_eq!(result, expect);
}
pub fn scan_key_only_ok(
&self,
start_key: &[u8],
limit: usize,
ts: impl Into<TimeStamp>,
expect: Vec<Option<&[u8]>>,
) {
let key_address = Key::from_raw(start_key);
let result = self
.store
.scan(self.ctx.clone(), key_address, None, limit, true, ts.into())
.unwrap();
let result: Vec<Option<KvPair>> = result.into_iter().map(Result::ok).collect();
let expect: Vec<Option<KvPair>> = expect
.into_iter()
.map(|x| x.map(|k| (k.to_vec(), vec![])))
.collect();
assert_eq!(result, expect);
}
pub fn prewrite_ok(
&self,
mutations: Vec<Mutation>,
primary: &[u8],
start_ts: impl Into<TimeStamp>,
) {
self.store
.prewrite(
self.ctx.clone(),
mutations,
primary.to_vec(),
start_ts.into(),
)
.unwrap();
}
pub fn prewrite_err(
&self,
mutations: Vec<Mutation>,
primary: &[u8],
start_ts: impl Into<TimeStamp>,
) {
self.store
.prewrite(
self.ctx.clone(),
mutations,
primary.to_vec(),
start_ts.into(),
)
.unwrap_err();
}
pub fn prewrite_locked(
&self,
mutations: Vec<Mutation>,
primary: &[u8],
start_ts: impl Into<TimeStamp>,
expect_locks: Vec<(&[u8], &[u8], TimeStamp)>,
) {
let res = self
.store
.prewrite(
self.ctx.clone(),
mutations,
primary.to_vec(),
start_ts.into(),
)
.unwrap();
let locks: Vec<(&[u8], &[u8], TimeStamp)> = res
.locks
.iter()
.filter_map(|x| {
if let Err(StorageError(box StorageErrorInner::Txn(TxnError(
box TxnErrorInner::Mvcc(MvccError(box MvccErrorInner::KeyIsLocked(info))),
)))) = x
{
Some((
info.get_key(),
info.get_primary_lock(),
info.get_lock_version().into(),
))
} else {
None
}
})
.collect();
assert_eq!(expect_locks, locks);
}
pub fn prewrite_conflict(
&self,
mutations: Vec<Mutation>,
cur_primary: &[u8],
cur_start_ts: impl Into<TimeStamp>,
confl_key: &[u8],
confl_ts: impl Into<TimeStamp>,
) {
let cur_start_ts = cur_start_ts.into();
let err = self
.store
.prewrite(
self.ctx.clone(),
mutations,
cur_primary.to_vec(),
cur_start_ts,
)
.unwrap_err();
match err {
StorageError(box StorageErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(
MvccError(box MvccErrorInner::WriteConflict {
start_ts,
conflict_start_ts,
ref key,
ref primary,
..
}),
)))) => {
assert_eq!(cur_start_ts, start_ts);
assert_eq!(confl_ts.into(), conflict_start_ts);
assert_eq!(key.to_owned(), confl_key.to_owned());
assert_eq!(primary.to_owned(), cur_primary.to_owned());
}
_ => {
panic!("expect conflict error, but got {:?}", err);
}
}
}
pub fn commit_ok(
&self,
keys: Vec<&[u8]>,
start_ts: impl Into<TimeStamp>,
commit_ts: impl Into<TimeStamp>,
actual_commit_ts: impl Into<TimeStamp>,
) {
let keys: Vec<Key> = keys.iter().map(|x| Key::from_raw(x)).collect();
let txn_status = self
.store
.commit(self.ctx.clone(), keys, start_ts.into(), commit_ts.into())
.unwrap();
assert_eq!(txn_status, TxnStatus::committed(actual_commit_ts.into()));
}
pub fn commit_with_illegal_tso(
&self,
keys: Vec<&[u8]>,
start_ts: impl Into<TimeStamp>,
commit_ts: impl Into<TimeStamp>,
) {
let start_ts = start_ts.into();
let commit_ts = commit_ts.into();
let keys: Vec<Key> = keys.iter().map(|x| Key::from_raw(x)).collect();
let resp = self
.store
.commit(self.ctx.clone(), keys, start_ts, commit_ts);
self.expect_invalid_tso_err(resp, start_ts, commit_ts);
}
pub fn cleanup_ok(
&self,
key: &[u8],
start_ts: impl Into<TimeStamp>,
current_ts: impl Into<TimeStamp>,
) {
self.store
.cleanup(
self.ctx.clone(),
Key::from_raw(key),
start_ts.into(),
current_ts.into(),
)
.unwrap();
}
pub fn cleanup_err(
&self,
key: &[u8],
start_ts: impl Into<TimeStamp>,
current_ts: impl Into<TimeStamp>,
) {
assert!(
self.store
.cleanup(
self.ctx.clone(),
Key::from_raw(key),
start_ts.into(),
current_ts.into()
)
.is_err()
);
}
pub fn rollback_ok(&self, keys: Vec<&[u8]>, start_ts: impl Into<TimeStamp>) {
let keys: Vec<Key> = keys.iter().map(|x| Key::from_raw(x)).collect();
self.store
.rollback(self.ctx.clone(), keys, start_ts.into())
.unwrap();
}
pub fn rollback_err(&self, keys: Vec<&[u8]>, start_ts: impl Into<TimeStamp>) {
let keys: Vec<Key> = keys.iter().map(|x| Key::from_raw(x)).collect();
assert!(
self.store
.rollback(self.ctx.clone(), keys, start_ts.into())
.is_err()
);
}
pub fn scan_locks_ok(
&self,
max_ts: impl Into<TimeStamp>,
start_key: &[u8],
end_key: &[u8],
limit: usize,
expect: Vec<LockInfo>,
) {
let start_key = if start_key.is_empty() {
None
} else {
Some(Key::from_raw(&start_key))
};
let end_key = if end_key.is_empty() {
None
} else {
Some(Key::from_raw(&end_key))
};
assert_eq!(
self.store
.scan_locks(self.ctx.clone(), max_ts.into(), start_key, end_key, limit)
.unwrap(),
expect
);
}
pub fn resolve_lock_ok(
&self,
start_ts: impl Into<TimeStamp>,
|
.unwrap();
}
pub fn resolve_lock_batch_ok(
&self,
start_ts_1: impl Into<TimeStamp>,
commit_ts_1: impl Into<TimeStamp>,
start_ts_2: impl Into<TimeStamp>,
commit_ts_2: impl Into<TimeStamp>,
) {
self.store
.resolve_lock_batch(
self.ctx.clone(),
vec![
(start_ts_1.into(), commit_ts_1.into()),
(start_ts_2.into(), commit_ts_2.into()),
],
)
.unwrap();
}
pub fn resolve_lock_with_illegal_tso(
&self,
start_ts: impl Into<TimeStamp>,
commit_ts: Option<impl Into<TimeStamp>>,
) {
let start_ts = start_ts.into();
let commit_ts = commit_ts.map(Into::into);
let resp = self
.store
.resolve_lock(self.ctx.clone(), start_ts, commit_ts);
self.expect_invalid_tso_err(resp, start_ts, commit_ts.unwrap())
}
pub fn gc_ok(&self, safe_point: impl Into<TimeStamp>) {
self.store.gc(self.ctx.clone(), safe_point.into()).unwrap();
}
pub fn raw_get_ok(&self, cf: String, key: Vec<u8>, value: Option<Vec<u8>>) {
assert_eq!(
self.store.raw_get(self.ctx.clone(), cf, key).unwrap(),
value
);
}
pub fn raw_put_ok(&self, cf: String, key: Vec<u8>, value: Vec<u8>) {
self.store
.raw_put(self.ctx.clone(), cf, key, value)
.unwrap();
}
pub fn raw_put_err(&self, cf: String, key: Vec<u8>, value: Vec<u8>) {
self.store
.raw_put(self.ctx.clone(), cf, key, value)
.unwrap_err();
}
pub fn raw_delete_ok(&self, cf: String, key: Vec<u8>) {
self.store.raw_delete(self.ctx.clone(), cf, key).unwrap()
}
pub fn raw_delete_err(&self, cf: String, key: Vec<u8>) {
self.store
.raw_delete(self.ctx.clone(), cf, key)
.unwrap_err();
}
pub fn raw_scan_ok(
&self,
cf: String,
start_key: Vec<u8>,
limit: usize,
expect: Vec<(&[u8], &[u8])>,
) {
let result: Vec<KvPair> = self
.store
.raw_scan(self.ctx.clone(), cf, start_key, None, limit)
.unwrap()
.into_iter()
.map(|x| x.unwrap())
.collect();
let expect: Vec<KvPair> = expect
.into_iter()
.map(|(k, v)| (k.to_vec(), v.to_vec()))
.collect();
assert_eq!(result, expect);
}
pub fn test_txn_store_gc(&self, key: &str) {
let key_bytes = key.as_bytes();
self.put_ok(key_bytes, b"v1", 5, 10);
self.put_ok(key_bytes, b"v2", 15, 20);
self.gc_ok(30);
self.get_none(key_bytes, 15);
self.get_ok(key_bytes, 25, b"v2");
}
pub fn test_txn_store_gc3(&self, key_prefix: u8) {
let key_len = 10_000;
let key = vec![key_prefix; 1024];
for k in 1u64..(MAX_TXN_WRITE_SIZE / key_len * 2) as u64 {
self.put_ok(&key, b"", k * 10, k * 10 + 5);
}
self.delete_ok(&key, 1000, 1050);
self.get_none(&key, 2000);
self.gc_ok(2000);
self.get_none(&key, 3000);
}
}
|
commit_ts: Option<impl Into<TimeStamp>>,
) {
self.store
.resolve_lock(self.ctx.clone(), start_ts.into(), commit_ts.map(Into::into))
|
skill-manager.d.ts
|
type DescriptionDictionary = { [key: string]: string | undefined };
type EnvironmentDictionary = { [key: string]: string[] | undefined };
type RoomDictionary = { [key: string]: string[] | undefined };
type SkillManagerSkillName = keyof SkillManagerSkills['class'] | keyof SkillManagerSkills['general'] | keyof SkillManagerSkills['trade'];
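// For illustration: given the interfaces declared below, this union resolves to
// 'pranks' | 'puppetry' | 'tarot' | 'tekura' | 'telepathy' | 'harvesting' | 'transmutation' | 'gathering' | 'collecting' | 'inkmilling'
// (SkillManagerGeneralSkills is currently empty, so it contributes no keys).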
interface SkillManagerSkill {
active: boolean;
}
interface SkillManagerClassSkill extends SkillManagerSkill {
classes: string[];
}
interface SkillManagerGeneralSkill extends SkillManagerSkill {
}
interface SkillManagerTradeSkill extends SkillManagerSkill {
}
//#region Pranks
type SkillManagerPranksProps = 'balloon' | 'blackjack' | 'itchpowder' | 'mickey';
type SkillManagerPranksPropAmount = 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10;
type SkillManagerPranksIllusionColour = 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 'lethal';
type SkillManagerPranksRunAwayDistance = 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11;
interface SkillManagerPranks extends SkillManagerClassSkill {
handspring(target: string): string;
// props(): void;
// wishForProp(prop: SkillManagerPranksProps, amount?: SkillManagerPranksPropAmount): void;
// inflateBalloon(): void;
// slipperiness(): void;
// bop(target?: string | number): void;
// inflateBalloonIntoGiraffe(): void;
// mountGiraffe(): void;
// stepIntoTrees(): void;
// backflip(direction: string): void;
// hocuspocus(illusion: string, delay: number, colour?: SkillManagerPranksIllusionColour): void;
// runAway(direction: string, distance?: SkillManagerPranksRunAwayDistance): void;
// balloonHandoff(target?: string): void;
// backHandspring(target: string, direction: string): void;
}
interface SkillManagerPranksSettings {
enabled: boolean;
}
//#endregion
//#region Puppetry
interface SkillManagerPuppetry extends SkillManagerClassSkill {
}
interface SkillManagerPuppetrySettings {
enabled: boolean;
}
//#endregion
//#region Tarot
interface SkillManagerTarotInscribingQueue {
sun: number;
emperor: number;
magician: number;
priestess: number;
fool: number;
chariot: number;
hermit: number;
empress: number;
lovers: number;
hierophant: number;
hangedman: number;
tower: number;
wheel: number;
creator: number;
justice: number;
star: number;
aeon: number;
lust: number;
universe: number;
devil: number;
moon: number;
death: number;
}
interface SkillManagerTarotInscribing {
active: boolean;
runningQueue: boolean;
amount: number;
queue: SkillManagerTarotInscribingQueue;
start(): void;
stop(): void;
reset(): void;
runQueue(): void;
}
interface SkillManagerTarot extends SkillManagerClassSkill {
cards: string[];
descriptionDictionary: DescriptionDictionary;
inscribing: SkillManagerTarotInscribing;
}
interface SkillManagerTarotSettings {
enabled: boolean;
}
//#endregion
//#region Tekura
interface SkillManagerTekura extends SkillManagerClassSkill {
}
interface SkillManagerTekuraSettings {
enabled: boolean;
}
//#endregion
//#region Telepathy
interface SkillManagerTelepathy extends SkillManagerClassSkill {
}
interface SkillManagerTelepathySettings {
enabled: boolean;
}
//#endregion
//#region Harvesting
interface SkillManagerHarvesting extends SkillManagerTradeSkill {
harvestables: string[];
descriptionDictionary: DescriptionDictionary;
}
interface SkillManagerHarvestingSettings {
enabled: boolean;
}
//#endregion
//#region Transmutation
interface SkillManagerTransmutation extends SkillManagerTradeSkill {
extractables: string[];
descriptionDictionary: DescriptionDictionary;
}
interface SkillManagerTransmutationSettings {
enabled: boolean;
}
//#endregion
//#region Gathering
interface SkillManagerButchering {
running: boolean;
descriptionDictionary: DescriptionDictionary;
itemToRewield?: string;
start(): void;
stop(): void;
butcher(): void;
}
interface SkillManagerGathering extends SkillManagerTradeSkill {
butchering: SkillManagerButchering;
gatherables: string[];
environmentDictionary: EnvironmentDictionary;
descriptionDictionary: DescriptionDictionary;
getGatherables(): string[];
}
interface SkillManagerGatheringSettings {
enabled: boolean;
cleaverId?: string;
}
//#endregion
//#region Collecting
interface SkillManagerCollecting extends SkillManagerTradeSkill {
running: boolean;
automatic: boolean;
waitingForPlants: boolean;
waitingForMinerals: boolean;
queue: Set<string>;
day: string;
rooms: RoomDictionary;
start(): void;
stop(): void;
clear(): void;
collect(): void;
tryCollect(): void;
onPlant(args: TriggerFunctionArgs & { 1: string }): void;
onMineral(args: TriggerFunctionArgs & { 1: string }): void;
onCollected(args: TriggerFunctionArgs & { 1?: string; 2: string; }): void;
}
interface SkillManagerCollectingSettings {
enabled: boolean;
}
//#endregion
//#region Inkmilling
interface SkillManagerInkmillingQueue {
red: number;
blue: number;
yellow: number;
green: number;
purple: number;
gold: number;
black: number;
}
interface SkillManagerInkmillingInkReagents {
red?: number;
blue?: number;
yellow?: number;
gold?: number;
common?: number;
uncommon?: number;
scarce?: number;
rare?: number;
}
interface SkillManagerInkmillingInks {
red: SkillManagerInkmillingInkReagents;
blue: SkillManagerInkmillingInkReagents;
yellow: SkillManagerInkmillingInkReagents;
green: SkillManagerInkmillingInkReagents;
purple: SkillManagerInkmillingInkReagents;
gold: SkillManagerInkmillingInkReagents;
black: SkillManagerInkmillingInkReagents;
}
type ReagentDictionary = { [key: string]: string[] };
interface SkillManagerInkmilling extends SkillManagerTradeSkill {
running: boolean;
runningQueue: boolean;
queue: SkillManagerInkmillingQueue;
reagents: ReagentDictionary;
inks: SkillManagerInkmillingInks;
descriptionDictionary: DescriptionDictionary;
mill(colour: string, amount?: number): string;
start(): void;
stop(): void;
reset(): void;
runQueue(): void;
}
interface SkillManagerInkmillingSettings {
enabled: boolean;
millId?: string;
}
//#endregion
//#region Settings
interface SkillManagerClassSkillSettings {
pranks: SkillManagerPranksSettings;
puppetry: SkillManagerPuppetrySettings;
tarot: SkillManagerTarotSettings;
tekura: SkillManagerTekuraSettings;
telepathy: SkillManagerTelepathySettings;
}
interface SkillManagerGeneralSkillSettings {
}
interface SkillManagerTradeSkillSettings {
harvesting: SkillManagerHarvestingSettings;
transmutation: SkillManagerTransmutationSettings;
gathering: SkillManagerGatheringSettings;
collecting: SkillManagerCollectingSettings;
inkmilling: SkillManagerInkmillingSettings;
|
interface SkillManagerSettings {
enabled: boolean;
class: SkillManagerClassSkillSettings;
general: SkillManagerGeneralSkillSettings;
trade: SkillManagerTradeSkillSettings;
}
//#endregion
interface SkillManagerClassSkills {
pranks: SkillManagerPranks;
puppetry: SkillManagerPuppetry;
tarot: SkillManagerTarot;
tekura: SkillManagerTekura;
telepathy: SkillManagerTelepathy;
}
interface SkillManagerGeneralSkills {
}
interface SkillManagerTradeSkills {
harvesting: SkillManagerHarvesting;
transmutation: SkillManagerTransmutation;
gathering: SkillManagerGathering;
collecting: SkillManagerCollecting;
inkmilling: SkillManagerInkmilling;
}
interface SkillManagerSkills {
class: SkillManagerClassSkills;
general: SkillManagerGeneralSkills;
trade: SkillManagerTradeSkills;
}
interface SkillManager {
settings: SkillManagerSettings;
skills: SkillManagerSkills;
echo(message: string): void;
error(text: string): void;
save(): void;
onAbility<Skill extends SkillManagerSkillName>(
skill: Skill,
ability: string,
command: string | undefined,
eventOrEvents: string | string[],
args: TriggerFunctionArgs | MultiLineTriggerFunctionArgs
): void;
}
export type SkillManagerClient = typeof client & {
skillManager: SkillManager;
};
|
}
|
_version.ts
|
// @ts-ignore
|
try{self['workbox:range-requests:6.5.3']&&_()}catch(e){}
| |
test_reporters.py
|
import sys
from typing import List
import pytest
from region_profiler import RegionProfiler
from region_profiler import reporter_columns as cols
from region_profiler.reporters import (
ConsoleReporter,
CsvReporter,
SilentReporter,
Slice,
get_profiler_slice,
)
from region_profiler.utils import SeqStatsProtocol
class FixedStats(SeqStatsProtocol):
def __init__(self, count, total, min, max):
self.count = count
self.total = total
self.min = min
self.max = max
def add(self, x: float):
raise NotImplementedError
@pytest.fixture()
def dummy_region_profiler():
"""Generate dummy region profiler to test reporters."""
rp = RegionProfiler()
with rp.region("a"):
with rp.region("b"):
pass
for n in ["c", "d"]:
with rp.region(n):
with rp.region("x"):
pass
rp.root.stats = FixedStats(1, 100, 100, 100)
a = rp.root.children["a"]
a.stats = FixedStats(1, 90, 90, 90)
a.children["b"].stats = FixedStats(4, 20, 2, 10)
a.children["c"].stats = FixedStats(2, 30, 10, 20)
a.children["c"].children["x"].stats = FixedStats(2, 10, 5, 5)
a.children["d"].stats = FixedStats(1, 25, 25, 25)
a.children["d"].children["x"].stats = FixedStats(1, 10, 10, 10)
return rp
def test_slice_generation(dummy_region_profiler):
"""Test that node tree is properly serialized in a list."""
expected = [
Slice(0, RegionProfiler.ROOT_NODE_NAME, None, 0, 1, 100, 10, 100, 100),
Slice(1, "a", None, 1, 1, 90, 15, 90, 90),
Slice(2, "c", None, 2, 2, 30, 20, 10, 20),
Slice(3, "x", None, 3, 2, 10, 10, 5, 5),
Slice(4, "d", None, 2, 1, 25, 15, 25, 25),
Slice(5, "x", None, 3, 1, 10, 10, 10, 10),
Slice(6, "b", None, 2, 4, 20, 20, 2, 10),
]
expected[1].parent = expected[0]
expected[2].parent = expected[1]
expected[3].parent = expected[2]
expected[4].parent = expected[1]
expected[5].parent = expected[4]
expected[6].parent = expected[1]
slices = get_profiler_slice(dummy_region_profiler)
assert slices == expected
def test_silent_reporter(dummy_region_profiler):
"""Test :py:class:`SilentReporter` reporter."""
r = SilentReporter([cols.name, cols.node_id, cols.parent_id, cols.total_us])
r.dump_profiler(dummy_region_profiler)
expected = [
["name", "id", "parent_id", "total_us"],
[RegionProfiler.ROOT_NODE_NAME, "0", "", "100000000"],
["a", "1", "0", "90000000"],
["c", "2", "1", "30000000"],
["x", "3", "2", "10000000"],
["d", "4", "1", "25000000"],
["x", "5", "4", "10000000"],
["b", "6", "1", "20000000"],
]
assert r.rows == expected
def test_console_reporter(dummy_region_profiler, capsys):
|
def test_csv_reporter(dummy_region_profiler, capsys):
"""Test :py:class:`CsvReporter` reporter."""
r = CsvReporter(
[cols.name, cols.node_id, cols.parent_id, cols.total_us], stream=sys.stdout
)
r.dump_profiler(dummy_region_profiler)
expected = [
["name", "id", "parent_id", "total_us"],
[RegionProfiler.ROOT_NODE_NAME, "0", "", "100000000"],
["a", "1", "0", "90000000"],
["c", "2", "1", "30000000"],
["x", "3", "2", "10000000"],
["d", "4", "1", "25000000"],
["x", "5", "4", "10000000"],
["b", "6", "1", "20000000"],
]
output, err = capsys.readouterr()
output = [[c.strip() for c in r.split(",")] for r in output.strip().split("\n")]
assert len(output) == len(expected)
for row, expected_vals in zip(output, expected):
assert len(row) == len(expected_vals)
for col, v in zip(row, expected_vals):
assert col == v
|
"""Test :py:class:`ConsoleReporter` reporter."""
r = ConsoleReporter(
[cols.name, cols.node_id, cols.parent_id, cols.total_us], stream=sys.stdout
)
r.dump_profiler(dummy_region_profiler)
expected: List[List[str]] = [
["name", "id", "parent id", "total us"],
[],
[RegionProfiler.ROOT_NODE_NAME, "0", "", "100000000"],
["a", "1", "0", "90000000"],
["c", "2", "1", "30000000"],
["x", "3", "2", "10000000"],
["d", "4", "1", "25000000"],
["x", "5", "4", "10000000"],
["b", "6", "1", "20000000"],
]
output, err = capsys.readouterr()
output = output.strip().split("\n")
assert len(output) == len(expected)
for row, expected_vals in zip(output, expected):
assert len(row) == len(output[0])
for v in expected_vals:
assert v in row
|
provider.go
|
package office365
import (
"context"
"log"
"os"
"terraform-provider-office365/client"
t "terraform-provider-office365/token"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func Provider() *schema.Provider {
return &schema.Provider{
Schema: map[string]*schema.Schema{
"client_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("OFFICE365_CLIENT_ID", ""),
},
"client_secret": &schema.Schema{
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("OFFICE365_CLIENT_SECRET", ""),
},
"tenant_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("OFFICE365_TENANT_ID", ""),
},
},
ResourcesMap: map[string]*schema.Resource{
"office365_user": resourceUser(),
"office365_user_license": resourceUserLicense(),
},
DataSourcesMap: map[string]*schema.Resource{
"office365_user": dataSourceUsers(),
},
ConfigureContextFunc: providerConfigure,
}
}
func
|
(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) {
log.Println("provider.go called")
var diags diag.Diagnostics
	clientId := d.Get("client_id").(string)
clientSecret := d.Get("client_secret").(string)
tenantId := d.Get("tenant_id").(string)
	if clientId == "" || clientSecret == "" || tenantId == "" {
diags = append(diags, diag.Diagnostic{
Severity: diag.Error,
			Summary:  "Please re-check your credentials",
			Detail:   "Please re-check your credentials",
})
return nil, diags
}
	bearer, err := t.GetToken(clientId, clientSecret, tenantId)
if err != nil {
os.Setenv("bearer", "")
diags = append(diags, diag.Diagnostic{
Severity: diag.Error,
Summary: err.Error(),
Detail: "please enter valid credentials",
})
return nil, diags
}
return client.NewClient(bearer), diags
}
|
providerConfigure
|
permission_schema.py
|
'''Copyright 2018 Province of British Columbia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
import toastedmarshmallow
from marshmallow import fields
from app.models.theq import Permission
from qsystem import ma
class PermissionSchema(ma.SQLAlchemySchema):
class Meta:
model = Permission
include_relationships = True
load_instance = True
jit = toastedmarshmallow.Jit
permission_id = fields.Int()
permission_code = fields.Str()
|
permission_desc = fields.Str()
| |
interface.go
|
// /*
// Copyright The Kubernetes Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// */
//
// Code generated by MockGen. DO NOT EDIT.
// Source: pkg/azureclients/privatednszonegroupclient/interface.go
// Package mockprivatednszonegroupclient is a generated GoMock package.
package mockprivatednszonegroupclient
import (
context "context"
reflect "reflect"
network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
gomock "github.com/golang/mock/gomock"
)
// MockInterface is a mock of Interface interface.
type MockInterface struct {
ctrl *gomock.Controller
recorder *MockInterfaceMockRecorder
}
// MockInterfaceMockRecorder is the mock recorder for MockInterface.
type MockInterfaceMockRecorder struct {
mock *MockInterface
}
// NewMockInterface creates a new mock instance.
func NewMockInterface(ctrl *gomock.Controller) *MockInterface
|
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
return m.recorder
}
// CreateOrUpdate mocks base method.
func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, privateEndpointName, privateDNSZoneGroupName string, parameters network.PrivateDNSZoneGroup, waitForCompletion bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, privateEndpointName, privateDNSZoneGroupName, parameters, waitForCompletion)
ret0, _ := ret[0].(error)
return ret0
}
// CreateOrUpdate indicates an expected call of CreateOrUpdate.
func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, privateEndpointName, privateDNSZoneGroupName, parameters, waitForCompletion interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, privateEndpointName, privateDNSZoneGroupName, parameters, waitForCompletion)
}
// Get mocks base method.
func (m *MockInterface) Get(ctx context.Context, resourceGroupName, privateEndpointName, privateDNSZoneGroupName string) (network.PrivateDNSZoneGroup, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, privateEndpointName, privateDNSZoneGroupName)
ret0, _ := ret[0].(network.PrivateDNSZoneGroup)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Get indicates an expected call of Get.
func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, privateEndpointName, privateDNSZoneGroupName interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, privateEndpointName, privateDNSZoneGroupName)
}
|
{
mock := &MockInterface{ctrl: ctrl}
mock.recorder = &MockInterfaceMockRecorder{mock}
return mock
}
|
0014_add_node_uuid_unique_constraint.py
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name
"""Add a uniqueness constraint to the uuid column of DbNode table."""
from django.db import migrations, models
from aiida.backends.djsite.db.migrations import upgrade_schema_version
from aiida.common.utils import get_new_uuid
REVISION = '1.0.14'
DOWN_REVISION = '1.0.13'
def verify_node_uuid_uniqueness(_, __):
|
def reverse_code(_, __):
pass
class Migration(migrations.Migration):
"""Add a uniqueness constraint to the uuid column of DbNode table."""
dependencies = [
('db', '0013_django_1_8'),
]
operations = [
migrations.RunPython(verify_node_uuid_uniqueness, reverse_code=reverse_code),
migrations.AlterField(
model_name='dbnode',
name='uuid',
field=models.CharField(max_length=36, default=get_new_uuid, unique=True),
),
upgrade_schema_version(REVISION, DOWN_REVISION)
]
|
"""Check whether the database contains nodes with duplicate UUIDS.
Note that we have to redefine this method from aiida.manage.database.integrity.verify_node_uuid_uniqueness
    because the migrations.RunPython command that will invoke this function will pass two arguments, and
    therefore this wrapper needs to have a different function signature.
:raises: IntegrityError if database contains nodes with duplicate UUIDS.
"""
from aiida.backends.general.migrations.duplicate_uuids import verify_uuid_uniqueness
verify_uuid_uniqueness(table='db_dbnode')
|
DiagnosticsPrinter.ts
|
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
import {
Diagnostic,
DiagnosticAdvice,
DiagnosticLanguage,
DiagnosticSourceType,
Diagnostics,
DiagnosticsProcessor,
deriveRootAdviceFromDiagnostic,
} from '@romejs/diagnostics';
import {Reporter} from '@romejs/cli-reporter';
import {
DiagnosticsFileReader,
DiagnosticsFileReaderStats,
DiagnosticsPrinterFlags,
DiagnosticsPrinterOptions,
} from './types';
import {
formatAnsi,
markup,
markupToPlainTextString,
} from '@romejs/string-markup';
import {toLines} from './utils';
import printAdvice from './printAdvice';
import successBanner from './banners/success.json';
import errorBanner from './banners/error.json';
import {
AbsoluteFilePath,
AbsoluteFilePathSet,
UnknownFilePath,
UnknownFilePathMap,
UnknownFilePathSet,
createAbsoluteFilePath,
createUnknownFilePath,
} from '@romejs/path';
import {Number0, Number1} from '@romejs/ob1';
import {existsSync, lstatSync, readFileTextSync} from '@romejs/fs';
type Banner = {
// Array<number> should really be [number, number, number], but TypeScript widens the imported types
palettes: Array<Array<number>>;
// Array<number> should really be [number, number], same reason as above
rows: Array<Array<number | Array<number>>>;
};
type PositionLike = {
line?: undefined | Number1;
column?: undefined | Number0;
};
export function
|
(
path: AbsoluteFilePath,
): ReturnType<DiagnosticsFileReader> {
if (!existsSync(path)) {
return;
}
const src = readFileTextSync(path);
const mtime = lstatSync(path).mtimeMs;
return {content: src, mtime};
}
function equalPosition(
a: undefined | PositionLike,
b: undefined | PositionLike,
): boolean {
if (a === undefined || b === undefined) {
return false;
}
if (a.line !== b.line || a.column !== b.column) {
return false;
}
return true;
}
type FooterPrintCallback = (
reporter: Reporter,
error: boolean,
) => void | boolean;
export const DEFAULT_PRINTER_FLAGS: DiagnosticsPrinterFlags = {
grep: '',
inverseGrep: false,
showAllDiagnostics: true,
fieri: false,
verboseDiagnostics: false,
maxDiagnostics: 100,
};
// Dependency that may not be included in the output diagnostic but whose changes may affect the validity of this one
type ChangeFileDependency = {
type: 'change';
path: UnknownFilePath;
mtime: number;
};
// Dependency that will have a code frame in the output diagnostic
type ReferenceFileDependency = {
type: 'reference';
path: UnknownFilePath;
mtime: undefined | number;
sourceType: undefined | DiagnosticSourceType;
language: undefined | DiagnosticLanguage;
};
type FileDependency = ChangeFileDependency | ReferenceFileDependency;
export type DiagnosticsPrinterFileSources = UnknownFilePathMap<{
sourceText: string;
lines: Array<string>;
}>;
export type DiagnosticsPrinterFileMtimes = UnknownFilePathMap<number>;
export default class DiagnosticsPrinter extends Error {
constructor(opts: DiagnosticsPrinterOptions) {
super(
"Diagnostics printer. If you're seeing this then it wasn't caught and printed correctly.",
);
const {cwd, reporter, flags = DEFAULT_PRINTER_FLAGS} = opts;
this.reporter = reporter;
this.flags = flags;
this.readFile =
opts.readFile === undefined ? readDiagnosticsFileLocal : opts.readFile;
this.cwd = cwd === undefined ? createAbsoluteFilePath(process.cwd()) : cwd;
this.processor =
opts.processor === undefined ? new DiagnosticsProcessor() : opts.processor;
this.displayedCount = 0;
this.problemCount = 0;
this.filteredCount = 0;
this.truncatedCount = 0;
this.hasTruncatedDiagnostics = false;
this.missingFileSources = new AbsoluteFilePathSet();
this.fileSources = new UnknownFilePathMap();
this.fileMtimes = new UnknownFilePathMap();
this.onFooterPrintCallbacks = [];
}
reporter: Reporter;
processor: DiagnosticsProcessor;
onFooterPrintCallbacks: Array<FooterPrintCallback>;
flags: DiagnosticsPrinterFlags;
cwd: AbsoluteFilePath;
readFile: DiagnosticsFileReader;
hasTruncatedDiagnostics: boolean;
missingFileSources: AbsoluteFilePathSet;
fileSources: DiagnosticsPrinterFileSources;
fileMtimes: DiagnosticsPrinterFileMtimes;
displayedCount: number;
problemCount: number;
filteredCount: number;
truncatedCount: number;
createFilePath(filename: undefined | string): UnknownFilePath {
if (filename === undefined) {
filename = 'unknown';
}
const {normalizeFilename} = this.reporter.markupOptions;
if (normalizeFilename === undefined) {
return createUnknownFilePath(filename);
} else {
return createUnknownFilePath(normalizeFilename(filename));
}
}
throwIfAny() {
if (this.hasDiagnostics()) {
throw this;
}
}
hasDiagnostics(): boolean {
return this.processor.hasDiagnostics();
}
getDisplayedProblemsCount() {
return this.problemCount - this.filteredCount;
}
shouldTruncate(): boolean {
if (
!this.flags.showAllDiagnostics &&
this.displayedCount > this.flags.maxDiagnostics
) {
return true;
} else {
return false;
}
}
getDiagnostics(): Diagnostics {
return this.processor.getSortedDiagnostics();
}
shouldIgnore(diag: Diagnostic): boolean {
const {grep, inverseGrep} = this.flags;
// An empty grep pattern means show everything
if (grep === undefined || grep === '') {
return false;
}
// Match against the supplied grep pattern
let ignored =
markupToPlainTextString(diag.description.message.value).toLowerCase().includes(
grep,
) === false;
if (inverseGrep) {
ignored = !ignored;
}
return ignored;
}
addFileSource(
info: ChangeFileDependency | ReferenceFileDependency,
stats: DiagnosticsFileReaderStats,
) {
this.fileMtimes.set(info.path, stats.mtime);
if (info.type === 'reference') {
this.fileSources.set(
info.path,
{
sourceText: stats.content,
lines: toLines({
path: info.path,
input: stats.content,
sourceType: info.sourceType,
language: info.language,
}),
},
);
}
}
getDependenciesFromDiagnostics(
diagnostics: Diagnostics,
): Array<FileDependency> {
const deps: Array<FileDependency> = [];
for (const {
dependencies,
description: {advice},
location: {language, sourceType, mtime, filename},
} of diagnostics) {
if (filename !== undefined) {
deps.push({
type: 'reference',
path: this.createFilePath(filename),
mtime,
language,
sourceType,
});
}
if (dependencies !== undefined) {
for (const {filename, mtime} of dependencies) {
deps.push({
type: 'change',
path: this.createFilePath(filename),
mtime,
});
}
}
if (advice !== undefined) {
for (const item of advice) {
if (item.type === 'frame') {
const {location} = item;
if (
location.filename !== undefined &&
location.sourceText === undefined
) {
deps.push({
type: 'reference',
path: this.createFilePath(location.filename),
language: location.language,
sourceType: location.sourceType,
mtime: location.mtime,
});
}
}
}
}
}
const depsMap: UnknownFilePathMap<FileDependency> = new UnknownFilePathMap();
// Remove non-absolute filenames and normalize sourceType and language for conflicts
for (const dep of deps) {
const path = dep.path;
if (!path.isAbsolute()) {
continue;
}
const existing = depsMap.get(path);
// reference dependency can override change since it has more metadata that needs conflict resolution
if (existing === undefined || existing.type === 'change') {
depsMap.set(dep.path, dep);
continue;
}
if (dep.type === 'reference') {
if (existing.sourceType !== dep.sourceType) {
existing.sourceType = 'unknown';
}
if (existing.language !== dep.language) {
existing.language = 'unknown';
}
}
}
return Array.from(depsMap.values());
}
fetchFileSources(diagnostics: Diagnostics) {
for (const dep of this.getDependenciesFromDiagnostics(diagnostics)) {
const {path} = dep;
if (!path.isAbsolute()) {
continue;
}
const abs = path.assertAbsolute();
const stats = this.readFile(abs);
if (stats === undefined) {
this.missingFileSources.add(abs);
} else {
this.addFileSource(dep, stats);
}
}
}
print() {
const filteredDiagnostics = this.filterDiagnostics();
this.fetchFileSources(filteredDiagnostics);
this.displayDiagnostics(filteredDiagnostics);
}
displayDiagnostics(diagnostics: Diagnostics) {
const restoreRedirect = this.reporter.redirectOutToErr(true);
for (const diag of diagnostics) {
this.displayDiagnostic(diag);
}
this.reporter.redirectOutToErr(restoreRedirect);
}
getOutdatedFiles(diag: Diagnostic): UnknownFilePathSet {
let outdatedFiles: UnknownFilePathSet = new UnknownFilePathSet();
for (const {
path,
mtime: expectedMtime,
} of this.getDependenciesFromDiagnostics([diag])) {
const mtime = this.fileMtimes.get(path);
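			// A dependency is considered outdated when the mtime read off disk is newer than
			// the mtime recorded at the time the diagnostic was produced.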
if (
mtime !== undefined &&
expectedMtime !== undefined &&
mtime > expectedMtime
) {
outdatedFiles.add(path);
}
}
return outdatedFiles;
}
displayDiagnostic(diag: Diagnostic) {
const {reporter} = this;
const {start, end, filename} = diag.location;
let advice = [...(diag.description.advice || [])];
// Remove stacktrace from beginning if it contains only one frame that matches the root diagnostic location
const firstAdvice = advice[0];
if (
firstAdvice !== undefined &&
firstAdvice.type === 'stacktrace' &&
firstAdvice.frames.length === 1
) {
const frame = firstAdvice.frames[0];
if (frame.filename === filename && equalPosition(frame, start)) {
advice.shift();
}
}
// Determine if we should skip showing the frame at the top of the diagnostic output
		// We check if there are any frame advice entries that match us exactly; this is
		// useful for things like reporting call stacks
let skipFrame = false;
if (start !== undefined && end !== undefined) {
adviceLoop: for (const item of advice) {
if (
item.type === 'frame' &&
item.location.filename === filename &&
equalPosition(item.location.start, start) &&
equalPosition(item.location.end, end)
) {
skipFrame = true;
break;
}
if (item.type === 'stacktrace') {
for (const frame of item.frames) {
if (frame.filename === filename && equalPosition(frame, start)) {
skipFrame = true;
break adviceLoop;
}
}
}
}
}
const outdatedAdvice: DiagnosticAdvice = [];
const outdatedFiles = this.getOutdatedFiles(diag);
const isOutdated = outdatedFiles.size > 0;
if (isOutdated) {
const outdatedFilesArr = Array.from(outdatedFiles, (path) => path.join());
if (outdatedFilesArr.length === 1 && outdatedFilesArr[0] === filename) {
outdatedAdvice.push({
type: 'log',
category: 'warn',
text: 'This file has been changed since the diagnostic was produced and may be out of date',
});
} else {
outdatedAdvice.push({
type: 'log',
category: 'warn',
text: 'This diagnostic may be out of date as it relies on the following files that have been changed since the diagnostic was generated',
});
outdatedAdvice.push({
type: 'list',
list: outdatedFilesArr.map((filename) =>
markup`<filelink target="${filename}" />`
),
});
}
}
const derived = deriveRootAdviceFromDiagnostic(
diag,
{
skipFrame,
includeHeaderInAdvice: false,
outdated: isOutdated,
},
);
reporter.hr(derived.header);
reporter.indent(() => {
// Concat all the advice together
const allAdvice: DiagnosticAdvice = [
...derived.advice,
...outdatedAdvice,
...advice,
];
// Print advice
for (const item of allAdvice) {
const res = printAdvice(
item,
{
printer: this,
flags: this.flags,
missingFileSources: this.missingFileSources,
fileSources: this.fileSources,
diagnostic: diag,
reporter,
},
);
if (res.printed) {
reporter.br();
}
if (res.truncated) {
this.hasTruncatedDiagnostics = true;
}
}
// Print verbose information
if (this.flags.verboseDiagnostics) {
const {origins} = diag;
if (origins !== undefined && origins.length > 0) {
reporter.br();
reporter.info('Why are you seeing this diagnostic?');
reporter.br();
reporter.list(
origins.map((origin) => {
let res = `<emphasis>${origin.category}</emphasis>`;
if (origin.message !== undefined) {
res += `: ${origin.message}`;
}
return res;
}),
{ordered: true},
);
}
}
});
}
filterDiagnostics(): Diagnostics {
const diagnostics = this.getDiagnostics();
const filteredDiagnostics: Diagnostics = [];
for (const diag of diagnostics) {
this.problemCount++;
if (this.shouldIgnore(diag)) {
this.filteredCount++;
} else if (this.shouldTruncate()) {
this.truncatedCount++;
} else {
this.displayedCount++;
filteredDiagnostics.push(diag);
}
}
return filteredDiagnostics;
}
onFooterPrint(fn: FooterPrintCallback) {
this.onFooterPrintCallbacks.push(fn);
}
footer() {
const {reporter, problemCount} = this;
const isError = problemCount > 0;
if (isError) {
const restoreRedirect = reporter.redirectOutToErr(true);
reporter.hr();
reporter.redirectOutToErr(restoreRedirect);
}
if (this.hasTruncatedDiagnostics) {
reporter.warn(
'Some diagnostics have been truncated. Use the --verbose-diagnostics flag to disable truncation.',
);
}
if (isError) {
if (this.flags.fieri) {
this.showBanner(errorBanner);
}
} else {
if (this.flags.fieri) {
this.showBanner(successBanner);
}
}
for (const handler of this.onFooterPrintCallbacks) {
const stop = handler(reporter, isError);
if (stop) {
return;
}
}
if (isError) {
this.footerError();
} else {
reporter.success('No known problems!');
}
}
showBanner(banner: Banner) {
for (const stream of this.reporter.getStreams(false)) {
for (const row of banner.rows) {
for (const field of row) {
let palleteIndex;
let times = 1;
if (Array.isArray(field)) {
[palleteIndex, times] = field;
} else {
palleteIndex = field;
}
const pallete = banner.palettes[palleteIndex];
stream.write(
formatAnsi.bgRgb(
' ',
{
r: pallete[0],
g: pallete[1],
b: pallete[2],
},
).repeat(times),
);
}
stream.write('\n');
}
}
}
footerError() {
const {reporter, filteredCount} = this;
const displayableProblems = this.getDisplayedProblemsCount();
let str = `Found <number emphasis>${displayableProblems}</number> <grammarNumber plural="problems" singular="problem">${displayableProblems}</grammarNumber>`;
if (filteredCount > 0) {
str += `<dim> (${filteredCount} filtered)</dim>`;
}
reporter.error(str);
if (this.truncatedCount > 0) {
const {maxDiagnostics} = this.flags;
reporter.warn(
`Only <number>${maxDiagnostics}</number> errors shown, add the <emphasis>--show-all-diagnostics</emphasis> flag to view the remaining <number>${displayableProblems -
maxDiagnostics}</number> errors`,
);
}
}
}
|
readDiagnosticsFileLocal
|
image_processing.py
|
"""Support for performing EdgeTPU classification on images."""
import logging
import voluptuous as vol
from homeassistant.components.image_processing import (
CONF_ENTITY_ID, CONF_NAME, CONF_SOURCE, PLATFORM_SCHEMA,
ImageProcessingEntity)
from homeassistant.core import split_entity_id
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_MATCHES = 'matches'
ATTR_SUMMARY = 'summary'
ATTR_TOTAL_MATCHES = 'total_matches'
CONF_CATEGORIES = 'categories'
CONF_CATEGORY = 'category'
CONF_FILE_OUT = 'file_out'
CONF_PATH = 'path'
CONF_LABELS = 'labels'
CONF_MODEL = 'model'
CONF_MODEL_DIR = 'model_dir'
CONF_TPU_DEVICE = 'device'
CONF_TPU_TOP_K = 'top_k'
CONF_TPU_THRESHOLD = 'threshold'
CONF_TPU_KEEP_ASPECT_RATIO = 'keep_aspect_ratio'
CONF_TPU_RESAMPLE = 'resample'
DEFAULT_THRESHOLD = 0.05
DEFAULT_TOP_K = 10
DEFAULT_KEEP_ASPECT_RATIO = True
DEFAULT_RESAMPLE = 0
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_FILE_OUT, default=[]):
vol.All(cv.ensure_list, [cv.template]),
vol.Required(CONF_MODEL): vol.Schema({
vol.Required(CONF_PATH): cv.isfile,
vol.Optional(CONF_LABELS): cv.isfile,
vol.Optional(CONF_MODEL_DIR): cv.isdir,
vol.Optional(CONF_TPU_DEVICE): cv.string,
vol.Optional(CONF_TPU_THRESHOLD, default=DEFAULT_THRESHOLD): cv.small_float,
vol.Optional(CONF_TPU_KEEP_ASPECT_RATIO, default=DEFAULT_KEEP_ASPECT_RATIO): cv.boolean,
vol.Optional(CONF_TPU_RESAMPLE, default=DEFAULT_RESAMPLE): cv.positive_int,
vol.Optional(CONF_TPU_TOP_K, default=DEFAULT_TOP_K): cv.positive_int,
})
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the EdgeTPU image processing platform."""
try:
        # Verify that the EdgeTPU detection library is pre-installed
# pylint: disable=unused-import,unused-variable
from edgetpu.detection.engine import DetectionEngine
except ImportError:
# pylint: disable=line-too-long
_LOGGER.error(
"No EdgeTPU Object Detection library found! Install or compile ") # noqa
return
entities = []
for camera in config[CONF_SOURCE]:
entities.append(EdgeTPUImageProcessor(
hass, camera[CONF_ENTITY_ID], camera.get(CONF_NAME),
config))
add_entities(entities)
class EdgeTPUImageProcessor(ImageProcessingEntity):
"""Representation of an EdgeTPU image processor."""
def __init__(self, hass, camera_entity, name, config):
"""Initialize the EdgeTPU entity."""
from edgetpu.detection.engine import DetectionEngine # pylint: disable=import-error
model_config = config.get(CONF_MODEL)
_LOGGER.info("config = %s", model_config)
self.hass = hass
self._camera_entity = camera_entity
_LOGGER.info("camera = %s", self._camera_entity)
if name:
self._name = name
else:
self._name = "EdgeTPU {0}".format(
split_entity_id(camera_entity)[1])
self._file_out = config.get(CONF_FILE_OUT)
self._model = model_config.get(CONF_PATH)
self._threshold = model_config.get(CONF_TPU_THRESHOLD)
self._top_k = model_config.get(CONF_TPU_TOP_K)
self._keep_aspect_ratio = model_config.get(CONF_TPU_KEEP_ASPECT_RATIO)
self._resample = model_config.get(CONF_TPU_RESAMPLE)
self._engine = DetectionEngine(self._model, device_path=model_config.get(CONF_TPU_DEVICE))
labels = model_config.get(CONF_LABELS)
self._labels = self._read_label_file(labels) if labels else None
template.attach(hass, self._file_out)
self._matches = {}
self._total_matches = 0
self._last_image = None
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera_entity
@property
def name(self):
"""Return the name of the image processor."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
return self._total_matches
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {
ATTR_MATCHES: self._matches,
# ATTR_SUMMARY: {item: len(values)
# for item, values in self._matches.items()},
ATTR_TOTAL_MATCHES: self._total_matches
}
# Function to read labels from text files.
def _read_label_file(self, file_path):
with open(file_path, 'r', encoding="utf-8") as source_file:
lines = source_file.readlines()
ret = {}
for line in lines:
pair = line.strip().split(maxsplit=1)
ret[int(pair[0])] = pair[1].strip()
return ret
def process_image(self, image):
"""Process the image."""
from PIL import Image
from PIL import ImageDraw
_LOGGER.debug("Model=%s", self._model)
matches = {}
total_matches = 0
# Open image.
# _LOGGER.info("image = %s", image)
import io
img = Image.open(io.BytesIO(bytearray(image)))
# img.save("/tmp/test.jpg")
draw = ImageDraw.Draw(img)
# Run inference.
ans = self._engine.DetectWithImage(img, threshold=self._threshold,
keep_aspect_ratio=self._keep_aspect_ratio,
relative_coord=False, top_k=self._top_k)
# Display result.
if ans:
for obj in ans:
_LOGGER.info("label_id = %d", obj.label_id)
if self._labels:
_LOGGER.info("label=%s", self._labels[obj.label_id])
|
box = obj.bounding_box.flatten().tolist()
_LOGGER.info("box = %s", box)
# Draw a rectangle.
draw.rectangle(box, outline='red')
if self._file_out:
for path_template in self._file_out:
if isinstance(path_template, template.Template):
img.save(path_template.render(
camera_entity=self._camera_entity))
else:
img.save(path_template)
if 'Face' not in matches.keys():
matches['Face'] = []
matches['Face'].append({
'score': float(obj.score),
'box': box
})
total_matches += 1
else:
_LOGGER.info("No object detected!")
self._matches = matches
self._total_matches = total_matches
|
_LOGGER.info("score = %f", obj.score)
|
P02_DepthFirstSearch.py
|
class Graph():
def __init__(self):
self.vertex = {}
# for printing the Graph vertexes
def printGraph(self):
print(self.vertex)
for i in self.vertex.keys():
print(i,' -> ', ' -> '.join([str(j) for j in self.vertex[i]]))
    # for adding the edge between two vertexes
def addEdge(self, fromVertex, toVertex):
# check if vertex is already present,
if fromVertex in self.vertex.keys():
self.vertex[fromVertex].append(toVertex)
else:
# else make a new vertex
self.vertex[fromVertex] = [toVertex]
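    # Depth-first traversal: start from every still-unvisited vertex so disconnected
    # parts of the graph are covered; overall cost is O(V + E).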
def DFS(self):
# visited array for storing already visited nodes
visited = [False] * len(self.vertex)
# call the recursive helper function
for i in range(len(self.vertex)):
if visited[i] == False:
self.DFSRec(i, visited)
def
|
(self, startVertex, visited):
# mark start vertex as visited
visited[startVertex] = True
print(startVertex, end = ' ')
# Recur for all the vertexes that are adjacent to this node
        for i in self.vertex.get(startVertex, []):
if visited[i] == False:
self.DFSRec(i, visited)
if __name__ == '__main__':
g = Graph()
g.addEdge(0, 1)
g.addEdge(0, 2)
g.addEdge(1, 2)
g.addEdge(2, 0)
g.addEdge(2, 3)
g.addEdge(3, 3)
g.printGraph()
print('DFS:')
g.DFS()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
|
DFSRec
|
raft.rs
|
use enum_kinds::EnumKind;
use futures::stream::FuturesUnordered;
use log::*;
use std::sync::Arc;
use std::time::Instant;
use parking_lot::Mutex;
use tokio::prelude::*;
use rcommon::clone;
use rlog::message::Id;
use rproto::common::Status;
use crate::rpc::client::EndpointClient;
use std::time::Duration;
struct Candidate {
timestamp: Instant,
votes_from: Vec<u32>,
}
#[derive(EnumKind)]
#[enum_kind(StateKind)]
enum State {
Idle,
Candidate(Candidate),
Follower,
Leader,
}
impl State {
pub fn kind(&self) -> StateKind {
self.into()
}
}
impl State {
pub fn as_candidate(&self) -> Option<&Candidate> {
if let State::Candidate(v) = self {
Some(v)
} else {
None
}
}
pub fn as_candidate_mut(&mut self) -> Option<&mut Candidate> {
if let State::Candidate(v) = self {
Some(v)
} else {
None
}
}
}
pub type ClusterHandle = Arc<Mutex<Cluster>>;
struct
|
{
id: u32,
client: EndpointClient,
last_activity: Instant,
}
pub struct Cluster {
election_timeout: Duration,
state: State,
node_id: u32,
// FIXME persistent
term: u64,
voted_for: Option<u32>,
last_entry: Option<(Id, u64)>,
horizon: Id,
nodes: Vec<Node>,
topic_id: String,
partition_id: u32,
}
impl Cluster {
pub fn new(node_id: u32, last_entry: Option<(Id, u64)>,
topic_id: String, partition_id: u32, election_timeout: Duration) -> ClusterHandle
{
Arc::new(Mutex::new(Self {
state: State::Idle,
node_id,
term: 0,
voted_for: None,
last_entry,
horizon: Id::new(0),
nodes: Vec::new(),
topic_id,
partition_id,
election_timeout,
}))
}
pub fn add_node(&mut self, id: u32, client: EndpointClient) {
self.nodes.push(Node { id, client, last_activity: Instant::now() });
}
pub fn start_election(this: &ClusterHandle) {
use rproto::ask_vote::*;
let (futs, election_timeout) = {
let mut this = this.lock();
assert!(this.nodes.len() >= 1);
debug_assert_eq!(this.nodes.iter().filter(|n| n.id == this.node_id).count(), 1);
this.term += 1;
if this.nodes.len() == 1 {
info!("[{}] becoming leader for term {} without election \
because it's the only node in the cluster",
this.node_id, this.term);
this.state = State::Leader;
return;
}
this.state = State::Candidate(Candidate {
timestamp: Instant::now(),
votes_from: vec![this.node_id],
});
let mut futs = FuturesUnordered::new();
for node in &this.nodes {
let node_id = node.id;
let this_node_id = this.node_id;
if node_id == this_node_id {
continue;
}
let f = node.client.ask(
Request {
term: this.term,
node_id: this_node_id,
last_entry: this.last_entry.map(|(id, term)| request::Entry {
id: id.get(),
term,
}),
topic_id: this.topic_id.clone(),
partition_id: this.partition_id,
})
.map_err(move |e| warn!("[{}] AskVote RPC to {} failed: {:?}", this_node_id, node_id, e))
.map(move |resp| (node_id, resp));
futs.push(f);
}
(futs, this.election_timeout)
};
fn maybe_retry(this: &ClusterHandle) {
let retry = {
let this = this.lock();
if this.state.kind() == StateKind::Candidate {
debug!("[{}] election term {} timed out, starting a new one",
this.node_id, this.term);
true
} else {
false
}
};
if retry {
Cluster::start_election(&this);
}
}
let fut = futs
.take_while(clone!(this => move |_| Ok(this.lock().state.kind() == StateKind::Candidate)))
.for_each(clone!(this => move |(node_id, resp)| {
let mut this = this.lock();
let resp = if let rproto::response::Response::AskVote(v) = resp.response.unwrap() {
v
} else {
warn!("[{}] invalid AskVote response from {}", this.node_id, node_id);
return Ok(());
};
                if this.state.kind() != StateKind::Candidate {
                    debug!("[{}] ignoring AskVote response from {} since this node is no longer in Candidate state",
                        this.node_id, node_id);
                    return Ok(());
                }
let status = resp.common.unwrap().status;
if status != Status::Ok.into() {
warn!("[{}] got non-Ok AskVote response from {}: {} ({:?})",
this.node_id, node_id, status, Status::from_i32(status));
}
if resp.term > this.term {
debug!("[{}] saw a higher term, becoming a follower: {} -> {}",
this.node_id, this.term, resp.term);
this.term = resp.term;
this.state = State::Follower;
return Ok(());
} else if resp.term < this.term {
debug!("[{}] ignoring AskVote response from previous term {} < {}",
this.node_id, resp.term, this.term);
return Ok(());
}
if resp.vote_granted {
let vote_count = {
let votes_from = &mut this.state.as_candidate_mut().unwrap().votes_from;
votes_from.push(node_id);
votes_from.len()
};
debug!("[{}] vote received from {}, now have votes from {:?}",
this.node_id, node_id, this.state.as_candidate().unwrap().votes_from);
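                    // A strict majority of the whole cluster (including this node's own vote)
                    // is required to become leader; nodes.len() / 2 + 1 is that threshold.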
let majority_count = this.nodes.len() / 2 + 1;
if vote_count >= majority_count {
info!("[{}] becoming leader for term {} due to votes majority {} from {:?}",
this.node_id, this.term, vote_count,
this.state.as_candidate().unwrap().votes_from);
this.state = State::Leader;
}
} else {
debug!("[{}] {} didn't grant the vote", this.node_id, node_id);
}
Ok(())
}))
.timeout(election_timeout)
.map_err(clone!(this => move |_| maybe_retry(&this)));
tokio::spawn(fut);
}
pub fn ask_vote(&mut self, request: &rproto::ask_vote::Request) -> rproto::ask_vote::Response {
let vote_granted = request.term >= self.term &&
(self.voted_for.is_none() || self.voted_for == Some(request.node_id)) &&
(self.last_entry.is_none() == request.last_entry.is_none() ||
request.last_entry.as_ref().and_then(|e| self.last_entry.map(|(my_id, my_term)|
e.id >= my_id.get() && e.term >= my_term)) == Some(true));
// TODO prevent deposing of live leader
if vote_granted {
self.voted_for = Some(request.node_id);
}
rproto::ask_vote::Response {
common: Some(rproto::common::Response { status: rproto::common::Status::Ok.into() }),
term: self.term,
vote_granted,
}
}
}
|
Node
|
event_tracking.py
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
|
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module holds data for ppr queue event tracking."""
from __future__ import annotations
from mhr_api.models import utils as model_utils
from mhr_api.utils.base import BaseEnum
from .db import db
class EventTracking(db.Model): # pylint: disable=too-many-instance-attributes
"""This class manages all of the event tracking information."""
class EventTrackingTypes(BaseEnum):
"""Render an Enum of the event tracking types."""
SEARCH_REPORT = 'SEARCH_REPORT'
API_NOTIFICATION = 'API_NOTIFICATION'
EMAIL = 'EMAIL'
SURFACE_MAIL = 'SURFACE_MAIL'
EMAIL_REPORT = 'EMAIL_REPORT'
REGISTRATION_REPORT = 'REGISTRATION_REPORT'
__tablename__ = 'event_tracking'
id = db.Column('id', db.Integer, db.Sequence('event_tracking_id_seq'), primary_key=True)
key_id = db.Column('key_id', db.Integer, nullable=False, index=True)
event_ts = db.Column('event_ts', db.DateTime, nullable=False, index=True)
event_tracking_type = db.Column('event_tracking_type', db.String(20),
db.ForeignKey('event_tracking_types.event_tracking_type'),
nullable=False, index=True)
status = db.Column('status', db.Integer, nullable=True)
message = db.Column('message', db.String(2000), nullable=True)
email_id = db.Column('email_address', db.String(250), nullable=True)
    # Relationships - EventTrackingType
tracking_type = db.relationship('EventTrackingType', foreign_keys=[event_tracking_type],
back_populates='event_tracking', cascade='all, delete', uselist=False)
def save(self):
"""Save the object to the database immediately."""
db.session.add(self)
db.session.commit()
@property
def json(self) -> dict:
"""Return the event tracking record as a json object."""
event_tracking = {
'eventTrackingId': self.id,
'keyId': self.key_id,
'type': self.event_tracking_type,
'createDateTime': model_utils.format_ts(self.event_ts)
}
if self.status:
event_tracking['status'] = self.status
if self.message:
event_tracking['message'] = self.message
if self.email_id:
event_tracking['emailAddress'] = self.email_id
return event_tracking
@classmethod
def find_by_id(cls, event_id: int):
"""Return a tracking object by ID."""
if event_id:
return cls.query.get(event_id)
return None
@classmethod
def find_by_key_id(cls, key_id: int):
"""Return a list of event tracking objects by key id."""
event_tracking = None
if key_id:
event_tracking = cls.query.filter(EventTracking.key_id == key_id) \
.order_by(EventTracking.id).all()
return event_tracking
@classmethod
def find_by_key_id_type(cls, key_id: int, event_tracking_type: str, extra_key: str = None):
"""Return a list of event tracking objects by key id and event tracking type."""
event_tracking = None
if key_id and event_tracking_type:
event_tracking = cls.query.filter(EventTracking.key_id == key_id,
EventTracking.event_tracking_type == event_tracking_type) \
.order_by(EventTracking.id).all()
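            # When an extra key is supplied, narrow the results to events whose message
            # text contains that key.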
if event_tracking is not None and extra_key:
events = []
for event in event_tracking:
if event.message and event.message.find(extra_key) > 0:
events.append(event)
return events
return event_tracking
@staticmethod
def create(key_id: int, event_type: str, status: int = None, message: str = None):
"""Create an EventTracking record."""
event_tracking = EventTracking(key_id=key_id, event_tracking_type=event_type, status=status, message=message)
event_tracking.event_ts = model_utils.now_ts()
event_tracking.save()
return event_tracking
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
models.py
|
from flask_login import UserMixin
|
from __init__ import db
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True) # primary keys are required by SQLAlchemy
email = db.Column(db.String(100), unique=True)
password = db.Column(db.String(100))
name = db.Column(db.String(1000))
| |
clkoutcfg.rs
|
#[doc = "Reader of register CLKOUTCFG"]
pub type R = crate::R<u32, super::CLKOUTCFG>;
#[doc = "Writer for register CLKOUTCFG"]
pub type W = crate::W<u32, super::CLKOUTCFG>;
#[doc = "Register CLKOUTCFG `reset()`'s with value 0"]
impl crate::ResetValue for super::CLKOUTCFG {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
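// Register field layout (summarized from the per-field doc comments below):
// bits 0..=3 CLKOUTSEL, bits 4..=7 CLKOUTDIV, bit 8 CLKOUT_EN, bit 9 CLKOUT_ACT.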
#[doc = "Reader of field `CLKOUTSEL`"]
pub type CLKOUTSEL_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `CLKOUTSEL`"]
pub struct CLKOUTSEL_W<'a> {
w: &'a mut W,
}
impl<'a> CLKOUTSEL_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !0x0f) | ((value as u32) & 0x0f);
self.w
}
}
#[doc = "Reader of field `CLKOUTDIV`"]
pub type CLKOUTDIV_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `CLKOUTDIV`"]
pub struct CLKOUTDIV_W<'a> {
w: &'a mut W,
}
impl<'a> CLKOUTDIV_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x0f << 4)) | (((value as u32) & 0x0f) << 4);
self.w
}
}
#[doc = "Reader of field `CLKOUT_EN`"]
pub type CLKOUT_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CLKOUT_EN`"]
pub struct CLKOUT_EN_W<'a> {
w: &'a mut W,
}
impl<'a> CLKOUT_EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
|
self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
self.w
}
}
#[doc = "Reader of field `CLKOUT_ACT`"]
pub type CLKOUT_ACT_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CLKOUT_ACT`"]
pub struct CLKOUT_ACT_W<'a> {
w: &'a mut W,
}
impl<'a> CLKOUT_ACT_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
self.w
}
}
impl R {
#[doc = "Bits 0:3 - Selects the clock source for the CLKOUT function. 0x0 = Selects the CPU clock as the CLKOUT source. 0x1 = Selects the main oscillator as the CLKOUT source. 0x2 = Selects the Internal RC oscillator as the CLKOUT source. 0x3 = Selects the USB clock as the CLKOUT source. 0x4 = Selects the RTC oscillator as the CLKOUT source. 0x5 = Selects the SPIFI clock as the CLKOUT source. 0x6 = Selects the Watchdog oscillator as the CLKOUT source. Other settings are reserved. Do not use."]
#[inline(always)]
pub fn clkoutsel(&self) -> CLKOUTSEL_R {
CLKOUTSEL_R::new((self.bits & 0x0f) as u8)
}
#[doc = "Bits 4:7 - Integer value to divide the output clock by, minus one. 0x0 = Clock is divided by 1. 0x1 = Clock is divided by 2. 0x2 = Clock is divided by 3. ... 0xF = Clock is divided by 16."]
#[inline(always)]
pub fn clkoutdiv(&self) -> CLKOUTDIV_R {
CLKOUTDIV_R::new(((self.bits >> 4) & 0x0f) as u8)
}
#[doc = "Bit 8 - CLKOUT enable control, allows switching the CLKOUT source without glitches. Clear to stop CLKOUT on the next falling edge. Set to enable CLKOUT."]
#[inline(always)]
pub fn clkout_en(&self) -> CLKOUT_EN_R {
CLKOUT_EN_R::new(((self.bits >> 8) & 0x01) != 0)
}
#[doc = "Bit 9 - CLKOUT activity indication. Reads as 1 when CLKOUT is enabled. Read as 0 when CLKOUT has been disabled via the CLKOUT_EN bit and the clock has completed being stopped."]
#[inline(always)]
pub fn clkout_act(&self) -> CLKOUT_ACT_R {
CLKOUT_ACT_R::new(((self.bits >> 9) & 0x01) != 0)
}
}
impl W {
#[doc = "Bits 0:3 - Selects the clock source for the CLKOUT function. 0x0 = Selects the CPU clock as the CLKOUT source. 0x1 = Selects the main oscillator as the CLKOUT source. 0x2 = Selects the Internal RC oscillator as the CLKOUT source. 0x3 = Selects the USB clock as the CLKOUT source. 0x4 = Selects the RTC oscillator as the CLKOUT source. 0x5 = Selects the SPIFI clock as the CLKOUT source. 0x6 = Selects the Watchdog oscillator as the CLKOUT source. Other settings are reserved. Do not use."]
#[inline(always)]
pub fn clkoutsel(&mut self) -> CLKOUTSEL_W {
CLKOUTSEL_W { w: self }
}
#[doc = "Bits 4:7 - Integer value to divide the output clock by, minus one. 0x0 = Clock is divided by 1. 0x1 = Clock is divided by 2. 0x2 = Clock is divided by 3. ... 0xF = Clock is divided by 16."]
#[inline(always)]
pub fn clkoutdiv(&mut self) -> CLKOUTDIV_W {
CLKOUTDIV_W { w: self }
}
#[doc = "Bit 8 - CLKOUT enable control, allows switching the CLKOUT source without glitches. Clear to stop CLKOUT on the next falling edge. Set to enable CLKOUT."]
#[inline(always)]
pub fn clkout_en(&mut self) -> CLKOUT_EN_W {
CLKOUT_EN_W { w: self }
}
#[doc = "Bit 9 - CLKOUT activity indication. Reads as 1 when CLKOUT is enabled. Read as 0 when CLKOUT has been disabled via the CLKOUT_EN bit and the clock has completed being stopped."]
#[inline(always)]
pub fn clkout_act(&mut self) -> CLKOUT_ACT_W {
CLKOUT_ACT_W { w: self }
}
}
| |
variable.go
|
package variable
import (
"agent/global/consts"
"agent/model"
"agent/model/check"
"agent/model/config"
"go.uber.org/zap"
"log"
"net"
"os"
"path/filepath"
)
var (
BasePath string // Defines the root directory of the project
ConfigFilePath string
// Global log pointer
ZapLog *zap.Logger
// Global configuration object
Config *config.Config
// Communication dictionary
CommunicationDict map[*net.Conn]chan bool
// Command data buffer
CommandBufferChan chan *model.DataBufferClient
// Monitoring data buffer
MonitorDataBufferChan chan *model.DataBufferClient
// CMD command execution error counter
CmdErrCounter = 0
CmdErrLimit = 2160
// Test result object
Result interface{}
// save last monitor data
LastMonitorData *check.MonitorRecord
)
func init()
|
{
// 1.Initialize program root directory
if path, err := os.Getwd(); err == nil {
BasePath = path
ConfigFilePath = filepath.Join(BasePath, "config", "config.yml")
} else {
log.Fatal(consts.ErrorsBasePath)
}
}
|
|
mod.rs
|
//! This module implements the Property Descriptor.
//!
//! The Property Descriptor type is used to explain the manipulation and reification of Object property attributes.
//! Values of the Property Descriptor type are Records. Each field's name is an attribute name
//! and its value is a corresponding attribute value as specified in [6.1.7.1][section].
//! In addition, any field may be present or absent.
//! The schema name used within this specification to tag literal descriptions of Property Descriptor records is “PropertyDescriptor”.
//!
//! More information:
//! - [MDN documentation][mdn]
//! - [ECMAScript reference][spec]
//!
//! [spec]: https://tc39.es/ecma262/#sec-property-descriptor-specification-type
//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/defineProperty
//! [section]: https://tc39.es/ecma262/#sec-property-attributes
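//!
//! A minimal usage sketch (not from the upstream docs), assuming the `Attribute::WRITABLE`
//! flag re-exported from the `attribute` module below:
//!
//! ```ignore
//! let desc = DataDescriptor::new(Value::integer(1), Attribute::WRITABLE);
//! let prop = PropertyDescriptor::from(desc);
//! assert!(prop.is_data_descriptor() && !prop.is_accessor_descriptor());
//! ```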
use crate::{
gc::{Finalize, Trace},
object::GcObject,
value::{RcString, RcSymbol, Value},
};
use std::{convert::TryFrom, fmt};
mod attribute;
pub use attribute::Attribute;
/// A data descriptor is a property that has a value, which may or may not be writable.
///
/// More information:
/// - [MDN documentation][mdn]
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-property-descriptor-specification-type
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/defineProperty
#[derive(Debug, Clone, Trace, Finalize)]
pub struct DataDescriptor {
value: Value,
attributes: Attribute,
}
impl DataDescriptor {
/// Create a new `DataDescriptor`.
#[inline]
pub fn new<V>(value: V, attributes: Attribute) -> Self
where
V: Into<Value>,
{
Self {
value: value.into(),
attributes,
}
}
/// Return the `value` of the data descriptor.
#[inline]
pub fn value(&self) -> Value {
self.value.clone()
}
/// Return the attributes of the descriptor.
#[inline]
pub fn attributes(&self) -> Attribute {
self.attributes
}
/// Check whether the descriptor is configurable.
#[inline]
pub fn configurable(&self) -> bool {
self.attributes.configurable()
}
/// Set whether the descriptor is configurable.
#[inline]
pub fn set_configurable(&mut self, configurable: bool) {
self.attributes.set_configurable(configurable)
}
/// Check whether the descriptor is enumerable.
#[inline]
pub fn enumerable(&self) -> bool {
self.attributes.enumerable()
}
/// Set whether the descriptor is enumerable.
#[inline]
pub fn set_enumerable(&mut self, enumerable: bool) {
self.attributes.set_enumerable(enumerable)
}
/// Check whether the descriptor is writable.
#[inline]
pub fn writable(&self) -> bool {
self.attributes.writable()
}
/// Set whether the descriptor is writable.
#[inline]
pub fn set_writable(&mut self, writable: bool) {
self.attributes.set_writable(writable)
}
}
impl From<DataDescriptor> for PropertyDescriptor {
#[inline]
fn from(value: DataDescriptor) -> Self {
Self::Data(value)
}
}
/// An accessor descriptor is a property described by a getter-setter pair of functions.
///
/// More information:
/// - [MDN documentation][mdn]
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-property-descriptor-specification-type
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/defineProperty
#[derive(Debug, Clone, Trace, Finalize)]
pub struct AccessorDescriptor {
/// The function serving as getter.
pub(crate) get: Option<GcObject>,
/// The function serving as setter.
pub(crate) set: Option<GcObject>,
/// The attributes of the accessor descriptor.
pub(crate) attributes: Attribute,
}
impl AccessorDescriptor {
/// Create a new `AccessorDescriptor`.
///
    /// If the `attributes` argument contains a `writable` flag, it will be removed so that only `enumerable`
    /// and `configurable` remain.
#[inline]
pub fn new(get: Option<GcObject>, set: Option<GcObject>, mut attributes: Attribute) -> Self {
// Accessors can not have writable attribute.
attributes.remove(Attribute::WRITABLE);
Self {
get,
set,
attributes,
}
}
/// Return the getter if it exists.
#[inline]
pub fn getter(&self) -> Option<&GcObject> {
self.get.as_ref()
}
/// Return the setter if it exists.
#[inline]
pub fn setter(&self) -> Option<&GcObject> {
self.set.as_ref()
}
/// Set the getter of the accessor descriptor.
#[inline]
pub fn set_getter(&mut self, get: Option<GcObject>) {
self.get = get;
}
/// Set the setter of the accessor descriptor.
#[inline]
pub fn set_setter(&mut self, set: Option<GcObject>) {
self.set = set;
}
/// Return the attributes of the accessor descriptor.
///
/// It is guaranteed to not contain a `writable` flag
#[inline]
pub fn attributes(&self) -> Attribute {
self.attributes
}
/// Check whether the descriptor is configurable.
#[inline]
pub fn configurable(&self) -> bool {
self.attributes.configurable()
}
/// Set whether the descriptor is configurable.
#[inline]
pub fn set_configurable(&mut self, configurable: bool) {
self.attributes.set_configurable(configurable)
}
/// Check whether the descriptor is enumerable.
#[inline]
pub fn enumerable(&self) -> bool {
self.attributes.enumerable()
}
/// Set whether the descriptor is enumerable.
#[inline]
pub fn set_enumerable(&mut self, enumerable: bool) {
self.attributes.set_enumerable(enumerable)
}
}
impl From<AccessorDescriptor> for PropertyDescriptor {
#[inline]
fn from(value: AccessorDescriptor) -> Self {
Self::Accessor(value)
}
}
/// This represents a JavaScript Property AKA The Property Descriptor.
///
/// Property descriptors present in objects come in two main flavors:
/// - data descriptors
/// - accessor descriptors
///
/// A data descriptor is a property that has a value, which may or may not be writable.
/// An accessor descriptor is a property described by a getter-setter pair of functions.
/// A descriptor must be one of these two flavors; it cannot be both.
///
/// More information:
/// - [MDN documentation][mdn]
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-property-descriptor-specification-type
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/defineProperty
#[derive(Debug, Clone, Trace, Finalize)]
pub enum PropertyDescriptor {
Accessor(AccessorDescriptor),
Data(DataDescriptor),
}
impl PropertyDescriptor {
/// An accessor Property Descriptor is one that includes any fields named either `[[Get]]` or `[[Set]]`.
///
/// More information:
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-isaccessordescriptor
#[inline]
pub fn is_accessor_descriptor(&self) -> bool {
matches!(self, Self::Accessor(_))
}
    /// Return `Some()` if it is an accessor descriptor, `None` otherwise.
#[inline]
pub fn as_accessor_descriptor(&self) -> Option<&AccessorDescriptor> {
match self {
Self::Accessor(ref accessor) => Some(accessor),
_ => None,
}
}
/// A data Property Descriptor is one that includes any fields named either `[[Value]]` or `[[Writable]]`.
///
/// More information:
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-isdatadescriptor
#[inline]
pub fn is_data_descriptor(&self) -> bool {
matches!(self, Self::Data(_))
}
/// Return `Some()` if it is a data descriptor, `None` otherwise.
#[inline]
pub fn as_data_descriptor(&self) -> Option<&DataDescriptor> {
match self {
Self::Data(ref data) => Some(data),
_ => None,
}
}
/// Check whether the descriptor is enumerable.
#[inline]
pub fn enumerable(&self) -> bool {
match self {
Self::Accessor(ref accessor) => accessor.enumerable(),
Self::Data(ref data) => data.enumerable(),
}
}
/// Check whether the descriptor is configurable.
#[inline]
pub fn configurable(&self) -> bool {
match self {
Self::Accessor(ref accessor) => accessor.configurable(),
Self::Data(ref data) => data.configurable(),
}
}
/// Return the attributes of the descriptor.
#[inline]
pub fn attributes(&self) -> Attribute {
match self {
Self::Accessor(ref accessor) => accessor.attributes(),
Self::Data(ref data) => data.attributes(),
}
}
}
/// This abstracts away the need for IsPropertyKey by transforming the PropertyKey
/// values into an enum with both valid types: String and Symbol
///
/// More information:
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-ispropertykey
#[derive(Trace, Finalize, Debug, Clone)]
pub enum PropertyKey {
String(RcString),
Symbol(RcSymbol),
Index(u32),
}
impl From<RcString> for PropertyKey {
#[inline]
fn from(string: RcString) -> PropertyKey {
if let Ok(index) = string.parse() {
PropertyKey::Index(index)
} else {
PropertyKey::String(string)
}
}
}
impl From<&str> for PropertyKey {
#[inline]
fn from(string: &str) -> PropertyKey {
if let Ok(index) = string.parse() {
PropertyKey::Index(index)
} else {
PropertyKey::String(string.into())
}
}
}
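// Illustrative note (not in the original source): strings that parse as a u32 become
// index keys, e.g. `PropertyKey::from("42")` is `PropertyKey::Index(42)`, while
// `PropertyKey::from("length")` stays a `PropertyKey::String`.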
impl From<String> for PropertyKey {
#[inline]
fn from(string: String) -> PropertyKey {
if let Ok(index) = string.parse() {
PropertyKey::Index(index)
} else {
PropertyKey::String(string.into())
}
}
}
impl From<Box<str>> for PropertyKey {
#[inline]
fn from(string: Box<str>) -> PropertyKey {
if let Ok(index) = string.parse() {
PropertyKey::Index(index)
} else {
PropertyKey::String(string.into())
}
}
}
impl From<RcSymbol> for PropertyKey {
#[inline]
fn from(symbol: RcSymbol) -> PropertyKey {
PropertyKey::Symbol(symbol)
}
}
impl fmt::Display for PropertyKey {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PropertyKey::String(ref string) => string.fmt(f),
PropertyKey::Symbol(ref symbol) => symbol.fmt(f),
PropertyKey::Index(index) => index.fmt(f),
}
}
}
impl From<&PropertyKey> for Value {
#[inline]
fn from(property_key: &PropertyKey) -> Value {
match property_key {
PropertyKey::String(ref string) => string.clone().into(),
PropertyKey::Symbol(ref symbol) => symbol.clone().into(),
PropertyKey::Index(index) => {
if let Ok(integer) = i32::try_from(*index) {
Value::integer(integer)
} else {
Value::number(*index)
}
}
}
}
}
impl From<PropertyKey> for Value {
#[inline]
fn from(property_key: PropertyKey) -> Value {
match property_key {
PropertyKey::String(ref string) => string.clone().into(),
PropertyKey::Symbol(ref symbol) => symbol.clone().into(),
PropertyKey::Index(index) => {
if let Ok(integer) = i32::try_from(index) {
Value::integer(integer)
} else {
Value::number(index)
}
}
}
}
}
impl From<u8> for PropertyKey {
fn from(value: u8) -> Self {
PropertyKey::Index(value.into())
}
}
impl From<u16> for PropertyKey {
fn from(value: u16) -> Self {
PropertyKey::Index(value.into())
}
}
impl From<u32> for PropertyKey {
fn from(value: u32) -> Self {
PropertyKey::Index(value)
}
}
impl From<usize> for PropertyKey {
fn from(value: usize) -> Self {
if let Ok(index) = u32::try_from(value) {
PropertyKey::Index(index)
} else {
PropertyKey::String(RcString::from(value.to_string()))
}
}
}
impl From<isize> for PropertyKey {
fn from(value: isize) -> Self {
if let Ok(index) = u32::try_from(value) {
PropertyKey::Index(index)
} else {
PropertyKey::String(RcString::from(value.to_string()))
}
}
}
|
fn from(value: i32) -> Self {
if let Ok(index) = u32::try_from(value) {
PropertyKey::Index(index)
} else {
PropertyKey::String(RcString::from(value.to_string()))
}
}
}
impl From<f64> for PropertyKey {
fn from(value: f64) -> Self {
use num_traits::cast::FromPrimitive;
if let Some(index) = u32::from_f64(value) {
return PropertyKey::Index(index);
}
PropertyKey::String(ryu_js::Buffer::new().format(value).into())
}
}
impl PartialEq<&str> for PropertyKey {
fn eq(&self, other: &&str) -> bool {
match self {
PropertyKey::String(ref string) => string == other,
_ => false,
}
}
}
|
impl From<i32> for PropertyKey {
|
daemon.go
|
// +build daemon
package main
import (
"crypto/tls"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/uuid"
apiserver "github.com/docker/docker/api/server"
"github.com/docker/docker/autogen/dockerversion"
"github.com/docker/docker/cli"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/daemon"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/opts"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/pkg/pidfile"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/timeutils"
"github.com/docker/docker/pkg/tlsconfig"
"github.com/docker/docker/registry"
"github.com/docker/docker/utils"
)
const daemonUsage = " docker daemon [ --help | ... ]\n"
var (
flDaemon = flag.Bool([]string{"#d", "#-daemon"}, false, "Enable daemon mode (deprecated; use docker daemon)")
daemonCli cli.Handler = NewDaemonCli()
)
// TODO: remove once `-d` is retired
func
|
() {
// This block makes sure that if the deprecated daemon flag `--daemon` is absent,
// then all daemon-specific flags are absent as well.
if !*flDaemon && daemonFlags != nil {
flag.CommandLine.Visit(func(fl *flag.Flag) {
for _, name := range fl.Names {
name := strings.TrimPrefix(name, "#")
if daemonFlags.Lookup(name) != nil {
// daemon flag was NOT specified, but daemon-specific flags were
// so let's error out
fmt.Fprintf(os.Stderr, "docker: the daemon flag '-%s' must follow the 'docker daemon' command.\n", name)
os.Exit(1)
}
}
})
}
if *flDaemon {
if *flHelp {
// We do not show the help output here, instead, we tell the user about the new daemon command,
// because the help output is so long they would not see the warning anyway.
fmt.Fprintln(os.Stderr, "Please use 'docker daemon --help' instead.")
os.Exit(0)
}
daemonCli.(*DaemonCli).CmdDaemon(flag.Args()...)
os.Exit(0)
}
}
func presentInHelp(usage string) string { return usage }
func absentFromHelp(string) string { return "" }
// NewDaemonCli returns a pre-configured daemon CLI
func NewDaemonCli() *DaemonCli {
daemonFlags = cli.Subcmd("daemon", nil, "Enable daemon mode", true)
// TODO(tiborvass): remove InstallFlags?
daemonConfig := new(daemon.Config)
daemonConfig.InstallFlags(daemonFlags, presentInHelp)
daemonConfig.InstallFlags(flag.CommandLine, absentFromHelp)
registryOptions := new(registry.Options)
registryOptions.InstallFlags(daemonFlags, presentInHelp)
registryOptions.InstallFlags(flag.CommandLine, absentFromHelp)
daemonFlags.Require(flag.Exact, 0)
return &DaemonCli{
Config: daemonConfig,
registryOptions: registryOptions,
}
}
func migrateKey() (err error) {
// Migrate trust key if exists at ~/.docker/key.json and owned by current user
oldPath := filepath.Join(cliconfig.ConfigDir(), defaultTrustKeyFile)
newPath := filepath.Join(getDaemonConfDir(), defaultTrustKeyFile)
if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && currentUserIsOwner(oldPath) {
defer func() {
// Ensure old path is removed if no error occurred
if err == nil {
err = os.Remove(oldPath)
} else {
logrus.Warnf("Key migration failed, key file not removed at %s", oldPath)
os.Remove(newPath)
}
}()
if err := system.MkdirAll(getDaemonConfDir(), os.FileMode(0644)); err != nil {
return fmt.Errorf("Unable to create daemon configuration directory: %s", err)
}
newFile, err := os.OpenFile(newPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return fmt.Errorf("error creating key file %q: %s", newPath, err)
}
defer newFile.Close()
oldFile, err := os.Open(oldPath)
if err != nil {
return fmt.Errorf("error opening key file %q: %s", oldPath, err)
}
defer oldFile.Close()
if _, err := io.Copy(newFile, oldFile); err != nil {
return fmt.Errorf("error copying key: %s", err)
}
logrus.Infof("Migrated key from %s to %s", oldPath, newPath)
}
return nil
}
// DaemonCli represents the daemon CLI.
type DaemonCli struct {
*daemon.Config
registryOptions *registry.Options
}
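// getGlobalFlag returns the first common or client flag that was set on the command
// line. The Visit callbacks panic with the offending flag and the deferred recover
// captures it; this is how the early exit from Visit is implemented.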
func getGlobalFlag() (globalFlag *flag.Flag) {
defer func() {
if x := recover(); x != nil {
switch f := x.(type) {
case *flag.Flag:
globalFlag = f
default:
panic(x)
}
}
}()
visitor := func(f *flag.Flag) { panic(f) }
commonFlags.FlagSet.Visit(visitor)
clientFlags.FlagSet.Visit(visitor)
return
}
// CmdDaemon is the daemon command, called with the raw arguments after `docker daemon`.
func (cli *DaemonCli) CmdDaemon(args ...string) error {
// warn from uuid package when running the daemon
uuid.Loggerf = logrus.Warnf
if *flDaemon {
// allow legacy forms `docker -D -d` and `docker -d -D`
logrus.Warn("please use 'docker daemon' instead.")
} else if !commonFlags.FlagSet.IsEmpty() || !clientFlags.FlagSet.IsEmpty() {
// deny `docker -D daemon`
illegalFlag := getGlobalFlag()
fmt.Fprintf(os.Stderr, "invalid flag '-%s'.\nSee 'docker daemon --help'.\n", illegalFlag.Names[0])
os.Exit(1)
} else {
// allow new form `docker daemon -D`
flag.Merge(daemonFlags, commonFlags.FlagSet)
}
daemonFlags.ParseFlags(args, true)
commonFlags.PostParse()
if len(commonFlags.Hosts) == 0 {
commonFlags.Hosts = []string{opts.DefaultHost}
}
if commonFlags.TrustKey == "" {
commonFlags.TrustKey = filepath.Join(getDaemonConfDir(), defaultTrustKeyFile)
}
if utils.ExperimentalBuild() {
logrus.Warn("Running experimental build")
}
logrus.SetFormatter(&logrus.TextFormatter{TimestampFormat: timeutils.RFC3339NanoFixed})
if err := setDefaultUmask(); err != nil {
logrus.Fatalf("Failed to set umask: %v", err)
}
if len(cli.LogConfig.Config) > 0 {
if err := logger.ValidateLogOpts(cli.LogConfig.Type, cli.LogConfig.Config); err != nil {
logrus.Fatalf("Failed to set log opts: %v", err)
}
}
var pfile *pidfile.PIDFile
if cli.Pidfile != "" {
pf, err := pidfile.New(cli.Pidfile)
if err != nil {
logrus.Fatalf("Error starting daemon: %v", err)
}
pfile = pf
defer func() {
if err := pfile.Remove(); err != nil {
logrus.Error(err)
}
}()
}
if cli.LogConfig.Config == nil {
cli.LogConfig.Config = make(map[string]string)
}
serverConfig := &apiserver.Config{
Logging: true,
Version: dockerversion.VERSION,
}
serverConfig = setPlatformServerConfig(serverConfig, cli.Config)
if commonFlags.TLSOptions != nil {
if !commonFlags.TLSOptions.InsecureSkipVerify {
// server requires and verifies client's certificate
commonFlags.TLSOptions.ClientAuth = tls.RequireAndVerifyClientCert
}
tlsConfig, err := tlsconfig.Server(*commonFlags.TLSOptions)
if err != nil {
logrus.Fatal(err)
}
serverConfig.TLSConfig = tlsConfig
}
api := apiserver.New(serverConfig)
// The serve API routine never exits unless an error occurs
// We need to start it as a goroutine and wait on it so
// daemon doesn't exit
serveAPIWait := make(chan error)
go func() {
if err := api.ServeAPI(commonFlags.Hosts); err != nil {
logrus.Errorf("ServeAPI error: %v", err)
serveAPIWait <- err
return
}
serveAPIWait <- nil
}()
if err := migrateKey(); err != nil {
logrus.Fatal(err)
}
cli.TrustKeyPath = commonFlags.TrustKey
registryService := registry.NewService(cli.registryOptions)
d, err := daemon.NewDaemon(cli.Config, registryService)
if err != nil {
if pfile != nil {
if err := pfile.Remove(); err != nil {
logrus.Error(err)
}
}
logrus.Fatalf("Error starting daemon: %v", err)
}
logrus.Info("Daemon has completed initialization")
logrus.WithFields(logrus.Fields{
"version": dockerversion.VERSION,
"commit": dockerversion.GITCOMMIT,
"execdriver": d.ExecutionDriver().Name(),
"graphdriver": d.GraphDriver().String(),
}).Info("Docker daemon")
signal.Trap(func() {
api.Close()
<-serveAPIWait
shutdownDaemon(d, 15)
if pfile != nil {
if err := pfile.Remove(); err != nil {
logrus.Error(err)
}
}
})
// after the daemon is done setting up we can tell the api to start
// accepting connections with specified daemon
api.AcceptConnections(d)
// Daemon is fully initialized and handling API traffic
// Wait for serve API to complete
errAPI := <-serveAPIWait
shutdownDaemon(d, 15)
if errAPI != nil {
if pfile != nil {
if err := pfile.Remove(); err != nil {
logrus.Error(err)
}
}
logrus.Fatalf("Shutting down due to ServeAPI error: %v", errAPI)
}
return nil
}
// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case
// d.Shutdown() is waiting too long to kill containers or, worse, is
// blocked there
func shutdownDaemon(d *daemon.Daemon, timeout time.Duration) {
ch := make(chan struct{})
go func() {
d.Shutdown()
close(ch)
}()
select {
case <-ch:
logrus.Debug("Clean shutdown succeeded")
case <-time.After(timeout * time.Second):
logrus.Error("Force shutdown daemon")
}
}
|
handleGlobalDaemonFlag
|
kama_test.go
|
package indicators_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/jaybutera/gotrade"
"github.com/jaybutera/gotrade/indicators"
)
var _ = Describe("when creating an demawithoutstorage", func() {
var (
indicator *indicators.KamaWithoutStorage
indicatorError error
)
Context("and the indicator was not given a value available action", func() {
BeforeEach(func() {
indicator, indicatorError = indicators.NewKamaWithoutStorage(4, nil)
})
It("the indicator should not be created and return the appropriate error message", func() {
Expect(indicator).To(BeNil())
Expect(indicatorError).To(Equal(indicators.ErrValueAvailableActionIsNil))
})
})
Context("and the indicator was given a timePeriod below the minimum", func() {
BeforeEach(func() {
indicator, indicatorError = indicators.NewKamaWithoutStorage(1, fakeFloatValAvailable)
})
It("the indicator should not be created and return the appropriate error message", func() {
Expect(indicator).To(BeNil())
})
})
Context("and the indicator was given a timePeriod above the maximum", func() {
BeforeEach(func() {
indicator, indicatorError = indicators.NewKamaWithoutStorage(indicators.MaximumLookbackPeriod+1, fakeFloatValAvailable)
})
It("the indicator should not be created and return the appropriate error message", func() {
Expect(indicator).To(BeNil())
})
})
})
var _ = Describe("when calculating a kaufmann adaptive moving average (kama) with DOHLCV source data", func() {
var (
period int = 3
indicator *indicators.Kama
inputs IndicatorWithFloatBoundsSharedSpecInputs
stream *fakeDOHLCVStreamSubscriber
indicatorError error
)
Context("given the indicator is created via the standard constructor", func() {
BeforeEach(func() {
indicator, _ = indicators.NewKama(period, gotrade.UseClosePrice)
inputs = NewIndicatorWithFloatBoundsSharedSpecInputs(indicator, len(sourceDOHLCVData), indicator,
func() float64 {
return GetFloatDataMax(indicator.Data)
},
func() float64 {
return GetFloatDataMin(indicator.Data)
})
})
Context("and the indicator has not yet received any ticks", func() {
ShouldBeAnInitialisedIndicator(&inputs)
ShouldNotHaveAnyFloatBoundsSetYet(&inputs)
})
Context("and the indicator has received less ticks than the lookback period", func() {
BeforeEach(func() {
for i := 0; i < indicator.GetLookbackPeriod(); i++ {
|
ShouldBeAnIndicatorThatHasReceivedFewerTicksThanItsLookbackPeriod(&inputs)
ShouldNotHaveAnyFloatBoundsSetYet(&inputs)
})
Context("and the indicator has received ticks equal to the lookback period", func() {
BeforeEach(func() {
for i := 0; i <= indicator.GetLookbackPeriod(); i++ {
indicator.ReceiveDOHLCVTick(sourceDOHLCVData[i], i+1)
}
})
ShouldBeAnIndicatorThatHasReceivedTicksEqualToItsLookbackPeriod(&inputs)
ShouldHaveFloatBoundsSetToMinMaxOfResults(&inputs)
})
Context("and the indicator has received more ticks than the lookback period", func() {
BeforeEach(func() {
for i := range sourceDOHLCVData {
indicator.ReceiveDOHLCVTick(sourceDOHLCVData[i], i+1)
}
})
ShouldBeAnIndicatorThatHasReceivedMoreTicksThanItsLookbackPeriod(&inputs)
ShouldHaveFloatBoundsSetToMinMaxOfResults(&inputs)
})
Context("and the indicator has recieved all of its ticks", func() {
BeforeEach(func() {
for i := 0; i < len(sourceDOHLCVData); i++ {
indicator.ReceiveDOHLCVTick(sourceDOHLCVData[i], i+1)
}
})
ShouldBeAnIndicatorThatHasReceivedAllOfItsTicks(&inputs)
ShouldHaveFloatBoundsSetToMinMaxOfResults(&inputs)
})
})
Context("given the indicator is created via the standard constructor with a nil data selection func", func() {
BeforeEach(func() {
indicator, indicatorError = indicators.NewKama(period, nil)
})
It("the indicator should not be created and return the appropriate error message", func() {
Expect(indicator).To(BeNil())
Expect(indicatorError).To(Equal(indicators.ErrDOHLCVDataSelectFuncIsNil))
})
})
Context("given the indicator is created via the constructor with defaulted parameters", func() {
BeforeEach(func() {
indicator, _ = indicators.NewDefaultKama()
inputs = NewIndicatorWithFloatBoundsSharedSpecInputs(indicator, len(sourceDOHLCVData), indicator,
func() float64 {
return GetFloatDataMax(indicator.Data)
},
func() float64 {
return GetFloatDataMin(indicator.Data)
})
})
Context("and the indicator has not yet received any ticks", func() {
ShouldBeAnInitialisedIndicator(&inputs)
ShouldNotHaveAnyFloatBoundsSetYet(&inputs)
})
Context("and the indicator has recieved all of its ticks", func() {
BeforeEach(func() {
for i := 0; i < len(sourceDOHLCVData); i++ {
indicator.ReceiveDOHLCVTick(sourceDOHLCVData[i], i+1)
}
})
ShouldBeAnIndicatorThatHasReceivedAllOfItsTicks(&inputs)
ShouldHaveFloatBoundsSetToMinMaxOfResults(&inputs)
})
})
Context("given the indicator is created via the constructor with fixed source length", func() {
BeforeEach(func() {
indicator, _ = indicators.NewKamaWithSrcLen(uint(len(sourceDOHLCVData)), 4, gotrade.UseClosePrice)
inputs = NewIndicatorWithFloatBoundsSharedSpecInputs(indicator, len(sourceDOHLCVData), indicator,
func() float64 {
return GetFloatDataMax(indicator.Data)
},
func() float64 {
return GetFloatDataMin(indicator.Data)
})
})
It("should have pre-allocated storge for the output data", func() {
Expect(cap(indicator.Data)).To(Equal(len(sourceDOHLCVData) - indicator.GetLookbackPeriod()))
})
Context("and the indicator has not yet received any ticks", func() {
ShouldBeAnInitialisedIndicator(&inputs)
ShouldNotHaveAnyFloatBoundsSetYet(&inputs)
})
Context("and the indicator has recieved all of its ticks", func() {
BeforeEach(func() {
for i := 0; i < len(sourceDOHLCVData); i++ {
indicator.ReceiveDOHLCVTick(sourceDOHLCVData[i], i+1)
}
})
ShouldBeAnIndicatorThatHasReceivedAllOfItsTicks(&inputs)
ShouldHaveFloatBoundsSetToMinMaxOfResults(&inputs)
It("no new storage capcity should have been allocated", func() {
Expect(len(indicator.Data)).To(Equal(cap(indicator.Data)))
})
})
})
Context("given the indicator is created via the constructor with defaulted parameters and fixed source length", func() {
BeforeEach(func() {
indicator, _ = indicators.NewDefaultKamaWithSrcLen(uint(len(sourceDOHLCVData)))
inputs = NewIndicatorWithFloatBoundsSharedSpecInputs(indicator, len(sourceDOHLCVData), indicator,
func() float64 {
return GetFloatDataMax(indicator.Data)
},
func() float64 {
return GetFloatDataMin(indicator.Data)
})
})
It("should have pre-allocated storge for the output data", func() {
Expect(cap(indicator.Data)).To(Equal(len(sourceDOHLCVData) - indicator.GetLookbackPeriod()))
})
Context("and the indicator has not yet received any ticks", func() {
ShouldBeAnInitialisedIndicator(&inputs)
ShouldNotHaveAnyFloatBoundsSetYet(&inputs)
})
Context("and the indicator has recieved all of its ticks", func() {
BeforeEach(func() {
for i := 0; i < len(sourceDOHLCVData); i++ {
indicator.ReceiveDOHLCVTick(sourceDOHLCVData[i], i+1)
}
})
ShouldBeAnIndicatorThatHasReceivedAllOfItsTicks(&inputs)
ShouldHaveFloatBoundsSetToMinMaxOfResults(&inputs)
It("no new storage capcity should have been allocated", func() {
Expect(len(indicator.Data)).To(Equal(cap(indicator.Data)))
})
})
})
Context("given the indicator is created via the constructor for use with a price stream", func() {
BeforeEach(func() {
stream = newFakeDOHLCVStreamSubscriber()
indicator, _ = indicators.NewKamaForStream(stream, 4, gotrade.UseClosePrice)
inputs = NewIndicatorWithFloatBoundsSharedSpecInputs(indicator, len(sourceDOHLCVData), indicator,
func() float64 {
return GetFloatDataMax(indicator.Data)
},
func() float64 {
return GetFloatDataMin(indicator.Data)
})
})
It("should have requested to be attached to the stream", func() {
Expect(stream.lastCallToAddTickSubscriptionArg).To(Equal(indicator))
})
Context("and the indicator has not yet received any ticks", func() {
ShouldBeAnInitialisedIndicator(&inputs)
ShouldNotHaveAnyFloatBoundsSetYet(&inputs)
})
Context("and the indicator has recieved all of its ticks", func() {
BeforeEach(func() {
for i := 0; i < len(sourceDOHLCVData); i++ {
indicator.ReceiveDOHLCVTick(sourceDOHLCVData[i], i+1)
}
})
ShouldBeAnIndicatorThatHasReceivedAllOfItsTicks(&inputs)
ShouldHaveFloatBoundsSetToMinMaxOfResults(&inputs)
})
})
Context("given the indicator is created via the constructor for use with a price stream with defaulted parameters", func() {
BeforeEach(func() {
stream = newFakeDOHLCVStreamSubscriber()
indicator, _ = indicators.NewDefaultKamaForStream(stream)
inputs = NewIndicatorWithFloatBoundsSharedSpecInputs(indicator, len(sourceDOHLCVData), indicator,
func() float64 {
return GetFloatDataMax(indicator.Data)
},
func() float64 {
return GetFloatDataMin(indicator.Data)
})
})
It("should have requested to be attached to the stream", func() {
Expect(stream.lastCallToAddTickSubscriptionArg).To(Equal(indicator))
})
Context("and the indicator has not yet received any ticks", func() {
ShouldBeAnInitialisedIndicator(&inputs)
ShouldNotHaveAnyFloatBoundsSetYet(&inputs)
})
Context("and the indicator has recieved all of its ticks", func() {
BeforeEach(func() {
for i := 0; i < len(sourceDOHLCVData); i++ {
indicator.ReceiveDOHLCVTick(sourceDOHLCVData[i], i+1)
}
})
ShouldBeAnIndicatorThatHasReceivedAllOfItsTicks(&inputs)
ShouldHaveFloatBoundsSetToMinMaxOfResults(&inputs)
})
})
Context("given the indicator is created via the constructor for use with a price stream with fixed source length", func() {
BeforeEach(func() {
stream = newFakeDOHLCVStreamSubscriber()
indicator, _ = indicators.NewKamaForStreamWithSrcLen(uint(len(sourceDOHLCVData)), stream, 4, gotrade.UseClosePrice)
inputs = NewIndicatorWithFloatBoundsSharedSpecInputs(indicator, len(sourceDOHLCVData), indicator,
func() float64 {
return GetFloatDataMax(indicator.Data)
},
func() float64 {
return GetFloatDataMin(indicator.Data)
})
})
It("should have pre-allocated storge for the output data", func() {
Expect(cap(indicator.Data)).To(Equal(len(sourceDOHLCVData) - indicator.GetLookbackPeriod()))
})
It("should have requested to be attached to the stream", func() {
Expect(stream.lastCallToAddTickSubscriptionArg).To(Equal(indicator))
})
Context("and the indicator has not yet received any ticks", func() {
ShouldBeAnInitialisedIndicator(&inputs)
ShouldNotHaveAnyFloatBoundsSetYet(&inputs)
})
Context("and the indicator has recieved all of its ticks", func() {
BeforeEach(func() {
for i := 0; i < len(sourceDOHLCVData); i++ {
indicator.ReceiveDOHLCVTick(sourceDOHLCVData[i], i+1)
}
})
ShouldBeAnIndicatorThatHasReceivedAllOfItsTicks(&inputs)
ShouldHaveFloatBoundsSetToMinMaxOfResults(&inputs)
It("no new storage capcity should have been allocated", func() {
Expect(len(indicator.Data)).To(Equal(cap(indicator.Data)))
})
})
})
Context("given the indicator is created via the constructor for use with a price stream with fixed source length with defaulted parmeters", func() {
BeforeEach(func() {
stream = newFakeDOHLCVStreamSubscriber()
indicator, _ = indicators.NewDefaultKamaForStreamWithSrcLen(uint(len(sourceDOHLCVData)), stream)
inputs = NewIndicatorWithFloatBoundsSharedSpecInputs(indicator, len(sourceDOHLCVData), indicator,
func() float64 {
return GetFloatDataMax(indicator.Data)
},
func() float64 {
return GetFloatDataMin(indicator.Data)
})
})
It("should have pre-allocated storge for the output data", func() {
Expect(cap(indicator.Data)).To(Equal(len(sourceDOHLCVData) - indicator.GetLookbackPeriod()))
})
It("should have requested to be attached to the stream", func() {
Expect(stream.lastCallToAddTickSubscriptionArg).To(Equal(indicator))
})
Context("and the indicator has not yet received any ticks", func() {
ShouldBeAnInitialisedIndicator(&inputs)
ShouldNotHaveAnyFloatBoundsSetYet(&inputs)
})
Context("and the indicator has recieved all of its ticks", func() {
BeforeEach(func() {
for i := 0; i < len(sourceDOHLCVData); i++ {
indicator.ReceiveDOHLCVTick(sourceDOHLCVData[i], i+1)
}
})
ShouldBeAnIndicatorThatHasReceivedAllOfItsTicks(&inputs)
ShouldHaveFloatBoundsSetToMinMaxOfResults(&inputs)
It("no new storage capcity should have been allocated", func() {
Expect(len(indicator.Data)).To(Equal(cap(indicator.Data)))
})
})
})
})
|
indicator.ReceiveDOHLCVTick(sourceDOHLCVData[i], i+1)
}
})
|
test_day_05.py
|
from aoc.day_02 import IntcodeComputer
def _run_test(program, expected):
computer = IntcodeComputer(program).execute()
assert ",".join(str(x) for x in computer.memory) == expected
def test_input(monkeypatch):
monkeypatch.setattr("builtins.input", lambda: "1")
program = "3,0,99"
expected = "1,0,99"
_run_test(program, expected)
def test_output(capfd):
program = "4,0,99"
IntcodeComputer(program).execute()
captured = capfd.readouterr()
assert captured.out == "4\n"
def test_ex01():
program = "1002,4,3,4,33"
expected = "1002,4,3,4,99"
_run_test(program, expected)
def _test_in_out(comp, in_val, out_val, monkeypatch, capfd):
monkeypatch.setattr("builtins.input", lambda: in_val)
comp.execute()
captured = capfd.readouterr()
assert captured.out == f"{out_val}\n"
def test_ex02(monkeypatch, capfd):
program = "3,9,8,9,10,9,4,9,99,-1,8"
comp = IntcodeComputer(program)
_test_in_out(comp, 8, 1, monkeypatch, capfd)
_test_in_out(comp, 1, 0, monkeypatch, capfd)
def test_ex03(monkeypatch, capfd):
program = "3,9,7,9,10,9,4,9,99,-1,8"
comp = IntcodeComputer(program)
_test_in_out(comp, 7, 1, monkeypatch, capfd)
_test_in_out(comp, 9, 0, monkeypatch, capfd)
def test_ex04(monkeypatch, capfd):
program = "3,3,1108,-1,8,3,4,3,99"
comp = IntcodeComputer(program)
_test_in_out(comp, 8, 1, monkeypatch, capfd)
_test_in_out(comp, 1, 0, monkeypatch, capfd)
def test_ex05(monkeypatch, capfd):
program = "3,3,1107,-1,8,3,4,3,99"
comp = IntcodeComputer(program)
_test_in_out(comp, 7, 1, monkeypatch, capfd)
_test_in_out(comp, 8, 0, monkeypatch, capfd)
def test_ex06(monkeypatch, capfd):
program = "3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9"
comp = IntcodeComputer(program)
_test_in_out(comp, 0, 0, monkeypatch, capfd)
_test_in_out(comp, 1, 1, monkeypatch, capfd)
def test_ex07(monkeypatch, capfd):
program = "3,3,1105,-1,9,1101,0,0,12,4,12,99,1"
comp = IntcodeComputer(program)
_test_in_out(comp, 0, 0, monkeypatch, capfd)
_test_in_out(comp, 1, 1, monkeypatch, capfd)
def test_ex08(monkeypatch, capfd):
|
program = (
"3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,"
"1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,"
"999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99"
)
comp = IntcodeComputer(program)
_test_in_out(comp, 7, 999, monkeypatch, capfd)
_test_in_out(comp, 8, 1000, monkeypatch, capfd)
_test_in_out(comp, 9, 1001, monkeypatch, capfd)
|
|
footer.js
|
import { Link } from "gatsby"
// import PropTypes from "prop-types"
import React from "react"
const Footer = () => (
<footer class="footer">
<div class="container py-5">
<div class="row">
<div class="col-12 col-md">
<h4 style={{color: 'white'}}>Alma</h4>
<small class="d-block mb-3 text-muted">© 1999 - {new Date().getFullYear() }</small>
</div>
<div class="col-6 col-md">
<ul class="list-unstyled text-small">
<h5 style={{color: 'white'}}>Cinema</h5>
<li><Link className="text-muted" to="/cinema/">Accueil</Link></li>
<li><Link className="text-muted" to="/cinema/affiche/">A l'Affiche</Link></li>
<li><Link className="text-muted" to="/cinema/theatre/">Theatre</Link></li>
<li><Link className="text-muted" to="/cinema/ballets/">Ballets/Operas</Link></li>
<li><Link className="text-muted" to="/cinema/scolaire/">Cinema Scolaire</Link></li>
<li><Link className="text-muted" to="/cinema/comedie/">Comedie Francaise</Link></li>
<li><Link className="text-muted" to="/cinema/paiement/">Nos Tarifs</Link></li>
</ul>
</div>
<div class="col-6 col-md">
<ul class="list-unstyled text-small">
<h5 style={{color: 'white'}}>Radio</h5>
<li><Link className="text-muted" to="/radio/">Accueil</Link></li>
<li><Link className="text-muted" to="/radio/emissions/">Emissions</Link></li>
<li><Link className="text-muted" to="/radio/images/">Gallerie</Link></li>
<li><Link className="text-muted" to="/radio/partenaires/">Partenaires</Link></li>
</ul>
</div>
<div class="col-6 col-md">
<ul class="list-unstyled text-small">
<h5 style={{color: 'white'}}>A Propos</h5>
<li><Link className="text-muted" to="/equipe/">Notre Equipe</Link></li>
<li><Link className="text-muted" to="/lieu/">Lieu</Link></li>
<li><Link className="text-muted" to="/confidentialite/">Confidentialite</Link></li>
<li><Link className="text-muted" to="/tos/">Termes de service</Link></li>
<li><Link className="text-muted" to="/gestion/">Gestion Administrateur</Link></li>
</ul>
</div>
</div>
</div>
</footer>
);
|
export default Footer
|
|
fr-CM.ts
|
/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
// THIS CODE IS GENERATED - DO NOT MODIFY
// See angular/tools/gulp-tasks/cldr/extract.js
function
|
(n: number): number {
let i = Math.floor(Math.abs(n));
if (i === 0 || i === 1) return 1;
return 5;
}
export default [
'fr-CM', [['mat.', 'soir'], , ['matin', 'soir']], ,
[
['D', 'L', 'M', 'M', 'J', 'V', 'S'], ['dim.', 'lun.', 'mar.', 'mer.', 'jeu.', 'ven.', 'sam.'],
['dimanche', 'lundi', 'mardi', 'mercredi', 'jeudi', 'vendredi', 'samedi'],
['di', 'lu', 'ma', 'me', 'je', 've', 'sa']
],
,
[
['J', 'F', 'M', 'A', 'M', 'J', 'J', 'A', 'S', 'O', 'N', 'D'],
[
'janv.', 'févr.', 'mars', 'avr.', 'mai', 'juin', 'juil.', 'août', 'sept.', 'oct.', 'nov.',
'déc.'
],
[
'janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', 'août', 'septembre',
'octobre', 'novembre', 'décembre'
]
],
, [['av. J.-C.', 'ap. J.-C.'], , ['avant Jésus-Christ', 'après Jésus-Christ']], 1, [6, 0],
['dd/MM/y', 'd MMM y', 'd MMMM y', 'EEEE d MMMM y'],
['HH:mm', 'HH:mm:ss', 'HH:mm:ss z', 'HH:mm:ss zzzz'],
[
'{1} {0}',
'{1} \'à\' {0}',
,
],
[',', ' ', ';', '%', '+', '-', 'E', '×', '‰', '∞', 'NaN', ':'],
['#,##0.###', '#,##0 %', '#,##0.00 ¤', '#E0'], 'FCFA', 'franc CFA (BEAC)', plural
];
|
plural
|
logger.rs
|
use log::{LevelFilter, SetLoggerError};
use log4rs::{
append::console::ConsoleAppender,
config::{Appender, Config, Root},
encode::pattern::PatternEncoder,
Handle,
};
pub fn
|
(level: LevelFilter) -> Result<Handle, SetLoggerError> {
let stdout = ConsoleAppender::builder()
.encoder(Box::new(PatternEncoder::new("{h({m})}{n}")))
.build();
let config = Config::builder()
.appender(Appender::builder().build("stdout", Box::new(stdout)))
.build(Root::builder().appender("stdout").build(level))
.unwrap();
log4rs::init_config(config)
}
|
init_logger
|
website_test.go
|
package jobs
import (
"bytes"
"html/template"
"net/http"
"net/http/httptest"
"reflect"
"testing"
"time"
_ "embed"
"github.com/dghubble/go-twitter/twitter"
"github.com/xarantolus/spacex-hop-bot/scrapers"
"github.com/xarantolus/spacex-hop-bot/util"
)
type TestTweetingClient struct {
tweetedTweetText string
tweetedTweetReply *int64
}
|
func (r *TestTweetingClient) AddListMember(listID int64, userID int64) (err error) {
panic("AddListMember not implemented")
}
func (r *TestTweetingClient) Retweet(tweet *twitter.Tweet) error {
panic("Retweet not implemented")
}
func (r *TestTweetingClient) Tweet(text string, inReplyToID *int64) (t *twitter.Tweet, err error) {
if r.HasTweeted() {
panic("test tweeted more than once")
}
r.tweetedTweetText = text
r.tweetedTweetReply = inReplyToID
return &twitter.Tweet{
ID: 13589,
SimpleText: text,
}, nil
}
func (r *TestTweetingClient) HasTweetedWithoutAnswering(text string) bool {
return r.tweetedTweetText == text && r.tweetedTweetReply == nil
}
func (r *TestTweetingClient) HasTweeted() bool {
return r.tweetedTweetText != ""
}
//go:embed testdata/spacex_starship_page.html
var pageTemplateText string
func Test_runWebsiteScrape(t *testing.T) {
tmpl := template.Must(template.New("").Parse(pageTemplateText))
var pageContentWithText = func(text string) string {
var b bytes.Buffer
err := tmpl.Execute(&b, struct {
Text string
}{
Text: text,
})
if err != nil {
panic("executing template: " + err.Error())
}
return b.String()
}
var date = func(year int, month time.Month, day int) time.Time {
return time.Date(year, month, day, 0, 0, 0, 0, util.NorthAmericaTZ)
}
var lastChange = scrapers.StarshipInfo{
ShipName: "SN15",
NextFlightDate: date(2021, time.May, 5),
}
tests := []struct {
pageContent string
now time.Time
wantTweet string
wantNewInfo scrapers.StarshipInfo
wantErr bool
}{
{
// This one should basically do nothing, there are no changes.
// The problem we have here is that the fuzzy date matcher assumes that May 5 is in the current year, while the flight was in 2021
// I have decided to handle this case by just editing the saved JSON file once a year
pageContent: pageContentWithText("On Wednesday, May 5, Starship serial number 15 (SN15) successfully completed SpaceX’s fifth high-altitude flight test of a Starship prototype from Starbase in Texas."),
now: date(2022, time.January, 8),
wantNewInfo: scrapers.StarshipInfo{
ShipName: "SN15",
// Yes, this is wrong, but for the test it's right
NextFlightDate: date(2022, time.May, 5),
},
wantTweet: "The SpaceX #Starship website now mentions May 5 for #SN15\n#WenHop\n" + scrapers.StarshipURL,
},
{
pageContent: pageContentWithText("On Wednesday, February 13, Starship serial number 20 (S20) will do another high-altitude flight test of a Starship prototype from Starbase in Texas."),
now: date(2022, time.January, 8),
wantNewInfo: scrapers.StarshipInfo{
ShipName: "S20",
NextFlightDate: date(2022, time.February, 13),
},
wantTweet: "The SpaceX #Starship website now mentions February 13 for #S20\n#WenHop\n" + scrapers.StarshipURL,
},
{
pageContent: pageContentWithText("SpaceX plans an orbital test flight of S20 and B4 for Wednesday, February 16"),
now: date(2022, time.January, 8),
wantErr: false,
wantNewInfo: scrapers.StarshipInfo{
ShipName: "S20",
NextFlightDate: date(2022, time.February, 16),
Orbital: true,
},
wantTweet: "The SpaceX #Starship website now mentions February 16 for an orbital flight of #S20\n#WenHop\n" + scrapers.StarshipURL,
},
{
pageContent: pageContentWithText("SpaceX plans an orbital flight test with S20 and B4 for Wednesday, February 16"),
now: date(2022, time.January, 8),
wantErr: false,
wantNewInfo: scrapers.StarshipInfo{
ShipName: "S20",
NextFlightDate: date(2022, time.February, 16),
Orbital: true,
},
wantTweet: "The SpaceX #Starship website now mentions February 16 for an orbital flight of #S20\n#WenHop\n" + scrapers.StarshipURL,
},
{
pageContent: pageContentWithText("SpaceX plans a test flight of S20 and B4 with an orbital trajectory for Wednesday, February 16"),
now: date(2022, time.January, 8),
wantErr: false,
wantNewInfo: scrapers.StarshipInfo{
ShipName: "S20",
NextFlightDate: date(2022, time.February, 16),
Orbital: true,
},
wantTweet: "The SpaceX #Starship website now mentions February 16 for an orbital flight of #S20\n#WenHop\n" + scrapers.StarshipURL,
},
}
for _, tt := range tests {
t.Run(t.Name(), func(t *testing.T) {
twitterClient := &TestTweetingClient{}
server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
rw.Header().Set("Content-Type", "text/html")
rw.Write([]byte(tt.pageContent))
}))
if tt.now.IsZero() {
tt.now = time.Now()
}
gotNewInfo, err := runWebsiteScrape(twitterClient, nil, server.URL, lastChange, tt.now)
if (err != nil) != tt.wantErr {
t.Errorf("runWebsiteScrape() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(gotNewInfo, tt.wantNewInfo) {
t.Errorf("runWebsiteScrape() = %v, want %v", gotNewInfo, tt.wantNewInfo)
}
if tt.wantTweet != "" && !twitterClient.HasTweetedWithoutAnswering(tt.wantTweet) {
rt := twitterClient.tweetedTweetText
if rt == "" {
t.Errorf("runWebsiteScrape() should have tweeted %q, but didn't tweet anything", tt.wantTweet)
} else {
t.Errorf("runWebsiteScrape() should have tweeted %q, but tweeted %q", tt.wantTweet, rt)
}
}
if tt.wantTweet == "" && twitterClient.HasTweeted() {
t.Errorf("runWebsiteScrape() should not have tweeted anything, but did (%q)", twitterClient.tweetedTweetText)
}
})
}
}
|
func (r *TestTweetingClient) LoadStatus(tweetID int64) (*twitter.Tweet, error) {
panic("LoadStatus not implemented")
}
|
quad.rs
|
use crate::geom::{tri, vertex, Cuboid, Range, Rect, Tri, Vertex, Vertex2d, Vertex3d};
use core::ops::{Deref, Index};
/// The number of vertices in a quad.
pub const NUM_VERTICES: u8 = 4;
/// The number of triangles that make up a quad.
pub const NUM_TRIANGLES: u8 = 2;
/// The same as `triangles`, but instead returns the vertex indices for each triangle.
pub const TRIANGLE_INDEX_TRIS: TrianglesIndexTris = [[0, 1, 2], [0, 2, 3]];
pub type TrianglesIndexTris = [[usize; tri::NUM_VERTICES as usize]; NUM_TRIANGLES as usize];
/// The number of indices used to describe each triangle in the quad.
pub const NUM_TRIANGLE_INDICES: u8 = 6;
/// The same as `triangles`, but instead returns the vertex indices for each triangle.
pub const TRIANGLE_INDICES: [usize; NUM_TRIANGLE_INDICES as usize] = [0, 1, 2, 0, 2, 3];
/// A quad represented by its four vertices.
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub struct Quad<V = vertex::Default>(pub [V; NUM_VERTICES as usize]);
/// An `Iterator` yielding the two triangles that make up a quad.
#[derive(Clone, Debug)]
pub struct Triangles<V = vertex::Default> {
a: Option<Tri<V>>,
b: Option<Tri<V>>,
}
/// A simple iterator yielding each vertex in a `Quad`.
#[derive(Clone, Debug)]
pub struct Vertices<V = vertex::Default> {
quad: Quad<V>,
index: u8,
}
impl<V> Quad<V>
where
V: Vertex,
{
/// Produce an iterator yielding each vertex in the `Quad`.
pub fn vertices(self) -> Vertices<V> {
vertices(self)
}
/// Produce the centroid of the quad, aka the "mean"/"average" vertex.
pub fn centroid(&self) -> V
where
V: vertex::Average,
{
centroid(self)
}
/// Triangulates the given quad, represented by four points that describe its edges in either
/// clockwise or anti-clockwise order.
///
/// # Example
///
/// The following rectangle
///
/// ```ignore
/// a b
/// --------
/// | |
/// | |
/// | |
/// --------
/// d c
///
/// ```
///
/// given as
///
/// ```ignore
/// triangles([a, b, c, d])
/// ```
///
/// returns
///
/// ```ignore
/// (Tri([a, b, c]), Tri([a, c, d]))
/// ```
///
/// Here's a basic code example:
///
/// ```
/// # use nannou_core as nannou;
/// use nannou::geom::{self, pt2, Quad, Tri};
///
/// fn main() {
/// let a = pt2(0.0, 1.0);
/// let b = pt2(1.0, 1.0);
/// let c = pt2(1.0, 0.0);
/// let d = pt2(0.0, 0.0);
/// let quad = Quad([a, b, c, d]);
/// let triangles = geom::quad::triangles(&quad);
/// assert_eq!(triangles, (Tri([a, b, c]), Tri([a, c, d])));
/// }
/// ```
#[inline]
pub fn triangles(&self) -> (Tri<V>, Tri<V>) {
triangles(self)
}
/// The same as `triangles` but provided as an **Iterator**.
pub fn triangles_iter(&self) -> Triangles<V> {
triangles_iter(self)
}
/// The bounding `Rect` of the quad.
pub fn bounding_rect(self) -> Rect<V::Scalar>
where
V: Vertex2d,
{
let (a, b, c, d) = self.into();
let ([ax, ay], b, c, d) = (a.point2(), b.point2(), c.point2(), d.point2());
let rect = Rect {
x: Range::new(ax, ax),
y: Range::new(ay, ay),
};
rect.stretch_to_point(b)
.stretch_to_point(c)
.stretch_to_point(d)
}
/// The bounding `Rect` of the triangle.
pub fn bounding_cuboid(self) -> Cuboid<V::Scalar>
where
V: Vertex3d,
{
let (a, b, c, d) = self.into();
let ([ax, ay, az], b, c, d) = (a.point3(), b.point3(), c.point3(), d.point3());
let cuboid = Cuboid {
x: Range::new(ax, ax),
y: Range::new(ay, ay),
z: Range::new(az, az),
};
cuboid
.stretch_to_point(b)
.stretch_to_point(c)
.stretch_to_point(d)
}
/// Map the **Quad**'s vertices to a new type.
pub fn map_vertices<F, V2>(self, mut map: F) -> Quad<V2>
where
F: FnMut(V) -> V2,
{
let (a, b, c, d) = self.into();
Quad([map(a), map(b), map(c), map(d)])
}
}
/// Produce an iterator yielding each vertex in the given **Quad**.
pub fn vertices<V>(quad: Quad<V>) -> Vertices<V> {
let index = 0;
Vertices { quad, index }
}
/// Produce the centroid of the quad, aka the "mean"/"average" vertex.
pub fn centroid<V>(quad: &Quad<V>) -> V
where
V: vertex::Average,
{
crate::geom::centroid(quad.iter().cloned()).unwrap()
}
/// Triangulates the given quad, represented by four points that describe its edges in either
/// clockwise or anti-clockwise order.
///
/// # Example
///
/// The following rectangle
///
/// ```ignore
///
/// a b
/// --------
/// | |
/// | |
/// | |
/// --------
/// d c
///
/// ```
///
/// given as
///
/// ```ignore
/// triangles([a, b, c, d])
/// ```
///
/// returns
///
/// ```ignore
/// (Tri([a, b, c]), Tri([a, c, d]))
/// ```
///
/// Here's a basic code example:
///
/// ```
/// # use nannou_core as nannou;
/// use nannou::geom::{self, pt2, Quad, Tri};
///
/// fn main() {
/// let a = pt2(0.0, 1.0);
/// let b = pt2(1.0, 1.0);
/// let c = pt2(1.0, 0.0);
/// let d = pt2(0.0, 0.0);
/// let quad = Quad([a, b, c, d]);
/// let triangles = geom::quad::triangles(&quad);
/// assert_eq!(triangles, (Tri([a, b, c]), Tri([a, c, d])));
/// }
/// ```
#[inline]
pub fn triangles<V>(q: &Quad<V>) -> (Tri<V>, Tri<V>)
where
V: Vertex,
{
let a = Tri::from_index_tri(&q.0, &TRIANGLE_INDEX_TRIS[0]);
let b = Tri::from_index_tri(&q.0, &TRIANGLE_INDEX_TRIS[1]);
(a, b)
}
/// The same as `triangles` but provided as an `Iterator`.
pub fn triangles_iter<V>(points: &Quad<V>) -> Triangles<V>
where
V: Vertex,
{
let (a, b) = triangles(points);
Triangles {
a: Some(a),
b: Some(b),
}
}
impl<V> Iterator for Triangles<V> {
type Item = Tri<V>;
fn next(&mut self) -> Option<Self::Item> {
self.a.take().or_else(|| self.b.take())
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
}
impl<V> DoubleEndedIterator for Triangles<V> {
fn next_back(&mut self) -> Option<Self::Item> {
self.b.take().or_else(|| self.a.take())
}
}
impl<V> ExactSizeIterator for Triangles<V> {
fn len(&self) -> usize {
match (&self.a, &self.b) {
(&Some(_), &Some(_)) => 2,
(&None, &Some(_)) => 0,
_ => 1,
}
}
}
impl<V> Iterator for Vertices<V>
where
V: Clone,
{
type Item = V;
fn next(&mut self) -> Option<Self::Item> {
if self.index < NUM_VERTICES {
let v = self.quad[self.index as usize].clone();
self.index += 1;
Some(v)
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
}
impl<V> ExactSizeIterator for Vertices<V>
where
V: Clone,
{
fn len(&self) -> usize {
NUM_VERTICES as usize - self.index as usize
}
}
impl<V> Deref for Quad<V> {
type Target = [V; NUM_VERTICES as usize];
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<V> From<[V; NUM_VERTICES as usize]> for Quad<V>
where
V: Vertex,
{
fn from(points: [V; NUM_VERTICES as usize]) -> Self {
Quad(points)
}
}
impl<V> From<(V, V, V, V)> for Quad<V>
where
V: Vertex,
{
fn from((a, b, c, d): (V, V, V, V)) -> Self {
Quad([a, b, c, d])
}
}
impl<V> Into<[V; NUM_VERTICES as usize]> for Quad<V>
where
V: Vertex,
{
fn into(self) -> [V; NUM_VERTICES as usize] {
self.0
}
}
impl<V> Into<(V, V, V, V)> for Quad<V>
where
V: Vertex,
{
fn into(self) -> (V, V, V, V) {
(self[0], self[1], self[2], self[3])
}
}
impl<V> AsRef<Quad<V>> for Quad<V>
where
V: Vertex,
{
fn
|
(&self) -> &Quad<V> {
self
}
}
impl<V> AsRef<[V; NUM_VERTICES as usize]> for Quad<V>
where
V: Vertex,
{
fn as_ref(&self) -> &[V; NUM_VERTICES as usize] {
&self.0
}
}
impl<V> Index<usize> for Quad<V>
where
V: Vertex,
{
type Output = V;
fn index(&self, index: usize) -> &Self::Output {
&self.0[index]
}
}
|
as_ref
|
neighbors.ts
|
import { INode } from "../types";
|
export interface INeighborsOptions<TNodeMeta, TEdgeMeta> {
node: INode<TNodeMeta, TEdgeMeta>;
exclude?: Set<INode<TNodeMeta, TEdgeMeta>>;
includeEdgeToExcludedNode?: boolean;
}
/**
* This method gathers neighboring nodes of an input node. You can optionally exclude nodes from the returned list.
*
 * If includeEdgeToExcludedNode is set, then when a node is examined that should be excluded, the edge leading to that node is still included in the returned edges.
*/
export function neighbors<TNodeMeta, TEdgeMeta>({
node,
exclude,
includeEdgeToExcludedNode
}: INeighborsOptions<TNodeMeta, TEdgeMeta>) {
const nodes = [];
const edges = [];
if (exclude) {
// Gather incoming nodes
for (let i = 0, iMax = node.in.length; i < iMax; ++i) {
const edge = node.in[i];
if (!exclude.has(edge.a)) {
nodes.push(edge.a);
edges.push(edge);
} else if (includeEdgeToExcludedNode) {
edges.push(edge);
}
}
// Gather outgoing nodes
for (let i = 0, iMax = node.out.length; i < iMax; ++i) {
const edge = node.out[i];
if (!exclude.has(edge.b)) {
nodes.push(edge.b);
edges.push(edge);
} else if (includeEdgeToExcludedNode) {
edges.push(edge);
}
}
} else {
// Gather incoming nodes
for (let i = 0, iMax = node.in.length; i < iMax; ++i) {
const edge = node.in[i];
nodes.push(edge.a);
edges.push(edge);
}
// Gather outgoing nodes
for (let i = 0, iMax = node.out.length; i < iMax; ++i) {
const edge = node.out[i];
nodes.push(edge.b);
edges.push(edge);
}
}
return {
nodes,
edges
};
}
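// A minimal usage sketch (illustrative only; `someNode` and `previousNode` are hypothetical
// values, not part of this module): gather the unvisited neighbors of a node while still
// keeping the edges that lead back into the excluded set.
//
// const visited = new Set([previousNode]);
// const { nodes, edges } = neighbors({
//   node: someNode,
//   exclude: visited,
//   includeEdgeToExcludedNode: true,
// });
// // `nodes` holds only non-excluded neighbors; `edges` also keeps edges pointing at excluded nodes.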
| |
mod_markdown_test.go
|
// Copyright 2021 The BFE Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package mod_markdown
import (
"bytes"
"io/ioutil"
"net/url"
"reflect"
"strconv"
"testing"
"github.com/baidu/go-lib/web-monitor/web_monitor"
"github.com/bfenetworks/bfe/bfe_basic"
"github.com/bfenetworks/bfe/bfe_http"
"github.com/bfenetworks/bfe/bfe_module"
)
func TestModuleMarkdown_Init(t *testing.T)
|
func TestModuleMarkdown_renderMarkDownHandler(t *testing.T) {
m := prepareModule()
reqStrs := []string{"testcase0", "testcase1"}
typeStrs := []string{"default"}
for _, str := range reqStrs {
for _, typeStr := range typeStrs {
mdPath := "./testdata/" + str + ".md"
targetPath := "./testdata/" + str + "_" + typeStr + ".output"
urlPath := "/" + typeStr
req := prepareRequest("unittest", urlPath)
res := prepareResponse(mdPath)
m.renderMarkDownHandler(req, res)
got, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Errorf("ModuleMarkdown.TestModuleMarkdown_renderMarkDownHandler() error = %v", err)
}
want, err := ioutil.ReadFile(targetPath)
if !reflect.DeepEqual(got, want) {
t.Errorf("ModuleMarkdown.TestModuleMarkdown_renderMarkDownHandler(), got[%s], want[%s]", string(got), string(want))
}
if int64(len(want)) != res.ContentLength {
t.Errorf("ModuleMarkdown.TestModuleMarkdown_renderMarkDownHandler() got[%d], want[%d]", res.ContentLength, len(want))
}
}
}
// test invalid response
mdPath := "./testdata/testcase0.md"
responses := prepareabnormalResponse(mdPath)
for _, res := range responses {
err := m.checkResponse(res)
if err == nil {
t.Errorf("ModuleMarkdown.TestModuleMarkdown_checkResponse() got[nil], want[%s]", err)
}
}
	// check non-existent product
urlPath := "/default"
req := prepareRequest("not_exists", urlPath)
res := prepareResponse(mdPath)
code := m.renderMarkDownHandler(req, res)
if code != bfe_module.BfeHandlerGoOn {
t.Errorf("ModuleMarkdown.TestModuleMarkdown_checkResponse() got[%d], want[%d]", code, bfe_module.BfeHandlerGoOn)
}
}
func prepareModule() *ModuleMarkdown {
m := NewModuleMarkdown()
m.Init(bfe_module.NewBfeCallbacks(), web_monitor.NewWebHandlers(), "./testdata")
return m
}
func prepareRequest(product, path string) *bfe_basic.Request {
req := new(bfe_basic.Request)
req.HttpRequest = new(bfe_http.Request)
req.HttpRequest.Header = make(bfe_http.Header)
req.Route.Product = product
req.Session = new(bfe_basic.Session)
req.Context = make(map[interface{}]interface{})
req.HttpRequest.URL = &url.URL{}
req.HttpRequest.URL.Path = path
return req
}
func prepareResponse(filename string) *bfe_http.Response {
res := new(bfe_http.Response)
res.StatusCode = 200
res.Header = make(bfe_http.Header)
content, _ := ioutil.ReadFile(filename)
res.ContentLength = int64(len(content))
res.Body = ioutil.NopCloser(bytes.NewReader(content))
res.Header.Set("Content-Type", "text/markdown")
res.Header.Set("Content-length", strconv.FormatInt(res.ContentLength, 10))
return res
}
func prepareabnormalResponse(filename string) []*bfe_http.Response {
var responses []*bfe_http.Response
res := new(bfe_http.Response)
res.StatusCode = 200
res.Header = make(bfe_http.Header)
content, _ := ioutil.ReadFile(filename)
res.ContentLength = int64(len(content))
res.Body = ioutil.NopCloser(bytes.NewReader(content))
res.Header.Set("Content-Type", "text/html")
res.Header.Set("Content-length", strconv.FormatInt(res.ContentLength, 10))
responses = append(responses, res)
res = new(bfe_http.Response)
res.StatusCode = 200
res.Header = make(bfe_http.Header)
res.ContentLength = -1
res.Header.Set("Content-Type", "text/markdown")
res.Header.Set("Content-length", strconv.FormatInt(res.ContentLength, 10))
responses = append(responses, res)
res = new(bfe_http.Response)
res.StatusCode = 200
res.Header = make(bfe_http.Header)
res.ContentLength = -1
res.TransferEncoding = []string{"chunked"}
res.Header.Set("Content-Type", "text/markdown")
responses = append(responses, res)
return responses
}
|
{
type args struct {
cbs *bfe_module.BfeCallbacks
whs *web_monitor.WebHandlers
cr string
wantErr bool
}
// normal test case
m := NewModuleMarkdown()
case0 := args{
cbs: bfe_module.NewBfeCallbacks(),
whs: web_monitor.NewWebHandlers(),
cr: "./testdata",
wantErr: false,
}
if err := m.Init(case0.cbs, case0.whs, case0.cr); (err != nil) != case0.wantErr {
t.Errorf("ModuleMarkdown.Init() error = %v, wantErr %v", err, case0.wantErr)
}
	// non-existent path test case
m = NewModuleMarkdown()
case0 = args{
cbs: bfe_module.NewBfeCallbacks(),
whs: web_monitor.NewWebHandlers(),
cr: "./notexist",
wantErr: true,
}
if err := m.Init(case0.cbs, case0.whs, case0.cr); (err != nil) != case0.wantErr {
t.Errorf("ModuleMarkdown.Init() error = %v, wantErr %v", err, case0.wantErr)
}
	// nil web handlers test case
m = NewModuleMarkdown()
case0 = args{
cbs: bfe_module.NewBfeCallbacks(),
whs: nil,
cr: "./testdata",
wantErr: true,
}
if err := m.Init(case0.cbs, case0.whs, case0.cr); (err != nil) != case0.wantErr {
t.Errorf("ModuleMarkdown.Init() error = %v, wantErr %v", err, case0.wantErr)
}
// no register pointer case
m = NewModuleMarkdown()
case0 = args{
cbs: &bfe_module.BfeCallbacks{},
whs: web_monitor.NewWebHandlers(),
cr: "./testdata",
wantErr: true,
}
if err := m.Init(case0.cbs, case0.whs, case0.cr); (err != nil) != case0.wantErr {
t.Errorf("ModuleMarkdown.Init() error = %v, wantErr %v", err, case0.wantErr)
}
// no data case
m = NewModuleMarkdown()
case0 = args{
cbs: &bfe_module.BfeCallbacks{},
whs: web_monitor.NewWebHandlers(),
cr: "./testdata/case0",
wantErr: true,
}
if err := m.Init(case0.cbs, case0.whs, case0.cr); (err != nil) != case0.wantErr {
t.Errorf("ModuleMarkdown.Init() error = %v, wantErr %v", err, case0.wantErr)
}
// no data case
m = NewModuleMarkdown()
case0 = args{
cbs: &bfe_module.BfeCallbacks{},
whs: web_monitor.NewWebHandlers(),
cr: "./testdata/case1",
wantErr: true,
}
if err := m.Init(case0.cbs, case0.whs, case0.cr); (err != nil) != case0.wantErr {
t.Errorf("ModuleMarkdown.Init() error = %v, wantErr %v", err, case0.wantErr)
}
}
|
function-block-arg-adjustment.py
|
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See LICENSE.pytorch for license information.
from torch_mlir.dialects.torch.importer.jit_ir import ModuleBuilder
from utils import create_script_function
# RUN: %PYTHON %s | torch-mlir-opt | FileCheck %s
mb = ModuleBuilder()
# CHECK-LABEL: func @__torch__.refined_block_arg(
# CHECK-SAME: %[[ARG:.*]]: !torch.tensor) -> !torch.tensor {
# CHECK: %[[REFINED:.*]] = torch.tensor_static_info_cast %[[ARG]] : !torch.tensor to !torch.tensor<[1,384],f32>
# CHECK: %[[RESULT:.*]] = torch.tensor_static_info_cast %[[REFINED]] : !torch.tensor<[1,384],f32> to !torch.tensor
# CHECK: return %[[RESULT]] : !torch.tensor
mb.import_function(create_script_function("__torch__.refined_block_arg", """
graph(%0 : Float(1, 384)):
return (%0)
"""))
|
mb.module.operation.print()
print()
|
|
serializers.py
|
from rest_framework import serializers
class
|
(serializers.Serializer):
"""Serializes a name field for testing our APIView"""
name = serializers.CharField(max_length=10)
|
HelloSerializer
|
thumbnail.directive.ts
|
import { SchemaDirectiveVisitor } from "graphql-tools";
import { defaultFieldResolver, GraphQLField } from 'graphql';
export const ThumbnailDirective =(
next,
src,
args,
context,
) =>{
//field: GraphQLField<any, any>
debugger
// const { resolve = defaultFieldResolver,args } = field;
// field.resolve = async function(...args1) {
// const result = await resolve.apply(this, args1);
// const size=args[0];
|
// return `${result}.thumbnail-${size}.webp`;
// }
// return result;
// };
return next().then((url) => {
debugger
const size=args['size'];
console.log(url,size)
if (typeof(url) === 'string') {
return `${url}.thumbnail-${size}.webp`;
}
return url;
});
}
|
// debugger;
// if (typeof result === 'string'&&size) {
|
Keyboard.js
|
namespace("Keyboard")
// =====================================================================================================================
// Key codes copied from closure-library
// https://code.google.com/p/closure-library/source/browse/closure/goog/events/keycodes.js
// ---------------------------------------------------------------------------------------------------------------------
// Copyright 2006 The Closure Library Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
Keyboard.Codes = {
WIN_KEY_FF_LINUX : 0,
MAC_ENTER : 3,
BACKSPACE : 8,
TAB : 9,
NUM_CENTER : 12, // NUMLOCK on FF/Safari Mac
ENTER : 13,
SHIFT : 16,
CTRL : 17,
ALT : 18,
PAUSE : 19,
CAPS_LOCK : 20,
ESC : 27,
SPACE : 32,
PAGE_UP : 33, // also NUM_NORTH_EAST
PAGE_DOWN : 34, // also NUM_SOUTH_EAST
END : 35, // also NUM_SOUTH_WEST
HOME : 36, // also NUM_NORTH_WEST
LEFT : 37, // also NUM_WEST
UP : 38, // also NUM_NORTH
RIGHT : 39, // also NUM_EAST
DOWN : 40, // also NUM_SOUTH
PRINT_SCREEN : 44,
INSERT : 45, // also NUM_INSERT
DELETE : 46, // also NUM_DELETE
ZERO : 48,
ONE : 49,
TWO : 50,
THREE : 51,
FOUR : 52,
FIVE : 53,
SIX : 54,
SEVEN : 55,
EIGHT : 56,
NINE : 57,
FF_SEMICOLON : 59, // Firefox (Gecko) fires this for semicolon instead of 186
FF_EQUALS : 61, // Firefox (Gecko) fires this for equals instead of 187
FF_DASH : 173, // Firefox (Gecko) fires this for dash instead of 189
QUESTION_MARK : 63, // needs localization
A : 65,
B : 66,
C : 67,
D : 68,
E : 69,
F : 70,
G : 71,
H : 72,
I : 73,
J : 74,
K : 75,
L : 76,
M : 77,
N : 78,
O : 79,
P : 80,
Q : 81,
R : 82,
S : 83,
T : 84,
U : 85,
V : 86,
W : 87,
X : 88,
Y : 89,
Z : 90,
META : 91, // WIN_KEY_LEFT
WIN_KEY_RIGHT : 92,
CONTEXT_MENU : 93,
NUM_ZERO : 96,
NUM_ONE : 97,
NUM_TWO : 98,
NUM_THREE : 99,
NUM_FOUR : 100,
NUM_FIVE : 101,
NUM_SIX : 102,
NUM_SEVEN : 103,
NUM_EIGHT : 104,
NUM_NINE : 105,
NUM_MULTIPLY : 106,
NUM_PLUS : 107,
NUM_MINUS : 109,
NUM_PERIOD : 110,
NUM_DIVISION : 111,
F1 : 112,
F2 : 113,
F3 : 114,
F4 : 115,
F5 : 116,
F6 : 117,
F7 : 118,
F8 : 119,
F9 : 120,
F10 : 121,
F11 : 122,
F12 : 123,
NUMLOCK : 144,
SCROLL_LOCK : 145,
// OS-specific media keys like volume controls and browser controls.
FIRST_MEDIA_KEY : 166,
LAST_MEDIA_KEY : 183,
SEMICOLON : 186, // needs localization
DASH : 189, // needs localization
EQUALS : 187, // needs localization
COMMA : 188, // needs localization
PERIOD : 190, // needs localization
SLASH : 191, // needs localization
APOSTROPHE : 192, // needs localization
TILDE : 192, // needs localization
SINGLE_QUOTE : 222, // needs localization
OPEN_SQUARE_BRACKET : 219, // needs localization
BACKSLASH : 220, // needs localization
CLOSE_SQUARE_BRACKET: 221, // needs localization
WIN_KEY : 224,
MAC_FF_META : 224, // Firefox (Gecko) fires this for the meta key instead of 91
MAC_WK_CMD_LEFT : 91, // WebKit Left Command key fired, same as META
MAC_WK_CMD_RIGHT : 93, // WebKit Right Command key fired, different from META
WIN_IME : 229,
// We've seen users whose machines fire this keycode at regular one
// second intervals. The common thread among these users is that
// they're all using Dell Inspiron laptops, so we suspect that this
// indicates a hardware/bios problem.
|
};
// =====================================================================================================================
|
// http://en.community.dell.com/support-forums/laptop/f/3518/p/19285957/19523128.aspx
PHANTOM : 255
|
user.py
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, SelectField
from wtforms.validators import DataRequired, Email, Length, EqualTo
from app.models.Model import Category
def get_categories():
categories_query = Category.query.all()
categories = []
for category in categories_query:
categories.append((category.id,category.title))
return categories
# Register form
class
|
(FlaskForm):
email = StringField(label="email", validators=[DataRequired(), Email()])
username = StringField(label="username", validators=[DataRequired()])
password = PasswordField(label="password", validators=[DataRequired(), Length(min=6)])
confirm = PasswordField(label="confirm", validators=[DataRequired(),EqualTo(fieldname='password')])
category = SelectField('Selectionée une category', validators=[DataRequired()],choices=get_categories())
submit = SubmitField(label="inscrire")
# login form
class LoginForm(FlaskForm):
email = StringField('email', validators=[DataRequired(),Email()])
password = PasswordField('password', validators=[DataRequired()])
submit = SubmitField('identifier')
|
RegisterForm
|
assessment.js
|
'use strict'
const userNameInput = document.getElementById('user-name');
const assessmentButton=document.getElementById('assessment');
const resultDivided=document.getElementById('result-area');
const tweetDivided=document.getElementById('tweet-area');
/**
 * Remove all child elements of the specified parent element
 * @param {HTMLElement} element The HTML element to clear
*/
function removeAllChildren(element){
while(element.firstChild){
    // while a child element exists, remove it
element.removeChild(element.firstChild);
}
}
userNameInput.onkeydown = (event) => {
if (event.key === 'Enter') {
    // TODO: call the button's onclick() handler
assessmentButton.onclick();
}
};
assessmentButton.onclick = function buttonOnClick(){
const userName=userNameInput.value;
  // Stop processing when the input name is empty
if(userName.length===0){
return;
}
  // Create the result area
removeAllChildren(resultDivided);
const header=document.createElement('h3');
header.innerText='result';
resultDivided.appendChild(header);
const paragraph=document.createElement('p')
const result=assessment(userName);
paragraph.innerText=result;
resultDivided.appendChild(paragraph);
  // Create the tweet area
removeAllChildren(tweetDivided);
const anchor=document.createElement('a');
const hrefValue='https://twitter.com/intent/tweet?button_hashtag='
  // URI encoding is needed in case the query contains Japanese text
+ encodeURIComponent('yourGoodAssessment') + '&ref_src=twsrc%5Etfw';
anchor.setAttribute('href',hrefValue);
anchor.className='twitter-hashtag-button';
anchor.setAttribute('data-text', result);
anchor.innerText='Tweet #yourGoodAssessment';
tweetDivided.appendChild(anchor);
const srcValue='https://platform.twitter.com/widgets.js';
const script=document.createElement('script');
script.setAttribute('src',srcValue);
tweetDivided.appendChild(script);
|
}
const answers=[
'{userName}のいいところは声です。{userName}の特徴的な声は皆を惹きつけ、心に残ります。',
'{userName}のいいところはまなざしです。{userName}に見つめられた人は、気になって仕方がないでしょう。',
'{userName}のいいところは情熱です。{userName}の情熱に周りの人は感化されます。',
'{userName}のいいところは厳しさです。{userName}の厳しさがものごとをいつも成功に導きます。',
'{userName}のいいところは知識です。博識な{userName}を多くの人が頼りにしています。',
'{userName}のいいところはユニークさです。{userName}だけのその特徴が皆を楽しくさせます。',
'{userName}のいいところは用心深さです。{userName}の洞察に、多くの人が助けられます。',
'{userName}のいいところは見た目です。内側から溢れ出る{userName}の良さに皆が気を惹かれます。',
'{userName}のいいところは決断力です。{userName}がする決断にいつも助けられる人がいます。',
'{userName}のいいところは思いやりです。{userName}に気をかけてもらった多くの人が感謝しています。',
'{userName}のいいところは感受性です。{userName}が感じたことに皆が共感し、わかりあうことができます。',
'{userName}のいいところは節度です。強引すぎない{userName}の考えに皆が感謝しています。',
'{userName}のいいところは好奇心です。新しいことに向かっていく{userName}の心構えが多くの人に魅力的に映ります。',
'{userName}のいいところは気配りです。{userName}の配慮が多くの人を救っています。',
'{userName}のいいところはその全てです。ありのままの{userName}自身がいいところなのです。',
'{userName}のいいところは自制心です。やばいと思ったときにしっかりと衝動を抑えられる{userName}が皆から評価されています。',
'{userName}のいいところは優しさです。あなたの優しい雰囲気や立ち振る舞いに多くの人が癒やされています。'
];
/**
 * Returns an assessment result for the given name string
 * @param {string} userName The user's name
 * @return {string} The assessment result
 */
function assessment(userName){
  // TODO: implement the assessment result logic
let sumOfCharCode=0;
for(let i=0; i<userName.length; i++){
sumOfCharCode=sumOfCharCode + userName.charCodeAt(i);
}
const index=sumOfCharCode % answers.length;
let result=answers[index];
result = result.replace(/\{userName\}/g, userName);
return result;
}
// console.log(assessment('abc'));
// console.log(assessment('def'));
// console.log(assessment('abc'));
// Test code
console.assert(
assessment('abc') === assessment('abc'),
'入力が同じ名前なら同じ診断結果を出力する処理が正しくありません'
);
| |
dovpn.py
|
#!/usr/bin/env python3
"""Main function for the DOVPN project."""
import argparse
import logging
import os
import yaml
import vpnorchestrator
def
|
():
"""Main function that sets up script to run.
Handles arguments, logging, and configuration before passing of control
to the orchestrator object."""
parser = argparse.ArgumentParser(description='Manage a DigitalOcean VPN.')
parser.add_argument('-c', '--config', default="config.yaml",
help='configuration file location')
parser.add_argument('-r', '--remove', action='store_true',
help='remove all related DigitalOcean droplets and keys, and quit')
parser.add_argument('-v', '--verbose', action='store_true',
help="enable verbose output")
parser.add_argument('-d', '--debug', action='store_true',
help="enable verbose output with HTTP requests (implies -v)")
args = parser.parse_args()
log_format = "%(asctime)s %(levelname)8s: %(message)s"
if args.debug:
import http.client as http_client
http_client.HTTPConnection.debuglevel = 1
logging.basicConfig(format=log_format, level=logging.DEBUG)
elif args.verbose:
logging.basicConfig(format=log_format, level=logging.DEBUG)
else:
logging.basicConfig(format=log_format, level=logging.INFO)
if os.geteuid() != 0:
logging.critical("You are not root!")
exit(1)
if not os.path.isfile(args.config):
logging.critical("Config file {} does not exist.".format(args.config))
exit(1)
logging.info("Loading configuration file {}".format(args.config))
with open(args.config, "r") as config_file:
config_yaml = yaml.load(config_file, Loader=yaml.FullLoader)
if args.remove:
logging.info("Removing all DigitalOcean droplets and keys")
orch = vpnorchestrator.VpnOrchestrator(config_yaml)
orch.clean()
exit(0)
try:
orch = vpnorchestrator.VpnOrchestrator(config_yaml)
orch.start()
orch.wait()
orch.teardown()
    except Exception:
        orch.teardown()
        raise
if __name__ == "__main__":
main()
|
main
|
http_not_used_rule.go
|
package elbv2
import (
"github.com/aquasecurity/defsec/rules"
"github.com/aquasecurity/defsec/rules/aws/elb"
"github.com/aquasecurity/tfsec/internal/app/tfsec/block"
|
func init() {
scanner.RegisterCheckRule(rule.Rule{
LegacyID: "AWS004",
BadExample: []string{`
resource "aws_alb_listener" "bad_example" {
protocol = "HTTP"
}
`},
GoodExample: []string{`
resource "aws_alb_listener" "good_example" {
protocol = "HTTPS"
}
`},
Links: []string{
"https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_listener",
},
RequiredTypes: []string{"resource"},
RequiredLabels: []string{"aws_lb_listener", "aws_alb_listener"},
Base: elb.CheckHttpNotUsed,
CheckTerraform: func(resourceBlock block.Block, module block.Module) (results rules.Results) {
if checkIfExempt(resourceBlock, module) {
return
}
protocolAttr := resourceBlock.GetAttribute("protocol")
if protocolAttr.IsNotNil() {
if protocolAttr.IsResolvable() && (protocolAttr.Equals("HTTPS", block.IgnoreCase) ||
protocolAttr.Equals("TLS", block.IgnoreCase)) {
return
}
if protocolAttr.IsResolvable() && protocolAttr.Equals("HTTP") {
// check if this is a redirect to HTTPS - if it is, then no problem
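					// For example (an illustrative snippet only, separate from the registered Good/Bad examples above),
					// a listener like the following would be treated as acceptable because it redirects to HTTPS:
					//
					//   resource "aws_alb_listener" "http_redirect" {
					//     protocol = "HTTP"
					//     default_action {
					//       type = "redirect"
					//       redirect {
					//         protocol = "HTTPS"
					//       }
					//     }
					//   }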
if redirectProtocolAttr := resourceBlock.GetNestedAttribute("default_action.redirect.protocol"); redirectProtocolAttr.IsNotNil() {
if redirectProtocolAttr.IsResolvable() && redirectProtocolAttr.Equals("HTTPS") {
return
}
}
}
results.Add("Resource uses plain HTTP instead of HTTPS.", protocolAttr)
} else {
results.Add("Resource uses plain HTTP instead of HTTPS.", resourceBlock)
}
return results
},
})
}
func checkIfExempt(resourceBlock block.Block, module block.Module) bool {
if resourceBlock.HasChild("load_balancer_arn") {
lbaAttr := resourceBlock.GetAttribute("load_balancer_arn")
if lbaAttr.IsResourceBlockReference("aws_lb") {
referencedBlock, err := module.GetReferencedBlock(lbaAttr, resourceBlock)
if err == nil {
if referencedBlock.HasChild("load_balancer_type") {
loadBalancerType := referencedBlock.GetAttribute("load_balancer_type")
if loadBalancerType.IsAny("gateway", "network") {
return true
}
}
} else {
debug.Log(err.Error())
}
}
}
return false
}
|
"github.com/aquasecurity/tfsec/internal/app/tfsec/debug"
"github.com/aquasecurity/tfsec/internal/app/tfsec/scanner"
"github.com/aquasecurity/tfsec/pkg/rule"
)
|
cie.rs
|
#[doc = "Register `CIE` writer"]
pub struct W(crate::W<CIE_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<CIE_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<CIE_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<CIE_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `BIE` writer - End of Block Interrupt Enable Bit"]
pub struct BIE_W<'a> {
w: &'a mut W,
}
impl<'a> BIE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01);
self.w
}
}
#[doc = "Field `LIE` writer - End of Linked List Interrupt Enable Bit"]
pub struct LIE_W<'a> {
w: &'a mut W,
}
impl<'a> LIE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1);
self.w
}
}
#[doc = "Field `DIE` writer - End of Disable Interrupt Enable Bit"]
pub struct DIE_W<'a> {
w: &'a mut W,
}
impl<'a> DIE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2);
self.w
}
}
#[doc = "Field `FIE` writer - End of Flush Interrupt Enable Bit"]
pub struct FIE_W<'a> {
w: &'a mut W,
}
impl<'a> FIE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u32 & 0x01) << 3);
self.w
}
}
#[doc = "Field `RBIE` writer - Read Bus Error Interrupt Enable Bit"]
pub struct
|
<'a> {
w: &'a mut W,
}
impl<'a> RBIE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u32 & 0x01) << 4);
self.w
}
}
#[doc = "Field `WBIE` writer - Write Bus Error Interrupt Enable Bit"]
pub struct WBIE_W<'a> {
w: &'a mut W,
}
impl<'a> WBIE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 5)) | ((value as u32 & 0x01) << 5);
self.w
}
}
#[doc = "Field `ROIE` writer - Request Overflow Error Interrupt Enable Bit"]
pub struct ROIE_W<'a> {
w: &'a mut W,
}
impl<'a> ROIE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 6)) | ((value as u32 & 0x01) << 6);
self.w
}
}
impl W {
#[doc = "Bit 0 - End of Block Interrupt Enable Bit"]
#[inline(always)]
pub fn bie(&mut self) -> BIE_W {
BIE_W { w: self }
}
#[doc = "Bit 1 - End of Linked List Interrupt Enable Bit"]
#[inline(always)]
pub fn lie(&mut self) -> LIE_W {
LIE_W { w: self }
}
#[doc = "Bit 2 - End of Disable Interrupt Enable Bit"]
#[inline(always)]
pub fn die(&mut self) -> DIE_W {
DIE_W { w: self }
}
#[doc = "Bit 3 - End of Flush Interrupt Enable Bit"]
#[inline(always)]
pub fn fie(&mut self) -> FIE_W {
FIE_W { w: self }
}
#[doc = "Bit 4 - Read Bus Error Interrupt Enable Bit"]
#[inline(always)]
pub fn rbie(&mut self) -> RBIE_W {
RBIE_W { w: self }
}
#[doc = "Bit 5 - Write Bus Error Interrupt Enable Bit"]
#[inline(always)]
pub fn wbie(&mut self) -> WBIE_W {
WBIE_W { w: self }
}
#[doc = "Bit 6 - Request Overflow Error Interrupt Enable Bit"]
#[inline(always)]
pub fn roie(&mut self) -> ROIE_W {
ROIE_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Channel Interrupt Enable Register (chid = 0)\n\nThis register you can [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [cie](index.html) module"]
pub struct CIE_SPEC;
impl crate::RegisterSpec for CIE_SPEC {
type Ux = u32;
}
#[doc = "`write(|w| ..)` method takes [cie::W](W) writer structure"]
impl crate::Writable for CIE_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets CIE to value 0"]
impl crate::Resettable for CIE_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
|
RBIE_W
|
drawing.go
|
package main
import (
"bytes"
"fmt"
"sort"
"unicode/utf8"
"github.com/bitly/go-simplejson"
runewidth "github.com/mattn/go-runewidth"
termbox "github.com/nsf/termbox-go"
)
const (
prompt = ">>> "
promptY = 0
completionY = promptY + 1
)
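// drawString renders contents on row offsetY using the given foreground and background colors.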
func drawString(offsetY int, contents string, fgColor termbox.Attribute, bgColor termbox.Attribute) {
var cells []termbox.Cell
for _, ch := range contents {
cells = append(cells, termbox.Cell{Ch: ch, Fg: fgColor, Bg: bgColor})
}
drawLine(offsetY, cells)
}
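// drawLine writes the given cells onto row offsetY, one column per cell.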
func drawLine(offsetY int, cells []termbox.Cell) {
for i, c := range cells {
termbox.SetCell(i, offsetY, c.Ch, c.Fg, c.Bg)
}
}
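// drawCompletions renders the completion candidates on the completion row, bolding the active one;
// with no active completion or fewer than two candidates the row is cleared instead.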
func (e *Explorer) drawCompletions() {
if e.display.ActiveCompletion == -1 || len(e.completions) < 2 {
fg := termbox.ColorDefault
bg := termbox.ColorDefault
var cells []termbox.Cell
w, _ := termbox.Size()
for i := 0; i < w; i++ {
cells = append(cells, termbox.Cell{Ch: ' ', Fg: fg, Bg: bg})
}
drawLine(completionY, cells)
return
}
var cells []termbox.Cell
for i, key := range e.completions {
fg := termbox.ColorDefault
bg := termbox.ColorDefault
if i == e.display.ActiveCompletion {
fg = termbox.ColorDefault | termbox.AttrBold
}
for _, char := range key {
cells = append(cells, termbox.Cell{Ch: char, Fg: fg, Bg: bg})
}
cells = append(cells, termbox.Cell{Ch: ' ', Fg: termbox.ColorDefault, Bg: termbox.ColorDefault})
}
drawLine(completionY, cells)
}
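// drawQueryLine draws the prompt, the raw query (blue) and the best completion suffix (green),
// positioning the cursor at the current query position.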
func (e *Explorer) drawQueryLine() {
termbox.SetCursor(e.query.QueryPos+runewidth.StringWidth(prompt), promptY)
var lastToken Token = Key("")
if len(e.query.Parsed) != 0 {
lastToken = e.query.Parsed[len(e.query.Parsed)-1]
}
firstCompl := bestCompletion(lastToken, e.completions)
promptLen := utf8.RuneCountInString(prompt)
queryLen := utf8.RuneCountInString(e.query.Raw())
for x, symbol := range prompt + e.query.Raw() + firstCompl {
textColor := termbox.ColorDefault
if x >= promptLen && x < promptLen+queryLen {
textColor = termbox.ColorBlue
} else if x > promptLen+queryLen {
textColor = termbox.ColorGreen
}
termbox.SetCell(x, promptY, symbol, textColor, termbox.ColorDefault)
}
}
func (e *Explorer) drawContents(clear bool) {
if clear {
_, h := termbox.Size()
for y := completionY + 1; y < h; y++ {
clearLine(y)
}
}
if e.display.Doc == nil {
for x, ch := range "--- no results ---" {
termbox.SetCell(x, completionY+1, ch, termbox.ColorRed, termbox.ColorDefault)
}
return
}
if e.display.OnlyKeys {
displayKeys(e.display.Doc)
return
}
json, err := e.display.Doc.EncodePretty()
if err != nil {
termboxFatalln(err)
}
e.display.DocHeight = bytes.Count(json, []byte("\n")) + 1
JSONcells := *colorizeJSON(json)
for i, line := range JSONcells[e.display.DocOffsetY:] {
drawLine(completionY+1+i, line)
}
}
func displayKeys(doc *simplejson.Json) {
obj, err := doc.Map()
if err != nil {
idxs, err := doc.Array()
if err != nil {
drawString(completionY+1, "--- not an object or array ---", termbox.ColorRed, termbox.ColorDefault)
return
}
drawString(completionY+1, fmt.Sprintf("%d..%d", 0, len(idxs)-1), termbox.ColorDefault, termbox.ColorDefault)
return
}
var keys []string
for k := range obj {
keys = append(keys, k)
}
sort.Strings(keys)
for i, k := range keys {
drawString(completionY+1+i, k, termbox.ColorDefault, termbox.ColorDefault)
}
}
func
|
(y int) {
maxX, _ := termbox.Size()
for i := 0; i < maxX; i++ {
termbox.SetCell(i, y, ' ', termbox.ColorDefault, termbox.ColorDefault)
}
}
|
clearLine
|
helper.js
|
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.mergeAll = exports.unique = exports.hasValues = exports.isObject = exports.isString = void 0;
function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i] != null ? arguments[i] : {}; var ownKeys = Object.keys(source); if (typeof Object.getOwnPropertySymbols === 'function') { ownKeys = ownKeys.concat(Object.getOwnPropertySymbols(source).filter(function (sym) { return Object.getOwnPropertyDescriptor(source, sym).enumerable; })); } ownKeys.forEach(function (key) { _defineProperty(target, key, source[key]); }); } return target; }
function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }
function _toConsumableArray(arr) { return _arrayWithoutHoles(arr) || _iterableToArray(arr) || _nonIterableSpread(); }
function _nonIterableSpread() { throw new TypeError("Invalid attempt to spread non-iterable instance"); }
function _iterableToArray(iter) { if (Symbol.iterator in Object(iter) || Object.prototype.toString.call(iter) === "[object Arguments]") return Array.from(iter); }
function _arrayWithoutHoles(arr) { if (Array.isArray(arr)) { for (var i = 0, arr2 = new Array(arr.length); i < arr.length; i++) { arr2[i] = arr[i]; } return arr2; } }
function _typeof(obj) { if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; } return _typeof(obj); }
/*
* Licensed to Elasticsearch B.V. under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch B.V. licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
var isString = function isString(value) {
return typeof value === 'string';
};
exports.isString = isString;
var isObject = function isObject(value) {
return _typeof(value) === 'object' && value !== null;
};
exports.isObject = isObject;
var hasValues = function hasValues(values) {
return Object.keys(values).length > 0;
};
|
var arr = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : [];
return _toConsumableArray(new Set(arr));
};
exports.unique = unique;
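// merge deep-merges two plain objects: nested (non-array) objects are merged recursively; for other
// keys b's value wins unless it is undefined, in which case a's value is kept.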
var merge = function merge(a, b) {
return unique(_toConsumableArray(Object.keys(a)).concat(_toConsumableArray(Object.keys(b)))).reduce(function (acc, key) {
if (isObject(a[key]) && isObject(b[key]) && !Array.isArray(a[key]) && !Array.isArray(b[key])) {
return _objectSpread({}, acc, _defineProperty({}, key, merge(a[key], b[key])));
}
return _objectSpread({}, acc, _defineProperty({}, key, b[key] === undefined ? a[key] : b[key]));
}, {});
};
var mergeAll = function mergeAll() {
for (var _len = arguments.length, sources = new Array(_len), _key = 0; _key < _len; _key++) {
sources[_key] = arguments[_key];
}
return sources.filter(isObject).reduce(function (acc, source) {
return merge(acc, source);
});
};
exports.mergeAll = mergeAll;
|
exports.hasValues = hasValues;
var unique = function unique() {
|
0007_room_hangout_room.py
|
# Generated by Django 2.2.15 on 2020-08-14 21:42
from django.db import migrations, models
class Migration(migrations.Migration):
|
dependencies = [
('bbb', '0006_auto_20200813_1954'),
]
operations = [
migrations.AddField(
model_name='room',
name='hangout_room',
field=models.BooleanField(default=False),
),
]
|
|
top.js
|
import {
addClass,
getScrollbarWidth,
getScrollTop,
getWindowScrollLeft,
hasClass,
outerHeight,
innerWidth,
removeClass,
setOverlayPosition,
} from './../../../../helpers/dom/element';
import {WalkontableOverlay} from './_base';
/**
* @class WalkontableTopOverlay
*/
class WalkontableTopOverlay extends WalkontableOverlay {
/**
* @param {Walkontable} wotInstance
*/
constructor(wotInstance) {
super(wotInstance);
this.clone = this.makeClone(WalkontableOverlay.CLONE_TOP);
}
/**
* Checks if overlay should be fully rendered
*
* @returns {Boolean}
*/
shouldBeRendered() {
return this.wot.getSetting('fixedRowsTop') || this.wot.getSetting('columnHeaders').length ? true : false;
}
/**
* Updates the top overlay position
*/
resetFixedPosition() {
if (!this.needFullRender || !this.wot.wtTable.holder.parentNode) {
// removed from DOM
return;
}
let overlayRoot = this.clone.wtTable.holder.parentNode;
let headerPosition = 0;
let preventOverflow = this.wot.getSetting('preventOverflow');
if (this.trimmingContainer === window && (!preventOverflow || preventOverflow !== 'vertical')) {
let box = this.wot.wtTable.hider.getBoundingClientRect();
let top = Math.ceil(box.top);
let bottom = Math.ceil(box.bottom);
let finalLeft;
let finalTop;
finalLeft = this.wot.wtTable.hider.style.left;
finalLeft = finalLeft === '' ? 0 : finalLeft;
if (top < 0 && (bottom - overlayRoot.offsetHeight) > 0) {
finalTop = -top;
} else {
finalTop = 0;
}
headerPosition = finalTop;
finalTop = finalTop + 'px';
setOverlayPosition(overlayRoot, finalLeft, finalTop);
} else {
headerPosition = this.getScrollPosition();
}
this.adjustHeaderBordersPosition(headerPosition);
this.adjustElementsSize();
}
/**
* Sets the main overlay's vertical scroll position
*
* @param {Number} pos
*/
setScrollPosition(pos) {
if (this.mainTableScrollableElement === window) {
window.scrollTo(getWindowScrollLeft(), pos);
} else {
this.mainTableScrollableElement.scrollTop = pos;
}
}
/**
* Triggers onScroll hook callback
*/
onScroll() {
this.wot.getSetting('onScrollHorizontally');
}
/**
* Calculates total sum cells height
*
   * @param {Number} from Row index the calculation starts from
* @param {Number} to Row index where calculation is finished
* @returns {Number} Height sum
*/
sumCellSizes(from, to) {
let sum = 0;
let defaultRowHeight = this.wot.wtSettings.settings.defaultRowHeight;
while (from < to) {
let height = this.wot.wtTable.getRowHeight(from);
sum += height === void 0 ? defaultRowHeight : height;
from++;
}
return sum;
}
/**
   * Adjust overlay root element, children and master table element sizes (width, height).
*
* @param {Boolean} [force=false]
*/
adjustElementsSize(force = false) {
if (this.needFullRender || force) {
this.adjustRootElementSize();
this.adjustRootChildrenSize();
if (!force) {
this.areElementSizesAdjusted = true;
}
}
}
/**
* Adjust overlay root element size (width and height).
*/
adjustRootElementSize() {
let masterHolder = this.wot.wtTable.holder;
let scrollbarWidth = masterHolder.clientWidth === masterHolder.offsetWidth ? 0 : getScrollbarWidth();
let overlayRoot = this.clone.wtTable.holder.parentNode;
let overlayRootStyle = overlayRoot.style;
let preventOverflow = this.wot.getSetting('preventOverflow');
let tableHeight;
if (this.trimmingContainer !== window || preventOverflow === 'horizontal') {
let width = this.wot.wtViewport.getWorkspaceWidth() - scrollbarWidth;
width = Math.min(width, innerWidth(this.wot.wtTable.wtRootElement));
overlayRootStyle.width = width + 'px';
}
this.clone.wtTable.holder.style.width = overlayRootStyle.width;
tableHeight = outerHeight(this.clone.wtTable.TABLE);
overlayRootStyle.height = (tableHeight === 0 ? tableHeight : tableHeight + 4) + 'px';
}
/**
   * Adjust overlay root children size
*/
adjustRootChildrenSize() {
let scrollbarWidth = getScrollbarWidth();
this.clone.wtTable.hider.style.width = this.hider.style.width;
this.clone.wtTable.holder.style.width = this.clone.wtTable.holder.parentNode.style.width;
if (scrollbarWidth === 0) {
scrollbarWidth = 30;
}
this.clone.wtTable.holder.style.height = parseInt(this.clone.wtTable.holder.parentNode.style.height, 10) + scrollbarWidth + 'px';
}
/**
* Adjust the overlay dimensions and position
*/
applyToDOM() {
let total = this.wot.getSetting('totalRows');
if (!this.areElementSizesAdjusted) {
this.adjustElementsSize();
}
if (typeof this.wot.wtViewport.rowsRenderCalculator.startPosition === 'number') {
this.spreader.style.top = this.wot.wtViewport.rowsRenderCalculator.startPosition + 'px';
} else if (total === 0) {
// can happen if there are 0 rows
this.spreader.style.top = '0';
} else {
throw new Error('Incorrect value of the rowsRenderCalculator');
}
this.spreader.style.bottom = '';
if (this.needFullRender) {
this.syncOverlayOffset();
}
}
/**
* Synchronize calculated left position to an element
*/
syncOverlayOffset() {
if (typeof this.wot.wtViewport.columnsRenderCalculator.startPosition === 'number') {
this.clone.wtTable.spreader.style.left = this.wot.wtViewport.columnsRenderCalculator.startPosition + 'px';
} else {
this.clone.wtTable.spreader.style.left = '';
}
}
/**
* Scrolls vertically to a row
*
* @param sourceRow {Number} Row index which you want to scroll to
* @param [bottomEdge=false] {Boolean} if `true`, scrolls according to the bottom edge (top edge is by default)
*/
scrollTo(sourceRow, bottomEdge) {
let newY = this.getTableParentOffset();
let sourceInstance = this.wot.cloneSource ? this.wot.cloneSource : this.wot;
let mainHolder = sourceInstance.wtTable.holder;
let scrollbarCompensation = 0;
if (bottomEdge && mainHolder.offsetHeight !== mainHolder.clientHeight) {
scrollbarCompensation = getScrollbarWidth();
}
if (bottomEdge) {
let fixedRowsBottom = this.wot.getSetting('fixedRowsBottom');
let fixedRowsTop = this.wot.getSetting('fixedRowsTop');
let totalRows = this.wot.getSetting('totalRows');
newY += this.sumCellSizes(0, sourceRow + 1);
newY -= this.wot.wtViewport.getViewportHeight() - this.sumCellSizes(totalRows - fixedRowsBottom, totalRows);
// Fix 1 pixel offset when cell is selected
newY += 1;
} else {
newY += this.sumCellSizes(this.wot.getSetting('fixedRowsTop'), sourceRow);
}
newY += scrollbarCompensation;
this.setScrollPosition(newY);
}
/**
* Gets table parent top position
|
*/
getTableParentOffset() {
if (this.mainTableScrollableElement === window) {
return this.wot.wtTable.holderOffset.top;
} else {
return 0;
}
}
/**
* Gets the main overlay's vertical scroll position
*
* @returns {Number} Main table's vertical scroll position
*/
getScrollPosition() {
return getScrollTop(this.mainTableScrollableElement);
}
/**
* Adds css classes to hide the header border's header (cell-selection border hiding issue)
*
* @param {Number} position Header Y position if trimming container is window or scroll top if not
*/
adjustHeaderBordersPosition(position) {
if (this.wot.getSetting('fixedRowsTop') === 0 && this.wot.getSetting('columnHeaders').length > 0) {
let masterParent = this.wot.wtTable.holder.parentNode;
let previousState = hasClass(masterParent, 'innerBorderTop');
if (position || this.wot.getSetting('totalRows') === 0) {
addClass(masterParent, 'innerBorderTop');
} else {
removeClass(masterParent, 'innerBorderTop');
}
if (!previousState && position || previousState && !position) {
this.wot.wtOverlays.adjustElementsSize();
}
}
// nasty workaround for double border in the header, TODO: find a pure-css solution
if (this.wot.getSetting('rowHeaders').length === 0) {
let secondHeaderCell = this.clone.wtTable.THEAD.querySelectorAll('th:nth-of-type(2)');
if (secondHeaderCell) {
for (let i = 0; i < secondHeaderCell.length; i++) {
secondHeaderCell[i].style['border-left-width'] = 0;
}
}
}
}
}
export {WalkontableTopOverlay};
window.WalkontableTopOverlay = WalkontableTopOverlay;
WalkontableOverlay.registerOverlay(WalkontableOverlay.CLONE_TOP, WalkontableTopOverlay);
|
*
* @returns {Number}
|
serializer.ts
|
import { deflateSync, inflateSync } from 'fflate';
const EPOCH = 1577836800; //2020-01-01T00:00:00
export const Serializer = {
//ArrayBuffer: byte-array in physical memory: generic fixed-length container for binary data.
    // Int8Array, Uint8Array, Uint8ClampedArray, Int16Array, Uint16Array, Int32Array, Uint32Array, Float32Array and Float64Array.
// : width-dependent views
//DataView: flexible ArrayBuffer, read variable widths and from positions in the buffer that is not necessarily width or memory aligned.
//https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/DataView
//Blob: immutable
//Arraybuffer: mutable, can be generated from blob and vice versa
//const arrBuff = await blob.arrayBuffer() or FileReader().readAsArrayBuffer()
//const blob = new Blob([new Uint8Array(arrayBuff)])
//Websocket: binaryType: 'arraybuffer' or 'blob'
//XHR: responseType: 'arraybuffer', 'blob', 'json', 'text'
//Base 64
fromBase64(base64: string) : Uint8Array {
return Uint8Array.from(atob(base64), c => c.charCodeAt(0));
},
toBase64(bytes: Uint8Array) : string {
return btoa(String.fromCharCode(...bytes));
},
toHex(bytes: Uint8Array): string {
return [...bytes]
.map(x => x.toString(16).padStart(2, '0'))
.join('');
},
fromJsonBytes(jsonBytes: Uint8Array) : any {
return JSON.parse(new TextDecoder().decode(jsonBytes));
},
toJsonBytes(obj: any) : Uint8Array {
return new TextEncoder().encode(JSON.stringify(obj));
},
//Date EPOCH = 1577836800 <-- unix timestamp at 2020/01/01T00:00:00
fromDate(date: Date) : Uint8Array {
const ts = parseInt((date.getTime() / 1000).toFixed(0)) - EPOCH;
const buff = new ArrayBuffer(4);
new DataView(buff).setUint32(0, ts);
return new Uint8Array(buff);
},
toDate(bytes: Uint8Array) : Date {
const nr = new DataView(bytes.buffer).getUint32(0);
return new Date((nr + EPOCH) * 1000);
},
equal(one: Uint8Array | undefined, other: Uint8Array | undefined) : boolean {
if(one === undefined && other === undefined) return true;
if(one === undefined || other === undefined) return false;
if(one.length !== other.length) return false;
for (let i = 0; i < one.length; i++) {
if(one[i] != other[i])
return false;
}
return true;
},
compress(input: Uint8Array) : Uint8Array {
return deflateSync(input, { level: 9 });
},
decompress(input: Uint8Array) : Uint8Array {
return inflateSync(input);
},
concat(...arrays: Uint8Array[]) : Uint8Array {
const dest = new Uint8Array(arrays.reduce((a,b) => a + b.length, 0));
let idx = 0;
for (let i = 0; i < arrays.length; i++) {
dest.set(arrays[i], idx);
idx += arrays[i].length;
}
return dest;
},
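    //Counts how many of the lowest eight bits of the flag byte are set.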
getFlagCount(flag: number) : number {
let cnt = 0;
for (let i = 7; i >= 0; i--)
cnt += (flag >> i) & 1;
return cnt;
},
getFlags(flag: number) : number[] {
const types: number[] = [];
for (let i = 0; i <= 8; i++)
{
const f = 1 << i;
if ((flag & f) == f)
types.push(f);
}
return types;
|
}
};
|
},
isBitSet(nr: number, pos: number) : boolean {
return (nr & (1 << pos)) != 0;
|
0016_parsed_data_url.py
|
# Generated by Django 2.1.11 on 2020-06-08 07:45
from django.db import migrations, models
class Migration(migrations.Migration):
|
dependencies = [
('examples', '0015_img_data_img_folder_path'),
]
operations = [
migrations.AddField(
model_name='parsed_data',
name='url',
field=models.URLField(default=''),
),
]
|
|
autovivification.py
|
class AutoVivification(dict):
"""Implementation of perl's autovivification."""
|
return value
weather = AutoVivification()
weather['china']['guangdong']['shenzhen'] = 'sunny'
weather['china']['hubei']['wuhan'] = 'sunny'
weather['USA']['California']['Los Angeles'] = 'sunny'
print(weather)
|
def __missing__(self, key):
value = self[key] = type(self)()
|
login.go
|
package options
import (
"errors"
|
)
// Login contains the parsed login action environment variables
type Login struct {
Username string `env:"INPUT_USERNAME"`
Password string `env:"INPUT_PASSWORD"`
}
var errLoginVarValidation = errors.New("both username and password must be set to login")
// GetLoginOptions gets the login action environment variables
func GetLoginOptions() (Login, error) {
var login Login
if err := env.Parse(&login); err != nil {
return login, err
}
if login.Username != "" && login.Password == "" ||
login.Username == "" && login.Password != "" {
return login, errLoginVarValidation
}
return login, nil
}
|
"github.com/caarlos0/env/v6"
|
Node.tsx
|
import React from 'react'
import { styled, css, keyframes } from '../helpers/theme'
import { menuFadeTypes } from '../helpers/types'
import NodeItem from './NodeItem'
import NodeBackLink from './NodeBackLink'
const SlideLeftOut = keyframes`
0% {
transform: translateX(0);
opacity: 1;
}
100% {
transform: translateX(-100%);
opacity: 0;
}
`
const SlideRightIn = keyframes`
0% {
transform: translateX(-100%);
opacity: 0;
}
100% {
transform: translateX(0px);
opacity: 1;
}
`
const SlideLeftIn = keyframes`
0% {
transform: translateX(100%);
opacity: 0;
}
100% {
transform: translateX(0px);
opacity: 1;
}
`
const SlideRightOut = keyframes`
0% {
transform: translateX(0px);
opacity: 1;
}
100% {
transform: translateX(100%);
opacity: 0;
}
`
type NodeTypes = {
fade: menuFadeTypes
visible: boolean
}
const Node = styled.ul<NodeTypes>`
position: absolute;
|
padding: 0;
background: ${(props) => props.theme.backgroundColor};
font-family: Arial, Helvetica, sans-serif;
backface-visibility: hidden;
${(props) =>
props.fade === 'out-left' &&
css`
animation: ${SlideLeftOut};
`}
${(props) =>
props.fade === 'in-right' &&
css`
animation: ${SlideRightIn};
`}
${(props) =>
props.fade === 'in-left' &&
css`
animation: ${SlideLeftIn};
`}
${(props) =>
props.fade === 'out-right' &&
css`
animation: ${SlideRightOut};
`}
${(props) =>
props.fade !== null &&
css`
animation-duration: ${props.theme.transitionDuration};
animation-timing-function: ease-in-out;
animation-fill-mode: forwards;
`}
${(props) =>
!props.visible &&
css`
opacity: 0;
display: none;
`}
`
interface NodeProps {
visible: boolean
fade: menuFadeTypes
backButtonText: string
onAnimationEnd: () => void
backLinkClickHandler: () => void
menuClickHandler: (id: number, title: string) => void
backLink: boolean
menuData: Array<any>
}
function NodeElement(props: NodeProps) {
return (
<Node visible={props.visible} fade={props.fade} onAnimationEnd={props.onAnimationEnd}>
{props.backLink && (
<NodeBackLink backButtonText={props.backButtonText} handleClick={props.backLinkClickHandler} />
)}
{props.menuData.map((data) => (
<NodeItem
key={data.id}
id={data.id}
hasChildren={data.hasOwnProperty('subnodes') ? true : false}
url={data.hasOwnProperty('subnodes') ? '#' : data.url}
title={data.Title}
handleClick={props.menuClickHandler}
/>
))}
</Node>
)
}
export default NodeElement
|
width: 100%;
list-style: none;
margin: 0;
|
plugin.js
|
/**
* Copyright (c) Tiny Technologies, Inc. All rights reserved.
* Licensed under the LGPL or a commercial license.
* For LGPL see License.txt in the project root for license information.
* For commercial licenses see https://www.tiny.cloud/
*
* Version: 5.0.9 (2019-06-26)
*/
(function () {
'use strict';
var global = tinymce.util.Tools.resolve('tinymce.PluginManager');
var register = function (editor) {
editor.addCommand('InsertHorizontalRule', function () {
editor.execCommand('mceInsertContent', false, '<hr />');
});
};
var Commands = { register: register };
var register$1 = function (editor) {
editor.ui.registry.addButton('hr', {
icon: 'horizontal-rule',
tooltip: 'Horizontal line',
onAction: function () {
return editor.execCommand('InsertHorizontalRule');
}
});
editor.ui.registry.addMenuItem('hr', {
icon: 'horizontal-rule',
text: 'Horizontal line',
onAction: function () {
return editor.execCommand('InsertHorizontalRule');
}
});
};
var Buttons = { register: register$1 };
function
|
() {
global.add('hr', function (editor) {
Commands.register(editor);
Buttons.register(editor);
});
}
Plugin();
}());
|
Plugin
|
next.config.js
|
/* eslint-disable @typescript-eslint/no-var-requires */
const withPlugins = require("next-compose-plugins");
const withBundleAnalyzer = require("@next/bundle-analyzer")({
enabled: process.env.ANALYZE === "true",
});
const { nextI18NextRewrites } = require("next-i18next/rewrites");
const localeSubpaths = {
tr: "tr",
en: "en",
|
const config = {
rewrites: async () => nextI18NextRewrites(localeSubpaths),
publicRuntimeConfig: {
localeSubpaths,
},
};
module.exports = withPlugins([[withBundleAnalyzer]], config);
|
};
|
Bank.ts
|
// Copyright 2022 Cartesi Pte. Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
import { deployments, ethers } from "hardhat";
import { expect, use } from "chai";
import { solidity } from "ethereum-waffle";
import { Signer } from "ethers";
import {
deployMockContract,
MockContract,
} from "@ethereum-waffle/mock-contract";
import {
Bank,
Bank__factory,
SimpleToken,
SimpleToken__factory,
} from "../src/types";
import { deployTestBank } from "./utils";
use(solidity);
describe("Bank", () => {
let alice: Signer;
let bob: Signer;
let bank: Bank;
let token: SimpleToken;
let mockToken: MockContract;
const initialSupply = 1000000;
const addressZero = "0x0000000000000000000000000000000000000000";
async function
|
() {
const IERC20 = await deployments.getArtifact("IERC20");
mockToken = await deployMockContract(alice, IERC20.abi);
let Bank = await deployments.deploy("Bank", {
from: await alice.getAddress(),
args: [mockToken.address],
});
bank = Bank__factory.connect(Bank.address, alice);
}
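    // Snapshots the Bank's token balance plus Alice's and Bob's token and bank balances.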
async function getState() {
return {
bankTokenBalance: await token.balanceOf(bank.address),
aliceTokenBalance: await token.balanceOf(await alice.getAddress()),
bobTokenBalance: await token.balanceOf(await bob.getAddress()),
aliceBankBalance: await bank.balanceOf(await alice.getAddress()),
bobBankBalance: await bank.balanceOf(await bob.getAddress()),
};
}
beforeEach(async () => {
[alice, bob] = await ethers.getSigners();
// Deploy a simple token
let { Bank, SimpleToken } = await deployTestBank({ initialSupply });
// Connect to token as the contract deployer (Alice)
token = SimpleToken__factory.connect(SimpleToken.address, alice);
// Connect to bank as Alice
bank = Bank__factory.connect(Bank.address, alice);
});
it("Get token", async () => {
expect(await bank.getToken(), "Get Bank token").to.equal(token.address);
});
it("Deploy bank with invalid token", async () => {
await expect(
deployments.deploy("Bank", {
from: await alice.getAddress(),
args: [addressZero],
}),
"Deploying Bank with address(0) as token should revert"
).to.be.revertedWith("Bank: invalid token");
});
it("Check initial state", async () => {
const st = await getState();
expect(
st.bankTokenBalance,
"Initial Bank's token balance should be zero"
).to.equal(0);
expect(
st.aliceTokenBalance,
"Initial Alice's token balance should be `initialSupply`"
).to.equal(initialSupply);
expect(
st.bobTokenBalance,
"Initial Bob's token balance should be zero"
).to.equal(0);
expect(
st.aliceBankBalance,
"Initial Alice's bank balance should be zero"
).to.equal(0);
expect(
st.bobBankBalance,
"Initial Bob's bank balance should be zero"
).to.equal(0);
});
it("Invalid recipient", async () => {
await expect(
bank.depositTokens(addressZero, 10),
"Deposits to address(0) should revert"
).to.be.revertedWith("Bank: invalid recipient");
});
it("Deposit 0 tokens", async () => {
const st1 = await getState();
await expect(
bank.depositTokens(await bob.getAddress(), 0),
"Deposit 0 tokens in Bob's account"
)
.to.emit(bank, "Deposit")
.withArgs(await alice.getAddress(), await bob.getAddress(), 0);
const st2 = await getState();
expect(
st2.bankTokenBalance,
"Bank's token balance should be the same"
).to.equal(st1.bankTokenBalance);
expect(
st2.aliceTokenBalance,
"Alice's token balance should be the same"
).to.equal(st1.aliceTokenBalance);
expect(
st2.bobTokenBalance,
"Bob's token balance should be the same"
).to.equal(st1.bobTokenBalance);
expect(
st2.aliceBankBalance,
"Alice's bank balance should be the same"
).to.equal(st1.aliceBankBalance);
expect(
st2.bobBankBalance,
"Bob's bank balance should be the same"
).to.equal(st1.bobBankBalance);
});
it("Not enough approval", async () => {
// Give no approval
await expect(
bank.depositTokens(await bob.getAddress(), 10),
"Deposits with not enough approval should revert"
).to.be.revertedWith("ERC20: insufficient allowance");
// Give insufficient approval
await token.approve(bank.address, 9);
await expect(
bank.depositTokens(await bob.getAddress(), 10),
"Deposits with not enough approval should revert"
).to.be.revertedWith("ERC20: insufficient allowance");
// Give too much approval
await token.approve(bank.address, initialSupply + 1);
await expect(
bank.depositTokens(await bob.getAddress(), initialSupply + 1),
"Deposits with not enough balance should revert"
).to.be.revertedWith("ERC20: transfer amount exceeds balance");
// Have no balance
await token.connect(bob).approve(bank.address, 10);
await expect(
bank.connect(bob).depositTokens(await alice.getAddress(), 10),
"Deposits with not enough balance should revert"
).to.be.revertedWith("ERC20: transfer amount exceeds balance");
});
it("Just enough approval", async () => {
const value = 10;
const st1 = await getState();
// Deposits `value` tokens into Bank
await token.approve(bank.address, value);
expect(
await bank.depositTokens(await bob.getAddress(), value),
"A successful call to depositTokens should emit a Deposit event"
)
.to.emit(bank, "Deposit")
.withArgs(await alice.getAddress(), await bob.getAddress(), value);
const st2 = await getState();
expect(
st2.bankTokenBalance,
"Bank's token balance should be increased by `value`"
).to.equal(st1.bankTokenBalance.add(value));
expect(
st2.aliceTokenBalance,
"Alice's token balance should be decreased by `value`"
).to.equal(st1.aliceTokenBalance.sub(value));
expect(
st2.bobTokenBalance,
"Bob's token balance should be the same"
).to.equal(st1.bobTokenBalance);
expect(
st2.aliceBankBalance,
"Alice's bank balance should be the same"
).to.equal(st1.aliceBankBalance);
expect(
st2.bobBankBalance,
"Bob's bank balance should be increased by `value`"
).to.equal(st1.bobBankBalance.add(value));
// Approved amount should be updated
await expect(
bank.depositTokens(await bob.getAddress(), 1),
"Deposits with not enough approval should revert"
).to.be.revertedWith("ERC20: insufficient allowance");
});
it("Transfer 0 tokens", async () => {
const st1 = await getState();
await expect(
bank.transferTokens(await bob.getAddress(), 0),
"Transfer 0 tokens to Bob's account"
)
.to.emit(bank, "Transfer")
.withArgs(await alice.getAddress(), await bob.getAddress(), 0);
const st2 = await getState();
expect(
st2.bankTokenBalance,
"Bank's token balance should be the same"
).to.equal(st1.bankTokenBalance);
expect(
st2.aliceTokenBalance,
"Alice's token balance should be the same"
).to.equal(st1.aliceTokenBalance);
expect(
st2.bobTokenBalance,
"Bob's token balance should be the same"
).to.equal(st1.bobTokenBalance);
expect(
st2.aliceBankBalance,
"Alice's bank balance should be the same"
).to.equal(st1.aliceBankBalance);
expect(
st2.bobBankBalance,
"Bob's bank balance should be the same"
).to.equal(st1.bobBankBalance);
});
it("Failed transferFrom", async () => {
await setupMockToken();
await mockToken.mock.transferFrom.returns(false);
await expect(
bank.depositTokens(await bob.getAddress(), 0),
"Depositing 0 tokens in Bob's bank account"
).to.be.revertedWith("Bank: transferFrom failed");
});
it("Not enough balance", async () => {
let st = await getState();
// Try transfering with no balance
await expect(
bank.transferTokens(
await bob.getAddress(),
st.aliceBankBalance.add(1)
),
"Transfering too many token"
).to.be.revertedWith("Bank: not enough balance");
// Deposit some tokens into Alice's bank account
const value = 10;
await token.approve(bank.address, value);
await bank.depositTokens(await alice.getAddress(), value);
// Get new state
st = await getState();
// Try transfering with some balance
await expect(
bank.transferTokens(
await bob.getAddress(),
st.aliceBankBalance.add(1)
),
"Transfering too many token"
).to.be.revertedWith("Bank: not enough balance");
});
it("Just enough balance", async () => {
const value = 10;
await token.approve(bank.address, value);
await bank.depositTokens(await alice.getAddress(), value);
const st1 = await getState();
await expect(
bank.transferTokens(await bob.getAddress(), value),
      `Transferring ${value} tokens to Bob's token account`
)
.to.emit(bank, "Transfer")
.withArgs(await alice.getAddress(), await bob.getAddress(), value);
const st2 = await getState();
expect(
st2.bankTokenBalance,
"Bank's token balance should decreased by `value`"
).to.equal(st1.bankTokenBalance.sub(value));
expect(
st2.aliceTokenBalance,
"Alice's token balance should be the same"
).to.equal(st1.aliceTokenBalance);
expect(
st2.bobTokenBalance,
"Bob's token balance should be increased by `value`"
).to.equal(st1.bobTokenBalance.add(value));
expect(
st2.aliceBankBalance,
"Alice's bank balance should be decreased by `value`"
).to.equal(st1.aliceBankBalance.sub(value));
expect(
st2.bobBankBalance,
"Bob's bank balance should be the same"
).to.equal(st1.bobBankBalance);
});
it("Failed transfer", async () => {
await setupMockToken();
await mockToken.mock.transfer.returns(false);
await expect(
bank.transferTokens(await bob.getAddress(), 0),
"Depositing 0 tokens in Bob's bank account"
).to.be.revertedWith("Bank: transfer failed");
});
it("Balance overflow", async () => {
await setupMockToken();
await mockToken.mock.transferFrom.returns(true);
const value = ethers.constants.MaxUint256;
await expect(
bank.depositTokens(await bob.getAddress(), value),
"Deposit `uint256.max` tokens in Bob's bank account"
)
.to.emit(bank, "Deposit")
.withArgs(await alice.getAddress(), await bob.getAddress(), value);
await expect(
bank.depositTokens(await bob.getAddress(), 1),
"Deposit 1 token in Bob's bank account"
).to.be.reverted;
});
});
|
setupMockToken
|
transaction_request.rs
|
use crate::types::{Address, Bytes, U256};
use serde::{Deserialize, Serialize};
/// Call contract request (eth_call / eth_estimateGas)
///
/// When using this for `eth_estimateGas`, all the fields
/// are optional. However, for usage in `eth_call` the
/// `to` field must be provided.
#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
pub struct CallRequest {
/// Sender address (None for arbitrary address)
#[serde(skip_serializing_if = "Option::is_none")]
pub from: Option<Address>,
/// To address (None allowed for eth_estimateGas)
#[serde(skip_serializing_if = "Option::is_none")]
pub to: Option<Address>,
/// Supplied gas (None for sensible default)
#[serde(skip_serializing_if = "Option::is_none")]
pub gas: Option<U256>,
/// Gas price (None for sensible default)
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "gasPrice")]
pub gas_price: Option<U256>,
    /// Transferred value (None for no transfer)
#[serde(skip_serializing_if = "Option::is_none")]
pub value: Option<U256>,
/// Data (None for empty data)
#[serde(skip_serializing_if = "Option::is_none")]
pub data: Option<Bytes>,
}
/// Send Transaction Parameters
#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
pub struct TransactionRequest {
/// Sender address
pub from: Address,
/// Recipient address (None for contract creation)
#[serde(skip_serializing_if = "Option::is_none")]
pub to: Option<Address>,
/// Supplied gas (None for sensible default)
#[serde(skip_serializing_if = "Option::is_none")]
pub gas: Option<U256>,
/// Gas price (None for sensible default)
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "gasPrice")]
pub gas_price: Option<U256>,
    /// Transferred value (None for no transfer)
#[serde(skip_serializing_if = "Option::is_none")]
pub value: Option<U256>,
/// Transaction data (None for empty bytes)
#[serde(skip_serializing_if = "Option::is_none")]
pub data: Option<Bytes>,
/// Transaction nonce (None for next available nonce)
#[serde(skip_serializing_if = "Option::is_none")]
pub nonce: Option<U256>,
/// Min block inclusion (None for include immediately)
#[serde(skip_serializing_if = "Option::is_none")]
pub condition: Option<TransactionCondition>,
}
/// Represents condition on minimum block number or block timestamp.
#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub enum TransactionCondition {
/// Valid at this minimum block number.
#[serde(rename = "block")]
Block(u64),
/// Valid at given unix time.
#[serde(rename = "time")]
Timestamp(u64),
}
#[cfg(test)]
mod tests {
use super::{Address, CallRequest, TransactionCondition, TransactionRequest};
use hex_literal::hex;
#[test]
fn should_serialize_call_request()
|
#[test]
fn should_deserialize_call_request() {
let serialized = r#"{
"to": "0x0000000000000000000000000000000000000005",
"gas": "0x5208",
"value": "0x4c4b40",
"data": "0x010203"
}"#;
let deserialized: CallRequest = serde_json::from_str(&serialized).unwrap();
assert_eq!(deserialized.from, None);
assert_eq!(deserialized.to, Some(Address::from_low_u64_be(5)));
assert_eq!(deserialized.gas, Some(21_000.into()));
assert_eq!(deserialized.gas_price, None);
assert_eq!(deserialized.value, Some(5_000_000.into()));
assert_eq!(deserialized.data, Some(hex!("010203").into()));
}
#[test]
fn should_serialize_transaction_request() {
// given
let tx_request = TransactionRequest {
from: Address::from_low_u64_be(5),
to: None,
gas: Some(21_000.into()),
gas_price: None,
value: Some(5_000_000.into()),
data: Some(hex!("010203").into()),
nonce: None,
condition: Some(TransactionCondition::Block(5)),
};
// when
let serialized = serde_json::to_string_pretty(&tx_request).unwrap();
// then
assert_eq!(
serialized,
r#"{
"from": "0x0000000000000000000000000000000000000005",
"gas": "0x5208",
"value": "0x4c4b40",
"data": "0x010203",
"condition": {
"block": 5
}
}"#
);
}
#[test]
fn should_deserialize_transaction_request() {
let serialized = r#"{
"from": "0x0000000000000000000000000000000000000005",
"gas": "0x5208",
"value": "0x4c4b40",
"data": "0x010203",
"condition": {
"block": 5
}
}"#;
let deserialized: TransactionRequest = serde_json::from_str(&serialized).unwrap();
assert_eq!(deserialized.from, Address::from_low_u64_be(5));
assert_eq!(deserialized.to, None);
assert_eq!(deserialized.gas, Some(21_000.into()));
assert_eq!(deserialized.gas_price, None);
assert_eq!(deserialized.value, Some(5_000_000.into()));
assert_eq!(deserialized.data, Some(hex!("010203").into()));
assert_eq!(deserialized.nonce, None);
assert_eq!(deserialized.condition, Some(TransactionCondition::Block(5)));
}
}
|
{
// given
let call_request = CallRequest {
from: None,
to: Some(Address::from_low_u64_be(5)),
gas: Some(21_000.into()),
gas_price: None,
value: Some(5_000_000.into()),
data: Some(hex!("010203").into()),
};
// when
let serialized = serde_json::to_string_pretty(&call_request).unwrap();
// then
assert_eq!(
serialized,
r#"{
"to": "0x0000000000000000000000000000000000000005",
"gas": "0x5208",
"value": "0x4c4b40",
"data": "0x010203"
}"#
);
}
|
deploy_sets.rs
|
use std::{
collections::HashMap,
fmt::{self, Display, Formatter},
};
use datasize::DataSize;
use serde::{Deserialize, Serialize};
use super::{event::DeployType, BlockHeight, FinalizationQueue};
use crate::types::{Chainspec, DeployHash, DeployHeader, Timestamp};
/// Stores the internal state of the BlockProposer.
#[derive(Clone, DataSize, Debug, Deserialize, Serialize)]
pub struct BlockProposerDeploySets {
/// The collection of deploys pending for inclusion in a block, with a timestamp of when we
/// received them.
pub(super) pending: HashMap<DeployHash, (DeployType, Timestamp)>,
/// The deploys that have already been included in a finalized block.
pub(super) finalized_deploys: HashMap<DeployHash, DeployHeader>,
/// The next block height we expect to be finalized.
/// If we receive a notification of finalization of a later block, we will store it in
/// finalization_queue.
/// If we receive a request that contains a later next_finalized, we will store it in
/// request_queue.
pub(super) next_finalized: BlockHeight,
/// The queue of finalized block contents awaiting inclusion in `self.finalized_deploys`.
pub(super) finalization_queue: FinalizationQueue,
}
impl Default for BlockProposerDeploySets {
fn default() -> Self {
let pending = HashMap::new();
let finalized_deploys = Default::default();
let next_finalized = Default::default();
let finalization_queue = Default::default();
BlockProposerDeploySets {
pending,
finalized_deploys,
next_finalized,
finalization_queue,
}
}
}
impl BlockProposerDeploySets {
/// Constructs the instance of `BlockProposerDeploySets` from the list of finalized deploys.
pub(super) fn from_finalized(
finalized_deploys: Vec<(DeployHash, DeployHeader)>,
next_finalized_height: u64,
) -> BlockProposerDeploySets {
BlockProposerDeploySets {
pending: HashMap::new(),
finalized_deploys: finalized_deploys.into_iter().collect(),
next_finalized: next_finalized_height,
finalization_queue: Default::default(),
}
}
}
impl Display for BlockProposerDeploySets {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(
f,
"(pending:{}, finalized:{})",
self.pending.len(),
self.finalized_deploys.len()
)
}
}
/// Create a state storage key for block proposer deploy sets based on a chainspec.
///
/// We namespace based on a chainspec to prevent validators from loading data for a different chain
/// if they forget to clear their state.
pub fn create_storage_key(chainspec: &Chainspec) -> Vec<u8>
|
impl BlockProposerDeploySets {
/// Prunes expired deploy information from the BlockProposerState, returns the total deploys
/// pruned
pub(crate) fn prune(&mut self, current_instant: Timestamp) -> usize {
let pending = prune_pending_deploys(&mut self.pending, current_instant);
let finalized = prune_deploys(&mut self.finalized_deploys, current_instant);
pending + finalized
}
}
/// Prunes expired deploy information from an individual deploy collection, returns the total
/// deploys pruned
pub(super) fn prune_deploys(
deploys: &mut HashMap<DeployHash, DeployHeader>,
current_instant: Timestamp,
) -> usize {
let initial_len = deploys.len();
deploys.retain(|_hash, header| !header.expired(current_instant));
initial_len - deploys.len()
}
/// Prunes expired deploy information from an individual pending deploy collection, returns the
/// total deploys pruned
pub(super) fn prune_pending_deploys(
deploys: &mut HashMap<DeployHash, (DeployType, Timestamp)>,
current_instant: Timestamp,
) -> usize {
let initial_len = deploys.len();
deploys.retain(|_hash, (deploy_type, _)| !deploy_type.header().expired(current_instant));
initial_len - deploys.len()
}
|
{
format!(
"block_proposer_deploy_sets:version={},chain_name={}",
chainspec.protocol_config.version, chainspec.network_config.name
)
.into()
}
|
Departments.js
|
/* eslint global-require: 0 */
// @flow
import React, { PureComponent } from 'react';
import PropTypes from 'prop-types';
import { Link } from 'react-router-dom';
import { AireLibreIcon, CuidadoPersonalIcon, HogarIcon, LineaBlancaIcon, TecnologiaIcon } from '../UI/Icons';
import Department from './Department';
import styles from './Departments.scss';
const departmentAssets = {
'1': {
banner: require('../../images/mundo-tecnologia.png'),
icon: TecnologiaIcon
},
'2': {
banner: require('../../images/mundo-lineablanca.png'),
icon: LineaBlancaIcon
},
'3': {
banner: require('../../images/mundo-cuidadopersonal.png'),
icon: CuidadoPersonalIcon
},
'4': {
banner: require('../../images/mundo-hogar.png'),
icon: HogarIcon
},
'5': {
banner: require('../../images/mundo-airelibre.png'),
icon: AireLibreIcon
}
};
export default class
|
extends PureComponent {
static propTypes = {
categoryTree: PropTypes.array.isRequired
}
render() {
const { categoryTree } = this.props;
return (
<div className={styles.container}>
        <div className={styles.header}>Encontrá aquí lo que estás buscando</div>
<div className={styles.departments}>
{categoryTree.map(department => (
<Link
key={department.id}
to={`/category/${department.id}`}
className={styles.departmentLink}
>
<Department
id={department.id}
name={department.name}
bannerSrc={departmentAssets[department.id].banner}
Icon={departmentAssets[department.id].icon}
/>
</Link>
))}
</div>
</div>
);
}
}
|
Departments
|
grid.go
|
// Copyright 2017 Zack Guo <[email protected]>. All rights reserved.
// Use of this source code is governed by a MIT license that can
// be found in the LICENSE file.
// +build ignore
package main
import (
"log"
"math"
"time"
ui "github.com/chenjiandongx/termui/v3"
"github.com/chenjiandongx/termui/v3/widgets"
)
func main() {
if err := ui.Init(); err != nil {
log.Fatalf("failed to initialize termui: %v", err)
}
defer ui.Close()
sinFloat64 := (func() []float64 {
n := 400
data := make([]float64, n)
for i := range data {
data[i] = 1 + math.Sin(float64(i)/5)
}
return data
})()
sl := widgets.NewSparkline()
sl.Data = sinFloat64[:100]
sl.LineColor = ui.ColorCyan
sl.TitleStyle.Fg = ui.ColorWhite
slg := widgets.NewSparklineGroup(sl)
slg.Title = "Sparkline"
lc := widgets.NewPlot()
lc.Title = "braille-mode Line Chart"
lc.Data = append(lc.Data, sinFloat64)
lc.AxesColor = ui.ColorWhite
lc.LineColors[0] = ui.ColorYellow
gs := make([]*widgets.Gauge, 3)
for i := range gs {
gs[i] = widgets.NewGauge()
gs[i].Percent = i * 10
gs[i].BarColor = ui.ColorRed
}
ls := widgets.NewList()
ls.Rows = []string{
"[1] Downloading File 1",
"",
|
"[2] Downloading File 2",
"",
"",
"",
"[3] Uploading File 3",
}
ls.Border = false
p := widgets.NewParagraph()
p.Text = "<> This row has 3 columns\n<- Widgets can be stacked up like left side\n<- Stacked widgets are treated as a single widget"
p.Title = "Demonstration"
grid := ui.NewGrid()
termWidth, termHeight := ui.TerminalDimensions()
grid.SetRect(0, 0, termWidth, termHeight)
grid.Set(
ui.NewRow(1.0/2,
ui.NewCol(1.0/2, slg),
ui.NewCol(1.0/2, lc),
),
ui.NewRow(1.0/2,
ui.NewCol(1.0/4, ls),
ui.NewCol(1.0/4,
ui.NewRow(.9/3, gs[0]),
ui.NewRow(.9/3, gs[1]),
ui.NewRow(1.2/3, gs[2]),
),
ui.NewCol(1.0/2, p),
),
)
ui.Render(grid)
tickerCount := 1
uiEvents := ui.PollEvents()
ticker := time.NewTicker(time.Second).C
for {
select {
case e := <-uiEvents:
switch e.ID {
case "q", "<C-c>":
return
case "<Resize>":
payload := e.Payload.(ui.Resize)
grid.SetRect(0, 0, payload.Width, payload.Height)
ui.Clear()
ui.Render(grid)
}
case <-ticker:
if tickerCount == 100 {
return
}
for _, g := range gs {
g.Percent = (g.Percent + 3) % 100
}
slg.Sparklines[0].Data = sinFloat64[tickerCount : tickerCount+100]
lc.Data[0] = sinFloat64[2*tickerCount:]
ui.Render(grid)
tickerCount++
}
}
}
|
"",
"",
|
faci_training.py
|
from glob import glob
import config
import errno
import os
def unique_id(msg):
ext = '.txt'
f = glob("*"+ext)[0]
num_trail = int(f.split(".")[0])
newf = "./" + str(num_trail+1) + ext
os.rename(f, newf)
outdir = os.path.join("../weights", config.summary_prefix+"%02d"%num_trail)
mkdir_p(outdir)
f= open(outdir + "/msg.txt","w+")
f.write(msg)
f.close()
return num_trail
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
|
raise
|
|
error.rs
|
//! Error types
use anomaly::{BoxError, Context};
use thiserror::Error;
/// An error raised within the relayer CLI
pub type Error = anomaly::Error<Kind>;
/// Kinds of errors
#[derive(Copy, Clone, Debug, Eq, Error, PartialEq)]
pub enum
|
{
/// Error in configuration file
#[error("config error")]
Config,
/// Input/output error
#[error("I/O error")]
Io,
/// Error during network query
#[error("query error")]
Query,
/// Error during transaction submission
#[error("tx error")]
Tx,
    /// Error related to key management
#[error("keys error")]
Keys,
}
impl Kind {
/// Create an error context from this error
pub fn context(self, source: impl Into<BoxError>) -> Context<Kind> {
Context::new(self, Some(source.into()))
}
}
|
Kind
|
test_actionparams.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from nose.tools import raises
from construct import actionparams
from construct.errors import ArgumentError
params_0 = dict()
params_1 = dict(
str_arg={
'label': 'String Argument',
'help': 'A String Argument',
'required': True,
'type': str
},
int_arg={
'label': 'Integer Argument',
'help': 'An Integer Argument',
'required': True,
'default': 1,
'type': int
},
float_arg={
'label': 'Float Argument',
'help': 'A Float Argument',
'required': False,
'default': 10.0,
'type': float
},
dict_arg={
'label': 'Dict Argument',
'help': 'A Dict Argument',
'required': True,
'type': dict
}
)
def test_validate_nada():
    '''Validate empty and populated params dicts'''
actionparams.validate(params_0)
actionparams.validate(params_1)
@raises(ArgumentError)
def test_pass_args_to_empty_params():
'''Validate kwargs against empty params'''
actionparams.validate_kwargs(params_0, {'invalid': 'kwargs'})
@raises(ArgumentError)
def test_missing_required():
|
'''Validate kwargs with missing required argument'''
actionparams.validate_kwargs(
params_1,
{'str_arg': 'str', 'dict_arg': {}}
)
|
|
create_robot_urlbuilder.go
|
// Code generated by go-swagger; DO NOT EDIT.
package robot
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the generate command
import (
"errors"
"net/url"
golangswaggerpaths "path"
)
|
// CreateRobotURL generates an URL for the create robot operation
type CreateRobotURL struct {
_basePath string
}
// WithBasePath sets the base path for this url builder, only required when it's different from the
// base path specified in the swagger spec.
// When the value of the base path is an empty string
func (o *CreateRobotURL) WithBasePath(bp string) *CreateRobotURL {
o.SetBasePath(bp)
return o
}
// SetBasePath sets the base path for this url builder, only required when it's different from the
// base path specified in the swagger spec.
// When the value of the base path is an empty string
func (o *CreateRobotURL) SetBasePath(bp string) {
o._basePath = bp
}
// Build a url path and query string
func (o *CreateRobotURL) Build() (*url.URL, error) {
var _result url.URL
var _path = "/robots"
_basePath := o._basePath
if _basePath == "" {
_basePath = "/api/v2.0"
}
_result.Path = golangswaggerpaths.Join(_basePath, _path)
return &_result, nil
}
// Must is a helper function to panic when the url builder returns an error
func (o *CreateRobotURL) Must(u *url.URL, err error) *url.URL {
if err != nil {
panic(err)
}
if u == nil {
panic("url can't be nil")
}
return u
}
// String returns the string representation of the path with query string
func (o *CreateRobotURL) String() string {
return o.Must(o.Build()).String()
}
// BuildFull builds a full url with scheme, host, path and query string
func (o *CreateRobotURL) BuildFull(scheme, host string) (*url.URL, error) {
if scheme == "" {
return nil, errors.New("scheme is required for a full url on CreateRobotURL")
}
if host == "" {
return nil, errors.New("host is required for a full url on CreateRobotURL")
}
base, err := o.Build()
if err != nil {
return nil, err
}
base.Scheme = scheme
base.Host = host
return base, nil
}
// StringFull returns the string representation of a complete url
func (o *CreateRobotURL) StringFull(scheme, host string) string {
return o.Must(o.BuildFull(scheme, host)).String()
}
| |
index.d.ts
|
import { AxiosInstance, AxiosRequestConfig } from 'axios'
import Vue from 'vue'
interface NuxtAxiosInstance extends AxiosInstance {
$request<T = any>(config: AxiosRequestConfig): Promise<T>
$get<T = any>(url: string, config?: AxiosRequestConfig): Promise<T>
$delete<T = any>(url: string, config?: AxiosRequestConfig): Promise<T>
$head<T = any>(url: string, config?: AxiosRequestConfig): Promise<T>
$options<T = any>(url: string, config?: AxiosRequestConfig): Promise<T>
$post<T = any>(url: string, data?: any, config?: AxiosRequestConfig): Promise<T>
$put<T = any>(url: string, data?: any, config?: AxiosRequestConfig): Promise<T>
$patch<T = any>(url: string, data?: any, config?: AxiosRequestConfig): Promise<T>
}
|
declare module 'vue/types/vue' {
interface Vue {
$axios: NuxtAxiosInstance
}
}
| |
parse.py
|
from struct import (unpack_from, calcsize)
from bglcapi.types import MessageType
from . import rsp
from . import evt
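# PARSE_MAP maps a (message type, message id) pair to the parser for that response or event payload.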
PARSE_MAP = {
MessageType.COMMAND_RESPONSE: {
0x00: rsp.message_to_target,
},
MessageType.EVENT: {
0x00: evt.message_to_host,
},
}
def
|
(msg_type: int, msg_id: int, data: bytes, offset: int):
return PARSE_MAP[msg_type][msg_id](data, offset)
|
from_binary
|
Leaflet.js
|
import React, {Component} from 'react'
import autobind from 'autobind-decorator'
import L from 'leaflet'
import 'leaflet/dist/leaflet.css'
const css = {
height: '500px',
marginBottom: '80px',
zIndex: 1,
}
export default class Leaflet extends Component {
static propTypes = {
markers: React.PropTypes.array,
}
static contextTypes = {
app: React.PropTypes.object.isRequired
}
constructor(props) {
super(props)
this.state = {
map: null,
center: null,
layerControl: null,
markerLayer: null,
hasLayers: false
}
this.icon = L.icon({
iconUrl: '/assets/img/leaflet/marker-icon.png',
iconRetinaUrl: '/assets/img/leaflet/marker-icon-2x.png',
shadowUrl: '/assets/img/leaflet/marker-shadow.png',
iconSize: [25, 41],
iconAnchor: [12, 41],
popupAnchor: [1, -34],
tooltipAnchor: [16, -28],
shadowSize: [41, 41]
})
}
componentDidMount() {
// let app = this.context.app;
let map = L.map('map', {zoomControl:true}).setView([34.52, 69.16], 10)
/* let nexrad = L.tileLayer.wms("http://mesonet.agron.iastate.edu/cgi-bin/wms/nexrad/n0r.cgi", {
layers: 'nexrad-n0r-900913',
format: 'image/png',
transparent: true,
attribution: "Weather data © 2012 IEM Nexrad"
});
let nmra = L.tileLayer.wms("https://mrdata.usgs.gov/services/nmra", {
layers: 'USNationalMineralAssessment1998',
format: 'image/png',
transparent: true
})
let osm = L.tileLayer('http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png');
let baseLayers = { "Nexrad" : nexrad, "NMRA" : nmra, "OSM" : osm}
*/
let layerControl = L.control.layers({}, {})
layerControl.addTo(map)
map.on('moveend', this.moveEnd)
let state = this.state
state.map = map
state.layerControl = layerControl
state.markerLayer = L.featureGroup([]).addTo(map)
this.setState(state)
this.tryAddLayers()
this.updateMarkerLayer(this.props.markers)
}
@autobind
tryAddLayers() {
if (this.state.hasLayers === false) {
this.addLayers()
}
}
|
componentWillReceiveProps(nextProps) {
this.tryAddLayers()
let existingMarkers = this.state.markerLayer.getLayers()
let markersToAdd = nextProps.markers.filter(m =>
existingMarkers.findIndex(el => el.options.id === m.id) === -1
)
let markersToRemove = existingMarkers.filter(m =>
nextProps.markers.findIndex(el => m.options.id === el.id) === -1
)
this.updateMarkerLayer(markersToAdd, markersToRemove)
}
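  // Adds the given markers to the marker layer (falling back to the map centre when coordinates are
  // missing), removes stale ones, and refits the map bounds when new markers were added.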
@autobind
updateMarkerLayer(markersToAdd, markersToRemove) {
let markers = markersToAdd || []
markersToRemove = markersToRemove || []
let newMarkers = []
let markerLayer = this.state.markerLayer
markers.forEach(m => {
let latLng = (m.lat && m.lng) ? [m.lat, m.lng] : this.state.map.getCenter()
let marker = L.marker(latLng, {icon: this.icon, draggable: (m.draggable || false), id: m.id})
.bindPopup(m.name)
if (m.onMove) {
marker.on('move', m.onMove)
}
newMarkers.push(marker)
markerLayer.addLayer(marker)
})
markersToRemove.forEach(m => {
markerLayer.removeLayer(m)
})
if (newMarkers.length > 0) {
if (markerLayer.getBounds() && markerLayer.getBounds().isValid()) {
this.state.map.fitBounds(markerLayer.getBounds(), {maxZoom: 15})
}
}
}
@autobind
addLayers() {
let app = this.context.app
let rawLayers = app.state.settings.MAP_LAYERS
if (!rawLayers || rawLayers.length === 0) {
return
}
let mapLayers = JSON.parse(rawLayers)
let defaultLayer = null
mapLayers.forEach(l => {
let layer = null
if (l.type === 'wms') {
layer = L.tileLayer.wms(l.url, {
layers: l.layer,
format: l.format || 'image/png'
})
} else if (l.type === 'osm') {
layer = L.tileLayer(l.url)
}
if (layer) {
this.state.layerControl.addBaseLayer(layer, l.name)
}
if (l.default) { defaultLayer = layer }
})
if (defaultLayer) { this.state.map.addLayer(defaultLayer) }
this.setState({hasLayers:true})
}
render() {
return (
<div>
<div id="map" style={css} />
</div>
)
}
@autobind
moveEnd(event) {
let map = this.state.map
let center = map.getCenter()
this.setState({map, center: [center.lat, center.lng].join(',')})
}
}
|
componentWillUnmount() {
this.setState({hasLayers:false})
}
|
bodyfrom.go
|
package helpers
import (
"os"
"strings"
)
func BodyFrom(args []string) string
|
{
var s string
if (len(args) < 2) || os.Args[1] == "" {
s = "Create user"
} else {
s = strings.Join(args[1:], " ")
}
return s
}
|
|
parseLC.js
|
const plc = require('../index').parseLC
exports["Resiliant: don\'t throw errors on weird/local call numbers"] = function (test) {
plc("Optics 3.10")
plc("Personal Copy - Le Sueur")
plc("N")
test.done()
}
exports["Parse single letter superclass from call number"] = function (test) {
test.ok( plc("N 1234").letters === "N" )
test.ok( plc("P37 .P 1997").letters === "P" )
test.done()
}
exports["Parse double letter subclass from call number"] = function (test) {
test.ok( plc("HT 65 H85 1992").letters === "HT")
test.ok( plc("NA 1085 B47 1987").letters === "NA")
test.ok( plc("NA1010.G7 G69 2003").letters === "NA")
test.done()
}
exports["Parse numeric component of call number"] = function (test) {
test.ok( plc("HT123 .H337 1998").number === 123)
test.ok( plc("PR 6069 T6 R6 1967").number === 6069)
// parse floats as well, not just integers
test.ok( plc("Z 118.5").number === 118.5)
test.done()
}
exports["Retain complete, original call number string in output"] = function (test) {
test.ok( plc("NA 1085 B47 1987").original === "NA 1085 B47 1987")
test.ok( plc("Faculty files").original === "Faculty files")
test.done()
}
|
lifecycle_executor.go
|
package build
import (
"context"
"math/rand"
"time"
"github.com/buildpacks/pack/internal/cache"
"github.com/buildpacks/imgutil"
"github.com/buildpacks/lifecycle/api"
"github.com/docker/docker/client"
"github.com/google/go-containerregistry/pkg/name"
"github.com/buildpacks/pack/internal/builder"
"github.com/buildpacks/pack/logging"
)
var (
// SupportedPlatformAPIVersions lists the Platform API versions pack supports listed from earliest to latest
SupportedPlatformAPIVersions = builder.APISet{
api.MustParse("0.3"),
api.MustParse("0.4"),
}
)
type Builder interface {
Name() string
UID() int
GID() int
LifecycleDescriptor() builder.LifecycleDescriptor
Stack() builder.StackMetadata
Image() imgutil.Image
}
type LifecycleExecutor struct {
logger logging.Logger
docker client.CommonAPIClient
}
type Cache interface {
Name() string
Clear(context.Context) error
Type() cache.Type
}
func init() {
rand.Seed(time.Now().UTC().UnixNano())
}
type LifecycleOptions struct {
AppPath string
Image name.Reference
Builder Builder
LifecycleImage string
RunImage string
ClearCache bool
Publish bool
TrustBuilder bool
UseCreator bool
CacheImage string
HTTPProxy string
HTTPSProxy string
NoProxy string
Network string
AdditionalTags []string
Volumes []string
DefaultProcessType string
FileFilter func(string) bool
}
func NewLifecycleExecutor(logger logging.Logger, docker client.CommonAPIClient) *LifecycleExecutor {
return &LifecycleExecutor{logger: logger, docker: docker}
}
func (l *LifecycleExecutor) Execute(ctx context.Context, opts LifecycleOptions) error {
lifecycleExec, err := NewLifecycleExecution(l.logger, l.docker, opts)
if err != nil {
return err
}
defer lifecycleExec.Cleanup()
return lifecycleExec.Run(ctx, NewDefaultPhaseFactory)
}
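// Usage sketch (illustrative only, not part of the original file): a caller builds
// a Docker client, constructs the executor and runs a build. The function name
// buildApp and the inputs logger, bldr and imageRef are hypothetical.
func buildApp(ctx context.Context, logger logging.Logger, bldr Builder, imageRef name.Reference) error {
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return err
}
executor := NewLifecycleExecutor(logger, docker)
return executor.Execute(ctx, LifecycleOptions{
AppPath: "./app",
Image:   imageRef,
Builder: bldr,
})
}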
|
api.py
|
import os
import json
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette.config import Config
from starlette.requests import Request
from starlette.middleware.sessions import SessionMiddleware
from starlette.responses import HTMLResponse, RedirectResponse
from authlib.integrations.starlette_client import OAuth, OAuthError
app = FastAPI()
origins = [
"http://localhost:3000",
"localhost:3000"
]
app.add_middleware(SessionMiddleware, secret_key=os.environ.get("GOOGLE_CLIENT_SECRET"))
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"]
)
config = Config('.env')
oauth = OAuth(config)
CONF_URL = 'https://accounts.google.com/.well-known/openid-configuration'
oauth.register(
name='google',
server_metadata_url=CONF_URL,
client_kwargs={
'scope': 'openid email profile'
}
)
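# Route overview: /login redirects the browser to Google using the credentials
# loaded from .env, Google calls back to /auth where the authorization code is
# exchanged for tokens, the ID-token claims are stored in the session, and
# /logout drops the session entry again.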
@app.route('/')
async def homepage(request: Request):
user = request.session.get('user')
if user:
data = json.dumps(user)
html = (
f'<pre>{data}</pre>'
'<a href="/logout">logout</a>'
)
return HTMLResponse(html)
return HTMLResponse('<a href="/login">login</a>')
@app.route('/login')
async def login(request: Request):
redirect_uri = request.url_for('auth')
return await oauth.google.authorize_redirect(request, redirect_uri)
@app.route('/auth')
async def auth(request: Request):
try:
token = await oauth.google.authorize_access_token(request)
except OAuthError as error:
return HTMLResponse(f'<h1>{error.error}</h1>')
user = await oauth.google.parse_id_token(request, token)
request.session['user'] = dict(user)
return RedirectResponse(url='/')
@app.route('/logout')
async def logout(request: Request):
request.session.pop('user', None)
return RedirectResponse(url='/')
|
manager.go
|
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Code generated by ack-generate. DO NOT EDIT.
package endpoint_config
import (
"context"
"fmt"
"time"
ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1"
ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare"
ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition"
ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config"
ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors"
ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics"
ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue"
ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log"
acktypes "github.com/aws-controllers-k8s/runtime/pkg/types"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
svcsdk "github.com/aws/aws-sdk-go/service/sagemaker"
svcsdkapi "github.com/aws/aws-sdk-go/service/sagemaker/sagemakeriface"
)
// +kubebuilder:rbac:groups=sagemaker.services.k8s.aws,resources=endpointconfigs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=sagemaker.services.k8s.aws,resources=endpointconfigs/status,verbs=get;update;patch
var lateInitializeFieldNames = []string{}
// resourceManager is responsible for providing a consistent way to perform
// CRUD operations in a backend AWS service API for EndpointConfig custom resources.
type resourceManager struct {
// cfg is a copy of the ackcfg.Config object passed on start of the service
// controller
cfg ackcfg.Config
// log refers to the logr.Logger object handling logging for the service
// controller
log logr.Logger
// metrics contains a collection of Prometheus metric objects that the
// service controller and its reconcilers track
metrics *ackmetrics.Metrics
// rr is the Reconciler which can be used for various utility
// functions such as querying for Secret values given a SecretReference
rr acktypes.Reconciler
// awsAccountID is the AWS account identifier that contains the resources
// managed by this resource manager
awsAccountID ackv1alpha1.AWSAccountID
// The AWS Region that this resource manager targets
awsRegion ackv1alpha1.AWSRegion
// sess is the AWS SDK Session object used to communicate with the backend
// AWS service API
sess *session.Session
// sdk is a pointer to the AWS service API interface exposed by the
// aws-sdk-go/services/{alias}/{alias}iface package.
sdkapi svcsdkapi.SageMakerAPI
}
// concreteResource returns a pointer to a resource from the supplied
// generic AWSResource interface
func (rm *resourceManager) concreteResource(
res acktypes.AWSResource,
) *resource {
// cast the generic interface into a pointer type specific to the concrete
// implementing resource type managed by this resource manager
return res.(*resource)
}
// ReadOne returns the currently-observed state of the supplied AWSResource in
// the backend AWS service API.
func (rm *resourceManager) ReadOne(
ctx context.Context,
res acktypes.AWSResource,
) (acktypes.AWSResource, error) {
r := rm.concreteResource(res)
if r.ko == nil {
// Should never happen... if it does, it's buggy code.
panic("resource manager's ReadOne() method received resource with nil CR object")
}
observed, err := rm.sdkFind(ctx, r)
if err != nil {
if observed != nil {
return rm.onError(observed, err)
}
return rm.onError(r, err)
}
return rm.onSuccess(observed)
}
// Create attempts to create the supplied AWSResource in the backend AWS
// service API, returning an AWSResource representing the newly-created
// resource
func (rm *resourceManager) Create(
ctx context.Context,
res acktypes.AWSResource,
) (acktypes.AWSResource, error) {
r := rm.concreteResource(res)
if r.ko == nil {
// Should never happen... if it does, it's buggy code.
panic("resource manager's Create() method received resource with nil CR object")
}
created, err := rm.sdkCreate(ctx, r)
if err != nil {
return rm.onError(r, err)
}
return rm.onSuccess(created)
}
// Update attempts to mutate the supplied desired AWSResource in the backend AWS
// service API, returning an AWSResource representing the newly-mutated
// resource.
// Note for specialized logic implementers can check to see how the latest
// observed resource differs from the supplied desired state. The
// higher-level reconciler determines whether or not the desired differs
// from the latest observed and decides whether to call the resource
// manager's Update method
func (rm *resourceManager) Update(
ctx context.Context,
resDesired acktypes.AWSResource,
resLatest acktypes.AWSResource,
delta *ackcompare.Delta,
) (acktypes.AWSResource, error) {
desired := rm.concreteResource(resDesired)
latest := rm.concreteResource(resLatest)
if desired.ko == nil || latest.ko == nil {
// Should never happen... if it does, it's buggy code.
panic("resource manager's Update() method received resource with nil CR object")
}
updated, err := rm.sdkUpdate(ctx, desired, latest, delta)
if err != nil {
return rm.onError(latest, err)
}
return rm.onSuccess(updated)
}
// Delete attempts to destroy the supplied AWSResource in the backend AWS
// service API, returning an AWSResource representing the
// resource being deleted (if delete is asynchronous and takes time)
func (rm *resourceManager) Delete(
ctx context.Context,
res acktypes.AWSResource,
) (acktypes.AWSResource, error) {
r := rm.concreteResource(res)
if r.ko == nil {
// Should never happen... if it does, it's buggy code.
panic("resource manager's Update() method received resource with nil CR object")
}
observed, err := rm.sdkDelete(ctx, r)
if err != nil {
if observed != nil {
return rm.onError(observed, err)
}
return rm.onError(r, err)
}
return rm.onSuccess(observed)
}
// ARNFromName returns an AWS Resource Name from a given string name. This
// is useful for constructing ARNs for APIs that require ARNs in their
// GetAttributes operations but all we have (for new CRs at least) is a
// name for the resource
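// For example (values purely illustrative), region "us-west-2", account
// "123456789012" and name "my-endpoint-config" yield
// "arn:aws:sagemaker:us-west-2:123456789012:my-endpoint-config".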
func (rm *resourceManager) ARNFromName(name string) string {
return fmt.Sprintf(
"arn:aws:sagemaker:%s:%s:%s",
rm.awsRegion,
rm.awsAccountID,
name,
)
}
// LateInitialize returns an acktypes.AWSResource after setting the late initialized
// fields from the readOne call. This method will initialize the optional fields
// which were not provided by the k8s user but were defaulted by the AWS service.
// If there are no such fields to be initialized, the returned object is similar to
// object passed in the parameter.
func (rm *resourceManager) LateInitialize(
ctx context.Context,
latest acktypes.AWSResource,
) (acktypes.AWSResource, error) {
rlog := ackrtlog.FromContext(ctx)
// If there are no fields to late initialize, do nothing
if len(lateInitializeFieldNames) == 0 {
rlog.Debug("no late initialization required.")
return latest, nil
}
latestCopy := latest.DeepCopy()
lateInitConditionReason := ""
lateInitConditionMessage := ""
observed, err := rm.ReadOne(ctx, latestCopy)
if err != nil {
lateInitConditionMessage = "Unable to complete Read operation required for late initialization"
lateInitConditionReason = "Late Initialization Failure"
ackcondition.SetLateInitialized(latestCopy, corev1.ConditionFalse, &lateInitConditionMessage, &lateInitConditionReason)
return latestCopy, err
}
lateInitializedRes := rm.lateInitializeFromReadOneOutput(observed, latestCopy)
incompleteInitialization := rm.incompleteLateInitialization(lateInitializedRes)
if incompleteInitialization {
// Add the condition with LateInitialized=False
lateInitConditionMessage = "Late initialization did not complete, requeuing with delay of 5 seconds"
lateInitConditionReason = "Delayed Late Initialization"
ackcondition.SetLateInitialized(lateInitializedRes, corev1.ConditionFalse, &lateInitConditionMessage, &lateInitConditionReason)
return lateInitializedRes, ackrequeue.NeededAfter(nil, time.Duration(5)*time.Second)
}
// Set LateInitialized condition to True
lateInitConditionMessage = "Late initialization successful"
lateInitConditionReason = "Late initialization successful"
ackcondition.SetLateInitialized(lateInitializedRes, corev1.ConditionTrue, &lateInitConditionMessage, &lateInitConditionReason)
return lateInitializedRes, nil
}
// incompleteLateInitialization return true if there are fields which were supposed to be
// late initialized but are not. If all the fields are late initialized, false is returned
func (rm *resourceManager) incompleteLateInitialization(
res acktypes.AWSResource,
) bool {
return false
}
// lateInitializeFromReadOneOutput late initializes the 'latest' resource from the 'observed'
// resource and returns 'latest' resource
func (rm *resourceManager) lateInitializeFromReadOneOutput(
observed acktypes.AWSResource,
latest acktypes.AWSResource,
) acktypes.AWSResource {
return latest
}
// newResourceManager returns a new struct implementing
// acktypes.AWSResourceManager
func newResourceManager(
cfg ackcfg.Config,
log logr.Logger,
metrics *ackmetrics.Metrics,
rr acktypes.Reconciler,
sess *session.Session,
id ackv1alpha1.AWSAccountID,
region ackv1alpha1.AWSRegion,
) (*resourceManager, error) {
return &resourceManager{
cfg: cfg,
log: log,
metrics: metrics,
rr: rr,
awsAccountID: id,
awsRegion: region,
sess: sess,
sdkapi: svcsdk.New(sess),
}, nil
}
// onError updates resource conditions and returns updated resource
// it returns nil if no condition is updated.
func (rm *resourceManager) onError(
r *resource,
err error,
) (acktypes.AWSResource, error) {
if r == nil {
return nil, err
}
r1, updated := rm.updateConditions(r, false, err)
if !updated {
return r, err
}
for _, condition := range r1.Conditions() {
if condition.Type == ackv1alpha1.ConditionTypeTerminal &&
condition.Status == corev1.ConditionTrue {
// resource is in Terminal condition
// return Terminal error
return r1, ackerr.Terminal
}
}
return r1, err
}
// onSuccess updates resource conditions and returns updated resource
// it returns the supplied resource if no condition is updated.
func (rm *resourceManager) onSuccess(
r *resource,
) (acktypes.AWSResource, error) {
if r == nil {
return nil, nil
}
r1, updated := rm.updateConditions(r, true, nil)
if !updated {
return r, nil
}
return r1, nil
}
| |
agglomerative_clustering.py
|
#-*- coding: utf-8 -*-
import numpy as np
from sklearn.cluster import AgglomerativeClustering as sk_AgglomerativeClustering
from sklearn.externals.joblib import Memory
from .clustering import Clustering
class AgglomerativeClustering(Clustering):
"""docstring for AgglomerativeClustering."""
def __init__(self, data, n_clusters = 2, affinity = 'euclidean',
memory = Memory(cachedir = None), connectivity = None,
compute_full_tree = 'auto', linkage = 'ward',
pooling_func = np.mean):
super(AgglomerativeClustering, self).__init__()
self.data = data
self.n_clusters = n_clusters
self.affinity = affinity
self.memory = memory
self.connectivity = connectivity
self.compute_full_tree = compute_full_tree
self.linkage = linkage
self.pooling_func = pooling_func
def execute(self):
"""Constroi o modelo de clusterizacao."""
self.model = sk_AgglomerativeClustering(n_clusters = self.n_clusters,
affinity = self.affinity,
memory = self.memory,
connectivity = self.connectivity,
compute_full_tree = self.compute_full_tree,
linkage = self.linkage,
pooling_func = self.pooling_func).fit(self.data)
self.clusters = super().make_clusters(self.data, self.model.labels_)
@property
def labels_(self):
"""Retorna os labels dos elementos do dataset."""
return self.model.labels_
@property
def clusters_(self):
"""Retorna um dicionaro onde os indices dos grupos sao as chaves."""
return self.clusters
@property
def model_(self):
"""Retorna o modelo de agrupamento."""
return self.model
|
branch_tests.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::unit_tests::testutils::compile_script_string;
use move_binary_format::file_format::Bytecode::*;
#[test]
fn compile_if_else_with_fallthrough() {
let code = String::from(
"
main() {
let x: u64;
label b0:
jump_if (42 > 0) b2;
label b1:
x = 1;
label b2:
return;
}
",
);
let compiled_script_res = compile_script_string(&code);
let compiled_script = compiled_script_res.unwrap();
assert_eq!(instr_count!(compiled_script, BrTrue(_)), 1);
assert_eq!(instr_count!(compiled_script, Branch(_)), 0);
}
#[test]
fn compile_if_else_with_jumps() {
let code = String::from(
"
main() {
let x: u64;
let y: u64;
label b0:
jump_if (42 > 0) b2;
label b1:
x = 1;
jump b3;
label b2:
y = 1;
jump b3;
label b3:
return;
}
",
);
let compiled_script_res = compile_script_string(&code);
let compiled_script = compiled_script_res.unwrap();
assert_eq!(instr_count!(compiled_script, BrTrue(_)), 1);
assert_eq!(instr_count!(compiled_script, Branch(_)), 2);
}
#[test]
fn compile_nested_if_else() {
let code = String::from(
"
main() {
let x: u64;
label b0:
jump_if (42 > 0) b2;
label b1:
x = 1;
jump exit;
label b2:
jump_if (5 > 10) b4;
label b3:
x = 2;
jump exit;
label b4:
x = 3;
jump exit;
label exit:
return;
}
",
);
let compiled_script_res = compile_script_string(&code);
let compiled_script = compiled_script_res.unwrap();
assert_eq!(instr_count!(compiled_script, BrTrue(_)), 2);
assert_eq!(instr_count!(compiled_script, Branch(_)), 3);
}
#[test]
fn compile_if_else_with_if_return() {
let code = String::from(
"
main() {
let x: u64;
label b0:
jump_if (42 > 0) b2;
label b1:
x = 1;
jump b3;
label b2:
return;
label b3:
return;
}
",
);
let compiled_script_res = compile_script_string(&code);
let compiled_script = compiled_script_res.unwrap();
assert_eq!(instr_count!(compiled_script, BrTrue(_)), 1);
assert_eq!(instr_count!(compiled_script, Branch(_)), 1);
assert_eq!(instr_count!(compiled_script, Ret), 2);
}
#[test]
fn compile_if_else_with_else_return() {
let code = String::from(
"
main() {
let x: u64;
label b0:
jump_if (42 > 0) b2;
label b1:
return;
label b2:
x = 1;
jump b3;
label b3:
return;
}
",
);
let compiled_script_res = compile_script_string(&code);
let compiled_script = compiled_script_res.unwrap();
assert_eq!(instr_count!(compiled_script, BrTrue(_)), 1);
assert_eq!(instr_count!(compiled_script, Branch(_)), 1);
assert_eq!(instr_count!(compiled_script, Ret), 2);
}
#[test]
fn compile_if_else_with_two_returns() {
let code = String::from(
"
main() {
label b0:
jump_if (42 > 0) b2;
label b1:
return;
label b2:
return;
}
",
);
let compiled_script_res = compile_script_string(&code);
let compiled_script = compiled_script_res.unwrap();
assert_eq!(instr_count!(compiled_script, BrTrue(_)), 1);
assert_eq!(instr_count!(compiled_script, Ret), 2);
}
#[test]
fn compile_while() {
let code = String::from(
"
main() {
let x: u64;
label b0:
x = 0;
label while:
jump_if_false (copy(x) < 5) while_cont;
label while_b0:
x = copy(x) + 1;
jump while;
label while_cont:
return;
}
",
);
let compiled_script_res = compile_script_string(&code);
let compiled_script = compiled_script_res.unwrap();
assert_eq!(instr_count!(compiled_script, BrFalse(_)), 1);
assert_eq!(instr_count!(compiled_script, Branch(_)), 1);
}
#[test]
fn compile_while_return() {
let code = String::from(
"
main() {
label while:
jump_if_false (42 > 0) while_cont;
label while_b0:
return;
label while_cont:
return;
}
",
);
let compiled_script_res = compile_script_string(&code);
let compiled_script = compiled_script_res.unwrap();
assert_eq!(instr_count!(compiled_script, BrFalse(_)), 1);
assert_eq!(instr_count!(compiled_script, Ret), 2);
}
#[test]
fn compile_nested_while() {
let code = String::from(
"
main() {
let x: u64;
let y: u64;
label b0:
x = 0;
label outer_while:
jump_if_false (copy(x) < 5) outer_while_cont;
label outer_while_b0:
x = move(x) + 1;
y = 0;
label inner_while:
jump_if_false (copy(y) < 5) inner_while_cont;
label inner_while_b0:
y = move(y) + 1;
jump inner_while;
label inner_while_cont:
jump outer_while;
label outer_while_cont:
return;
}
",
);
let compiled_script_res = compile_script_string(&code);
let compiled_script = compiled_script_res.unwrap();
assert_eq!(instr_count!(compiled_script, BrFalse(_)), 2);
assert_eq!(instr_count!(compiled_script, Branch(_)), 2);
}
#[test]
fn compile_while_break() {
let code = String::from(
"
main() {
label while:
jump_if_false (true) while_cont;
label while_b0:
jump while_cont;
label while_cont:
return;
}
",
);
let compiled_script_res = compile_script_string(&code);
let compiled_script = compiled_script_res.unwrap();
assert_eq!(instr_count!(compiled_script, BrFalse(_)), 1);
assert_eq!(instr_count!(compiled_script, Branch(_)), 1);
}
#[test]
fn compile_while_continue() {
let code = String::from(
"
main() {
label while:
jump_if_false (false) while_cont;
label while_b0:
jump while;
label while_cont:
return;
}
",
);
let compiled_script_res = compile_script_string(&code);
let compiled_script = compiled_script_res.unwrap();
assert_eq!(instr_count!(compiled_script, BrFalse(_)), 1);
assert_eq!(instr_count!(compiled_script, Branch(_)), 1);
}
#[test]
fn compile_loop_empty() {
let code = String::from(
"
main() {
label loop:
jump loop;
label exit:
return;
}
",
);
let compiled_script_res = compile_script_string(&code);
let compiled_script = compiled_script_res.unwrap();
assert_eq!(instr_count!(compiled_script, Branch(_)), 1);
}
#[test]
fn compile_loop_nested_break() {
let code = String::from(
"
main() {
label outer_loop:
label inner_loop:
jump inner_loop_cont;
jump inner_loop;
label inner_loop_cont:
jump outer_loop_cont;
jump outer_loop;
label outer_loop_cont:
return;
}
",
);
let compiled_script_res = compile_script_string(&code);
let compiled_script = compiled_script_res.unwrap();
assert_eq!(instr_count!(compiled_script, Branch(_)), 4);
}
#[test]
fn compile_loop_break_continue() {
let code = String::from(
"
main() {
let x: u64;
let y: u64;
label b0:
x = 0;
y = 0;
label loop:
x = move(x) + 1;
jump_if (copy(x) >= 10) loop_b2;
label loop_b0:
jump_if (copy(x) % 2 == 0) loop_b3;
label loop_b1:
y = move(y) + copy(x);
jump loop;
label loop_b2:
jump loop_cont;
label loop_b3:
jump loop;
label loop_cont:
return;
}
",
);
let compiled_script_res = compile_script_string(&code);
let compiled_script = compiled_script_res.unwrap();
assert_eq!(instr_count!(compiled_script, Branch(_)), 3);
assert_eq!(instr_count!(compiled_script, BrTrue(_)), 2);
}
#[test]
fn compile_loop_return() {
let code = String::from(
"
main() {
label outer_loop:
label inner_loop:
return;
jump inner_loop;
label inner_loop_cont:
return;
jump outer_loop;
label outer_loop_cont:
return;
}
",
);
let compiled_script_res = compile_script_string(&code);
let compiled_script = compiled_script_res.unwrap();
assert_eq!(instr_count!(compiled_script, Branch(_)), 2);
assert_eq!(instr_count!(compiled_script, Ret), 3);
}