startup.py
# Copyright (c) 2018-2022 Micro Focus or one of its affiliates. # Copyright (c) 2018 Uber Technologies, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright (c) 2013-2017 Uber Technologies, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ Startup message To begin a session, the frontend opens a connection to the backend and sends a Startup message. """ from __future__ import print_function, division, absolute_import import platform import os from struct import pack # noinspection PyUnresolvedReferences,PyCompatibility import vertica_python from ..message import BulkFrontendMessage class Startup(BulkFrontendMessage): message_id = None def __init__(self, user, database, session_label, os_user_name):
BulkFrontendMessage.__init__(self) try: os_platform = platform.platform() except Exception as e: os_platform = '' print("WARN: Cannot get the OS info: {}".format(str(e))) try: pid = str(os.getpid()) except Exception as e: pid = '0' print("WARN: Cannot get the process ID: {}".format(str(e))) self.parameters = { b'user': user, b'database': database, b'client_label': session_label, b'client_type': 'vertica-python', b'client_version': vertica_python.__version__, b'client_os': os_platform, b'client_os_user_name': os_user_name, b'client_pid': pid, }
def read_bytes(self): # The fixed protocol version is followed by pairs of parameter name and value strings. # A zero byte is required as a terminator after the last name/value pair. # Parameters can appear in any order. fixed_protocol_version = 3 << 16 | 5 bytes_ = pack('!I', fixed_protocol_version) # The frontend sends a requested protocol version to the backend. # Old servers (protocol < 3.7) ignore this value and use the fixed protocol version. # New servers (protocol >= 3.7) would try to find the common protocol # version in use for both client and server, and send back a ParameterStatus # message (key='protocol_version', value=<effective protocol version>) bytes_ += pack('!16sxIx', b'protocol_version', vertica_python.PROTOCOL_VERSION) for k in self.parameters: v = self.parameters[k].encode('utf-8') bytes_ += pack('!{}sx{}sx'.format(len(k), len(v)), k, v) bytes_ += pack('x') return bytes_
node_test.go
package printer import ( "context" "testing" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "github.com/vmware/octant/internal/testutil" "github.com/vmware/octant/pkg/view/component" ) func TestNodeListHandler(t *testing.T) { controller := gomock.NewController(t) defer controller.Finish() tpo := newTestPrinterOptions(controller) printOptions := tpo.ToOptions() node := testutil.CreateNode("node-1") node.Status.NodeInfo.KubeletVersion = "1.15.1" node.CreationTimestamp = *testutil.CreateTimestamp() tpo.PathForObject(node, node.Name, "/node") list := &corev1.NodeList{ Items: []corev1.Node{ *node, }, } ctx := context.Background() got, err := NodeListHandler(ctx, list, printOptions) require.NoError(t, err) expected := component.NewTableWithRows("Nodes", "We couldn't find any nodes!", nodeListColumns, []component.TableRow{ { "Age": component.NewTimestamp(node.CreationTimestamp.Time), "Name": component.NewLink("", "node-1", "/node"), "Labels": component.NewLabels(make(map[string]string)), "Version": component.NewText("1.15.1"), "Status": component.NewText("Unknown"), "Roles": component.NewText("<none>"), }, }) component.AssertEqual(t, expected, got) } func Test_nodeAddresses(t *testing.T) { node := testutil.CreateNode("node-1") node.Status.Addresses = []corev1.NodeAddress{ { Type: corev1.NodeHostName, Address: "host.local", }, { Type: corev1.NodeInternalIP, Address: "192.168.1.1", }, } got, err := nodeAddresses(node) require.NoError(t, err) expected := component.NewTableWithRows("Addresses", "There are no addresses!", nodeAddressesColumns, []component.TableRow{ { "Type": component.NewText("Hostname"), "Address": component.NewText("host.local"), }, { "Type": component.NewText("InternalIP"), "Address": component.NewText("192.168.1.1"), }, }) component.AssertEqual(t, expected, got) } func Test_nodeResources(t *testing.T) { node := testutil.CreateNode("node-1") capacityQuantity, err := resource.ParseQuantity("1") require.NoError(t, err) allocatedQuantity, err := resource.ParseQuantity("2") require.NoError(t, err) resourceNames := []corev1.ResourceName{corev1.ResourceCPU, corev1.ResourceMemory, corev1.ResourcePods, corev1.ResourceEphemeralStorage} node.Status.Allocatable = map[corev1.ResourceName]resource.Quantity{} node.Status.Capacity = map[corev1.ResourceName]resource.Quantity{} for _, resourceName := range resourceNames { node.Status.Allocatable[resourceName] = allocatedQuantity node.Status.Capacity[resourceName] = capacityQuantity } got, err := nodeResources(node) require.NoError(t, err) expected := component.NewTableWithRows("Resources", "There are no resources!", nodeResourcesColumns, []component.TableRow{ { "Key": component.NewText("CPU"), "Capacity": component.NewText("1"), "Allocatable": component.NewText("2"), }, { "Key": component.NewText("Memory"), "Capacity": component.NewText("1"), "Allocatable": component.NewText("2"), }, { "Key": component.NewText("Ephemeral Storage"), "Capacity": component.NewText("1"), "Allocatable": component.NewText("2"), }, { "Key": component.NewText("Pods"), "Capacity": component.NewText("1"), "Allocatable": component.NewText("2"), }, }) component.AssertEqual(t, expected, got) } func Test_nodeConditions(t *testing.T) { node := testutil.CreateNode("node-1") node.Status.Conditions = []corev1.NodeCondition{ { Type: "type", Status: "status", LastHeartbeatTime: *testutil.CreateTimestamp(), LastTransitionTime: *testutil.CreateTimestamp(), Reason: "reason", Message: "message", }, } 
got, err := nodeConditions(node) require.NoError(t, err) expected := component.NewTableWithRows("Conditions", "There are no node conditions!", nodeConditionsColumns, []component.TableRow{ { "Type": component.NewText("type"), "Reason": component.NewText("reason"), "Status": component.NewText("status"), "Message": component.NewText("message"), "Last Heartbeat": component.NewTimestamp(node.Status.Conditions[0].LastHeartbeatTime.Time), "Last Transition": component.NewTimestamp(node.Status.Conditions[0].LastTransitionTime.Time), }, }) component.AssertEqual(t, expected, got)
} func Test_nodeImages(t *testing.T) { node := testutil.CreateNode("node-1") node.Status.Images = []corev1.ContainerImage{ { Names: []string{"a"}, SizeBytes: 10, }, { Names: []string{"b-1", "b-2"}, SizeBytes: 10, }, } got, err := nodeImages(node) require.NoError(t, err) expected := component.NewTableWithRows("Images", "There are no images!", nodeImagesColumns, []component.TableRow{ { "Names": component.NewMarkdownText("a"), "Size": component.NewText("10"), }, { "Names": component.NewMarkdownText("b-1\nb-2"), "Size": component.NewText("10"), }, }) component.AssertEqual(t, expected, got) }
BackToTop.js
import React, {useEffect} from 'react'; import {Styles} from "./styles/backToTop.js"; function BackToTop() { useEffect(() => { const topBtn = document.querySelector(".totop-btn"); window.addEventListener("scroll", () => { if (window.scrollY > 750) { topBtn.classList.add("show"); } else { topBtn.classList.remove("show"); } }); topBtn.addEventListener("click", smoothScrollBackToTop); function smoothScrollBackToTop() { const targetPosition = 0; const startPosition = window.pageYOffset; const distance = targetPosition - startPosition; const duration = 750; let start = null; window.requestAnimationFrame(step); function step(timestamp) { if (!start) start = timestamp; const progress = timestamp - start; window.scrollTo(0, easeInOutCubic(progress, startPosition, distance, duration)); if (progress < duration) window.requestAnimationFrame(step); } } function
easeInOutCubic(t, b, c, d) { t /= d / 2; if (t < 1) return c / 2 * t * t * t + b; t -= 2; return c / 2 * (t * t * t + 2) + b; } }); return ( <Styles> {/* Back To Top */} <button type="button" className="totop-btn"> <i className="las la-arrow-up"></i> </button> </Styles> ) } export default BackToTop
testCommentOptions.py
import unittest from .testHelpers import get_cpp_function_list class TestCommentOptions(unittest.TestCase):
def test_function_with_comment_option_should_be_forgiven(self): function_list = get_cpp_function_list("void foo(){/* #lizard forgives*/}") self.assertEqual(0, len(function_list)) def test_function_with_comment_option_before_it_should_be_forgiven(self): function_list = get_cpp_function_list("/* #lizard forgives*/void foo(){}") self.assertEqual(0, len(function_list)) def test_function_after_comment_option_should_not_be_forgiven(self): function_list = get_cpp_function_list("/* #lizard forgives*/void foo(){}void bar(){}") self.assertEqual(1, len(function_list)) def test_generated_code_should_be_ignored(self): function_list = get_cpp_function_list("/* GENERATED CODE */void foo(){}") self.assertEqual(0, len(function_list))
build.rs
#[cfg(feature = "generate-c-api-headers")] extern crate cbindgen; use std::env; static CAPI_ENV_VAR: &str = "WASM_EMSCRIPTEN_GENERATE_C_API_HEADERS"; fn main() { if env::var(CAPI_ENV_VAR).unwrap_or("0".to_string()) == "1" { build(); } } #[cfg(feature = "generate-c-api-headers")] fn build() { let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); use cbindgen::Language; cbindgen::Builder::new() .with_crate(crate_dir.clone())
.write_to_file("wasmer.h"); cbindgen::Builder::new() .with_crate(crate_dir) .with_language(Language::Cxx) .with_include_guard("WASMER_H") .generate() .expect("Unable to generate C++ bindings") .write_to_file("wasmer.hh"); } #[cfg(not(feature = "generate-c-api-headers"))] fn build() { panic!("environment var set to generate wasmer c API headers but generate-c-api-headers feature not enabled") }
.with_language(Language::C) .with_include_guard("WASMER_H") .generate() .expect("Unable to generate C bindings")
info.go
package buildinfo var ( // Version is the version of this server. Version = "dev"
)
helpers.ts
import { GitProcess, IGitResult } from '../lib' // NOTE: bump these versions to the latest stable releases export const gitVersion = '2.14.2' export const gitLfsVersion = '2.3.1' const temp = require('temp').track()
export async function initialize(repositoryName: string): Promise<string> { const testRepoPath = temp.mkdirSync(`desktop-git-test-${repositoryName}`) await GitProcess.exec(['init'], testRepoPath) await GitProcess.exec(['config', 'user.email', '"[email protected]"'], testRepoPath) await GitProcess.exec(['config', 'user.name', '"Some User"'], testRepoPath) return testRepoPath } export function verify(result: IGitResult, callback: (result: IGitResult) => void) { try { callback(result) } catch (e) { console.log('error encountered while verifying; poking at response from Git:') console.log(` - exitCode: ${result.exitCode}`) console.log(` - stdout: ${result.stdout.trim()}`) console.log(` - stderr: ${result.stderr.trim()}`) console.log() throw e } }
urls.py
from peering_manager.api import OrderedDefaultRouter
from . import views router = OrderedDefaultRouter()
router.APIRootView = views.PeeringRootView router.register("autonomous-systems", views.AutonomousSystemViewSet) router.register("bgp-groups", views.BGPGroupViewSet) router.register("communities", views.CommunityViewSet) router.register("direct-peering-sessions", views.DirectPeeringSessionViewSet) router.register("internet-exchanges", views.InternetExchangeViewSet) router.register( "internet-exchange-peering-sessions", views.InternetExchangePeeringSessionViewSet ) router.register("routers", views.RouterViewSet) router.register("routing-policies", views.RoutingPolicyViewSet) app_name = "peering-api" urlpatterns = router.urls
trace.rs
use crate::core::ribosome::CallContext; use crate::core::ribosome::RibosomeT; use holochain_types::prelude::*; use holochain_wasmer_host::prelude::WasmError; use once_cell::unsync::Lazy; use std::sync::Arc; use tracing::*; #[instrument(skip(input))] pub fn wasm_trace(input: TraceMsg) { match input.level { holochain_types::prelude::Level::TRACE => tracing::trace!("{}", input.msg), holochain_types::prelude::Level::DEBUG => tracing::debug!("{}", input.msg), holochain_types::prelude::Level::INFO => tracing::info!("{}", input.msg), holochain_types::prelude::Level::WARN => tracing::warn!("{}", input.msg), holochain_types::prelude::Level::ERROR => tracing::error!("{}", input.msg), } } pub fn trace( _ribosome: Arc<impl RibosomeT>, _call_context: Arc<CallContext>, input: TraceMsg, ) -> Result<(), WasmError> { // Avoid dialing out to the environment on every trace. let wasm_log = Lazy::new(|| { std::env::var("WASM_LOG").unwrap_or_else(|_| "[wasm_trace]=debug".to_string()) }); let collector = tracing_subscriber::fmt() .with_env_filter(tracing_subscriber::EnvFilter::new((*wasm_log).clone())) .with_target(false)
.finish(); tracing::subscriber::with_default(collector, || wasm_trace(input)); Ok(()) } #[cfg(test)] #[cfg(feature = "slow_tests")] pub mod wasm_test { use super::trace; use crate::fixt::CallContextFixturator; use crate::fixt::RealRibosomeFixturator; use holochain_wasm_test_utils::TestWasm; use holochain_zome_types::prelude::*; use std::sync::Arc; use crate::core::ribosome::wasm_test::RibosomeTestFixture; /// we can call the trace host fn directly #[tokio::test(flavor = "multi_thread")] async fn trace_test() { let ribosome = RealRibosomeFixturator::new(crate::fixt::curve::Zomes(vec![])) .next() .unwrap(); let call_context = CallContextFixturator::new(::fixt::Unpredictable) .next() .unwrap(); let input = TraceMsg { level: Level::DEBUG, msg: "ribosome trace works".to_string(), }; let output: () = trace(Arc::new(ribosome), Arc::new(call_context), input).unwrap(); assert_eq!((), output); } #[tokio::test(flavor = "multi_thread")] async fn wasm_trace_test() { observability::test_run().ok(); let RibosomeTestFixture { conductor, alice, .. } = RibosomeTestFixture::new(TestWasm::Debug).await; let _: () = conductor.call(&alice, "debug", ()).await; } }
mkdir_command.go
// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package command import ( "errors" "time" "github.com/urfave/cli" "github.com/polygon-io/etcd/client" ) // NewMakeDirCommand returns the CLI command for "mkdir". func NewMakeDirCommand() cli.Command { return cli.Command{ Name: "mkdir", Usage: "make a new directory", ArgsUsage: "<key>", Flags: []cli.Flag{ cli.IntFlag{Name: "ttl", Value: 0, Usage: "key time-to-live in seconds"}, }, Action: func(c *cli.Context) error { mkdirCommandFunc(c, mustNewKeyAPI(c), client.PrevNoExist) return nil }, } } // mkdirCommandFunc executes the "mkdir" command. func mkdirCommandFunc(c *cli.Context, ki client.KeysAPI, prevExist client.PrevExistType) { if len(c.Args()) == 0
{ handleError(c, ExitBadArgs, errors.New("key required")) }
key := c.Args()[0] ttl := c.Int("ttl") ctx, cancel := contextWithTotalTimeout(c) resp, err := ki.Set(ctx, key, "", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: prevExist}) cancel() if err != nil { handleError(c, ExitServerError, err) } if c.GlobalString("output") != "simple" { printResponseKey(resp, c.GlobalString("output")) } }
mount.go
package create import ( "fmt" "strings" "github.com/rancher/norman/pkg/kv" "github.com/rancher/rio/cli/pkg/volumespec" "github.com/rancher/rio/types/client/rio/v1beta1" ) func ParseMounts(spec []string) ([]client.Mount, error) { var mounts []client.Mount for _, volume := range spec { serviceMount, err := volumespec.ParseVolume(volume) if err != nil { return nil, err } mount := createMount(serviceMount) mount, err = parseAdditionalOptions(mount, volume) if err != nil { return nil, err } mounts = append(mounts, mount) } return mounts, nil } func parseAdditionalOptions(mount client.Mount, spec string) (client.Mount, error) { if mount.Target == "" { return mount, fmt.Errorf("invalid volume spec, no target path found: %s", spec) } if !strings.Contains(mount.Source, "/") && !strings.Contains(mount.Source, "\\") && mount.VolumeOptions == nil { mount.VolumeOptions = &client.VolumeOptions{} } if len(strings.SplitN(spec, ":", 3)) < 3 { return mount, nil } for _, opt := range strings.Split(spec, ",") { key, value := kv.Split(opt, "=") key = strings.ToLower(key) switch key { case "driver": if mount.VolumeOptions == nil { return mount, fmt.Errorf("driver can only be used with volumes, not host bind mounts") } mount.VolumeOptions.Driver = value case "subpath": if mount.VolumeOptions == nil { return mount, fmt.Errorf("subpath can only be used with volumes, not host bind mounts") } mount.VolumeOptions.SubPath = value } } return mount, nil }
func createMount(serviceMount volumespec.ServiceVolumeConfig) client.Mount { mount := client.Mount{ Kind: serviceMount.Type,
ReadOnly: serviceMount.ReadOnly, Source: serviceMount.Source, Target: serviceMount.Target, } if serviceMount.Bind != nil { mount.BindOptions = &client.BindOptions{ Propagation: serviceMount.Bind.Propagation, } } if serviceMount.Volume != nil { mount.VolumeOptions = &client.VolumeOptions{ NoCopy: serviceMount.Volume.NoCopy, } } return mount }
news_sync_queue.go
package base import ( "fmt" "github.com/foxiswho/echo-go/models" "github.com/foxiswho/echo-go/module/db" "github.com/foxiswho/echo-go/util" ) type NewsSyncQueueService struct { } func NewNewsSyncQueueService() *NewsSyncQueueService { return new(NewsSyncQueueService) } // initialize the result list func newsSyncQueueNewMakeDataArr() []models.NewsSyncQueue { return
make([]models.NewsSyncQueue, 0) } // paginated list query
func (s *NewsSyncQueueService) GetAll(where []*db.QueryCondition, fields []string, orderBy string, page int, limit int) (*db.Paginator, error) { m := models.NewNewsSyncQueue() session := db.Filter(where) count, err := session.Count(m) if err != nil { fmt.Println(err) return nil, util.NewError(err.Error()) } Query := db.Pagination(int(count), page, limit) if count == 0 { return Query, nil } session = db.Filter(where) if orderBy != "" { session.OrderBy(orderBy) } session.Limit(limit, Query.Offset) if len(fields) == 0 { session.AllCols() } data := newsSyncQueueNewMakeDataArr() err = session.Find(&data) if err != nil { fmt.Println(err) return nil, util.NewError(err.Error()) } Query.Data = make([]interface{}, len(data)) for y, x := range data { Query.Data[y] = x } return Query, nil } // get a single record func (s *NewsSyncQueueService) GetById(id int) (*models.NewsSyncQueue, error) { m := new(models.NewsSyncQueue) m.Id = id ok, err := db.DB().Engine.Get(m) if err != nil { return nil, err } if !ok { return nil, util.NewError("record not found") } return m, nil } // delete a single record func (s *NewsSyncQueueService) Delete(id int) (int64, error) { m := new(models.NewsSyncQueue) m.Id = id num, err := db.DB().Engine.Delete(m) if err == nil { return num, nil } return num, err }
test_profile_audiences_changes.py
# coding: utf-8 """ Talon.One API The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501 The version of the OpenAPI document: 1.0.0 Generated by: https://openapi-generator.tech
""" from __future__ import absolute_import
import unittest import datetime import talon_one from talon_one.models.profile_audiences_changes import ProfileAudiencesChanges # noqa: E501 from talon_one.rest import ApiException class TestProfileAudiencesChanges(unittest.TestCase): """ProfileAudiencesChanges unit test stubs""" def setUp(self): pass def tearDown(self): pass def make_instance(self, include_optional): """Test ProfileAudiencesChanges include_optional is a boolean, when False only required params are included, when True both required and optional params are included """ # model = talon_one.models.profile_audiences_changes.ProfileAudiencesChanges() # noqa: E501 if include_optional: return ProfileAudiencesChanges( adds = [ 56 ], deletes = [ 56 ] ) else: return ProfileAudiencesChanges( adds = [ 56 ], deletes = [ 56 ], ) def testProfileAudiencesChanges(self): """Test ProfileAudiencesChanges""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True) if __name__ == '__main__': unittest.main()
instances_client.go
// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated by protoc-gen-go_gapic. DO NOT EDIT. package compute import ( "bytes" "context" "fmt" "io/ioutil" "math" "net/http" "net/url" "sort" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" httptransport "google.golang.org/api/transport/http" computepb "google.golang.org/genproto/googleapis/cloud/compute/v1" "google.golang.org/grpc" "google.golang.org/grpc/metadata" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" ) var newInstancesClientHook clientHook // InstancesCallOptions contains the retry settings for each method of InstancesClient. type InstancesCallOptions struct { AddAccessConfig []gax.CallOption AddResourcePolicies []gax.CallOption AggregatedList []gax.CallOption AttachDisk []gax.CallOption BulkInsert []gax.CallOption Delete []gax.CallOption DeleteAccessConfig []gax.CallOption DetachDisk []gax.CallOption Get []gax.CallOption GetEffectiveFirewalls []gax.CallOption GetGuestAttributes []gax.CallOption GetIamPolicy []gax.CallOption GetScreenshot []gax.CallOption GetSerialPortOutput []gax.CallOption GetShieldedInstanceIdentity []gax.CallOption Insert []gax.CallOption List []gax.CallOption ListReferrers []gax.CallOption RemoveResourcePolicies []gax.CallOption Reset []gax.CallOption SetDeletionProtection []gax.CallOption SetDiskAutoDelete []gax.CallOption SetIamPolicy []gax.CallOption SetLabels []gax.CallOption SetMachineResources []gax.CallOption SetMachineType []gax.CallOption SetMetadata []gax.CallOption SetMinCpuPlatform []gax.CallOption SetScheduling []gax.CallOption SetServiceAccount []gax.CallOption SetShieldedInstanceIntegrityPolicy []gax.CallOption SetTags []gax.CallOption SimulateMaintenanceEvent []gax.CallOption Start []gax.CallOption StartWithEncryptionKey []gax.CallOption Stop []gax.CallOption TestIamPermissions []gax.CallOption Update []gax.CallOption UpdateAccessConfig []gax.CallOption UpdateDisplayDevice []gax.CallOption UpdateNetworkInterface []gax.CallOption UpdateShieldedInstanceConfig []gax.CallOption } // internalInstancesClient is an interface that defines the methods available from Google Compute Engine API.
type internalInstancesClient interface { Close() error setGoogleClientInfo(...string) Connection() *grpc.ClientConn AddAccessConfig(context.Context, *computepb.AddAccessConfigInstanceRequest, ...gax.CallOption) (*Operation, error) AddResourcePolicies(context.Context, *computepb.AddResourcePoliciesInstanceRequest, ...gax.CallOption) (*Operation, error) AggregatedList(context.Context, *computepb.AggregatedListInstancesRequest, ...gax.CallOption) *InstancesScopedListPairIterator AttachDisk(context.Context, *computepb.AttachDiskInstanceRequest, ...gax.CallOption) (*Operation, error) BulkInsert(context.Context, *computepb.BulkInsertInstanceRequest, ...gax.CallOption) (*Operation, error) Delete(context.Context, *computepb.DeleteInstanceRequest, ...gax.CallOption) (*Operation, error) DeleteAccessConfig(context.Context, *computepb.DeleteAccessConfigInstanceRequest, ...gax.CallOption) (*Operation, error) DetachDisk(context.Context, *computepb.DetachDiskInstanceRequest, ...gax.CallOption) (*Operation, error) Get(context.Context, *computepb.GetInstanceRequest, ...gax.CallOption) (*computepb.Instance, error) GetEffectiveFirewalls(context.Context, *computepb.GetEffectiveFirewallsInstanceRequest, ...gax.CallOption) (*computepb.InstancesGetEffectiveFirewallsResponse, error) GetGuestAttributes(context.Context, *computepb.GetGuestAttributesInstanceRequest, ...gax.CallOption) (*computepb.GuestAttributes, error) GetIamPolicy(context.Context, *computepb.GetIamPolicyInstanceRequest, ...gax.CallOption) (*computepb.Policy, error) GetScreenshot(context.Context, *computepb.GetScreenshotInstanceRequest, ...gax.CallOption) (*computepb.Screenshot, error) GetSerialPortOutput(context.Context, *computepb.GetSerialPortOutputInstanceRequest, ...gax.CallOption) (*computepb.SerialPortOutput, error) GetShieldedInstanceIdentity(context.Context, *computepb.GetShieldedInstanceIdentityInstanceRequest, ...gax.CallOption) (*computepb.ShieldedInstanceIdentity, error) Insert(context.Context, *computepb.InsertInstanceRequest, ...gax.CallOption) (*Operation, error) List(context.Context, *computepb.ListInstancesRequest, ...gax.CallOption) *InstanceIterator ListReferrers(context.Context, *computepb.ListReferrersInstancesRequest, ...gax.CallOption) *ReferenceIterator RemoveResourcePolicies(context.Context, *computepb.RemoveResourcePoliciesInstanceRequest, ...gax.CallOption) (*Operation, error) Reset(context.Context, *computepb.ResetInstanceRequest, ...gax.CallOption) (*Operation, error) SetDeletionProtection(context.Context, *computepb.SetDeletionProtectionInstanceRequest, ...gax.CallOption) (*Operation, error) SetDiskAutoDelete(context.Context, *computepb.SetDiskAutoDeleteInstanceRequest, ...gax.CallOption) (*Operation, error) SetIamPolicy(context.Context, *computepb.SetIamPolicyInstanceRequest, ...gax.CallOption) (*computepb.Policy, error) SetLabels(context.Context, *computepb.SetLabelsInstanceRequest, ...gax.CallOption) (*Operation, error) SetMachineResources(context.Context, *computepb.SetMachineResourcesInstanceRequest, ...gax.CallOption) (*Operation, error) SetMachineType(context.Context, *computepb.SetMachineTypeInstanceRequest, ...gax.CallOption) (*Operation, error) SetMetadata(context.Context, *computepb.SetMetadataInstanceRequest, ...gax.CallOption) (*Operation, error) SetMinCpuPlatform(context.Context, *computepb.SetMinCpuPlatformInstanceRequest, ...gax.CallOption) (*Operation, error) SetScheduling(context.Context, *computepb.SetSchedulingInstanceRequest, ...gax.CallOption) (*Operation, error) 
SetServiceAccount(context.Context, *computepb.SetServiceAccountInstanceRequest, ...gax.CallOption) (*Operation, error) SetShieldedInstanceIntegrityPolicy(context.Context, *computepb.SetShieldedInstanceIntegrityPolicyInstanceRequest, ...gax.CallOption) (*Operation, error) SetTags(context.Context, *computepb.SetTagsInstanceRequest, ...gax.CallOption) (*Operation, error) SimulateMaintenanceEvent(context.Context, *computepb.SimulateMaintenanceEventInstanceRequest, ...gax.CallOption) (*Operation, error) Start(context.Context, *computepb.StartInstanceRequest, ...gax.CallOption) (*Operation, error) StartWithEncryptionKey(context.Context, *computepb.StartWithEncryptionKeyInstanceRequest, ...gax.CallOption) (*Operation, error) Stop(context.Context, *computepb.StopInstanceRequest, ...gax.CallOption) (*Operation, error) TestIamPermissions(context.Context, *computepb.TestIamPermissionsInstanceRequest, ...gax.CallOption) (*computepb.TestPermissionsResponse, error) Update(context.Context, *computepb.UpdateInstanceRequest, ...gax.CallOption) (*Operation, error) UpdateAccessConfig(context.Context, *computepb.UpdateAccessConfigInstanceRequest, ...gax.CallOption) (*Operation, error) UpdateDisplayDevice(context.Context, *computepb.UpdateDisplayDeviceInstanceRequest, ...gax.CallOption) (*Operation, error) UpdateNetworkInterface(context.Context, *computepb.UpdateNetworkInterfaceInstanceRequest, ...gax.CallOption) (*Operation, error) UpdateShieldedInstanceConfig(context.Context, *computepb.UpdateShieldedInstanceConfigInstanceRequest, ...gax.CallOption) (*Operation, error) } // InstancesClient is a client for interacting with Google Compute Engine API. // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. // // The Instances API. type InstancesClient struct { // The internal transport-dependent client. internalClient internalInstancesClient // The call options for this service. CallOptions *InstancesCallOptions } // Wrapper methods routed to the internal client. // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *InstancesClient) Close() error { return c.internalClient.Close() } // setGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *InstancesClient) setGoogleClientInfo(keyval ...string) { c.internalClient.setGoogleClientInfo(keyval...) } // Connection returns a connection to the API service. // // Deprecated. func (c *InstancesClient) Connection() *grpc.ClientConn { return c.internalClient.Connection() } // AddAccessConfig adds an access config to an instance’s network interface. func (c *InstancesClient) AddAccessConfig(ctx context.Context, req *computepb.AddAccessConfigInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.AddAccessConfig(ctx, req, opts...) } // AddResourcePolicies adds existing resource policies to an instance. You can only add one policy right now which will be applied to this instance for scheduling live migrations. func (c *InstancesClient) AddResourcePolicies(ctx context.Context, req *computepb.AddResourcePoliciesInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.AddResourcePolicies(ctx, req, opts...) } // AggregatedList retrieves aggregated list of all of the instances in your project across all regions and zones. 
func (c *InstancesClient) AggregatedList(ctx context.Context, req *computepb.AggregatedListInstancesRequest, opts ...gax.CallOption) *InstancesScopedListPairIterator { return c.internalClient.AggregatedList(ctx, req, opts...) } // AttachDisk attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance. func (c *InstancesClient) AttachDisk(ctx context.Context, req *computepb.AttachDiskInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.AttachDisk(ctx, req, opts...) } // BulkInsert creates multiple instances. Count specifies the number of instances to create. func (c *InstancesClient) BulkInsert(ctx context.Context, req *computepb.BulkInsertInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.BulkInsert(ctx, req, opts...) } // Delete deletes the specified Instance resource. For more information, see Deleting an instance. func (c *InstancesClient) Delete(ctx context.Context, req *computepb.DeleteInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.Delete(ctx, req, opts...) } // DeleteAccessConfig deletes an access config from an instance’s network interface. func (c *InstancesClient) DeleteAccessConfig(ctx context.Context, req *computepb.DeleteAccessConfigInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.DeleteAccessConfig(ctx, req, opts...) } // DetachDisk detaches a disk from an instance. func (c *InstancesClient) DetachDisk(ctx context.Context, req *computepb.DetachDiskInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.DetachDisk(ctx, req, opts...) } // Get returns the specified Instance resource. Gets a list of available instances by making a list() request. func (c *InstancesClient) Get(ctx context.Context, req *computepb.GetInstanceRequest, opts ...gax.CallOption) (*computepb.Instance, error) { return c.internalClient.Get(ctx, req, opts...) } // GetEffectiveFirewalls returns effective firewalls applied to an interface of the instance. func (c *InstancesClient) GetEffectiveFirewalls(ctx context.Context, req *computepb.GetEffectiveFirewallsInstanceRequest, opts ...gax.CallOption) (*computepb.InstancesGetEffectiveFirewallsResponse, error) { return c.internalClient.GetEffectiveFirewalls(ctx, req, opts...) } // GetGuestAttributes returns the specified guest attributes entry. func (c *InstancesClient) GetGuestAttributes(ctx context.Context, req *computepb.GetGuestAttributesInstanceRequest, opts ...gax.CallOption) (*computepb.GuestAttributes, error) { return c.internalClient.GetGuestAttributes(ctx, req, opts...) } // GetIamPolicy gets the access control policy for a resource. May be empty if no such policy or resource exists. func (c *InstancesClient) GetIamPolicy(ctx context.Context, req *computepb.GetIamPolicyInstanceRequest, opts ...gax.CallOption) (*computepb.Policy, error) { return c.internalClient.GetIamPolicy(ctx, req, opts...) } // GetScreenshot returns the screenshot from the specified instance. func (c *InstancesClient) GetScreenshot(ctx context.Context, req *computepb.GetScreenshotInstanceRequest, opts ...gax.CallOption) (*computepb.Screenshot, error) { return c.internalClient.GetScreenshot(ctx, req, opts...) } // GetSerialPortOutput returns the last 1 MB of serial port output from the specified instance. 
func (c *InstancesClient) GetSerialPortOutput(ctx context.Context, req *computepb.GetSerialPortOutputInstanceRequest, opts ...gax.CallOption) (*computepb.SerialPortOutput, error) { return c.internalClient.GetSerialPortOutput(ctx, req, opts...) } // GetShieldedInstanceIdentity returns the Shielded Instance Identity of an instance func (c *InstancesClient) GetShieldedInstanceIdentity(ctx context.Context, req *computepb.GetShieldedInstanceIdentityInstanceRequest, opts ...gax.CallOption) (*computepb.ShieldedInstanceIdentity, error) { return c.internalClient.GetShieldedInstanceIdentity(ctx, req, opts...) } // Insert creates an instance resource in the specified project using the data included in the request. func (c *InstancesClient) Insert(ctx context.Context, req *computepb.InsertInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.Insert(ctx, req, opts...) } // List retrieves the list of instances contained within the specified zone. func (c *InstancesClient) List(ctx context.Context, req *computepb.ListInstancesRequest, opts ...gax.CallOption) *InstanceIterator { return c.internalClient.List(ctx, req, opts...) } // ListReferrers retrieves a list of resources that refer to the VM instance specified in the request. For example, if the VM instance is part of a managed or unmanaged instance group, the referrers list includes the instance group. For more information, read Viewing referrers to VM instances. func (c *InstancesClient) ListReferrers(ctx context.Context, req *computepb.ListReferrersInstancesRequest, opts ...gax.CallOption) *ReferenceIterator { return c.internalClient.ListReferrers(ctx, req, opts...) } // RemoveResourcePolicies removes resource policies from an instance. func (c *InstancesClient) RemoveResourcePolicies(ctx context.Context, req *computepb.RemoveResourcePoliciesInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.RemoveResourcePolicies(ctx, req, opts...) } // Reset performs a reset on the instance. This is a hard reset the VM does not do a graceful shutdown. For more information, see Resetting an instance. func (c *InstancesClient) Reset(ctx context.Context, req *computepb.ResetInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.Reset(ctx, req, opts...) } // SetDeletionProtection sets deletion protection on the instance. func (c *InstancesClient) SetDeletionProtection(ctx context.Context, req *computepb.SetDeletionProtectionInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.SetDeletionProtection(ctx, req, opts...) } // SetDiskAutoDelete sets the auto-delete flag for a disk attached to an instance. func (c *InstancesClient) SetDiskAutoDelete(ctx context.Context, req *computepb.SetDiskAutoDeleteInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.SetDiskAutoDelete(ctx, req, opts...) } // SetIamPolicy sets the access control policy on the specified resource. Replaces any existing policy. func (c *InstancesClient) SetIamPolicy(ctx context.Context, req *computepb.SetIamPolicyInstanceRequest, opts ...gax.CallOption) (*computepb.Policy, error) { return c.internalClient.SetIamPolicy(ctx, req, opts...) } // SetLabels sets labels on an instance. To learn more about labels, read the Labeling Resources documentation. func (c *InstancesClient) SetLabels(ctx context.Context, req *computepb.SetLabelsInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.SetLabels(ctx, req, opts...) 
} // SetMachineResources changes the number and/or type of accelerator for a stopped instance to the values specified in the request. func (c *InstancesClient) SetMachineResources(ctx context.Context, req *computepb.SetMachineResourcesInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.SetMachineResources(ctx, req, opts...) } // SetMachineType changes the machine type for a stopped instance to the machine type specified in the request. func (c *InstancesClient) SetMachineType(ctx context.Context, req *computepb.SetMachineTypeInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.SetMachineType(ctx, req, opts...) } // SetMetadata sets metadata for the specified instance to the data included in the request. func (c *InstancesClient) SetMetadata(ctx context.Context, req *computepb.SetMetadataInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.SetMetadata(ctx, req, opts...) } // SetMinCpuPlatform changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform. func (c *InstancesClient) SetMinCpuPlatform(ctx context.Context, req *computepb.SetMinCpuPlatformInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.SetMinCpuPlatform(ctx, req, opts...) } // SetScheduling sets an instance’s scheduling options. You can only call this method on a stopped instance, that is, a VM instance that is in a TERMINATED state. See Instance Life Cycle for more information on the possible instance states. func (c *InstancesClient) SetScheduling(ctx context.Context, req *computepb.SetSchedulingInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.SetScheduling(ctx, req, opts...) } // SetServiceAccount sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance. func (c *InstancesClient) SetServiceAccount(ctx context.Context, req *computepb.SetServiceAccountInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.SetServiceAccount(ctx, req, opts...) } // SetShieldedInstanceIntegrityPolicy sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. func (c *InstancesClient) SetShieldedInstanceIntegrityPolicy(ctx context.Context, req *computepb.SetShieldedInstanceIntegrityPolicyInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.SetShieldedInstanceIntegrityPolicy(ctx, req, opts...) } // SetTags sets network tags for the specified instance to the data included in the request. func (c *InstancesClient) SetTags(ctx context.Context, req *computepb.SetTagsInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.SetTags(ctx, req, opts...) } // SimulateMaintenanceEvent simulates a maintenance event on the instance. func (c *InstancesClient) SimulateMaintenanceEvent(ctx context.Context, req *computepb.SimulateMaintenanceEventInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.SimulateMaintenanceEvent(ctx, req, opts...) } // Start starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance. 
func (c *InstancesClient) Start(ctx context.Context, req *computepb.StartInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.Start(ctx, req, opts...) } // StartWithEncryptionKey starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance. func (c *InstancesClient) StartWithEncryptionKey(ctx context.Context, req *computepb.StartWithEncryptionKeyInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.StartWithEncryptionKey(ctx, req, opts...) } // Stop stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance. func (c *InstancesClient) Stop(ctx context.Context, req *computepb.StopInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.Stop(ctx, req, opts...) } // TestIamPermissions returns permissions that a caller has on the specified resource. func (c *InstancesClient) TestIamPermissions(ctx context.Context, req *computepb.TestIamPermissionsInstanceRequest, opts ...gax.CallOption) (*computepb.TestPermissionsResponse, error) { return c.internalClient.TestIamPermissions(ctx, req, opts...) } // Update updates an instance only if the necessary resources are available. This method can update only a specific set of instance properties. See Updating a running instance for a list of updatable instance properties. func (c *InstancesClient) Update(ctx context.Context, req *computepb.UpdateInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.Update(ctx, req, opts...) } // UpdateAccessConfig updates the specified access config from an instance’s network interface with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. func (c *InstancesClient) UpdateAccessConfig(ctx context.Context, req *computepb.UpdateAccessConfigInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.UpdateAccessConfig(ctx, req, opts...) } // UpdateDisplayDevice updates the Display config for a VM instance. You can only use this method on a stopped VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. func (c *InstancesClient) UpdateDisplayDevice(ctx context.Context, req *computepb.UpdateDisplayDeviceInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.UpdateDisplayDevice(ctx, req, opts...) } // UpdateNetworkInterface updates an instance’s network interface. This method can only update an interface’s alias IP range and attached network. See Modifying alias IP ranges for an existing instance for instructions on changing alias IP ranges. See Migrating a VM between networks for instructions on migrating an interface. This method follows PATCH semantics. func (c *InstancesClient) UpdateNetworkInterface(ctx context.Context, req *computepb.UpdateNetworkInterfaceInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.UpdateNetworkInterface(ctx, req, opts...) } // UpdateShieldedInstanceConfig updates the Shielded Instance config for an instance. You can only use this method on a stopped instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules. func (c *InstancesClient) UpdateShieldedInstanceConfig(ctx context.Context, req *computepb.UpdateShieldedInstanceConfigInstanceRequest, opts ...gax.CallOption) (*Operation, error) { return c.internalClient.UpdateShieldedInstanceConfig(ctx, req, opts...) } // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. type instancesRESTClient struct { // The http endpoint to connect to. endpoint string // The http client. httpClient *http.Client // The x-goog-* metadata to be sent with each request. xGoogMetadata metadata.MD } // NewInstancesRESTClient creates a new instances rest client. // // The Instances API. func NewInstancesRESTClient(ctx context.Context, opts ...option.ClientOption) (*InstancesClient, error) { clientOpts := append(defaultInstancesRESTClientOptions(), opts...) httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...) if err != nil { return nil, err } c := &instancesRESTClient{ endpoint: endpoint, httpClient: httpClient, } c.setGoogleClientInfo() return &InstancesClient{internalClient: c, CallOptions: &InstancesCallOptions{}}, nil } func defaultInsta
ncesRESTClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("https://compute.googleapis.com"), internaloption.WithDefaultMTLSEndpoint("https://compute.mtls.googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), } } // setGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *instancesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", versionGo()}, keyval...) kv = append(kv, "gapic", versionClient, "gax", gax.Version, "rest", "UNKNOWN") c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *instancesRESTClient) Close() error { // Replace httpClient with nil to force cleanup. c.httpClient = nil return nil } // Connection returns a connection to the API service. // // Deprecated. func (c *instancesRESTClient) Connection() *grpc.ClientConn { return nil } // AddAccessConfig adds an access config to an instance’s network interface. func (c *instancesRESTClient) AddAccessConfig(ctx context.Context, req *computepb.AddAccessConfigInstanceRequest, opts ...gax.CallOption) (*Operation, error) { m := protojson.MarshalOptions{AllowPartial: true} body := req.GetAccessConfigResource() jsonReq, err := m.Marshal(body) if err != nil { return nil, err } baseUrl, _ := url.Parse(c.endpoint) baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/addAccessConfig", req.GetProject(), req.GetZone(), req.GetInstance()) params := url.Values{} if req.GetNetworkInterface() != "" { params.Add("networkInterface", fmt.Sprintf("%v", req.GetNetworkInterface())) } if req != nil && req.RequestId != nil { params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId())) } baseUrl.RawQuery = params.Encode() httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) if err != nil { return nil, err } httpReq = httpReq.WithContext(ctx) // Set the headers for k, v := range c.xGoogMetadata { httpReq.Header[k] = v } httpReq.Header["Content-Type"] = []string{"application/json"} httpRsp, err := c.httpClient.Do(httpReq) if err != nil { return nil, err } defer httpRsp.Body.Close() if httpRsp.StatusCode != http.StatusOK { return nil, fmt.Errorf(httpRsp.Status) } buf, err := ioutil.ReadAll(httpRsp.Body) if err != nil { return nil, err } unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} rsp := &computepb.Operation{} if err := unm.Unmarshal(buf, rsp); err != nil { return nil, err } op := &Operation{proto: rsp} return op, err } // AddResourcePolicies adds existing resource policies to an instance. You can only add one policy right now which will be applied to this instance for scheduling live migrations.
func (c *instancesRESTClient) AddResourcePolicies(ctx context.Context, req *computepb.AddResourcePoliciesInstanceRequest, opts ...gax.CallOption) (*Operation, error) { m := protojson.MarshalOptions{AllowPartial: true} body := req.GetInstancesAddResourcePoliciesRequestResource() jsonReq, err := m.Marshal(body) if err != nil { return nil, err } baseUrl, _ := url.Parse(c.endpoint) baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/addResourcePolicies", req.GetProject(), req.GetZone(), req.GetInstance()) params := url.Values{} if req != nil && req.RequestId != nil { params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId())) } baseUrl.RawQuery = params.Encode() httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) if err != nil { return nil, err } httpReq = httpReq.WithContext(ctx) // Set the headers for k, v := range c.xGoogMetadata { httpReq.Header[k] = v } httpReq.Header["Content-Type"] = []string{"application/json"} httpRsp, err := c.httpClient.Do(httpReq) if err != nil { return nil, err } defer httpRsp.Body.Close() if httpRsp.StatusCode != http.StatusOK { return nil, fmt.Errorf(httpRsp.Status) } buf, err := ioutil.ReadAll(httpRsp.Body) if err != nil { return nil, err } unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} rsp := &computepb.Operation{} if err := unm.Unmarshal(buf, rsp); err != nil { return nil, err } op := &Operation{proto: rsp} return op, err } // AggregatedList retrieves aggregated list of all of the instances in your project across all regions and zones. func (c *instancesRESTClient) AggregatedList(ctx context.Context, req *computepb.AggregatedListInstancesRequest, opts ...gax.CallOption) *InstancesScopedListPairIterator { it := &InstancesScopedListPairIterator{} req = proto.Clone(req).(*computepb.AggregatedListInstancesRequest) unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} it.InternalFetch = func(pageSize int, pageToken string) ([]InstancesScopedListPair, string, error) { resp := &computepb.InstanceAggregatedList{} if pageToken != "" { req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { req.MaxResults = proto.Uint32(math.MaxInt32) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } baseUrl, _ := url.Parse(c.endpoint) baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/aggregated/instances", req.GetProject()) params := url.Values{} if req != nil && req.Filter != nil { params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) } if req != nil && req.IncludeAllScopes != nil { params.Add("includeAllScopes", fmt.Sprintf("%v", req.GetIncludeAllScopes())) } if req != nil && req.MaxResults != nil { params.Add("maxResults", fmt.Sprintf("%v", req.GetMaxResults())) } if req != nil && req.OrderBy != nil { params.Add("orderBy", fmt.Sprintf("%v", req.GetOrderBy())) } if req != nil && req.PageToken != nil { params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) } if req != nil && req.ReturnPartialSuccess != nil { params.Add("returnPartialSuccess", fmt.Sprintf("%v", req.GetReturnPartialSuccess())) } baseUrl.RawQuery = params.Encode() httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) if err != nil { return nil, "", err } // Set the headers for k, v := range c.xGoogMetadata { httpReq.Header[k] = v } httpReq.Header["Content-Type"] = []string{"application/json"} httpRsp, err := c.httpClient.Do(httpReq) if err != nil { return nil, "", err } defer httpRsp.Body.Close() if httpRsp.StatusCode != http.StatusOK { return nil, 
"", fmt.Errorf(httpRsp.Status) } buf, err := ioutil.ReadAll(httpRsp.Body) if err != nil { return nil, "", err } unm.Unmarshal(buf, resp) it.Response = resp elems := make([]InstancesScopedListPair, 0, len(resp.GetItems())) for k, v := range resp.GetItems() { elems = append(elems, InstancesScopedListPair{k, v}) } sort.Slice(elems, func(i, j int) bool { return elems[i].Key < elems[j].Key }) return elems, resp.GetNextPageToken(), nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) it.pageInfo.MaxSize = int(req.GetMaxResults()) it.pageInfo.Token = req.GetPageToken() return it } // AttachDisk attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance. func (c *instancesRESTClient) AttachDisk(ctx context.Context, req *computepb.AttachDiskInstanceRequest, opts ...gax.CallOption) (*Operation, error) { m := protojson.MarshalOptions{AllowPartial: true} body := req.GetAttachedDiskResource() jsonReq, err := m.Marshal(body) if err != nil { return nil, err } baseUrl, _ := url.Parse(c.endpoint) baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/attachDisk", req.GetProject(), req.GetZone(), req.GetInstance()) params := url.Values{} if req != nil && req.ForceAttach != nil { params.Add("forceAttach", fmt.Sprintf("%v", req.GetForceAttach())) } if req != nil && req.RequestId != nil { params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId())) } baseUrl.RawQuery = params.Encode() httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) if err != nil { return nil, err } httpReq = httpReq.WithContext(ctx) // Set the headers for k, v := range c.xGoogMetadata { httpReq.Header[k] = v } httpReq.Header["Content-Type"] = []string{"application/json"} httpRsp, err := c.httpClient.Do(httpReq) if err != nil { return nil, err } defer httpRsp.Body.Close() if httpRsp.StatusCode != http.StatusOK { return nil, fmt.Errorf(httpRsp.Status) } buf, err := ioutil.ReadAll(httpRsp.Body) if err != nil { return nil, err } unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} rsp := &computepb.Operation{} if err := unm.Unmarshal(buf, rsp); err != nil { return nil, err } op := &Operation{proto: rsp} return op, err } // BulkInsert creates multiple instances. Count specifies the number of instances to create. 
// BulkInsert creates multiple instances. Count specifies the number of instances to create.
func (c *instancesRESTClient) BulkInsert(ctx context.Context, req *computepb.BulkInsertInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetBulkInsertInstanceResourceResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/bulkInsert", req.GetProject(), req.GetZone())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}

// Delete deletes the specified Instance resource. For more information, see Deleting an instance.
func (c *instancesRESTClient) Delete(ctx context.Context, req *computepb.DeleteInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}
// DeleteAccessConfig deletes an access config from an instance’s network interface.
func (c *instancesRESTClient) DeleteAccessConfig(ctx context.Context, req *computepb.DeleteAccessConfigInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/deleteAccessConfig", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req.GetAccessConfig() != "" {
        params.Add("accessConfig", fmt.Sprintf("%v", req.GetAccessConfig()))
    }
    if req.GetNetworkInterface() != "" {
        params.Add("networkInterface", fmt.Sprintf("%v", req.GetNetworkInterface()))
    }
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), nil)
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}

// DetachDisk detaches a disk from an instance.
func (c *instancesRESTClient) DetachDisk(ctx context.Context, req *computepb.DetachDiskInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/detachDisk", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req.GetDeviceName() != "" {
        params.Add("deviceName", fmt.Sprintf("%v", req.GetDeviceName()))
    }
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), nil)
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}
// Get returns the specified Instance resource. Gets a list of available instances by making a list() request.
func (c *instancesRESTClient) Get(ctx context.Context, req *computepb.GetInstanceRequest, opts ...gax.CallOption) (*computepb.Instance, error) {
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v", req.GetProject(), req.GetZone(), req.GetInstance())
    httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Instance{}
    return rsp, unm.Unmarshal(buf, rsp)
}

// GetEffectiveFirewalls returns effective firewalls applied to an interface of the instance.
func (c *instancesRESTClient) GetEffectiveFirewalls(ctx context.Context, req *computepb.GetEffectiveFirewallsInstanceRequest, opts ...gax.CallOption) (*computepb.InstancesGetEffectiveFirewallsResponse, error) {
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/getEffectiveFirewalls", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req.GetNetworkInterface() != "" {
        params.Add("networkInterface", fmt.Sprintf("%v", req.GetNetworkInterface()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.InstancesGetEffectiveFirewallsResponse{}
    return rsp, unm.Unmarshal(buf, rsp)
}
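// Illustrative usage sketch (hypothetical identifiers): the required path
// parameters of GetInstanceRequest are plain strings, so a minimal Get call
// looks like this.
//
//	inst, err := c.Get(ctx, &computepb.GetInstanceRequest{
//		Project:  "my-project",
//		Zone:     "us-central1-a",
//		Instance: "my-instance",
//	})
//	if err == nil {
//		_ = inst.GetName()
//	}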
// GetGuestAttributes returns the specified guest attributes entry.
func (c *instancesRESTClient) GetGuestAttributes(ctx context.Context, req *computepb.GetGuestAttributesInstanceRequest, opts ...gax.CallOption) (*computepb.GuestAttributes, error) {
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/getGuestAttributes", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.QueryPath != nil {
        params.Add("queryPath", fmt.Sprintf("%v", req.GetQueryPath()))
    }
    if req != nil && req.VariableKey != nil {
        params.Add("variableKey", fmt.Sprintf("%v", req.GetVariableKey()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.GuestAttributes{}
    return rsp, unm.Unmarshal(buf, rsp)
}

// GetIamPolicy gets the access control policy for a resource. May be empty if no such policy or resource exists.
func (c *instancesRESTClient) GetIamPolicy(ctx context.Context, req *computepb.GetIamPolicyInstanceRequest, opts ...gax.CallOption) (*computepb.Policy, error) {
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/getIamPolicy", req.GetProject(), req.GetZone(), req.GetResource())
    params := url.Values{}
    if req != nil && req.OptionsRequestedPolicyVersion != nil {
        params.Add("optionsRequestedPolicyVersion", fmt.Sprintf("%v", req.GetOptionsRequestedPolicyVersion()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Policy{}
    return rsp, unm.Unmarshal(buf, rsp)
}
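// Illustrative usage sketch (hypothetical identifiers): optional query
// parameters such as optionsRequestedPolicyVersion are proto3 optional
// fields, so they are set through pointer helpers and only serialized when
// non-nil, mirroring the nil checks above.
//
//	policy, err := c.GetIamPolicy(ctx, &computepb.GetIamPolicyInstanceRequest{
//		Project:                       "my-project",
//		Zone:                          "us-central1-a",
//		Resource:                      "my-instance",
//		OptionsRequestedPolicyVersion: proto.Int32(3),
//	})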
// GetScreenshot returns the screenshot from the specified instance.
func (c *instancesRESTClient) GetScreenshot(ctx context.Context, req *computepb.GetScreenshotInstanceRequest, opts ...gax.CallOption) (*computepb.Screenshot, error) {
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/screenshot", req.GetProject(), req.GetZone(), req.GetInstance())
    httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Screenshot{}
    return rsp, unm.Unmarshal(buf, rsp)
}

// GetSerialPortOutput returns the last 1 MB of serial port output from the specified instance.
func (c *instancesRESTClient) GetSerialPortOutput(ctx context.Context, req *computepb.GetSerialPortOutputInstanceRequest, opts ...gax.CallOption) (*computepb.SerialPortOutput, error) {
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/serialPort", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.Port != nil {
        params.Add("port", fmt.Sprintf("%v", req.GetPort()))
    }
    if req != nil && req.Start != nil {
        params.Add("start", fmt.Sprintf("%v", req.GetStart()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.SerialPortOutput{}
    return rsp, unm.Unmarshal(buf, rsp)
}

// GetShieldedInstanceIdentity returns the Shielded Instance Identity of an instance.
func (c *instancesRESTClient) GetShieldedInstanceIdentity(ctx context.Context, req *computepb.GetShieldedInstanceIdentityInstanceRequest, opts ...gax.CallOption) (*computepb.ShieldedInstanceIdentity, error) {
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/getShieldedInstanceIdentity", req.GetProject(), req.GetZone(), req.GetInstance())
    httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.ShieldedInstanceIdentity{}
    return rsp, unm.Unmarshal(buf, rsp)
}

// Insert creates an instance resource in the specified project using the data included in the request.
func (c *instancesRESTClient) Insert(ctx context.Context, req *computepb.InsertInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetInstanceResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances", req.GetProject(), req.GetZone())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    if req != nil && req.SourceInstanceTemplate != nil {
        params.Add("sourceInstanceTemplate", fmt.Sprintf("%v", req.GetSourceInstanceTemplate()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}

// List retrieves the list of instances contained within the specified zone.
func (c *instancesRESTClient) List(ctx context.Context, req *computepb.ListInstancesRequest, opts ...gax.CallOption) *InstanceIterator {
    it := &InstanceIterator{}
    req = proto.Clone(req).(*computepb.ListInstancesRequest)
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    it.InternalFetch = func(pageSize int, pageToken string) ([]*computepb.Instance, string, error) {
        resp := &computepb.InstanceList{}
        if pageToken != "" {
            req.PageToken = proto.String(pageToken)
        }
        if pageSize > math.MaxInt32 {
            req.MaxResults = proto.Uint32(math.MaxInt32)
        } else if pageSize != 0 {
            req.MaxResults = proto.Uint32(uint32(pageSize))
        }
        baseUrl, _ := url.Parse(c.endpoint)
        baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances", req.GetProject(), req.GetZone())
        params := url.Values{}
        if req != nil && req.Filter != nil {
            params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
        }
        if req != nil && req.MaxResults != nil {
            params.Add("maxResults", fmt.Sprintf("%v", req.GetMaxResults()))
        }
        if req != nil && req.OrderBy != nil {
            params.Add("orderBy", fmt.Sprintf("%v", req.GetOrderBy()))
        }
        if req != nil && req.PageToken != nil {
            params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
        }
        if req != nil && req.ReturnPartialSuccess != nil {
            params.Add("returnPartialSuccess", fmt.Sprintf("%v", req.GetReturnPartialSuccess()))
        }
        baseUrl.RawQuery = params.Encode()
        httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
        if err != nil {
            return nil, "", err
        }
        httpReq = httpReq.WithContext(ctx) // propagate cancellation to page fetches
        // Set the headers
        for k, v := range c.xGoogMetadata {
            httpReq.Header[k] = v
        }
        httpReq.Header["Content-Type"] = []string{"application/json"}
        httpRsp, err := c.httpClient.Do(httpReq)
        if err != nil {
            return nil, "", err
        }
        defer httpRsp.Body.Close()
        if httpRsp.StatusCode != http.StatusOK {
            return nil, "", fmt.Errorf("%s", httpRsp.Status)
        }
        buf, err := ioutil.ReadAll(httpRsp.Body)
        if err != nil {
            return nil, "", err
        }
        if err := unm.Unmarshal(buf, resp); err != nil {
            return nil, "", err
        }
        it.Response = resp
        return resp.GetItems(), resp.GetNextPageToken(), nil
    }
    fetch := func(pageSize int, pageToken string) (string, error) {
        items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
        if err != nil {
            return "", err
        }
        it.items = append(it.items, items...)
        return nextPageToken, nil
    }
    it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
    it.pageInfo.MaxSize = int(req.GetMaxResults())
    it.pageInfo.Token = req.GetPageToken()
    return it
}

// ListReferrers retrieves a list of resources that refer to the VM instance specified in the request. For example, if the VM instance is part of a managed or unmanaged instance group, the referrers list includes the instance group. For more information, read Viewing referrers to VM instances.
func (c *instancesRESTClient) ListReferrers(ctx context.Context, req *computepb.ListReferrersInstancesRequest, opts ...gax.CallOption) *ReferenceIterator {
    it := &ReferenceIterator{}
    req = proto.Clone(req).(*computepb.ListReferrersInstancesRequest)
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    it.InternalFetch = func(pageSize int, pageToken string) ([]*computepb.Reference, string, error) {
        resp := &computepb.InstanceListReferrers{}
        if pageToken != "" {
            req.PageToken = proto.String(pageToken)
        }
        if pageSize > math.MaxInt32 {
            req.MaxResults = proto.Uint32(math.MaxInt32)
        } else if pageSize != 0 {
            req.MaxResults = proto.Uint32(uint32(pageSize))
        }
        baseUrl, _ := url.Parse(c.endpoint)
        baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/referrers", req.GetProject(), req.GetZone(), req.GetInstance())
        params := url.Values{}
        if req != nil && req.Filter != nil {
            params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
        }
        if req != nil && req.MaxResults != nil {
            params.Add("maxResults", fmt.Sprintf("%v", req.GetMaxResults()))
        }
        if req != nil && req.OrderBy != nil {
            params.Add("orderBy", fmt.Sprintf("%v", req.GetOrderBy()))
        }
        if req != nil && req.PageToken != nil {
            params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
        }
        if req != nil && req.ReturnPartialSuccess != nil {
            params.Add("returnPartialSuccess", fmt.Sprintf("%v", req.GetReturnPartialSuccess()))
        }
        baseUrl.RawQuery = params.Encode()
        httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
        if err != nil {
            return nil, "", err
        }
        httpReq = httpReq.WithContext(ctx) // propagate cancellation to page fetches
        // Set the headers
        for k, v := range c.xGoogMetadata {
            httpReq.Header[k] = v
        }
        httpReq.Header["Content-Type"] = []string{"application/json"}
        httpRsp, err := c.httpClient.Do(httpReq)
        if err != nil {
            return nil, "", err
        }
        defer httpRsp.Body.Close()
        if httpRsp.StatusCode != http.StatusOK {
            return nil, "", fmt.Errorf("%s", httpRsp.Status)
        }
        buf, err := ioutil.ReadAll(httpRsp.Body)
        if err != nil {
            return nil, "", err
        }
        if err := unm.Unmarshal(buf, resp); err != nil {
            return nil, "", err
        }
        it.Response = resp
        return resp.GetItems(), resp.GetNextPageToken(), nil
    }
    fetch := func(pageSize int, pageToken string) (string, error) {
        items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
        if err != nil {
            return "", err
        }
        it.items = append(it.items, items...)
        return nextPageToken, nil
    }
    it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
    it.pageInfo.MaxSize = int(req.GetMaxResults())
    it.pageInfo.Token = req.GetPageToken()
    return it
}
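// Illustrative usage sketch (hypothetical identifiers): List's optional
// filtering and paging knobs are pointer fields, mirroring the query-parameter
// nil checks above; unset fields are simply omitted from the request URL.
//
//	it := c.List(ctx, &computepb.ListInstancesRequest{
//		Project:    "my-project",
//		Zone:       "us-central1-a",
//		Filter:     proto.String(`status = "RUNNING"`),
//		MaxResults: proto.Uint32(50),
//	})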
// RemoveResourcePolicies removes resource policies from an instance.
func (c *instancesRESTClient) RemoveResourcePolicies(ctx context.Context, req *computepb.RemoveResourcePoliciesInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetInstancesRemoveResourcePoliciesRequestResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/removeResourcePolicies", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}

// Reset performs a reset on the instance. This is a hard reset; the VM does not do a graceful shutdown. For more information, see Resetting an instance.
func (c *instancesRESTClient) Reset(ctx context.Context, req *computepb.ResetInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/reset", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), nil)
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}
// SetDeletionProtection sets deletion protection on the instance.
func (c *instancesRESTClient) SetDeletionProtection(ctx context.Context, req *computepb.SetDeletionProtectionInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/setDeletionProtection", req.GetProject(), req.GetZone(), req.GetResource())
    params := url.Values{}
    if req != nil && req.DeletionProtection != nil {
        params.Add("deletionProtection", fmt.Sprintf("%v", req.GetDeletionProtection()))
    }
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), nil)
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}

// SetDiskAutoDelete sets the auto-delete flag for a disk attached to an instance.
func (c *instancesRESTClient) SetDiskAutoDelete(ctx context.Context, req *computepb.SetDiskAutoDeleteInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/setDiskAutoDelete", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req.GetAutoDelete() {
        params.Add("autoDelete", fmt.Sprintf("%v", req.GetAutoDelete()))
    }
    if req.GetDeviceName() != "" {
        params.Add("deviceName", fmt.Sprintf("%v", req.GetDeviceName()))
    }
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), nil)
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}
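// Note on SetDiskAutoDelete above: the autoDelete query parameter is only
// added when req.GetAutoDelete() is true, so a false value relies on the
// server-side default rather than being sent explicitly. Illustrative call
// (hypothetical identifiers):
//
//	op, err := c.SetDiskAutoDelete(ctx, &computepb.SetDiskAutoDeleteInstanceRequest{
//		Project:    "my-project",
//		Zone:       "us-central1-a",
//		Instance:   "my-instance",
//		AutoDelete: true,
//		DeviceName: "persistent-disk-0",
//	})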
// SetIamPolicy sets the access control policy on the specified resource. Replaces any existing policy.
func (c *instancesRESTClient) SetIamPolicy(ctx context.Context, req *computepb.SetIamPolicyInstanceRequest, opts ...gax.CallOption) (*computepb.Policy, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetZoneSetPolicyRequestResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/setIamPolicy", req.GetProject(), req.GetZone(), req.GetResource())
    httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Policy{}
    return rsp, unm.Unmarshal(buf, rsp)
}

// SetLabels sets labels on an instance. To learn more about labels, read the Labeling Resources documentation.
func (c *instancesRESTClient) SetLabels(ctx context.Context, req *computepb.SetLabelsInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetInstancesSetLabelsRequestResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/setLabels", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}
// SetMachineResources changes the number and/or type of accelerator for a stopped instance to the values specified in the request.
func (c *instancesRESTClient) SetMachineResources(ctx context.Context, req *computepb.SetMachineResourcesInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetInstancesSetMachineResourcesRequestResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/setMachineResources", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}

// SetMachineType changes the machine type for a stopped instance to the machine type specified in the request.
func (c *instancesRESTClient) SetMachineType(ctx context.Context, req *computepb.SetMachineTypeInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetInstancesSetMachineTypeRequestResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/setMachineType", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}
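// Illustrative usage sketch (hypothetical identifiers): body-carrying methods
// such as SetMachineType wrap their payload in a *RequestResource field, and
// requestId can be set for idempotent retries.
//
//	op, err := c.SetMachineType(ctx, &computepb.SetMachineTypeInstanceRequest{
//		Project:  "my-project",
//		Zone:     "us-central1-a",
//		Instance: "my-instance",
//		InstancesSetMachineTypeRequestResource: &computepb.InstancesSetMachineTypeRequest{
//			MachineType: proto.String("zones/us-central1-a/machineTypes/e2-standard-4"),
//		},
//		RequestId: proto.String("unique-request-id"),
//	})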
// SetMetadata sets metadata for the specified instance to the data included in the request.
func (c *instancesRESTClient) SetMetadata(ctx context.Context, req *computepb.SetMetadataInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetMetadataResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/setMetadata", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}

// SetMinCpuPlatform changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform.
func (c *instancesRESTClient) SetMinCpuPlatform(ctx context.Context, req *computepb.SetMinCpuPlatformInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetInstancesSetMinCpuPlatformRequestResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/setMinCpuPlatform", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}
// SetScheduling sets an instance’s scheduling options. You can only call this method on a stopped instance, that is, a VM instance that is in a TERMINATED state. See Instance Life Cycle for more information on the possible instance states.
func (c *instancesRESTClient) SetScheduling(ctx context.Context, req *computepb.SetSchedulingInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetSchedulingResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/setScheduling", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}

// SetServiceAccount sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.
func (c *instancesRESTClient) SetServiceAccount(ctx context.Context, req *computepb.SetServiceAccountInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetInstancesSetServiceAccountRequestResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/setServiceAccount", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}
// SetShieldedInstanceIntegrityPolicy sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.
func (c *instancesRESTClient) SetShieldedInstanceIntegrityPolicy(ctx context.Context, req *computepb.SetShieldedInstanceIntegrityPolicyInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetShieldedInstanceIntegrityPolicyResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/setShieldedInstanceIntegrityPolicy", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}

// SetTags sets network tags for the specified instance to the data included in the request.
func (c *instancesRESTClient) SetTags(ctx context.Context, req *computepb.SetTagsInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetTagsResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/setTags", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}
// SimulateMaintenanceEvent simulates a maintenance event on the instance.
func (c *instancesRESTClient) SimulateMaintenanceEvent(ctx context.Context, req *computepb.SimulateMaintenanceEventInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/simulateMaintenanceEvent", req.GetProject(), req.GetZone(), req.GetInstance())
    httpReq, err := http.NewRequest("POST", baseUrl.String(), nil)
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}

// Start starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.
func (c *instancesRESTClient) Start(ctx context.Context, req *computepb.StartInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/start", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), nil)
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}
// StartWithEncryptionKey starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.
func (c *instancesRESTClient) StartWithEncryptionKey(ctx context.Context, req *computepb.StartWithEncryptionKeyInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetInstancesStartWithEncryptionKeyRequestResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/startWithEncryptionKey", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}

// Stop stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.
func (c *instancesRESTClient) Stop(ctx context.Context, req *computepb.StopInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/stop", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), nil)
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}
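// Note on the returned *Operation: as constructed above, it simply wraps the
// raw *computepb.Operation from the response body in its proto field; this
// file does not itself poll the operation to completion. Illustrative call
// (hypothetical identifiers):
//
//	op, err := c.Stop(ctx, &computepb.StopInstanceRequest{
//		Project:  "my-project",
//		Zone:     "us-central1-a",
//		Instance: "my-instance",
//	})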
// TestIamPermissions returns permissions that a caller has on the specified resource.
func (c *instancesRESTClient) TestIamPermissions(ctx context.Context, req *computepb.TestIamPermissionsInstanceRequest, opts ...gax.CallOption) (*computepb.TestPermissionsResponse, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetTestPermissionsRequestResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/testIamPermissions", req.GetProject(), req.GetZone(), req.GetResource())
    httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.TestPermissionsResponse{}
    return rsp, unm.Unmarshal(buf, rsp)
}

// Update updates an instance only if the necessary resources are available. This method can update only a specific set of instance properties. See Updating a running instance for a list of updatable instance properties.
func (c *instancesRESTClient) Update(ctx context.Context, req *computepb.UpdateInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetInstanceResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.MinimalAction != nil {
        params.Add("minimalAction", fmt.Sprintf("%v", req.GetMinimalAction()))
    }
    if req != nil && req.MostDisruptiveAllowedAction != nil {
        params.Add("mostDisruptiveAllowedAction", fmt.Sprintf("%v", req.GetMostDisruptiveAllowedAction()))
    }
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("PUT", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}
// UpdateAccessConfig updates the specified access config from an instance’s network interface with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.
func (c *instancesRESTClient) UpdateAccessConfig(ctx context.Context, req *computepb.UpdateAccessConfigInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetAccessConfigResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/updateAccessConfig", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req.GetNetworkInterface() != "" {
        params.Add("networkInterface", fmt.Sprintf("%v", req.GetNetworkInterface()))
    }
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}

// UpdateDisplayDevice updates the Display config for a VM instance. You can only use this method on a stopped VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.
func (c *instancesRESTClient) UpdateDisplayDevice(ctx context.Context, req *computepb.UpdateDisplayDeviceInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetDisplayDeviceResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/updateDisplayDevice", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}

// UpdateNetworkInterface updates an instance’s network interface. This method can only update an interface’s alias IP range and attached network. See Modifying alias IP ranges for an existing instance for instructions on changing alias IP ranges. See Migrating a VM between networks for instructions on migrating an interface. This method follows PATCH semantics.
func (c *instancesRESTClient) UpdateNetworkInterface(ctx context.Context, req *computepb.UpdateNetworkInterfaceInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetNetworkInterfaceResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/updateNetworkInterface", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req.GetNetworkInterface() != "" {
        params.Add("networkInterface", fmt.Sprintf("%v", req.GetNetworkInterface()))
    }
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}

// UpdateShieldedInstanceConfig updates the Shielded Instance config for an instance. You can only use this method on a stopped instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.
func (c *instancesRESTClient) UpdateShieldedInstanceConfig(ctx context.Context, req *computepb.UpdateShieldedInstanceConfigInstanceRequest, opts ...gax.CallOption) (*Operation, error) {
    m := protojson.MarshalOptions{AllowPartial: true}
    body := req.GetShieldedInstanceConfigResource()
    jsonReq, err := m.Marshal(body)
    if err != nil {
        return nil, err
    }
    baseUrl, _ := url.Parse(c.endpoint)
    baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/zones/%v/instances/%v/updateShieldedInstanceConfig", req.GetProject(), req.GetZone(), req.GetInstance())
    params := url.Values{}
    if req != nil && req.RequestId != nil {
        params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
    }
    baseUrl.RawQuery = params.Encode()
    httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
    if err != nil {
        return nil, err
    }
    httpReq = httpReq.WithContext(ctx)
    // Set the headers
    for k, v := range c.xGoogMetadata {
        httpReq.Header[k] = v
    }
    httpReq.Header["Content-Type"] = []string{"application/json"}
    httpRsp, err := c.httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }
    defer httpRsp.Body.Close()
    if httpRsp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("%s", httpRsp.Status)
    }
    buf, err := ioutil.ReadAll(httpRsp.Body)
    if err != nil {
        return nil, err
    }
    unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
    rsp := &computepb.Operation{}
    if err := unm.Unmarshal(buf, rsp); err != nil {
        return nil, err
    }
    op := &Operation{proto: rsp}
    return op, nil
}

// InstanceIterator manages a stream of *computepb.Instance.
type InstanceIterator struct {
    items    []*computepb.Instance
    pageInfo *iterator.PageInfo
    nextFunc func() error

    // Response is the raw response for the current page.
    // It must be cast to the RPC response type.
    // Calling Next() or InternalFetch() updates this value.
    Response interface{}

    // InternalFetch is for use by the Google Cloud Libraries only.
    // It is not part of the stable interface of this package.
    //
    // InternalFetch returns results from a single call to the underlying RPC.
    // The number of results is no greater than pageSize.
    // If there are no more results, nextPageToken is empty and err is nil.
    InternalFetch func(pageSize int, pageToken string) (results []*computepb.Instance, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *InstanceIterator) PageInfo() *iterator.PageInfo {
    return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *InstanceIterator) Next() (*computepb.Instance, error) {
    var item *computepb.Instance
    if err := it.nextFunc(); err != nil {
        return item, err
    }
    item = it.items[0]
    it.items = it.items[1:]
    return item, nil
}

func (it *InstanceIterator) bufLen() int {
    return len(it.items)
}

func (it *InstanceIterator) takeBuf() interface{} {
    b := it.items
    it.items = nil
    return b
}

// InstancesScopedListPair is a holder type for string/*computepb.InstancesScopedList map entries.
type InstancesScopedListPair struct {
    Key   string
    Value *computepb.InstancesScopedList
}

// InstancesScopedListPairIterator manages a stream of InstancesScopedListPair.
type InstancesScopedListPairIterator struct {
    items    []InstancesScopedListPair
    pageInfo *iterator.PageInfo
    nextFunc func() error

    // Response is the raw response for the current page.
    // It must be cast to the RPC response type.
    // Calling Next() or InternalFetch() updates this value.
    Response interface{}

    // InternalFetch is for use by the Google Cloud Libraries only.
    // It is not part of the stable interface of this package.
    //
    // InternalFetch returns results from a single call to the underlying RPC.
    // The number of results is no greater than pageSize.
    // If there are no more results, nextPageToken is empty and err is nil.
    InternalFetch func(pageSize int, pageToken string) (results []InstancesScopedListPair, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *InstancesScopedListPairIterator) PageInfo() *iterator.PageInfo {
    return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *InstancesScopedListPairIterator) Next() (InstancesScopedListPair, error) {
    var item InstancesScopedListPair
    if err := it.nextFunc(); err != nil {
        return item, err
    }
    item = it.items[0]
    it.items = it.items[1:]
    return item, nil
}

func (it *InstancesScopedListPairIterator) bufLen() int {
    return len(it.items)
}

func (it *InstancesScopedListPairIterator) takeBuf() interface{} {
    b := it.items
    it.items = nil
    return b
}

// ReferenceIterator manages a stream of *computepb.Reference.
type ReferenceIterator struct {
    items    []*computepb.Reference
    pageInfo *iterator.PageInfo
    nextFunc func() error

    // Response is the raw response for the current page.
    // It must be cast to the RPC response type.
    // Calling Next() or InternalFetch() updates this value.
    Response interface{}

    // InternalFetch is for use by the Google Cloud Libraries only.
    // It is not part of the stable interface of this package.
    //
    // InternalFetch returns results from a single call to the underlying RPC.
    // The number of results is no greater than pageSize.
    // If there are no more results, nextPageToken is empty and err is nil.
    InternalFetch func(pageSize int, pageToken string) (results []*computepb.Reference, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *ReferenceIterator) PageInfo() *iterator.PageInfo {
    return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *ReferenceIterator) Next() (*computepb.Reference, error) {
    var item *computepb.Reference
    if err := it.nextFunc(); err != nil {
        return item, err
    }
    item = it.items[0]
    it.items = it.items[1:]
    return item, nil
}

func (it *ReferenceIterator) bufLen() int {
    return len(it.items)
}

func (it *ReferenceIterator) takeBuf() interface{} {
    b := it.items
    it.items = nil
    return b
}
ncesRESTClientOptions() []option.
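// Editorial note: an illustrative usage sketch, not part of the original generated file. It shows the
// standard consumption pattern for the iterators defined above, using iterator.Done from
// google.golang.org/api/iterator as the end-of-stream sentinel; the function name is hypothetical.
func printAllInstances(it *InstanceIterator) error {
	for {
		inst, err := it.Next()
		if err == iterator.Done {
			// Every page has been consumed.
			return nil
		}
		if err != nil {
			// Propagate transport or decoding errors from the underlying call.
			return err
		}
		fmt.Println(inst.GetName())
	}
}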
duplicates.go
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package strategies import ( "strings" "github.com/golang/glog" "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" "github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options" "github.com/kubernetes-incubator/descheduler/pkg/api" "github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions" podutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/pod" ) //type creator string type DuplicatePodsMap map[string][]*v1.Pod // RemoveDuplicatePods removes duplicate pods running on a node. This strategy evicts all duplicate pods on a node. // A pod is considered a duplicate of another if both share the same creator, kind, and namespace. // As of now, this strategy won't evict daemonsets, mirror pods, critical pods, or pods with local storage. func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount) { if !strategy.Enabled { return } deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodepodCount, ds.MaxNoOfPodsToEvictPerNode) } // deleteDuplicatePods evicts duplicate pods from nodes and returns the count of evicted pods.
for _, node := range nodes { glog.V(1).Infof("Processing node: %#v", node.Name) dpm := ListDuplicatePodsOnANode(client, node) for creator, pods := range dpm { if len(pods) > 1 { glog.V(1).Infof("%#v", creator) // start at i = 1 so that the first pod in each duplicate set is kept for i := 1; i < len(pods); i++ { if nodepodCount[node]+1 > maxPodsToEvict { break } success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun) if !success { glog.Infof("Error when evicting pod: %#v (%#v)", pods[i].Name, err) } else { nodepodCount[node]++ glog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err) } } } } podsEvicted += nodepodCount[node] } return podsEvicted } // ListDuplicatePodsOnANode lists duplicate pods on a given node. func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node) DuplicatePodsMap { pods, err := podutil.ListEvictablePodsOnNode(client, node) if err != nil { return nil } return FindDuplicatePods(pods) } // FindDuplicatePods takes a list of pods and returns a DuplicatePodsMap. func FindDuplicatePods(pods []*v1.Pod) DuplicatePodsMap { dpm := DuplicatePodsMap{} for _, pod := range pods { // Ignoring any error here as ListDuplicatePodsOnANode already calls ListEvictablePodsOnNode, which checks for errors. ownerRefList := podutil.OwnerRef(pod) for _, ownerRef := range ownerRefList { // ownerRef doesn't need the namespace since the owner is assumed to be in the same namespace as the pod. s := strings.Join([]string{ownerRef.Kind, ownerRef.Name}, "/") dpm[s] = append(dpm[s], pod) } } return dpm }
func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount nodePodEvictedCount, maxPodsToEvict int) int { podsEvicted := 0
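// Editorial note: an illustrative sketch, not part of the original file. It demonstrates the
// "Kind/Name" key scheme used by FindDuplicatePods: pods sharing one owner collapse onto one key,
// and everything after index 0 in that key's slice is what deleteDuplicatePods treats as evictable.
// The ReplicaSet name "web-rs" is hypothetical.
func exampleDuplicateKey(pods []*v1.Pod) []*v1.Pod {
	dpm := DuplicatePodsMap{}
	key := strings.Join([]string{"ReplicaSet", "web-rs"}, "/") // same scheme as FindDuplicatePods
	for _, pod := range pods {
		dpm[key] = append(dpm[key], pod) // pretend every pod here is owned by web-rs
	}
	if len(dpm[key]) > 1 {
		return dpm[key][1:] // the duplicates that would be evicted; the first pod is kept
	}
	return nil
}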
index.js
import request from '@/utils/request' export function page(query) { return request({ url: '/api/ai/xzq/list', method: 'get', params: query }) } export function addObj(obj) { return request({ url: '/api/ai/xzq', method: 'post', data: obj }) } export function getObj(id) { return request({ url: '/api/ai/xzq/' + id, method: 'get' }) } export function delObj(id) { return request({ url: '/api/ai/xzq/' + id, method: 'delete' }) } export function putObj(obj) { return request({ url: '/api/ai/xzq/' + obj.id, method: 'put', data: obj }) } export function tree() {
return request({ url: '/api/ai/xzq/tree', method: 'get' }) } export function updateFlag(params) { return request({ url: '/api/ai/xzq/updateFlag', method: 'get', params }) }
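// Editorial note: an illustrative usage sketch, not part of the original module. The import path and
// the query/payload field names are hypothetical; each helper simply returns the promise produced by
// request().
import { page, putObj } from '@/api/ai/xzq'; // hypothetical path to this module

page({ current: 1, size: 10 }).then((res) => {
  console.log(res); // paged list from GET /api/ai/xzq/list
});

putObj({ id: 42, name: 'renamed' }); // PUT /api/ai/xzq/42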
action.rs
use lazy_static::lazy_static; use regex::Regex; use std::str::FromStr; lazy_static! { pub static ref ACTION_USES_RE: Regex = Regex::new(r"(?i).*uses[\W]?: [\W]?(?P<repo>[a-z0-9-/]+)@(?P<version>[a-z0-9-/.]+)[\W]?",) .unwrap(); } #[derive(thiserror::Error, Debug)] pub enum Error { #[error("could not build action from line")] BuildLine, } #[derive(Debug, PartialEq, Default)] pub struct Action { /// The repository in {owner}/{name} format pub repository: String, /// The version used; can be a partial tag, complete tag, commit sha or branch pub version: String, /// The commit sha we resolved to pub sha: Option<String>, } impl FromStr for Action { type Err = Error; fn
(s: &str) -> Result<Self, Self::Err> { let caps = ACTION_USES_RE.captures(s).ok_or(Error::BuildLine)?; let res = Action { repository: caps["repo"].to_string(), version: caps["version"].to_string(), ..Default::default() }; Ok(res) } } #[cfg(test)] mod tests { use super::*; use pretty_assertions::assert_eq; use rstest::rstest; #[rstest] #[case( " uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # v2.4.0", "actions/checkout", "ec3a7ce113134d7a93b817d10a8272cb61118579" )] #[case(" - uses: actions/checkout@v2", "actions/checkout", "v2")] #[case(" - 'uses': actions/checkout@v2", "actions/checkout", "v2")] #[case(" - uses: 'actions/checkout@v2'", "actions/checkout", "v2")] #[case(" - uses: \"actions/checkout@v2\"", "actions/checkout", "v2")] #[case("uses: actions/checkout@1", "actions/checkout", "1")] #[case("uses: actions/[email protected]", "actions/checkout", "1.0.0")] #[case("uses: actions/[email protected]", "actions/checkout", "1.0-beta42")] fn from_str(#[case] input: &str, #[case] repository: &str, #[case] version: &str) { let res = Action::from_str(input).unwrap(); assert_eq!(repository, res.repository); assert_eq!(version, res.version); } }
from_str
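// Editorial note: an illustrative sketch, not part of the original file. The test cases above already
// exercise `FromStr`; this shows the equivalent direct use of ACTION_USES_RE on a hypothetical
// workflow line.
fn example_capture() {
    let line = "  - uses: actions/checkout@v2";
    let caps = ACTION_USES_RE.captures(line).expect("line should match");
    assert_eq!(&caps["repo"], "actions/checkout");
    assert_eq!(&caps["version"], "v2");
}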
common.py
# Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import re from abc import abstractmethod, ABCMeta from collections import namedtuple import click import jwt from src.config import Config from src.model.data_storage_wrapper_type import WrapperType TransferResult = namedtuple('TransferResult', ['source_key', 'destination_key', 'destination_version', 'tags']) UploadResult = namedtuple('UploadResult', ['source_key', 'destination_key', 'destination_version', 'tags']) class StorageOperations: PATH_SEPARATOR = '/' DEFAULT_PAGE_SIZE = 100 MAX_TAGS_NUMBER = 10 MAX_KEY_LENGTH = 128 MAX_VALUE_LENGTH = 256 TAG_SHORTEN_SUFFIX = '...' TAGS_VALIDATION_PATTERN = re.compile('[^a-zA-Z0-9\s_.\-@:+/\\\]+') CP_SOURCE_TAG = 'CP_SOURCE' CP_OWNER_TAG = 'CP_OWNER' STORAGE_PATH = '%s://%s/%s' __config__ = None @classmethod def get_proxy_config(cls, target_url=None): if cls.__config__ is None: cls.__config__ = Config.instance() if cls.__config__.proxy is None: return None else: return cls.__config__.resolve_proxy(target_url=target_url) @classmethod def init_wrapper(cls, wrapper, versioning=False): delimiter = StorageOperations.PATH_SEPARATOR prefix = StorageOperations.get_prefix(wrapper.path) check_file = True if prefix.endswith(delimiter): prefix = prefix[:-1] check_file = False listing_manager = wrapper.get_list_manager(show_versions=versioning) for item in listing_manager.list_items(prefix, show_all=True): if prefix.endswith(item.name.rstrip(delimiter)) and (check_file or item.type == 'Folder'): wrapper.exists_flag = True wrapper.is_file_flag = item.type == 'File' break return wrapper @classmethod def get_prefix(cls, path, delimiter=PATH_SEPARATOR): if path: prefix = path if prefix.startswith(delimiter): prefix = prefix[1:] else: prefix = delimiter return prefix @classmethod def get_item_name(cls, path, prefix, delimiter=PATH_SEPARATOR): possible_folder_name = prefix if prefix.endswith(delimiter) else \ prefix + StorageOperations.PATH_SEPARATOR if prefix and path.startswith(prefix) and path != possible_folder_name and path != prefix: if not path == prefix: splitted = prefix.split(StorageOperations.PATH_SEPARATOR) return splitted[len(splitted) - 1] + path[len(prefix):] else: return path[len(prefix):] elif not path.endswith(StorageOperations.PATH_SEPARATOR) and path == prefix: return os.path.basename(path) elif path == possible_folder_name: return os.path.basename(path.rstrip(StorageOperations.PATH_SEPARATOR)) + StorageOperations.PATH_SEPARATOR else: return path @classmethod def normalize_path(cls, destination_wrapper, relative_path, delimiter=PATH_SEPARATOR): if destination_wrapper.path.endswith(delimiter) or not destination_wrapper.is_file(): if os.path.sep != delimiter: relative_path = relative_path.replace(os.path.sep, delimiter) skip_separator = destination_wrapper.path.endswith(delimiter) if destination_wrapper.path: if skip_separator: destination_key = destination_wrapper.path + relative_path else: destination_key = destination_wrapper.path + 
delimiter + relative_path else: destination_key = relative_path else: destination_key = destination_wrapper.path result = cls.remove_double_slashes(destination_key) if result.startswith(delimiter): return result[1:] else: return result @classmethod def remove_double_slashes(cls, path, delimiter=PATH_SEPARATOR): return re.sub(delimiter + '+', delimiter, path) @classmethod def show_progress(cls, quiet, size, lock=None): return not quiet and size is not None and size != 0 and lock is None @classmethod def get_local_file_size(cls, path): try: return os.path.getsize(path) except OSError: return None @classmethod def without_prefix(cls, string, prefix): if string.startswith(prefix): return string[len(prefix):] @classmethod def without_suffix(cls, string, suffix): if string.endswith(suffix): return string[:-len(suffix)] @classmethod def is_relative_path(cls, full_path, prefix, delimiter=PATH_SEPARATOR): relative_path = StorageOperations.without_prefix(full_path, prefix) return not relative_path or relative_path.startswith(delimiter) @classmethod def parse_tags(cls, tags): if not tags: return {} if len(tags) > cls.MAX_TAGS_NUMBER: raise ValueError( "Maximum allowed number of tags is {}. Provided {} tags.".format(cls.MAX_TAGS_NUMBER, len(tags))) tags_dict = {} for tag in tags: if "=" not in tag: raise ValueError("Tags must be specified as KEY=VALUE pair.") parts = tag.split("=", 1) key = parts[0] if len(key) > cls.MAX_KEY_LENGTH: click.echo("Maximum key value is {}. Provided key {}.".format(cls.MAX_KEY_LENGTH, key)) continue value = parts[1] value = value.replace('\\', '/') if not value or value.isspace() or bool(StorageOperations.TAGS_VALIDATION_PATTERN.search(value)): click.echo("The tag value you have provided is invalid: %s. The tag %s will be skipped." % (value, key)) continue if len(value) > cls.MAX_VALUE_LENGTH: value = value[:cls.MAX_VALUE_LENGTH - len(cls.TAG_SHORTEN_SUFFIX)] + cls.TAG_SHORTEN_SUFFIX tags_dict[key] = value return tags_dict @classmethod def get_user(cls): config = Config.instance() user_info = jwt.decode(config.get_token(), verify=False) if 'sub' in user_info: return user_info['sub'] raise RuntimeError('Cannot find user info.') @classmethod def generate_tags(cls, raw_tags, source): tags = StorageOperations.parse_tags(raw_tags) tags[StorageOperations.CP_SOURCE_TAG] = source tags[StorageOperations.CP_OWNER_TAG] = StorageOperations.get_user() return tags @classmethod def source_tags(cls, tags, source_path, storage_wrapper): bucket = storage_wrapper.bucket default_tags = {} if StorageOperations.CP_SOURCE_TAG not in tags: scheme = WrapperType.cloud_scheme(bucket.type) default_tags[StorageOperations.CP_SOURCE_TAG] = StorageOperations.STORAGE_PATH \ % (scheme, bucket.name, source_path) if StorageOperations.CP_OWNER_TAG not in tags: default_tags[StorageOperations.CP_OWNER_TAG] = StorageOperations.get_user() return default_tags @classmethod def get_items(cls, listing_manager, relative_path, delimiter=PATH_SEPARATOR): prefix = StorageOperations.get_prefix(relative_path).rstrip(delimiter) for item in listing_manager.list_items(prefix, recursive=True, show_all=True): if not StorageOperations.is_relative_path(item.name, prefix): continue if item.name == relative_path: item_relative_path = os.path.basename(item.name) else: item_relative_path = StorageOperations.get_item_name(item.name, prefix + delimiter) yield ('File', item.name, item_relative_path, item.size) @classmethod def file_is_empty(cls, size): return not size or size == 0 class AbstractTransferManager: __metaclass__ = ABCMeta 
@abstractmethod def get_destination_key(self, destination_wrapper, relative_path): pass @abstractmethod def get_source_key(self, source_wrapper, source_path): pass @abstractmethod def get_destination_size(self, destination_wrapper, destination_key): pass @abstractmethod def transfer(self, source_wrapper, destination_wrapper, path=None, relative_path=None, clean=False, quiet=False, size=None, tags=(), io_threads=None, lock=None): """ Transfers data from the source storage to the destination storage. :param source_wrapper: Source data storage resource wrapper. :type source_wrapper: DataStorageWrapper. :param destination_wrapper: Destination data storage resource wrapper. :type destination_wrapper: DataStorageWrapper. :param path: Transfer data full path. :param relative_path: Transfer data relative path. :param clean: Remove source files after the transfer. :param quiet: True if quiet mode is specified. :param size: Size of the transfer source object. :param tags: Additional tags that will be attached to the transferred object. Tags CP_SOURCE and CP_OWNER will be included by default. :param io_threads: Number of threads to be used for single file IO operations. :param lock: The lock object if multithreaded transfer is requested. :type lock: multiprocessing.Lock """ pass @staticmethod def skip_existing(source_key, source_size, destination_key, destination_size, quiet): if destination_size is not None and destination_size == source_size: if not quiet: click.echo('Skipping file %s since it exists in the destination %s' % (source_key, destination_key)) return True return False @staticmethod def create_local_folder(destination_key, lock): folder = os.path.dirname(destination_key) if lock: lock.acquire() try: if folder and not os.path.exists(folder): os.makedirs(folder) finally: if lock: lock.release() class AbstractListingManager: __metaclass__ = ABCMeta @abstractmethod def list_items(self, relative_path=None, recursive=False, page_size=StorageOperations.DEFAULT_PAGE_SIZE, show_all=False): """ Lists files and folders by a relative path in the current storage. :param relative_path: Storage relative path to be listed. :param recursive: Specifies if the listing has to be recursive. :param page_size: Max number of items to return. The argument is ignored if the show_all argument is specified. :param show_all: Specifies if all items have to be listed. """ pass @abstractmethod def get_summary_with_depth(self, max_depth, relative_path=None): """ Returns a tree with storage usage statistics under the given relative path, limited to the given depth. :param max_depth: Returns N or fewer levels below. :param relative_path: Storage relative path to be processed. :return: Tree with storage usage statistics. """ pass @abstractmethod def get_summary(self, relative_path=None): """ Calculates storage usage statistics under the given relative path. :param relative_path: Storage relative path to be processed. :return: <Storage path>, <total objects by path>, <total objects size> """ pass def get_items(self, relative_path): """ Returns all files under the given relative path in the form of tuples with the following structure: ('File', full_path, relative_path, size) :param relative_path: Path to a folder or a file. :return: Generator of file tuples. 
""" prefix = StorageOperations.get_prefix(relative_path).rstrip(StorageOperations.PATH_SEPARATOR) for item in self.list_items(prefix, recursive=True, show_all=True): if not StorageOperations.is_relative_path(item.name, prefix): continue if item.name == relative_path: item_relative_path = os.path.basename(item.name) else: item_relative_path = StorageOperations.get_item_name(item.name, prefix + StorageOperations.PATH_SEPARATOR) yield ('File', item.name, item_relative_path, item.size) def folder_exists(self, relative_path, delimiter=StorageOperations.PATH_SEPARATOR): prefix = StorageOperations.get_prefix(relative_path).rstrip(delimiter) + delimiter for item in self.list_items(prefix, show_all=True): if prefix.endswith(item.name): return True return False @abstractmethod def get_file_tags(self, relative_path): pass def get_file_size(self, relative_path): items = self.list_items(relative_path, show_all=True, recursive=True) for item in items: if item.name == relative_path: return item.size return None class AbstractDeleteManager: __metaclass__ = ABCMeta @abstractmethod def
(self, relative_path, recursive=False, exclude=[], include=[], version=None, hard_delete=False): """ Deletes all items under the given path. :param relative_path: Storage relative path to be deleted. :param recursive: Specifies if the deletion has to be recursive. The argument is required for folders deletion. :param exclude: Exclude item pattern. :param include: Include item pattern. :param version: Version to be deleted. :param hard_delete: Specifies if all item versions have to be deleted. """ pass class AbstractRestoreManager: __metaclass__ = ABCMeta @abstractmethod def restore_version(self, version, exclude, include, recursive): """ Restores item version. :param version: Version to be restored. """ pass
delete_items
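# Editorial note: an illustrative sketch, not part of the original module. It shows the KEY=VALUE
# contract enforced by StorageOperations.parse_tags above: at most MAX_TAGS_NUMBER tags, keys up to
# MAX_KEY_LENGTH characters, sanitized values truncated to MAX_VALUE_LENGTH, and a ValueError for
# tags without a '=' separator. The tag names are hypothetical.
def _parse_tags_example():
    tags = StorageOperations.parse_tags(('project=demo', 'owner=alice'))
    assert tags == {'project': 'demo', 'owner': 'alice'}
    try:
        StorageOperations.parse_tags(('missing-separator',))
    except ValueError:
        pass  # tags without '=' are rejected with "Tags must be specified as KEY=VALUE pair."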
group_service_test.go
package services_test import ( "context" "testing" "github.com/stretchr/testify/suite" "github.com/mreider/koto/backend/testutils" "github.com/mreider/koto/backend/userhub/caches" "github.com/mreider/koto/backend/userhub/migrate" "github.com/mreider/koto/backend/userhub/repo" "github.com/mreider/koto/backend/userhub/rpc" "github.com/mreider/koto/backend/userhub/services" ) type GroupServiceTestSuite struct { suite.Suite te *testutils.TestEnvironment repos repo.Repos service rpc.GroupService } func TestGroupServiceTestSuite(t *testing.T)
func (s *GroupServiceTestSuite) SetupSuite() { s.te = testutils.NewTestEnvironment("group_service", migrate.Migrate) s.repos = repo.NewRepos(s.te.DB) userCache := caches.NewUsers(s.te.DB) base := services.NewBase(s.repos, services.BaseServiceOptions{ UserCache: userCache, S3Storage: s.te.Storage, }) s.service = services.NewGroup(base) } func (s *GroupServiceTestSuite) SetupTest() { _, err := s.te.DB.Exec(`truncate table group_users, group_invites, groups, friends;`) s.Require().Nil(err) _, err = s.te.DB.Exec(`truncate table users cascade;`) s.Require().Nil(err) } func (s *GroupServiceTestSuite) TearDownSuite() { s.te.Cleanup() } func (s *GroupServiceTestSuite) Test_AddGroup_EmptyName() { ctx := s.userContext("user-1") _, err := s.service.AddGroup(ctx, &rpc.GroupAddGroupRequest{ Name: "", Description: "d", }) s.EqualError(err, "twirp error invalid_argument: name shouldn't be empty") } func (s *GroupServiceTestSuite) Test_AddGroup_EmptyDescription() { ctx := s.userContext("user-1") _, err := s.service.AddGroup(ctx, &rpc.GroupAddGroupRequest{ Name: "n", Description: "", }) s.EqualError(err, "twirp error invalid_argument: description shouldn't be empty") } func (s *GroupServiceTestSuite) Test_AddGroup_InvalidName() { ctx := s.userContext("user-1") _, err := s.service.AddGroup(ctx, &rpc.GroupAddGroupRequest{ Name: "n\t1", Description: "d", }) s.EqualError(err, "twirp error invalid_argument: name is invalid") } func (s *GroupServiceTestSuite) Test_AddPrivateGroup() { s.addHubAdmin("user-1") ctx := s.userContext("user-1") resp, err := s.service.AddGroup(ctx, &rpc.GroupAddGroupRequest{ Name: "group-1", Description: "d", }) s.Nil(err) s.True(resp.Group.Id != "") s.Equal("group-1", resp.Group.Name) s.Equal("d", resp.Group.Description) s.Equal(false, resp.Group.IsPublic) isGroupMember := s.repos.Group.IsGroupMember(resp.Group.Id, "user-1") s.True(isGroupMember) members := s.repos.Group.GroupMembers(resp.Group.Id) s.Equal(1, len(members)) s.Equal("user-1", members[0].ID) } func (s *GroupServiceTestSuite) Test_AddPublicGroup() { s.addHubAdmin("user-1") ctx := s.userContext("user-1") resp, err := s.service.AddGroup(ctx, &rpc.GroupAddGroupRequest{ Name: "group-1", Description: "d", IsPublic: true, BackgroundId: "back-1", }) s.Nil(err) s.True(resp.Group.Id != "") s.Equal("group-1", resp.Group.Name) s.Equal("d", resp.Group.Description) s.Equal(true, resp.Group.IsPublic) isGroupMember := s.repos.Group.IsGroupMember(resp.Group.Id, "user-1") s.True(isGroupMember) members := s.repos.Group.GroupMembers(resp.Group.Id) s.Equal(1, len(members)) s.Equal("user-1", members[0].ID) } func (s *GroupServiceTestSuite) Test_AddExistingGroup() { s.addHubAdmin("user-1") s.addUser("user-2") ctx := s.userContext("user-1") resp, err := s.service.AddGroup(ctx, &rpc.GroupAddGroupRequest{ Name: "group-1", Description: "d", IsPublic: true, }) s.Nil(err) s.True(resp.Group.Id != "") s.Equal("group-1", resp.Group.Name) s.Equal("d", resp.Group.Description) s.Equal(true, resp.Group.IsPublic) ctx = s.userContext("user-2") _, err = s.service.AddGroup(ctx, &rpc.GroupAddGroupRequest{ Name: "group-1", Description: "d", IsPublic: false, }) s.EqualError(err, "twirp error already_exists: group already exists") } func (s *GroupServiceTestSuite) Test_EditGroup_Admin() { groupID := s.addPublicGroup("group-1", "user-1") ctx := s.userContext("user-1") _, err := s.service.EditGroup(ctx, &rpc.GroupEditGroupRequest{ GroupId: groupID, DescriptionChanged: true, Description: "new description", IsPublicChanged: true, IsPublic: false, BackgroundChanged: 
true, BackgroundId: "back-2", }) s.Nil(err) group := s.repos.Group.FindGroupByID(groupID) s.Equal(groupID, group.ID) s.Equal("new description", group.Description) s.Equal(false, group.IsPublic) s.Equal("back-2", group.BackgroundID) } func (s *GroupServiceTestSuite) Test_EditGroup_NonAdmin() { groupID := s.addPublicGroup("group-1", "user-1") ctx := s.userContext("user-2") _, err := s.service.EditGroup(ctx, &rpc.GroupEditGroupRequest{ GroupId: groupID, DescriptionChanged: true, Description: "new description", IsPublicChanged: true, IsPublic: false, }) s.EqualError(err, "twirp error permission_denied: ") } func (s *GroupServiceTestSuite) Test_AddUser_NotAdmin() { groupID := s.addPublicGroup("group-1", "user-1") s.addUser("user-2") s.addUser("user-3") ctx := s.userContext("user-2") _, err := s.service.AddUser(ctx, &rpc.GroupAddUserRequest{ GroupId: groupID, User: "user-3", }) s.EqualError(err, "twirp error not_found: group not found") } func (s *GroupServiceTestSuite) Test_AddUser_Admin() { groupID := s.addPublicGroup("group-1", "user-1") s.addUser("user-2") s.addUser("user-3") _, err := s.te.DB.Exec(` insert into friends(user_id, friend_id) values ($1, $2), ($2, $1);`, "user-1", "user-3") s.Require().Nil(err) ctx := s.userContext("user-1") _, err = s.service.AddUser(ctx, &rpc.GroupAddUserRequest{ GroupId: groupID, User: "user-2", }) s.Nil(err) _, err = s.service.AddUser(ctx, &rpc.GroupAddUserRequest{ GroupId: groupID, User: "user-3", }) s.Nil(err) isGroupMember := s.repos.Group.IsGroupMember(groupID, "user-2") s.False(isGroupMember) isGroupMember = s.repos.Group.IsGroupMember(groupID, "user-3") s.True(isGroupMember) ctx = s.userContext("user-2") resp, err := s.service.InvitesForMe(ctx, &rpc.Empty{}) s.Nil(err) s.Equal(1, len(resp.Invites)) s.Equal(groupID, resp.Invites[0].GroupId) s.Equal("group-1", resp.Invites[0].GroupName) s.Equal("group-1 description", resp.Invites[0].GroupDescription) s.Equal("user-1", resp.Invites[0].InviterId) s.Equal("user-1-name", resp.Invites[0].InviterName) s.Equal("user-1 user-1", resp.Invites[0].InviterFullName) s.NotEmpty(resp.Invites[0].CreatedAt) s.Empty(resp.Invites[0].AcceptedAt) s.Empty(resp.Invites[0].RejectedAt) s.Empty(resp.Invites[0].AcceptedByAdminAt) _, err = s.service.DeleteJoinRequest(ctx, &rpc.GroupDeleteJoinRequestRequest{ GroupId: groupID, }) s.Nil(err) isGroupMember = s.repos.Group.IsGroupMember(groupID, "user-2") s.False(isGroupMember) ctx = s.userContext("user-2") resp, err = s.service.InvitesForMe(ctx, &rpc.Empty{}) s.Nil(err) s.Empty(resp.Invites) } func (s *GroupServiceTestSuite) Test_RequestJoin_PrivateGroup() { groupID := s.addPrivateGroup("group-1", "user-1") s.addUser("user-2") ctx := s.userContext("user-2") _, err := s.service.RequestJoin(ctx, &rpc.GroupRequestJoinRequest{ GroupId: groupID, Message: "!123", }) s.EqualError(err, "twirp error not_found: group not found") } func (s *GroupServiceTestSuite) Test_RequestJoin_PublicGroup() { groupID := s.addPublicGroup("group-1", "user-1") s.addUser("user-2") ctx := s.userContext("user-2") _, err := s.service.RequestJoin(ctx, &rpc.GroupRequestJoinRequest{ GroupId: groupID, Message: "!123", }) s.Nil(err) isGroupMember := s.repos.Group.IsGroupMember(groupID, "user-2") s.False(isGroupMember) ctx = s.userContext("user-2") resp, err := s.service.InvitesFromMe(ctx, &rpc.Empty{}) s.Nil(err) s.Equal(1, len(resp.Invites)) s.Equal(groupID, resp.Invites[0].GroupId) s.Equal("group-1", resp.Invites[0].GroupName) s.Equal("group-1 description", resp.Invites[0].GroupDescription) s.Equal("user-2", 
resp.Invites[0].InvitedId) s.Equal("user-2-name", resp.Invites[0].InvitedName) s.Equal("user-2 user-2", resp.Invites[0].InvitedFullName) s.Equal("!123", resp.Invites[0].Message) s.NotEmpty(resp.Invites[0].CreatedAt) s.NotEmpty(resp.Invites[0].AcceptedAt) s.Empty(resp.Invites[0].RejectedAt) s.Empty(resp.Invites[0].AcceptedByAdminAt) _, err = s.service.DeleteJoinRequest(ctx, &rpc.GroupDeleteJoinRequestRequest{ GroupId: groupID, }) s.Nil(err) isGroupMember = s.repos.Group.IsGroupMember(groupID, "user-2") s.False(isGroupMember) ctx = s.userContext("user-2") resp, err = s.service.InvitesFromMe(ctx, &rpc.Empty{}) s.Nil(err) s.Empty(resp.Invites) } func (s *GroupServiceTestSuite) Test_CreateInvite_SameUser_NotMember() { groupID := s.addPublicGroup("group-1", "user-1") s.addUser("user-2") ctx := s.userContext("user-2") _, err := s.service.CreateInvite(ctx, &rpc.GroupCreateInviteRequest{ GroupId: groupID, Invited: "user-2", Message: "!123", }) s.Nil(err) } func (s *GroupServiceTestSuite) Test_CreateInvite_SameUser_AlreadyMember() { groupID := s.addPublicGroup("group-1", "user-1") s.addUser("user-2") s.addGroupUser(groupID, "user-2") ctx := s.userContext("user-2") _, err := s.service.CreateInvite(ctx, &rpc.GroupCreateInviteRequest{ GroupId: groupID, Invited: "user-2", Message: "!123", }) s.EqualError(err, "twirp error already_exists: already in the group") } func (s *GroupServiceTestSuite) Test_CreateInvite_NotGroupMember() { groupID := s.addPublicGroup("group-1", "user-1") s.addUser("user-2") s.addUser("user-3") ctx := s.userContext("user-2") _, err := s.service.CreateInvite(ctx, &rpc.GroupCreateInviteRequest{ GroupId: groupID, Invited: "user-3", Message: "!123", }) s.EqualError(err, "twirp error permission_denied: not a group member") } func (s *GroupServiceTestSuite) Test_CreateInvite() { groupID := s.addPublicGroup("group-1", "user-1") s.addUser("user-2") s.addUser("user-3") s.addGroupUser(groupID, "user-2") ctx := s.userContext("user-2") _, err := s.service.CreateInvite(ctx, &rpc.GroupCreateInviteRequest{ GroupId: groupID, Invited: "user-3", Message: "!123", }) s.Nil(err) isMember := s.repos.Group.IsGroupMember(groupID, "user-2") s.True(isMember) isMember = s.repos.Group.IsGroupMember(groupID, "user-3") s.False(isMember) respForUser2, err := s.service.InvitesForMe(ctx, &rpc.Empty{}) s.Nil(err) s.Empty(respForUser2.Invites) respFromUser2, err := s.service.InvitesFromMe(ctx, &rpc.Empty{}) s.Nil(err) s.Equal(1, len(respFromUser2.Invites)) s.Equal(groupID, respFromUser2.Invites[0].GroupId) s.Equal("group-1", respFromUser2.Invites[0].GroupName) s.Equal("group-1 description", respFromUser2.Invites[0].GroupDescription) s.Equal("user-3", respFromUser2.Invites[0].InvitedId) s.Equal("user-3-name", respFromUser2.Invites[0].InvitedName) s.Equal("user-3 user-3", respFromUser2.Invites[0].InvitedFullName) s.Equal("!123", respFromUser2.Invites[0].Message) s.NotEmpty(respFromUser2.Invites[0].CreatedAt) s.Empty(respFromUser2.Invites[0].AcceptedAt) s.Empty(respFromUser2.Invites[0].RejectedAt) s.Empty(respFromUser2.Invites[0].AcceptedByAdminAt) ctx = s.userContext("user-3") respForUser3, err := s.service.InvitesForMe(ctx, &rpc.Empty{}) s.Nil(err) s.Equal(1, len(respForUser3.Invites)) s.Equal(groupID, respForUser3.Invites[0].GroupId) s.Equal("group-1", respForUser3.Invites[0].GroupName) s.Equal("group-1 description", respForUser3.Invites[0].GroupDescription) s.Equal("user-2", respForUser3.Invites[0].InviterId) s.Equal("user-2-name", respForUser3.Invites[0].InviterName) s.Equal("user-2 user-2", 
respForUser3.Invites[0].InviterFullName) s.Equal("!123", respForUser3.Invites[0].Message) s.NotEmpty(respForUser3.Invites[0].CreatedAt) s.Empty(respForUser3.Invites[0].AcceptedAt) s.Empty(respForUser3.Invites[0].RejectedAt) s.Empty(respForUser3.Invites[0].AcceptedByAdminAt) respFromUser3, err := s.service.InvitesFromMe(ctx, &rpc.Empty{}) s.Nil(err) s.Empty(respFromUser3.Invites) } func (s *GroupServiceTestSuite) Test_CreateInvite_GroupAdmin() { groupID := s.addPublicGroup("group-1", "user-2") s.addUser("user-3") ctx := s.userContext("user-2") _, err := s.service.CreateInvite(ctx, &rpc.GroupCreateInviteRequest{ GroupId: groupID, Invited: "user-3", Message: "!123", }) s.Nil(err) isMember := s.repos.Group.IsGroupMember(groupID, "user-2") s.True(isMember) isMember = s.repos.Group.IsGroupMember(groupID, "user-3") s.False(isMember) respForUser2, err := s.service.InvitesForMe(ctx, &rpc.Empty{}) s.Nil(err) s.Empty(respForUser2.Invites) respFromUser2, err := s.service.InvitesFromMe(ctx, &rpc.Empty{}) s.Nil(err) s.Equal(1, len(respFromUser2.Invites)) s.Equal(groupID, respFromUser2.Invites[0].GroupId) s.Equal("group-1", respFromUser2.Invites[0].GroupName) s.Equal("group-1 description", respFromUser2.Invites[0].GroupDescription) s.Equal("user-3", respFromUser2.Invites[0].InvitedId) s.Equal("user-3-name", respFromUser2.Invites[0].InvitedName) s.Equal("user-3 user-3", respFromUser2.Invites[0].InvitedFullName) s.Equal("!123", respFromUser2.Invites[0].Message) s.NotEmpty(respFromUser2.Invites[0].CreatedAt) s.Empty(respFromUser2.Invites[0].AcceptedAt) s.Empty(respFromUser2.Invites[0].RejectedAt) s.Empty(respFromUser2.Invites[0].AcceptedByAdminAt) ctx = s.userContext("user-3") respForUser3, err := s.service.InvitesForMe(ctx, &rpc.Empty{}) s.Nil(err) s.Equal(1, len(respForUser3.Invites)) s.Equal(groupID, respForUser3.Invites[0].GroupId) s.Equal("group-1", respForUser3.Invites[0].GroupName) s.Equal("group-1 description", respForUser3.Invites[0].GroupDescription) s.Equal("user-2", respForUser3.Invites[0].InviterId) s.Equal("user-2-name", respForUser3.Invites[0].InviterName) s.Equal("user-2 user-2", respForUser3.Invites[0].InviterFullName) s.Equal("!123", respForUser3.Invites[0].Message) s.NotEmpty(respForUser3.Invites[0].CreatedAt) s.Empty(respForUser3.Invites[0].AcceptedAt) s.Empty(respForUser3.Invites[0].RejectedAt) s.Empty(respForUser3.Invites[0].AcceptedByAdminAt) respFromUser3, err := s.service.InvitesFromMe(ctx, &rpc.Empty{}) s.Nil(err) s.Empty(respFromUser3.Invites) } func (s *GroupServiceTestSuite) Test_CreateInvite_ByEmail() { groupID := s.addPublicGroup("group-1", "user-1") s.addUser("user-2") s.addUser("user-3") s.addGroupUser(groupID, "user-2") ctx := s.userContext("user-2") _, err := s.service.CreateInvite(ctx, &rpc.GroupCreateInviteRequest{ GroupId: groupID, Invited: "[email protected]", Message: "!123", }) s.Nil(err) isMember := s.repos.Group.IsGroupMember(groupID, "user-2") s.True(isMember) isMember = s.repos.Group.IsGroupMember(groupID, "user-3") s.False(isMember) respForUser2, err := s.service.InvitesForMe(ctx, &rpc.Empty{}) s.Nil(err) s.Empty(respForUser2.Invites) respFromUser2, err := s.service.InvitesFromMe(ctx, &rpc.Empty{}) s.Nil(err) s.Equal(1, len(respFromUser2.Invites)) s.Equal(groupID, respFromUser2.Invites[0].GroupId) s.Equal("group-1", respFromUser2.Invites[0].GroupName) s.Equal("group-1 description", respFromUser2.Invites[0].GroupDescription) s.Equal("user-3", respFromUser2.Invites[0].InvitedId) s.Equal("user-3-name", respFromUser2.Invites[0].InvitedName) s.Equal("user-3 user-3", 
respFromUser2.Invites[0].InvitedFullName) s.Equal("!123", respFromUser2.Invites[0].Message) s.NotEmpty(respFromUser2.Invites[0].CreatedAt) s.Empty(respFromUser2.Invites[0].AcceptedAt) s.Empty(respFromUser2.Invites[0].RejectedAt) s.Empty(respFromUser2.Invites[0].AcceptedByAdminAt) ctx = s.userContext("user-3") respForUser3, err := s.service.InvitesForMe(ctx, &rpc.Empty{}) s.Nil(err) s.Equal(1, len(respForUser3.Invites)) s.Equal(groupID, respForUser3.Invites[0].GroupId) s.Equal("group-1", respForUser3.Invites[0].GroupName) s.Equal("group-1 description", respForUser3.Invites[0].GroupDescription) s.Equal("user-2", respForUser3.Invites[0].InviterId) s.Equal("user-2-name", respForUser3.Invites[0].InviterName) s.Equal("user-2 user-2", respForUser3.Invites[0].InviterFullName) s.Equal("!123", respForUser3.Invites[0].Message) s.NotEmpty(respForUser3.Invites[0].CreatedAt) s.Empty(respForUser3.Invites[0].AcceptedAt) s.Empty(respForUser3.Invites[0].RejectedAt) s.Empty(respForUser3.Invites[0].AcceptedByAdminAt) respFromUser3, err := s.service.InvitesFromMe(ctx, &rpc.Empty{}) s.Nil(err) s.Empty(respFromUser3.Invites) } func (s *GroupServiceTestSuite) Test_CreateInvite_UnregisteredUser() { groupID := s.addPublicGroup("group-1", "user-1") s.addUser("user-2") s.addGroupUser(groupID, "user-2") ctx := s.userContext("user-2") _, err := s.service.CreateInvite(ctx, &rpc.GroupCreateInviteRequest{ GroupId: groupID, Invited: "[email protected]", Message: "!123", }) s.Nil(err) isMember := s.repos.Group.IsGroupMember(groupID, "user-2") s.True(isMember) isMember = s.repos.Group.IsGroupMember(groupID, "user-3") s.False(isMember) respForUser2, err := s.service.InvitesForMe(ctx, &rpc.Empty{}) s.Nil(err) s.Empty(respForUser2.Invites) respFromUser2, err := s.service.InvitesFromMe(ctx, &rpc.Empty{}) s.Nil(err) s.Equal(1, len(respFromUser2.Invites)) s.Equal(groupID, respFromUser2.Invites[0].GroupId) s.Equal("group-1", respFromUser2.Invites[0].GroupName) s.Equal("group-1 description", respFromUser2.Invites[0].GroupDescription) s.Equal("", respFromUser2.Invites[0].InvitedId) s.Equal("[email protected]", respFromUser2.Invites[0].InvitedName) s.Equal("", respFromUser2.Invites[0].InvitedFullName) s.Equal("!123", respFromUser2.Invites[0].Message) s.NotEmpty(respFromUser2.Invites[0].CreatedAt) s.Empty(respFromUser2.Invites[0].AcceptedAt) s.Empty(respFromUser2.Invites[0].RejectedAt) s.Empty(respFromUser2.Invites[0].AcceptedByAdminAt) } func (s *GroupServiceTestSuite) Test_AcceptInvite() { groupID := s.addPublicGroup("group-1", "user-1") s.addUser("user-2") s.addUser("user-3") s.addGroupUser(groupID, "user-2") ctx := s.userContext("user-2") _, err := s.service.CreateInvite(ctx, &rpc.GroupCreateInviteRequest{ GroupId: groupID, Invited: "user-3", }) s.Require().Nil(err) ctx = s.userContext("user-3") _, err = s.service.AcceptInvite(ctx, &rpc.GroupAcceptInviteRequest{ GroupId: groupID, InviterId: "user-2", }) s.Nil(err) respForUser3, err := s.service.InvitesForMe(ctx, &rpc.Empty{}) s.Nil(err) s.Equal(1, len(respForUser3.Invites)) s.Equal(groupID, respForUser3.Invites[0].GroupId) s.Equal("group-1", respForUser3.Invites[0].GroupName) s.Equal("group-1 description", respForUser3.Invites[0].GroupDescription) s.Equal("user-2", respForUser3.Invites[0].InviterId) s.Equal("user-2-name", respForUser3.Invites[0].InviterName) s.Equal("user-2 user-2", respForUser3.Invites[0].InviterFullName) s.NotEmpty(respForUser3.Invites[0].CreatedAt) s.NotEmpty(respForUser3.Invites[0].AcceptedAt) s.Empty(respForUser3.Invites[0].RejectedAt) 
s.Empty(respForUser3.Invites[0].AcceptedByAdminAt) isMember := s.repos.Group.IsGroupMember(groupID, "user-3") s.False(isMember) areFriends := s.repos.Friend.AreFriends("user-1", "user-3") s.False(areFriends) } func (s *GroupServiceTestSuite) Test_AcceptInvite_FromGroupAdmin() { groupID := s.addPublicGroup("group-1", "user-2") s.addUser("user-3") ctx := s.userContext("user-2") _, err := s.service.CreateInvite(ctx, &rpc.GroupCreateInviteRequest{ GroupId: groupID, Invited: "user-3", }) s.Require().Nil(err) ctx = s.userContext("user-3") _, err = s.service.AcceptInvite(ctx, &rpc.GroupAcceptInviteRequest{ GroupId: groupID, InviterId: "user-2", }) s.Nil(err) respForUser3, err := s.service.InvitesForMe(ctx, &rpc.Empty{}) s.Nil(err) s.Equal(0, len(respForUser3.Invites)) isMember := s.repos.Group.IsGroupMember(groupID, "user-3") s.True(isMember) areFriends := s.repos.Friend.AreFriends("user-2", "user-3") s.True(areFriends) } func (s *GroupServiceTestSuite) Test_RejectInvite() { groupID := s.addPublicGroup("group-1", "user-1") s.addUser("user-2") s.addUser("user-3") s.addGroupUser(groupID, "user-2") ctx := s.userContext("user-2") _, err := s.service.CreateInvite(ctx, &rpc.GroupCreateInviteRequest{ GroupId: groupID, Invited: "user-3", }) s.Require().Nil(err) ctx = s.userContext("user-3") _, err = s.service.RejectInvite(ctx, &rpc.GroupRejectInviteRequest{ GroupId: groupID, InviterId: "user-2", }) s.Nil(err) respForUser3, err := s.service.InvitesForMe(ctx, &rpc.Empty{}) s.Nil(err) s.Equal(1, len(respForUser3.Invites)) s.Equal(groupID, respForUser3.Invites[0].GroupId) s.Equal("group-1", respForUser3.Invites[0].GroupName) s.Equal("group-1 description", respForUser3.Invites[0].GroupDescription) s.Equal("user-2", respForUser3.Invites[0].InviterId) s.Equal("user-2-name", respForUser3.Invites[0].InviterName) s.Equal("user-2 user-2", respForUser3.Invites[0].InviterFullName) s.NotEmpty(respForUser3.Invites[0].CreatedAt) s.Empty(respForUser3.Invites[0].AcceptedAt) s.NotEmpty(respForUser3.Invites[0].RejectedAt) s.Empty(respForUser3.Invites[0].AcceptedByAdminAt) isMember := s.repos.Group.IsGroupMember(groupID, "user-3") s.False(isMember) } func (s *GroupServiceTestSuite) Test_ConfirmInvite() { groupID := s.addPublicGroup("group-1", "user-1") s.addUser("user-2") s.addUser("user-3") s.addGroupUser(groupID, "user-2") ctx := s.userContext("user-2") _, err := s.service.CreateInvite(ctx, &rpc.GroupCreateInviteRequest{ GroupId: groupID, Invited: "user-3", Message: "!123", }) s.Require().Nil(err) ctx = s.userContext("user-3") _, err = s.service.AcceptInvite(ctx, &rpc.GroupAcceptInviteRequest{ GroupId: groupID, InviterId: "user-2", }) s.Require().Nil(err) isMember := s.repos.Group.IsGroupMember(groupID, "user-3") s.False(isMember) _, err = s.service.ConfirmInvite(ctx, &rpc.GroupConfirmInviteRequest{ GroupId: groupID, InviterId: "user-2", InvitedId: "user-3", }) s.EqualError(err, "twirp error permission_denied: ") ctx = s.userContext("user-1") resp, err := s.service.InvitesToConfirm(ctx, &rpc.Empty{}) s.Nil(err) s.Equal(1, len(resp.Groups)) s.Equal(groupID, resp.Groups[0].Group.Id) s.Equal("group-1", resp.Groups[0].Group.Name) s.Equal("group-1 description", resp.Groups[0].Group.Description) s.Equal(true, resp.Groups[0].Group.IsPublic) s.Equal(1, len(resp.Groups[0].Invites)) s.Equal("user-2", resp.Groups[0].Invites[0].InviterId) s.Equal("user-2-name", resp.Groups[0].Invites[0].InviterName) s.Equal("user-2 user-2", resp.Groups[0].Invites[0].InviterFullName) s.Equal("user-3", resp.Groups[0].Invites[0].InvitedId) 
s.Equal("user-3-name", resp.Groups[0].Invites[0].InvitedName) s.Equal("user-3 user-3", resp.Groups[0].Invites[0].InvitedFullName) s.Equal("!123", resp.Groups[0].Invites[0].Message) s.NotEmpty(resp.Groups[0].Invites[0].CreatedAt) s.NotEmpty(resp.Groups[0].Invites[0].AcceptedAt) s.Empty(resp.Groups[0].Invites[0].RejectedAt) s.Empty(resp.Groups[0].Invites[0].AcceptedByAdminAt) s.Empty(resp.Groups[0].Invites[0].RejectedByAdminAt) areFriends := s.repos.Friend.AreFriends("user-1", "user-3") s.False(areFriends) _, err = s.service.ConfirmInvite(ctx, &rpc.GroupConfirmInviteRequest{ GroupId: groupID, InviterId: "user-2", InvitedId: "user-3", }) s.Nil(err) isMember = s.repos.Group.IsGroupMember(groupID, "user-3") s.True(isMember) resp, err = s.service.InvitesToConfirm(ctx, &rpc.Empty{}) s.Nil(err) s.Empty(len(resp.Groups)) areFriends = s.repos.Friend.AreFriends("user-1", "user-3") s.True(areFriends) } func (s *GroupServiceTestSuite) Test_DenyInvite() { groupID := s.addPublicGroup("group-1", "user-1") s.addUser("user-2") s.addUser("user-3") s.addGroupUser(groupID, "user-2") ctx := s.userContext("user-2") _, err := s.service.CreateInvite(ctx, &rpc.GroupCreateInviteRequest{ GroupId: groupID, Invited: "user-3", Message: "!123", }) s.Require().Nil(err) ctx = s.userContext("user-3") _, err = s.service.AcceptInvite(ctx, &rpc.GroupAcceptInviteRequest{ GroupId: groupID, InviterId: "user-2", }) s.Require().Nil(err) isMember := s.repos.Group.IsGroupMember(groupID, "user-3") s.False(isMember) _, err = s.service.DenyInvite(ctx, &rpc.GroupDenyInviteRequest{ GroupId: groupID, InviterId: "user-2", InvitedId: "user-3", }) s.EqualError(err, "twirp error permission_denied: ") ctx = s.userContext("user-1") resp, err := s.service.InvitesToConfirm(ctx, &rpc.Empty{}) s.Nil(err) s.Equal(1, len(resp.Groups)) s.Equal(groupID, resp.Groups[0].Group.Id) s.Equal("group-1", resp.Groups[0].Group.Name) s.Equal("group-1 description", resp.Groups[0].Group.Description) s.Equal(true, resp.Groups[0].Group.IsPublic) s.Equal(1, len(resp.Groups[0].Invites)) s.Equal("user-2", resp.Groups[0].Invites[0].InviterId) s.Equal("user-2-name", resp.Groups[0].Invites[0].InviterName) s.Equal("user-2 user-2", resp.Groups[0].Invites[0].InviterFullName) s.Equal("user-3", resp.Groups[0].Invites[0].InvitedId) s.Equal("user-3-name", resp.Groups[0].Invites[0].InvitedName) s.Equal("user-3 user-3", resp.Groups[0].Invites[0].InvitedFullName) s.Equal("!123", resp.Groups[0].Invites[0].Message) s.NotEmpty(resp.Groups[0].Invites[0].CreatedAt) s.NotEmpty(resp.Groups[0].Invites[0].AcceptedAt) s.Empty(resp.Groups[0].Invites[0].RejectedAt) s.Empty(resp.Groups[0].Invites[0].AcceptedByAdminAt) s.Empty(resp.Groups[0].Invites[0].RejectedByAdminAt) areFriends := s.repos.Friend.AreFriends("user-1", "user-3") s.False(areFriends) _, err = s.service.DenyInvite(ctx, &rpc.GroupDenyInviteRequest{ GroupId: groupID, InviterId: "user-2", InvitedId: "user-3", }) s.Nil(err) isMember = s.repos.Group.IsGroupMember(groupID, "user-3") s.False(isMember) resp, err = s.service.InvitesToConfirm(ctx, &rpc.Empty{}) s.Nil(err) s.Empty(len(resp.Groups)) areFriends = s.repos.Friend.AreFriends("user-1", "user-3") s.False(areFriends) } func (s *GroupServiceTestSuite) Test_RemoveUser() { groupID := s.addPublicGroup("group-1", "user-1") s.addUser("user-2") s.addUser("user-3") s.addGroupUser(groupID, "user-2") ctx := s.userContext("user-2") _, err := s.service.RemoveUser(ctx, &rpc.GroupRemoveUserRequest{ GroupId: groupID, UserId: "user-1", }) s.EqualError(err, "twirp error permission_denied: ") isMember 
:= s.repos.Group.IsGroupMember(groupID, "user-2") s.True(isMember) ctx = s.userContext("user-1") _, err = s.service.RemoveUser(ctx, &rpc.GroupRemoveUserRequest{ GroupId: groupID, UserId: "user-1", }) s.EqualError(err, "twirp error invalid_argument: can't delete himself") _, err = s.service.RemoveUser(ctx, &rpc.GroupRemoveUserRequest{ GroupId: groupID, UserId: "user-3", }) s.Nil(err) _, err = s.service.RemoveUser(ctx, &rpc.GroupRemoveUserRequest{ GroupId: groupID, UserId: "user-2", }) s.Nil(err) isMember = s.repos.Group.IsGroupMember(groupID, "user-2") s.False(isMember) } func (s *GroupServiceTestSuite) Test_LeaveGroup() { groupID := s.addPublicGroup("group-1", "user-1") s.addUser("user-2") s.addUser("user-3") s.addGroupUser(groupID, "user-2") ctx := s.userContext("user-3") _, err := s.service.LeaveGroup(ctx, &rpc.GroupLeaveGroupRequest{ GroupId: groupID, }) s.Nil(err) ctx = s.userContext("user-1") _, err = s.service.LeaveGroup(ctx, &rpc.GroupLeaveGroupRequest{ GroupId: groupID, }) s.EqualError(err, "twirp error invalid_argument: admin can't leave the group") isMember := s.repos.Group.IsGroupMember(groupID, "user-2") s.True(isMember) ctx = s.userContext("user-2") _, err = s.service.LeaveGroup(ctx, &rpc.GroupLeaveGroupRequest{ GroupId: groupID, }) s.Nil(err) isMember = s.repos.Group.IsGroupMember(groupID, "user-2") s.False(isMember) } func (s *GroupServiceTestSuite) Test_DeleteGroup() { groupID := s.addPublicGroup("group-1", "user-1") s.addUser("user-2") s.addGroupUser(groupID, "user-2") g := s.repos.Group.FindGroupByID(groupID) s.NotNil(g) s.Equal(groupID, g.ID) ctx := s.userContext("user-1") _, err := s.service.DeleteGroup(ctx, &rpc.GroupDeleteGroupRequest{ GroupId: groupID, }) s.Nil(err) g = s.repos.Group.FindGroupByID(groupID) s.Nil(g) } func (s *GroupServiceTestSuite) userContext(name string) context.Context { ctx := context.WithValue(context.Background(), services.ContextUserKey, repo.User{ ID: name, }) return context.WithValue(ctx, services.ContextIsAdminKey, false) } func (s *GroupServiceTestSuite) addUser(name string) { s.repos.User.AddUser(name, name+"-name", name+"@mail.org", name+" "+name, "", false) } func (s *GroupServiceTestSuite) addHubAdmin(userName string) { s.addUser(userName) s.repos.MessageHubs.AddHub(userName+"-hub", userName+"-details", userName, 0, false) } func (s *GroupServiceTestSuite) addPublicGroup(name, adminName string) string { s.addHubAdmin(adminName) ctx := s.userContext(adminName) resp, err := s.service.AddGroup(ctx, &rpc.GroupAddGroupRequest{ Name: name, Description: name + " description", IsPublic: true, }) s.Require().Nil(err) return resp.Group.Id } func (s *GroupServiceTestSuite) addPrivateGroup(name, adminName string) string { s.addHubAdmin(adminName) ctx := s.userContext(adminName) resp, err := s.service.AddGroup(ctx, &rpc.GroupAddGroupRequest{ Name: name, Description: name + " description", IsPublic: false, }) s.Require().Nil(err) return resp.Group.Id } func (s *GroupServiceTestSuite) addGroupUser(groupID, userID string) { s.repos.Group.AddUserToGroup(groupID, userID) }
{ suite.Run(t, &GroupServiceTestSuite{}) }
visit_item_static.rs
impl<'a> ModuleVisitor<'a> { pub(super) fn visit_item_static(&mut self, node: &syn::ItemStatic) { if self.feature.is_unstable(&node.attrs, Some(&node.vis)) { let attrs = self.feature.strip_attrs(&node.attrs); self.visit_unstable_item(syn::ItemStatic { attrs: attrs.clone(), expr: Box::new(util::empty_expr()), ..node.clone() }); self.feature .assert_stable(node) .visit_item_static(&syn::ItemStatic { attrs, ..node.clone() }) } else { self.feature.assert_stable(node).visit_item_static(node) } } }
use super::*;
test_notice.py
# Copyright (c) 2019-2021 Micro Focus or one of its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function, division, absolute_import import mock from .base import VerticaPythonUnitTestCase from aiovertica.messages import NoticeResponse from aiovertica.errors import QueryError class NoticeTestCase(VerticaPythonUnitTestCase): SAMPLE_DATA = {b'S': 'FATAL', b'H': 'This is a test hint', b'L': '9999', b'M': 'Failure is on purpose'} @mock.patch.object(NoticeResponse, '_unpack_data') def test_error_message(self, mock_unpack_data): mock_unpack_data.return_value = NoticeTestCase.SAMPLE_DATA notice = NoticeResponse(b'ignored-due-to-mock') self.assertEqual( notice.error_message(), 'Severity: FATAL, Message: Failure is on purpose, Hint: This is a test hint, Line: 9999' ) @mock.patch.object(NoticeResponse, '_unpack_data') def test_attribute_properties(self, mock_unpack_data): mock_unpack_data.return_value = NoticeTestCase.SAMPLE_DATA notice = NoticeResponse(b'ignored-due-to-mock') self.assertEqual(notice.severity, 'FATAL') self.assertEqual(notice.hint, 'This is a test hint') # yes, line is still a string.
self.assertEqual(notice.message, 'Failure is on purpose') self.assertIsNone(notice.detail) self.assertIsNone(notice.sqlstate) @mock.patch.object(NoticeResponse, '_unpack_data') def test_labeled_values(self, mock_unpack_data): mock_unpack_data.return_value = NoticeTestCase.SAMPLE_DATA notice = NoticeResponse(b'ignored-due-to-mock') self.assertEqual(notice.values, { 'Severity': 'FATAL', 'Hint': 'This is a test hint', 'Line': '9999', 'Message': 'Failure is on purpose'}) @mock.patch.object(NoticeResponse, '_unpack_data') def test_query_error(self, mock_unpack_data): mock_unpack_data.return_value = NoticeTestCase.SAMPLE_DATA notice = NoticeResponse(b'ignored-due-to-mock') query_error = QueryError(notice, 'Select Fake();') self.assertEqual(query_error.severity, 'FATAL') self.assertEqual(query_error.hint, 'This is a test hint') self.assertEqual(query_error.line, '9999') self.assertEqual(query_error.message, 'Failure is on purpose') self.assertIsNone(query_error.detail) self.assertIsNone(query_error.sqlstate) self.assertEqual( str(query_error), 'Severity: FATAL, Message: Failure is on purpose, Hint: This is a test hint, Line: 9999, SQL: \'Select Fake();\'')
self.assertEqual(notice.line, '9999')
formatters.js
module.exports = { // Create a vertical space verticalSpace(lines = 1) { for (let i = 0; i < lines; i++) { console.log(''); } }, // Create a horizontal line across the screen horizontalLine(color) { // Get the available screen size const width = process.stdout.columns; // Put in enough dashes to go across the screen let line = ''; for (let i = 0; i < width; i++) { line += '-'; } if (color) console.log(color, line); if (!color) console.log(line); }, // Create centered text on the screen centered(str = '', color) { if (str.length > 0) { // Get the available screen size const width = process.stdout.columns; // Calculate the left padding there should be const leftPadding = Math.floor((width - str.length) / 2); // Put in left padded spaces before the string itself
line += str; if (color) console.log(color, line); if (!color) console.log(line); } }, // 2 columns with padding columns(item, desc) { let line = item; const padding = 60 - item.length; for (let i = 0; i < padding; i++) { line += ' '; } line += desc; console.log(line); }, };
let line = ''; for (let i = 0; i < leftPadding; i++) { line += ' '; }
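// Editorial note: an illustrative usage sketch, not part of the original module. It assumes the file
// is loaded via CommonJS require (the module uses module.exports); all output goes to stdout.
const formatters = require('./formatters'); // hypothetical path

formatters.horizontalLine(); // dashes across the full terminal width
formatters.centered('MY APP'); // text centered on process.stdout.columns
formatters.verticalSpace(2); // two blank lines
formatters.columns('--help', 'Show this help page'); // item padded to 60 chars, then description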
collect.rs
use std; use parser::*; use structs::*; use template_management::*; use nom; use nom::Needed; use serde_json; pub fn collect<Reader>(reader : &mut Reader) where Reader : std::io::Read, { let mut buffer = Vec::<u8>::new(); let mut cache = TemplateCache::default(); let mut message_num = 0; loop { match message_parser(&buffer.clone()[..]) { Ok((rest, message)) => { trace!("message header {}: {:?}", message_num, message.header); collect_message_body(&mut cache, message, message_num); message_num += 1; // discard consumed front of buffer let consumed_len = buffer.len() - rest.len(); buffer = buffer.split_off(consumed_len); } Err(nom::Err::Incomplete(needed)) => { let old_len = buffer.len(); let length = match needed { Needed::Size(length) => length, Needed::Unknown => 1, }; // get more input buffer.resize(old_len + length, 0x00); if reader.read_exact(&mut buffer[old_len..]).is_err() { return; } } // parser error and failure Err(e) => { error!("message {} unparseable", message_num); println!("error {:?}", e); println!("input = {:?}", buffer); return; } } } } fn collect_message_body(cache : &mut TemplateCache, message : Message, message_num : usize) { let mut set_num = 0; for (set_header, data) in message.sets { trace!("set header {}.{}: {:?}", message_num, set_num, set_header); match set_header.set_id { TEMPLATE_SET_ID | OPTIONS_TEMPLATE_SET_ID => { let templates = match template_records_parser(data, set_header) { Ok((_, t)) => t, Err(_) => { error!("template set {}.{} unparseable", message_num, set_num); continue; } }; for template in &templates { trace!( "template {}.{}.{}: {:?}", message_num, set_num, template.header.template_id,
); if let Err(e) = verify_template(template) { error!("{:?}", e); continue; } if let Err(e) = cache.update_with(template.clone()) { error!("{:?}", e); continue; } } } FIRST_TEMPLATE_ID...LAST_TEMPLATE_ID => { let template = match cache.lookup(set_header.set_id) { None => { error!("received data set without known template"); continue; } Some(template) => template, }; let records = match data_records_parser( data, set_header.length - SET_HEADER_LENGTH, template, ) { Ok(o) => o, Err(e) => { error!("{:?}", e); continue; } }; for (record_num, record) in records.1.iter().enumerate() { println!( "{}", serde_json::to_string(&TypedDataRecord { data : record, template, }).unwrap() ); trace!( "data record {}.{}.{}: {:?}", message_num, set_num, record_num, record ); } } id => error!("received set with reserved set id {}", id), } set_num += 1; } }
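A minimal driver sketch for `collect`, reading message bytes from stdin; it assumes `collect` is in scope and that a logger for the `trace!`/`error!` macros is initialized elsewhere.

// Hypothetical entry point (assumes `collect` is in scope and a logger is set up).
fn main() {
    let stdin = std::io::stdin();
    let mut handle = stdin.lock();
    collect(&mut handle);
}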
DiplomacyNegotiationStringOptions.ts
import { CollectionCache, CollectionKey } from "../../../common";
import { DiplomacyKeys } from "./DiplomacyKeys";
import { Cultures } from "./Cultures";
import { GovernmentTypes } from "./GovernmentTypes";
import { DiplomacyStrings } from "./DiplomacyStrings";

export namespace DiplomacyNegotiationStringOptions {
    export const KEY = new CollectionKey("diplomacy_negotiation_string_options");

    export class Entry {
        private readonly collectionCache: CollectionCache;

        readonly _key: string;
        readonly _culture: string;
        readonly _governmentType: string;
        readonly _string: string;
        readonly option: number;

        constructor(collectionCache: CollectionCache, values: any) {
            this.collectionCache = collectionCache;
            this._key = values["key"];
            this._culture = values["culture"];
            this._governmentType = values["government_type"];
            this._string = values["string"];
            this.option = values["option"];
        }

        get key(): DiplomacyKeys.Entry | undefined {
            const collection = <DiplomacyKeys.Entry[]>this.collectionCache.getCollection(DiplomacyKeys.KEY, DiplomacyKeys.Entry);
            return collection.find(entry => entry.key === this._key);
        }

        get culture(): Cultures.Entry | undefined {
            const collection = <Cultures.Entry[]>this.collectionCache.getCollection(Cultures.KEY, Cultures.Entry);
            return collection.find(entry => entry.key === this._culture);
        }

        get governmentType(): GovernmentTypes.Entry | undefined {
            const collection = <GovernmentTypes.Entry[]>this.collectionCache.getCollection(GovernmentTypes.KEY, GovernmentTypes.Entry);
            return collection.find(entry => entry.governmentType === this._governmentType);
        }

        get string(): DiplomacyStrings.Entry | undefined {
            const collection = <DiplomacyStrings.Entry[]>this.collectionCache.getCollection(DiplomacyStrings.KEY, DiplomacyStrings.Entry);
            return collection.find(entry => entry.key === this._string);
        }
    }
}

export default DiplomacyNegotiationStringOptions;
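A short lookup sketch; the populated `cache` instance and the culture key value are hypothetical.

// Hypothetical lookup against a populated CollectionCache named `cache`.
const options = <DiplomacyNegotiationStringOptions.Entry[]>cache.getCollection(
    DiplomacyNegotiationStringOptions.KEY,
    DiplomacyNegotiationStringOptions.Entry,
);
const romanOptions = options.filter(entry => entry._culture === "rom_roman"); // key value is illustrative
console.log(romanOptions.map(entry => entry._string));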
gunShip.js
import Phaser from 'phaser';
import Entity from './entity';
import EnemyLaser from './enemyLaser';

export default class GunShip extends Entity {
  constructor(scene, x, y) {
    super(scene, x, y, 'sprEnemy0', 'GunShip');
    this.body.velocity.y = Phaser.Math.Between(50, 100);
    this.shootTimer = this.scene.time.addEvent({
      delay: 1000,
      callback() {
        const laser = new EnemyLaser(
          this.scene,
          this.x,
          this.y,
        );
        laser.setScale(this.scaleX * 5);
        this.scene.enemyLasers.add(laser);
      },
      callbackScope: this,
      loop: true,
    });
    this.score = 30;
  }

  onDestroy() {
    // A single truthy check covers both undefined and null timers.
    if (this.shootTimer) {
      this.shootTimer.remove(false);
    }
  }
}
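A spawn sketch from a hypothetical scene update loop; the `enemies` group and the config lookup are assumptions drawn from similar Phaser shooters.

// Hypothetical spawner inside a Phaser.Scene (`this.enemies` is an assumption).
const enemy = new GunShip(
  this,
  Phaser.Math.Between(0, this.game.config.width),
  0,
);
this.enemies.add(enemy);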
score.rs
use super::{Pending, UserIdentification};
use crate::{
    model::{GameMode, GameMods, Score},
    routing::Route,
Osu, }; #[cfg(feature = "cache")] use crate::client::cached::OsuCached; /// Retrieve a [`Score`](crate::model::Score). pub struct GetScore<'a> { fut: Option<Pending<'a>>, osu: Option<&'a Osu>, limit: Option<u32>, map_id: u32, mode: Option<GameMode>, mods: Option<GameMods>, user: Option<UserIdentification>, } /// Retrieve [`Score`](crate::model::Score)s pub struct GetScores<'a> { fut: Option<Pending<'a>>, osu: Option<&'a Osu>, limit: Option<u32>, map_id: u32, mode: Option<GameMode>, mods: Option<GameMods>, user: Option<UserIdentification>, } macro_rules! impl_score { ($name: ident, $default_limit: expr) => { impl<'a> $name<'a> { pub(crate) fn new(osu: &'a Osu, map_id: u32) -> Self { Self { osu: Some(osu), map_id, fut: None, limit: $default_limit, mode: None, mods: None, user: None, } } /// Optional, specify a user either by id (`u32`) or name (`String`/`&str`). #[inline] pub fn user(mut self, user: impl Into<UserIdentification>) -> Self { self.user.replace(user.into()); self } /// Optional, amount of results from the top. /// Range between 1 and 100, defaults to 50. #[inline] pub fn limit(mut self, limit: u32) -> Self { self.limit.replace(limit.max(1).min(100)); self } /// Optional, defaults to `GameMode::STD`. #[inline] pub fn mode(mut self, mode: GameMode) -> Self { self.mode.replace(mode); self } /// Optional, specify a mod combination. #[inline] pub fn mods(mut self, mods: GameMods) -> Self { self.mods.replace(mods); self } fn start(&mut self) { let route = Route::GetScore { limit: self.limit.take(), map_id: self.map_id, mode: self.mode.take(), mods: self.mods.take(), user: self.user.take(), }; #[cfg(feature = "metrics")] self.osu.unwrap().0.metrics.scores.inc(); #[cfg(feature = "cache")] self.fut.replace(Box::pin( self.osu.unwrap().request_bytes(route, OsuCached::Score), )); #[cfg(not(feature = "cache"))] self.fut .replace(Box::pin(self.osu.unwrap().request_bytes(route))); } } }; } impl_score!(GetScores, None); poll_vec_req!(GetScores<'_>, Score); impl_score!(GetScore, Some(1)); poll_req!(GetScore<'_>, Score);
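A hedged call-site sketch; only the request builders are visible here, so the `Osu::new` constructor and the `scores(map_id)` accessor are assumptions about the surrounding crate.

// Hypothetical call site; `Osu::new` and `scores(map_id)` are assumptions.
// let osu = Osu::new(api_key);
// let top10: Vec<Score> = osu
//     .scores(1851299) // beatmap id (illustrative)
//     .mode(GameMode::STD)
//     .limit(10)
//     .await?;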
analysis16s.py
#!/usr/bin/env python """ Copyright (C) 2014 Ivan Gregor This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Note that we could have written some parts of this code in a nicer way, but didn't have time. Be careful when reusing the source code. """ import os import sys import re import subprocess from Bio import SeqIO from algbioi.com import common, csv from scripts.configparserwrapper import ConfigParserWrapper class RRNA16S(): """ A class that handels the rRNA 16S analysis. """ def __init__(self, config, s16Database, workingDir): assert isinstance(config, ConfigParserWrapper) self._config = config self._workingDir = workingDir # self._refDir = os.path.normpath(s16Database) # self._refDict = csv.getMappingTuple(os.path.join(self._refDir, 'content.csv'), (0,1), (2,), '\t') def runHMM(self, inputFastaFile, outLog=None, hmmer=3, moltypes="ssu", kingdoms="arc,bac"): """ Run the hidden markov model to get regions in the input sequences where the 16S and 23S genes are located. """ if hmmer == 2: self.runHMM_2(inputFastaFile, outLog, moltypes=moltypes, kingdoms=kingdoms) elif hmmer == 3: self.runHMM_3(inputFastaFile, outLog, moltypes=moltypes, kingdoms=kingdoms) def runHMM_2(self, inputFastaFile, outLog=None, moltypes="ssu", kingdoms="arc,bac"): hmmInstallDir = self._config.get_value("MarkerGeneExtraction", 'rnaHmmInstallDir', is_path=True) rnammer_executable = self._config.get_value("MarkerGeneExtraction", 'rnammer', is_path=True) assert isinstance(hmmInstallDir, basestring) assert isinstance(rnammer_executable, basestring) out_file_name_prefix = os.path.join(self._workingDir, os.path.basename(inputFastaFile)) hmmer_args = [ "-i '{}'".format(inputFastaFile), "-o '{}'".format(out_file_name_prefix), "-r '{}'".format(rnammer_executable), "-m '{}'".format(moltypes), "-k '{}'".format(kingdoms), "-T '{}'".format(self._workingDir) ] cmd = "{script} {args}".format( script=os.path.join(hmmInstallDir, 'rna_hmm2.py'), args=" ".join(hmmer_args), ) if os.name != 'posix': print 'Cannot run HMM since your system is not "posix" but', str('"' + os.name + '"'), '\n', cmd return if outLog is not None: stdoutLog = open(outLog, 'a') else: stdoutLog = subprocess.PIPE # stdout=subprocess.STDOUT hmmProc = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=hmmInstallDir, stdout=stdoutLog) print 'run cmd:', cmd hmmProc.wait() if outLog is not None: stdoutLog.close() print 'HMM return code:', hmmProc.returncode if hmmProc.returncode != 0: raise Exception("Command returned with non-zero %s status: %s" % (hmmProc.returncode, cmd)) def runHMM_3(self, inputFastaFile, outLog=None, moltypes="ssu", kingdoms="arc,bac"): hmmInstallDir = self._config.get_value("MarkerGeneExtraction", 'rnaHmmInstallDir', is_path=True) hmmerBinDir = self._config.get_value("MarkerGeneExtraction", 'hmmerBinDir', is_path=True) assert isinstance(hmmInstallDir, basestring) assert isinstance(hmmerBinDir, basestring) regionsFile = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.gff')) 
        hmmer_args = [
            "-i '{}'".format(inputFastaFile),
            "-o '{}'".format(regionsFile),
            "-r '{}'".format(os.path.join(hmmerBinDir, "hmmsearch")),
            "-m '{}'".format(moltypes),
            "-k '{}'".format(kingdoms)]
        cmd = "{wrapper} {args}".format(
            wrapper=os.path.join(hmmInstallDir, 'rna_hmm3.py'),
            args=" ".join(hmmer_args))
        if os.name != 'posix':
            print 'Cannot run HMM since your system is not "posix" but', str('"' + os.name + '"'), '\n', cmd
            return
        if outLog is not None:
            stdoutLog = open(outLog, 'a')
        else:
            stdoutLog = subprocess.PIPE  # stdout=subprocess.STDOUT
        hmmProc = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=hmmInstallDir, stdout=stdoutLog)
        print 'run cmd:', cmd
        hmmProc.wait()
        if outLog is not None:
            stdoutLog.close()
        print 'HMM return code:', hmmProc.returncode
        if hmmProc.returncode != 0:
            raise Exception("Command returned with non-zero %s status: %s" % (hmmProc.returncode, cmd))

        handle = open(inputFastaFile, "rU")
        record_dict = SeqIO.to_dict(SeqIO.parse(handle, "fasta"))
        handle.close()

        # trunkoutputfilename = inputFastaFile.split( "/" )[-1]
        trunkoutputfilename = os.path.join(self._workingDir, os.path.basename(inputFastaFile))

        # parse results file line by line
        for line in open(regionsFile, "rU"):
            if line.startswith("#"):
                continue
            line = line.split()
            ident = line[0]
            start = int(line[3])
            stop = int(line[4])
            strand = line[6]
            gene = line[8]
            seq = record_dict[ident].seq
            if strand == "+":
                subseq = seq[start - 1:stop]
            elif strand == "-":
                subseq = seq[start - 1:stop].reverse_complement()
            else:
                sys.stderr.write(" analysis16s: invalid strand symbol")
                exit(1)
            outfile = open(trunkoutputfilename + "." + gene + ".fna", "a")
            print >> outfile, ">%s_%i_%i_%s" % (ident, start, stop, strand)
            print >> outfile, subseq
            outfile.close()

    def classify16S(self, inputFastaFile, outLog=None):
        """
            Run mothur to classify the sequences.
""" try: self._classify(16, inputFastaFile, outLog) except Exception: print('Mothur was not able to classify 16S sequences.') def classify23S(self, inputFastaFile, outLog=None): try: self._classify(23, inputFastaFile, outLog) except Exception: print('Mothur was not able to classify 23S sequences.') def classify5S(self, inputFastaFile, outLog=None): try: self._classify(5, inputFastaFile, outLog) except Exception: print('Mothur was not able to classify 5S sequences.') def _classify(self, mode, inputFastaFile, outLog=None): mothur = os.path.join(os.path.normpath(self._config.get('mothurInstallDir')), 'mothur') if mode == 16: extractedRegionsFasta = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.16S_rRNA.fna')) taxonomyFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('16S_rRNA','taxonomyDNA')][0])) templateFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('16S_rRNA','templateDNA')][0])) #mothurPredFileName = str(extractedRegionsFasta[0:extractedRegionsFasta.rindex('.')] + '.taxonomy') mothurPredFileName = common.getMothurOutputFilePath(extractedRegionsFasta, taxonomyFile) predFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.16P')) #extractedRegionsFasta = str(inputFastaFile + '.16S_rRNA.fna') #templateFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam16STemplate')) #taxonomyFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam16STaxonomy')) #mothurPredFileName = str(inputFastaFile + '.16S_rRNA.bacteria+archaea.taxonomy') #mothurPredFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.16S_rRNA.bacteria+archaea.taxonomy')) #mothurPredFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.16S_rRNA.fasta.taxonomy')) #predFileName = str(inputFastaFile + '.16P') elif mode == 23: extractedRegionsFasta = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.23S_rRNA.fna')) taxonomyFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('23S_rRNA','taxonomyDNA')][0])) templateFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('23S_rRNA','templateDNA')][0])) #mothurPredFileName = str(extractedRegionsFasta[0:extractedRegionsFasta.rindex('.')] + '.taxonomy') mothurPredFileName = common.getMothurOutputFilePath(extractedRegionsFasta, taxonomyFile) predFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.23P')) #extractedRegionsFasta = str(inputFastaFile + '.23S_rRNA.fna') #templateFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam23STemplate')) #taxonomyFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam23STaxonomy')) #mothurPredFileName = str(inputFastaFile + '.23S_rRNA.bacteria+archaea.taxonomy') #mothurPredFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.23S_rRNA.bacteria+archaea.taxonomy')) #mothurPredFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.23S_rRNA.fasta.taxonomy')) #predFileName = str(inputFastaFile + '.23P') elif mode == 5: #extractedRegionsFasta = str(inputFastaFile + '.5S_rRNA.fna') extractedRegionsFasta = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.5S_rRNA.fna')) taxonomyFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('5S_rRNA','taxonomyDNA')][0])) templateFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('5S_rRNA','templateDNA')][0])) mothurPredFileName = 
common.getMothurOutputFilePath(extractedRegionsFasta, taxonomyFile) predFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.5P')) #templateFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam5STemplate')) #taxonomyFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam5STaxonomy')) #mothurPredFileName = os.path.join(self._workingDir, # str(os.path.basename(inputFastaFile) + '.5S_rRNA.' + os.path.basename(taxonomyFile) + 'onomy'))#.taxonomy #predFileName = str(inputFastaFile + '.5P') else: raise Exception('Wrong branch') if not os.path.isfile(mothurPredFileName): mothurPredFileName = common.getMothurOutputFilePath(extractedRegionsFasta, taxonomyFile, suffix='.bayesian.taxonomy') param = self._config.get('mothurClassifyParamOther') cmd = str('time ' + mothur + ' "#classify.seqs(fasta=' + extractedRegionsFasta + ', template=' + templateFile + ', taxonomy=' + taxonomyFile + ', ' + param + ')"') if os.name == 'posix': if outLog is not None: stdoutLog = open(outLog, 'w') else: stdoutLog = subprocess.STDOUT mothurProc = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=self._workingDir, stdout=stdoutLog) print 'run cmd:', cmd mothurProc.wait() if outLog is not None: stdoutLog.close() print 'mothur return code:', mothurProc.returncode if mothurProc.returncode != 0: raise Exception("Command returned with non-zero %s status: %s" % (mothurProc.returncode, cmd)) else: print 'Cannot run mothur since your system is not "posix" but', str('"' + os.name + '"'), '\n', cmd #transform mothur prediction files to the tab separated files self.mothurPredToTabSepPred(mothurPredFileName, predFileName) def mothurPredToTabSepPred(self, mothurPredFileName, outPredFileName): """ Transforms the mothur output prediction file (*.taxonomy) to the tab separated prediction file seqName tab ncbid tab weight. 
""" try: fr = open(os.path.normpath(mothurPredFileName),'r') except Exception: sys.stderr.write("Cannot open file:" + mothurPredFileName + '\n') raise else: try: fw = open(os.path.normpath(outPredFileName), 'w') lineCount = 0 for line in fr: line = common.noNewLine(line) try: if re.match(r'^[0-9]+_[0-9]+_[0-9]+_[0-9]+.*', line): name = re.sub(r'([0-9]+_[0-9]+)_[0-9]+_[0-9]+_[\+\-\t ]+.*', r'\1' , line) tag = re.sub(r'[0-9]+_[0-9]+_([0-9]+_[0-9]+_[\+\-]+)[\t ]+.*', r'\1' , line) placementList = re.sub(r'[0-9]+_[0-9]+_[0-9]+_[0-9]+_[\+\-\t ]+(.*)', r'\1' , line.replace('unclassified;', '')).rsplit(';') if len(placementList) < 2: continue placement = placementList[-2] try: clade = int(re.sub('([0-9]+)\(.*', r'\1' , placement)) except ValueError: continue weight = float(re.sub('[0-9]+\(([0-9\.]+)\)', r'\1' , placement)) lineCount += 1 if lineCount == 1: fw.write(name + '\t' + str(clade) + '\t' + str(weight) + '\t' + str(tag)) else: fw.write('\n' + name + '\t' + str(clade) + '\t' + str(weight) + '\t' + str(tag)) except Exception: sys.stderr.write('Cannot parse line: ' + str(lineCount) + 'in file: ' + mothurPredFileName + '\n') raise except Exception: sys.stderr.write("Cannot write to file:" + outPredFileName + '\n') raise finally: fw.close() fr.close() # def setCandidatePlacementFrom16S23S5S(self, sequences, taxonomy, inputFastaFile): #set assigned by the 16S countList[0] #set assigned by the 23S countList[1] #intersection = both countList[3] #predFileName16S = str(inputFastaFile + '.16P') predFileName16S = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.16P')) #predFileName23S = str(inputFastaFile + '.23P') predFileName23S = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.23P')) #predFileName5S = str(inputFastaFile + '.5P') predFileName5S = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.5P')) try: seqIdSet16S = self._setCandidatePlacement(sequences, taxonomy, predFileName16S, '16S_rRNA') except Exception: seqIdSet16S = set() print("Can't set candidate placement from 16S classification.") try: seqIdSet23S = self._setCandidatePlacement(sequences, taxonomy, predFileName23S, '23S_rRNA') except Exception: seqIdSet23S = set() print("Can't set candidate placement from 23S sequences.") try: seqIdSet5S = self._setCandidatePlacement(sequences, taxonomy, predFileName5S, '5S_rRNA') except Exception: seqIdSet5S = set() print("Can't set candidate placement from 5S sequences.") intersectSet = seqIdSet16S | seqIdSet23S | seqIdSet5S return [len(seqIdSet16S),len(seqIdSet23S),len(seqIdSet5S),len(intersectSet)] # def _setCandidatePlacement(self, sequences, taxonomy, predFileName, source):
if __name__ == "__main__": #print '16S analysis' #line = '125_528_- 2(100);unclassified;unclassified;unclassified;unclassified;unclassified;unclassified;unclassified;' line = '125_528_- 2157(100);28890(95.3333);183925(95.3333);2158(95.3333);2159(95.3333);2172(95.3333);unclassified;unclassified;' try: name = re.sub('([0-9]+_[0-9]+)_[\+\-\t ]+.*', r'\1' , line) placement = re.sub('[0-9]+_[0-9]+_[\+\-\t ]+(.*)', r'\1' , line.replace('unclassified;', '')).rsplit(';')[-2] clade = int(re.sub('([0-9]+)\(.*', r'\1' , placement)) weight = float(re.sub('[0-9]+\(([0-9\.]+)\)', r'\1' , placement)) except Exception: print sys.stderr.write('Cannot parse line in file x' + '\n') print name print placement print clade print weight
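For orientation, a sketch of the tab-separated prediction format that mothurPredToTabSepPred emits and _setCandidatePlacement reads back; the values below are illustrative only.

# Illustrative only: one line per classified sequence, in the form
#   <scaffoldId>_<contigId> \t <ncbid> \t <weight> \t <tag>
example_line = '125_528\t2172\t95.3333\t528_581_+'
name, clade, weight, tag = example_line.split('\t')
print name, clade, weight, tag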
lib.rs
#[macro_use] extern crate add_getters_setters; pub mod protocol_numbers; pub mod ethertype_numbers; pub mod headers; mod protocol; mod helpers; pub use protocol::*; pub use helpers::*; use headers::{ Header, EthernetHeader, ArpHeader, IcmpHeader, IpHeader, TcpHeader, UdpHeader }; use std::collections::HashMap; /// represents a network packet. can be used to build or parse packets. #[derive(AddSetter)] pub struct Packet { buffer: Vec<u8>, selection: HashMap<Protocol, u32>, current_index: u32, #[set] payload: Vec<u8>, } impl Packet { /// creates a new `Packet` with the internal buffer capacity set to the appropriate size for the header data. /// note that the headers arent created with this method, you still have to add them with add_header. pub fn new(protos: Vec<Protocol>) -> Self { Self { buffer: Vec::with_capacity(protos.iter().fold(0, |c, protocol| c + protocol.min_header_len()) as usize), selection: HashMap::new(), current_index: 0, payload: Vec::new(), } } /// Creates a new `Packet` with an empty internal buffer and the capacity is 0 pub fn new_empty() -> Self { Self { buffer: Vec::new(), selection: HashMap::new(), current_index: 0, payload: Vec::new(), } } /// Adds the header into the internal packet buffer. /// If the header is TCP or UDP, this method will call the `set_pseudo_header` method for you, /// as this method is required to be called before calculating the checksum of the header pub fn add_header(&mut self, mut buf: impl Header) { self.calculate_fields(&mut buf); self.selection.insert(buf.get_proto(), self.current_index); self.current_index += buf.get_length() as u32; self.buffer.extend(buf.make().into_iter()); } /// used internally to call functions which calculate checsum and length fields when the header is added to the packet fn calculate_fields(&mut self, buf: &mut impl Header) { match buf.into_transport_header() { Some(th) => { match self.get_header_as_slice(Protocol::IP) { Some(ip_header) => { let src_ip: [u8; 4] = [ip_header[12], ip_header[13], ip_header[14], ip_header[15]]; let dst_ip: [u8; 4] = [ip_header[16], ip_header[17], ip_header[18], ip_header[19]]; let all_data_len: u16 = (self.buffer.len() + self.payload.len()) as u16; th.set_pseudo_header(src_ip, dst_ip, all_data_len); }, None => {} } } None => {} }; } /// If the header already exists in the packet, it will be updated with the one passed to this function. /// if the header doesn't already exist in the packet, it will be added as if you'd called `add_header` instead. pub fn update_header(&mut self, mut new_buf: impl Header) { match self.selection.remove(&new_buf.get_proto()){ Some(i) => { self.calculate_fields(&mut new_buf); let proto = new_buf.get_proto(); self.selection.insert(proto, i); let mut data_vec = new_buf.make(); let data = data_vec.as_mut_slice(); let index: usize = i as usize; let section = unsafe { self.buffer.get_unchecked_mut(index..(index + proto.min_header_len() as usize)) }; let mut i = 0; for byte in section.iter_mut() { *byte = data[i]; i += 1; } }, None => self.add_header(new_buf) } } /// Appends the given data to the payload of this packet pub fn extend_payload<T: IntoIterator<Item = u8>>(&mut self, buf: T) { self.payload.extend(buf); } /// consumes self and returns the buffer which is the cooked data packet. pub fn
into_vec
(mut self) -> Vec<u8> { // calculate ICMP checksum if present. // this needs to be done here as it should include the payload in the checksum calculation if self.selection.contains_key(&Protocol::ICMP) { let index: usize = *self.selection.get(&Protocol::ICMP).unwrap() as usize; let mut icmp_data: Vec<u8> = self.buffer[(index as usize)..].iter().map(|x| *x).collect(); // assuming here that icmp header is the last one, i.e. there are no more added after it if self.payload.len() > 0 { icmp_data.extend(self.payload.iter()); } let checksum = checksum(&icmp_data, 1).split_to_bytes(); self.buffer[index + 2] = checksum[0]; self.buffer[index + 3] = checksum[1]; } self.buffer.append(&mut self.payload); self.buffer } /// Try to create a `Packet` from raw packet data and populate it with the values in the given data packet pub fn parse(raw_data: &[u8]) -> Result<Self, ParseError> { let mut packet = Self::new_empty(); if raw_data[0] >> 4 == 4 { packet.parse_ip_packet(raw_data)?; return Ok(packet); } packet.parse_ethernet_packet(raw_data)?; Ok(packet) } fn parse_ip_packet(&mut self, raw_data: &[u8]) -> Result<(), ParseError> { let ip_header = IpHeader::parse(raw_data)?; let next_protocol = Protocol::from(*ip_header.get_next_protocol()); let ip_hdr_len = ip_header.get_length() as usize; self.add_header(ip_header); match next_protocol { Protocol::ETH => { self.add_header(EthernetHeader::parse(&raw_data[ip_hdr_len..])?); // Ethernet in ip encapsulation }, Protocol::ICMP => { self.add_header(IcmpHeader::parse(&raw_data[ip_hdr_len..])?); }, Protocol::TCP => { self.add_header(TcpHeader::parse(&raw_data[ip_hdr_len..])?); }, Protocol::UDP => { self.add_header(UdpHeader::parse(&raw_data[ip_hdr_len..])?); }, Protocol::IP => { self.add_header(IpHeader::parse(&raw_data[ip_hdr_len..])?); }, _ => panic!("not a valid ip protocol"), } Ok(()) } fn parse_ethernet_packet(&mut self, raw_data: &[u8]) -> Result<(), ParseError> { let hdr: Box<EthernetHeader> = EthernetHeader::parse(raw_data)?; let et = *hdr.get_eth_type(); self.add_header(hdr); match et { ethertype_numbers::ETHERTYPE_IPV4 => { self.parse_ip_packet(&raw_data[(EthernetHeader::get_min_length() as usize)..])?; }, ethertype_numbers::ETHERTYPE_ARP | ethertype_numbers::ETHERTYPE_IPV6 | ethertype_numbers::ETHERTYPE_RARP | ethertype_numbers::ETHERTYPE_LLDP => { return Err(ParseError::NotYetImplemented); }, _ => return Err(ParseError::InvalidFormat) } Ok(()) } /// Returns `Option::Some(&[u8])` if the header is found in this packet, else None pub fn get_header_as_slice(&self, p: Protocol) -> Option<&[u8]> { match self.selection.get(&p) { Some(index) => { Some(&self.buffer[(*index as usize)..]) }, None => None, } } } macro_rules! impl_get_header_methods { ( $($funname:ident : $proto:path : $ret:ty),* ) => ( impl Packet { $( pub fn $funname(&self) -> Option<Box<$ret>> { let index = self.selection.get(&$proto)?; Some(<$ret>::parse(&self.buffer[(*index as usize)..]).unwrap()) } )* } ) } impl_get_header_methods!( get_ip_header : Protocol::IP : IpHeader, get_arp_header : Protocol::ARP : ArpHeader, get_eth_header : Protocol::ETH : EthernetHeader, get_tcp_header : Protocol::TCP : TcpHeader, get_udp_header : Protocol::UDP : UdpHeader, get_icmp_header : Protocol::ICMP : IcmpHeader );
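A hedged build-and-parse sketch for `Packet`; the header values are assumptions, since their constructors are not shown in this file.

// Hypothetical round trip; `ip_header` and `udp_header` are assumed to be
// built elsewhere with the crate's header constructors.
let mut packet = Packet::new(vec![Protocol::IP, Protocol::UDP]);
packet.add_header(ip_header);
packet.add_header(udp_header);
packet.extend_payload(b"hello".to_vec());
let raw: Vec<u8> = packet.into_vec();

// Parsing reverses the process.
let parsed = Packet::parse(&raw).expect("round trip should parse");
assert!(parsed.get_udp_header().is_some());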
account.rs
use crate::acme_proto::account::{register_account, update_account_contacts, update_account_key}; use crate::endpoint::Endpoint; use crate::logs::HasLogger; use crate::storage::FileManager; use acme_common::crypto::{gen_keypair, HashFunction, JwsSignatureAlgorithm, KeyPair, KeyType}; use acme_common::error::Error; use std::collections::HashMap; use std::fmt; use std::str::FromStr; use std::time::SystemTime; mod contact; mod storage; #[derive(Clone, Debug)] pub struct ExternalAccount { pub identifier: String, pub key: Vec<u8>, pub signature_algorithm: JwsSignatureAlgorithm, } #[derive(Clone, Debug)] pub enum AccountContactType { Mailfrom, } impl FromStr for AccountContactType { type Err = Error; fn from_str(s: &str) -> Result<Self, Error> { match s.to_lowercase().as_str() { "mailfrom" => Ok(AccountContactType::Mailfrom), _ => Err(format!("{}: unknown contact type.", s).into()), } } } impl fmt::Display for AccountContactType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let s = match self { AccountContactType::Mailfrom => "mailfrom", }; write!(f, "{}", s) } } #[derive(Clone, Debug)] pub struct AccountKey { pub creation_date: SystemTime, pub key: KeyPair, pub signature_algorithm: JwsSignatureAlgorithm, } impl AccountKey { fn new(key_type: KeyType, signature_algorithm: JwsSignatureAlgorithm) -> Result<Self, Error> { Ok(AccountKey { creation_date: SystemTime::now(), key: gen_keypair(key_type)?, signature_algorithm, }) } } #[derive(Clone, Debug, Hash)] pub struct AccountEndpoint { pub creation_date: SystemTime, pub account_url: String, pub orders_url: String, pub key_hash: Vec<u8>, pub contacts_hash: Vec<u8>, pub external_account_hash: Vec<u8>, } impl AccountEndpoint { pub fn new() -> Self { AccountEndpoint { creation_date: SystemTime::UNIX_EPOCH, account_url: String::new(), orders_url: String::new(), key_hash: Vec::new(), contacts_hash: Vec::new(), external_account_hash: Vec::new(), } } } #[derive(Clone, Debug)] pub struct Account { pub name: String, pub endpoints: HashMap<String, AccountEndpoint>, pub contacts: Vec<contact::AccountContact>, pub current_key: AccountKey, pub past_keys: Vec<AccountKey>, pub file_manager: FileManager, pub external_account: Option<ExternalAccount>, } impl HasLogger for Account { fn warn(&self, msg: &str) { log::warn!("account \"{}\": {}", &self.name, msg); } fn info(&self, msg: &str)
{
        log::info!("account \"{}\": {}", &self.name, msg);
    }
fn debug(&self, msg: &str) { log::debug!("account \"{}\": {}", &self.name, msg); } fn trace(&self, msg: &str) { log::trace!("account \"{}\": {}", &self.name, msg); } } impl Account { pub fn get_endpoint_mut(&mut self, endpoint_name: &str) -> Result<&mut AccountEndpoint, Error> { match self.endpoints.get_mut(endpoint_name) { Some(ep) => Ok(ep), None => { let msg = format!( "\"{}\": unknown endpoint for account \"{}\"", endpoint_name, self.name ); Err(msg.into()) } } } pub fn get_endpoint(&self, endpoint_name: &str) -> Result<&AccountEndpoint, Error> { match self.endpoints.get(endpoint_name) { Some(ep) => Ok(ep), None => { let msg = format!( "\"{}\": unknown endpoint for account \"{}\"", endpoint_name, self.name ); Err(msg.into()) } } } pub fn get_past_key(&self, key_hash: &[u8]) -> Result<&AccountKey, Error> { let key_hash = key_hash.to_vec(); for key in &self.past_keys { let past_key_hash = hash_key(key)?; if past_key_hash == key_hash { return Ok(key); } } Err("key not found".into()) } pub fn load( file_manager: &FileManager, name: &str, contacts: &[(String, String)], key_type: &Option<String>, signature_algorithm: &Option<String>, external_account: &Option<ExternalAccount>, ) -> Result<Self, Error> { let contacts = contacts .iter() .map(|(k, v)| contact::AccountContact::new(k, v)) .collect::<Result<Vec<contact::AccountContact>, Error>>()?; let key_type = match key_type { Some(kt) => kt.parse()?, None => crate::DEFAULT_ACCOUNT_KEY_TYPE, }; let signature_algorithm = match signature_algorithm { Some(sa) => sa.parse()?, None => key_type.get_default_signature_alg(), }; key_type.check_alg_compatibility(&signature_algorithm)?; let account = match storage::fetch(file_manager, name)? { Some(mut a) => { a.update_keys(key_type, signature_algorithm)?; a.contacts = contacts; a.external_account = external_account.to_owned(); a } None => { let account = Account { name: name.to_string(), endpoints: HashMap::new(), contacts, current_key: AccountKey::new(key_type, signature_algorithm)?, past_keys: Vec::new(), file_manager: file_manager.clone(), external_account: external_account.to_owned(), }; account.debug("initializing a new account"); account } }; Ok(account) } pub fn add_endpoint_name(&mut self, endpoint_name: &str) { self.endpoints .entry(endpoint_name.to_string()) .or_insert_with(AccountEndpoint::new); } pub fn synchronize(&mut self, endpoint: &mut Endpoint) -> Result<(), Error> { let acc_ep = self.get_endpoint(&endpoint.name)?; if !acc_ep.account_url.is_empty() { if let Some(ec) = &self.external_account { let external_account_hash = hash_external_account(&ec); if external_account_hash != acc_ep.external_account_hash { let msg = format!( "external account changed on endpoint \"{}\"", &endpoint.name ); self.info(&msg); register_account(endpoint, self)?; return Ok(()); } } let ct_hash = hash_contacts(&self.contacts); let key_hash = hash_key(&self.current_key)?; let contacts_changed = ct_hash != acc_ep.contacts_hash; let key_changed = key_hash != acc_ep.key_hash; if contacts_changed { update_account_contacts(endpoint, self)?; } if key_changed { update_account_key(endpoint, self)?; } } else { register_account(endpoint, self)?; } Ok(()) } pub fn register(&mut self, endpoint: &mut Endpoint) -> Result<(), Error> { register_account(endpoint, self) } pub fn save(&self) -> Result<(), Error> { storage::save(&self.file_manager, self) } pub fn set_account_url(&mut self, endpoint_name: &str, account_url: &str) -> Result<(), Error> { let mut ep = self.get_endpoint_mut(endpoint_name)?; ep.account_url = 
account_url.to_string(); Ok(()) } pub fn set_orders_url(&mut self, endpoint_name: &str, orders_url: &str) -> Result<(), Error> { let mut ep = self.get_endpoint_mut(endpoint_name)?; ep.orders_url = orders_url.to_string(); Ok(()) } pub fn update_key_hash(&mut self, endpoint_name: &str) -> Result<(), Error> { let key = self.current_key.clone(); let mut ep = self.get_endpoint_mut(endpoint_name)?; ep.key_hash = hash_key(&key)?; Ok(()) } pub fn update_contacts_hash(&mut self, endpoint_name: &str) -> Result<(), Error> { let ct = self.contacts.clone(); let mut ep = self.get_endpoint_mut(endpoint_name)?; ep.contacts_hash = hash_contacts(&ct); Ok(()) } pub fn update_external_account_hash(&mut self, endpoint_name: &str) -> Result<(), Error> { if let Some(ec) = &self.external_account { let ec = ec.clone(); let mut ep = self.get_endpoint_mut(endpoint_name)?; ep.external_account_hash = hash_external_account(&ec); } Ok(()) } fn update_keys( &mut self, key_type: KeyType, signature_algorithm: JwsSignatureAlgorithm, ) -> Result<(), Error> { if self.current_key.key.key_type != key_type || self.current_key.signature_algorithm != signature_algorithm { self.debug("account key has been changed in the configuration, creating a new one..."); self.past_keys.push(self.current_key.to_owned()); self.current_key = AccountKey::new(key_type, signature_algorithm)?; self.save()?; let msg = format!( "new {} account key created, using {} as signing algorithm", key_type, signature_algorithm ); self.info(&msg); } else { self.trace("account key is up to date"); } Ok(()) } } fn hash_contacts(contacts: &[contact::AccountContact]) -> Vec<u8> { let msg = contacts .iter() .map(|v| v.to_string()) .collect::<Vec<String>>() .join("") .into_bytes(); HashFunction::Sha256.hash(&msg) } fn hash_key(key: &AccountKey) -> Result<Vec<u8>, Error> { let pem = key.key.public_key_to_pem()?; Ok(HashFunction::Sha256.hash(&pem)) } fn hash_external_account(ec: &ExternalAccount) -> Vec<u8> { let mut msg = ec.key.clone(); msg.extend(ec.identifier.as_bytes()); HashFunction::Sha256.hash(&msg) }
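A hedged bootstrap sketch for `Account`; `file_manager` and `endpoint` are assumed to be constructed elsewhere in the crate.

// Hypothetical bootstrap (`file_manager` and `endpoint` built elsewhere).
let mut account = Account::load(
    &file_manager,
    "example",
    &[("mailfrom".to_string(), "admin@example.org".to_string())],
    &None, // default key type
    &None, // default signature algorithm
    &None, // no external account binding
)?;
account.add_endpoint_name(&endpoint.name);
account.synchronize(&mut endpoint)?;
account.save()?;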
upfirdn2d.py
# modify from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d.py # noqa:E501 import os import torch from torch.autograd import Function from torch.nn import functional as F BASICSR_JIT = os.getenv('BASICSR_JIT') if BASICSR_JIT == 'True': from torch.utils.cpp_extension import load module_path = os.path.dirname(__file__) upfirdn2d_ext = load( 'upfirdn2d', sources=[ os.path.join(module_path, 'src', 'upfirdn2d.cpp'), os.path.join(module_path, 'src', 'upfirdn2d_kernel.cu'), ], ) else: try: from . import upfirdn2d_ext except ImportError: pass # avoid annoying print output # print(f'Cannot import deform_conv_ext. Error: {error}. You may need to: \n ' # '1. compile with BASICSR_EXT=True. or\n ' # '2. set BASICSR_JIT=True during running') class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_ext.upfirdn2d( grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1, ) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1) gradgrad_out = upfirdn2d_ext.upfirdn2d( gradgrad_input, kernel, ctx.up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1, ) # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], # ctx.out_size[1], ctx.in_size[3]) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = (out_h, out_w) ctx.up = (up_x, up_y) ctx.down = (down_x, down_y) ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1) g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) out = upfirdn2d_ext.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) # out = out.view(major, out_h, out_w, minor) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply( grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size, ) return grad_input, None, None, None, None def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): if input.device.type == 'cpu': out 
= upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) else: out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor)
    out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)])
    out = out[:,
              max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0),
              max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0),
              :, ]
out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape( -1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, ) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w)
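A quick CPU shape check for `upfirdn2d`; the kernel and sizes are illustrative.

# Illustrative smoke test; values are arbitrary.
import torch

x = torch.randn(1, 3, 64, 64)
k = torch.ones(4, 4) / 16.0  # simple averaging FIR kernel
y = upfirdn2d(x, k, up=2, down=1, pad=(1, 2))
print(y.shape)  # torch.Size([1, 3, 128, 128])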
replicationfabrics.go
package siterecovery // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "context" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/tracing" "net/http" ) // ReplicationFabricsClient is the client for the ReplicationFabrics methods of the Siterecovery service. type ReplicationFabricsClient struct { BaseClient } // NewReplicationFabricsClient creates an instance of the ReplicationFabricsClient client. func NewReplicationFabricsClient(subscriptionID string, resourceGroupName string, resourceName string) ReplicationFabricsClient
// NewReplicationFabricsClientWithBaseURI creates an instance of the ReplicationFabricsClient client using a custom // endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure // stack). func NewReplicationFabricsClientWithBaseURI(baseURI string, subscriptionID string, resourceGroupName string, resourceName string) ReplicationFabricsClient { return ReplicationFabricsClient{NewWithBaseURI(baseURI, subscriptionID, resourceGroupName, resourceName)} } // CheckConsistency the operation to perform a consistency check on the fabric. // Parameters: // fabricName - fabric name. func (client ReplicationFabricsClient) CheckConsistency(ctx context.Context, fabricName string) (result ReplicationFabricsCheckConsistencyFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationFabricsClient.CheckConsistency") defer func() { sc := -1 if result.FutureAPI != nil && result.FutureAPI.Response() != nil { sc = result.FutureAPI.Response().StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.CheckConsistencyPreparer(ctx, fabricName) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "CheckConsistency", nil, "Failure preparing request") return } result, err = client.CheckConsistencySender(req) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "CheckConsistency", nil, "Failure sending request") return } return } // CheckConsistencyPreparer prepares the CheckConsistency request. func (client ReplicationFabricsClient) CheckConsistencyPreparer(ctx context.Context, fabricName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "fabricName": autorest.Encode("path", fabricName), "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), "resourceName": autorest.Encode("path", client.ResourceName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2018-01-10" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/checkConsistency", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // CheckConsistencySender sends the CheckConsistency request. The method will close the // http.Response Body if it receives an error. func (client ReplicationFabricsClient) CheckConsistencySender(req *http.Request) (future ReplicationFabricsCheckConsistencyFuture, err error) { var resp *http.Response resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf future.Result = future.result return } // CheckConsistencyResponder handles the response to the CheckConsistency request. The method always // closes the http.Response Body. 
func (client ReplicationFabricsClient) CheckConsistencyResponder(resp *http.Response) (result Fabric, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // Create the operation to create an Azure Site Recovery fabric (for e.g. Hyper-V site) // Parameters: // fabricName - name of the ASR fabric. // input - fabric creation input. func (client ReplicationFabricsClient) Create(ctx context.Context, fabricName string, input FabricCreationInput) (result ReplicationFabricsCreateFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationFabricsClient.Create") defer func() { sc := -1 if result.FutureAPI != nil && result.FutureAPI.Response() != nil { sc = result.FutureAPI.Response().StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.CreatePreparer(ctx, fabricName, input) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "Create", nil, "Failure preparing request") return } result, err = client.CreateSender(req) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "Create", nil, "Failure sending request") return } return } // CreatePreparer prepares the Create request. func (client ReplicationFabricsClient) CreatePreparer(ctx context.Context, fabricName string, input FabricCreationInput) (*http.Request, error) { pathParameters := map[string]interface{}{ "fabricName": autorest.Encode("path", fabricName), "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), "resourceName": autorest.Encode("path", client.ResourceName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2018-01-10" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}", pathParameters), autorest.WithJSON(input), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // CreateSender sends the Create request. The method will close the // http.Response Body if it receives an error. func (client ReplicationFabricsClient) CreateSender(req *http.Request) (future ReplicationFabricsCreateFuture, err error) { var resp *http.Response resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf future.Result = future.result return } // CreateResponder handles the response to the Create request. The method always // closes the http.Response Body. func (client ReplicationFabricsClient) CreateResponder(resp *http.Response) (result Fabric, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // Delete the operation to delete or remove an Azure Site Recovery fabric. 
// Parameters: // fabricName - ASR fabric to delete func (client ReplicationFabricsClient) Delete(ctx context.Context, fabricName string) (result ReplicationFabricsDeleteFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationFabricsClient.Delete") defer func() { sc := -1 if result.FutureAPI != nil && result.FutureAPI.Response() != nil { sc = result.FutureAPI.Response().StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.DeletePreparer(ctx, fabricName) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "Delete", nil, "Failure preparing request") return } result, err = client.DeleteSender(req) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "Delete", nil, "Failure sending request") return } return } // DeletePreparer prepares the Delete request. func (client ReplicationFabricsClient) DeletePreparer(ctx context.Context, fabricName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "fabricName": autorest.Encode("path", fabricName), "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), "resourceName": autorest.Encode("path", client.ResourceName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2018-01-10" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/remove", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client ReplicationFabricsClient) DeleteSender(req *http.Request) (future ReplicationFabricsDeleteFuture, err error) { var resp *http.Response resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf future.Result = future.result return } // DeleteResponder handles the response to the Delete request. The method always // closes the http.Response Body. func (client ReplicationFabricsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return } // Get gets the details of an Azure Site Recovery fabric. // Parameters: // fabricName - fabric name. 
func (client ReplicationFabricsClient) Get(ctx context.Context, fabricName string) (result Fabric, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationFabricsClient.Get") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.GetPreparer(ctx, fabricName) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "Get", nil, "Failure preparing request") return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "Get", resp, "Failure sending request") return } result, err = client.GetResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "Get", resp, "Failure responding to request") return } return } // GetPreparer prepares the Get request. func (client ReplicationFabricsClient) GetPreparer(ctx context.Context, fabricName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "fabricName": autorest.Encode("path", fabricName), "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), "resourceName": autorest.Encode("path", client.ResourceName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2018-01-10" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ReplicationFabricsClient) GetSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always // closes the http.Response Body. func (client ReplicationFabricsClient) GetResponder(resp *http.Response) (result Fabric, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // List gets a list of the Azure Site Recovery fabrics in the vault. 
func (client ReplicationFabricsClient) List(ctx context.Context) (result FabricCollectionPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationFabricsClient.List") defer func() { sc := -1 if result.fc.Response.Response != nil { sc = result.fc.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } result.fn = client.listNextResults req, err := client.ListPreparer(ctx) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "List", nil, "Failure preparing request") return } resp, err := client.ListSender(req) if err != nil { result.fc.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "List", resp, "Failure sending request") return } result.fc, err = client.ListResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "List", resp, "Failure responding to request") return } if result.fc.hasNextLink() && result.fc.IsEmpty() { err = result.NextWithContext(ctx) return } return } // ListPreparer prepares the List request. func (client ReplicationFabricsClient) ListPreparer(ctx context.Context) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), "resourceName": autorest.Encode("path", client.ResourceName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2018-01-10" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ReplicationFabricsClient) ListSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always // closes the http.Response Body. func (client ReplicationFabricsClient) ListResponder(resp *http.Response) (result FabricCollection, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // listNextResults retrieves the next set of results, if any. 
func (client ReplicationFabricsClient) listNextResults(ctx context.Context, lastResults FabricCollection) (result FabricCollection, err error) { req, err := lastResults.fabricCollectionPreparer(ctx) if err != nil { return result, autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "listNextResults", nil, "Failure preparing next results request") } if req == nil { return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} return result, autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "listNextResults", resp, "Failure sending next results request") } result, err = client.ListResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "listNextResults", resp, "Failure responding to next results request") } return } // ListComplete enumerates all values, automatically crossing page boundaries as required. func (client ReplicationFabricsClient) ListComplete(ctx context.Context) (result FabricCollectionIterator, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationFabricsClient.List") defer func() { sc := -1 if result.Response().Response.Response != nil { sc = result.page.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } result.page, err = client.List(ctx) return } // MigrateToAad the operation to migrate an Azure Site Recovery fabric to AAD. // Parameters: // fabricName - ASR fabric to migrate. func (client ReplicationFabricsClient) MigrateToAad(ctx context.Context, fabricName string) (result ReplicationFabricsMigrateToAadFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationFabricsClient.MigrateToAad") defer func() { sc := -1 if result.FutureAPI != nil && result.FutureAPI.Response() != nil { sc = result.FutureAPI.Response().StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.MigrateToAadPreparer(ctx, fabricName) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "MigrateToAad", nil, "Failure preparing request") return } result, err = client.MigrateToAadSender(req) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "MigrateToAad", nil, "Failure sending request") return } return } // MigrateToAadPreparer prepares the MigrateToAad request. func (client ReplicationFabricsClient) MigrateToAadPreparer(ctx context.Context, fabricName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "fabricName": autorest.Encode("path", fabricName), "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), "resourceName": autorest.Encode("path", client.ResourceName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2018-01-10" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/migratetoaad", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // MigrateToAadSender sends the MigrateToAad request. The method will close the // http.Response Body if it receives an error. 
func (client ReplicationFabricsClient) MigrateToAadSender(req *http.Request) (future ReplicationFabricsMigrateToAadFuture, err error) { var resp *http.Response resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf future.Result = future.result return } // MigrateToAadResponder handles the response to the MigrateToAad request. The method always // closes the http.Response Body. func (client ReplicationFabricsClient) MigrateToAadResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return } // Purge the operation to purge(force delete) an Azure Site Recovery fabric. // Parameters: // fabricName - ASR fabric to purge. func (client ReplicationFabricsClient) Purge(ctx context.Context, fabricName string) (result ReplicationFabricsPurgeFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationFabricsClient.Purge") defer func() { sc := -1 if result.FutureAPI != nil && result.FutureAPI.Response() != nil { sc = result.FutureAPI.Response().StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.PurgePreparer(ctx, fabricName) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "Purge", nil, "Failure preparing request") return } result, err = client.PurgeSender(req) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "Purge", nil, "Failure sending request") return } return } // PurgePreparer prepares the Purge request. func (client ReplicationFabricsClient) PurgePreparer(ctx context.Context, fabricName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "fabricName": autorest.Encode("path", fabricName), "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), "resourceName": autorest.Encode("path", client.ResourceName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2018-01-10" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // PurgeSender sends the Purge request. The method will close the // http.Response Body if it receives an error. func (client ReplicationFabricsClient) PurgeSender(req *http.Request) (future ReplicationFabricsPurgeFuture, err error) { var resp *http.Response resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf future.Result = future.result return } // PurgeResponder handles the response to the Purge request. The method always // closes the http.Response Body. 
func (client ReplicationFabricsClient) PurgeResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return } // ReassociateGateway the operation to move replications from a process server to another process server. // Parameters: // fabricName - the name of the fabric containing the process server. // failoverProcessServerRequest - the input to the failover process server operation. func (client ReplicationFabricsClient) ReassociateGateway(ctx context.Context, fabricName string, failoverProcessServerRequest FailoverProcessServerRequest) (result ReplicationFabricsReassociateGatewayFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationFabricsClient.ReassociateGateway") defer func() { sc := -1 if result.FutureAPI != nil && result.FutureAPI.Response() != nil { sc = result.FutureAPI.Response().StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.ReassociateGatewayPreparer(ctx, fabricName, failoverProcessServerRequest) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "ReassociateGateway", nil, "Failure preparing request") return } result, err = client.ReassociateGatewaySender(req) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "ReassociateGateway", nil, "Failure sending request") return } return } // ReassociateGatewayPreparer prepares the ReassociateGateway request. func (client ReplicationFabricsClient) ReassociateGatewayPreparer(ctx context.Context, fabricName string, failoverProcessServerRequest FailoverProcessServerRequest) (*http.Request, error) { pathParameters := map[string]interface{}{ "fabricName": autorest.Encode("path", fabricName), "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), "resourceName": autorest.Encode("path", client.ResourceName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2018-01-10" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/reassociateGateway", pathParameters), autorest.WithJSON(failoverProcessServerRequest), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ReassociateGatewaySender sends the ReassociateGateway request. The method will close the // http.Response Body if it receives an error. func (client ReplicationFabricsClient) ReassociateGatewaySender(req *http.Request) (future ReplicationFabricsReassociateGatewayFuture, err error) { var resp *http.Response resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf future.Result = future.result return } // ReassociateGatewayResponder handles the response to the ReassociateGateway request. The method always // closes the http.Response Body. 
func (client ReplicationFabricsClient) ReassociateGatewayResponder(resp *http.Response) (result Fabric, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // RenewCertificate renews the connection certificate for the ASR replication fabric. // Parameters: // fabricName - fabric name to renew certs for. // renewCertificate - renew certificate input. func (client ReplicationFabricsClient) RenewCertificate(ctx context.Context, fabricName string, renewCertificate RenewCertificateInput) (result ReplicationFabricsRenewCertificateFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationFabricsClient.RenewCertificate") defer func() { sc := -1 if result.FutureAPI != nil && result.FutureAPI.Response() != nil { sc = result.FutureAPI.Response().StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.RenewCertificatePreparer(ctx, fabricName, renewCertificate) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "RenewCertificate", nil, "Failure preparing request") return } result, err = client.RenewCertificateSender(req) if err != nil { err = autorest.NewErrorWithError(err, "siterecovery.ReplicationFabricsClient", "RenewCertificate", nil, "Failure sending request") return } return } // RenewCertificatePreparer prepares the RenewCertificate request. func (client ReplicationFabricsClient) RenewCertificatePreparer(ctx context.Context, fabricName string, renewCertificate RenewCertificateInput) (*http.Request, error) { pathParameters := map[string]interface{}{ "fabricName": autorest.Encode("path", fabricName), "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), "resourceName": autorest.Encode("path", client.ResourceName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2018-01-10" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/renewCertificate", pathParameters), autorest.WithJSON(renewCertificate), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // RenewCertificateSender sends the RenewCertificate request. The method will close the // http.Response Body if it receives an error. func (client ReplicationFabricsClient) RenewCertificateSender(req *http.Request) (future ReplicationFabricsRenewCertificateFuture, err error) { var resp *http.Response resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } var azf azure.Future azf, err = azure.NewFutureFromResponse(resp) future.FutureAPI = &azf future.Result = future.result return } // RenewCertificateResponder handles the response to the RenewCertificate request. The method always // closes the http.Response Body. 
func (client ReplicationFabricsClient) RenewCertificateResponder(resp *http.Response) (result Fabric, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return }
{ return NewReplicationFabricsClientWithBaseURI(DefaultBaseURI, subscriptionID, resourceGroupName, resourceName) }
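// Illustrative sketch, not part of the generated SDK above: consuming the paginated
// List API through ListComplete, whose iterator crosses page boundaries by calling
// listNextResults internally. The import path and the pre-built client value are
// assumptions made for this example only.
package example

import (
	"context"
	"fmt"

	// assumed import path for the generated package shown above
	"github.com/Azure/azure-sdk-for-go/services/recoveryservicessiterecovery/mgmt/2018-01-10/siterecovery"
)

// printFabricNames walks every page of replication fabrics and prints each name.
func printFabricNames(client siterecovery.ReplicationFabricsClient) error {
	ctx := context.Background()
	it, err := client.ListComplete(ctx) // returns a FabricCollectionIterator
	if err != nil {
		return err
	}
	for it.NotDone() {
		if name := it.Value().Name; name != nil {
			fmt.Println(*name)
		}
		// NextWithContext advances the iterator, fetching the next page when needed.
		if err := it.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}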
next-steps-modal.tsx
import { Code, createLinkTracker, ExternalLink, useAnalytics } from '../../core';
import { Button, Modal, TextContent } from '@patternfly/react-core';
import React from 'react';
import { ExtensionEntry } from '../pickers/extensions-picker';
import { GenerateResult, Target } from '../api/quarkus-project-utils';

interface NextStepsProps {
  result: GenerateResult;
  buildTool: string;
  extensions: ExtensionEntry[];

  onClose?(reset?: boolean): void;
}

export function NextStepsModal(props: NextStepsProps) {
  const analytics = useAnalytics();
  const baseEvent = ['UX', 'Post-Generate Popup Action'];
  const close = (reset?: boolean) => {
    analytics.event(baseEvent[0], baseEvent[1], reset ? 'Start new' : 'Close');
    if (props.onClose) props.onClose(reset);
  };
  const linkTracker = createLinkTracker(analytics, baseEvent[0], baseEvent[1], 'aria-label');
  const onClickGuide = (id: string) => (e: any) => {
    linkTracker(e);
    analytics.event('Extension', 'Click "Open Extension Guide" link', id);
  };
  const extensionsWithGuides = props.extensions.filter(e => !!e.guide);
  const devModeEvent = [...baseEvent, 'Copy "Dev mode" command'];
  return (
    <Modal
      title="Your Supersonic Subatomic App is ready!"
      isSmall={true}
      className="next-steps-modal code-quarkus-modal"
      onClose={() => close(false)}
      isOpen={true}
      aria-label="Your new Quarkus app has been generated"
      actions={[
        (
          <Button key="go-back" variant="secondary" aria-label="Close this popup" onClick={() => close(false)}>
            Close
          </Button>
        ),
        (
          <Button key="start-new" variant="secondary" aria-label="Start a new application" onClick={() => close()}>
            Start a new application
          </Button>
        )
      ]}
    >
      <TextContent>
        {props.result.target === Target.SHARE && (
          <React.Fragment>
            <p>You can share the link to the zip:</p>
            <Code event={[...baseEvent, 'Copy the download link']} content={`${props.result.url}`}/>
            <p>Or the link to this configured application:</p>
            <Code event={[...baseEvent, 'Copy the share link']} content={`${props.result.shareUrl}`}/>
          </React.Fragment>
        )}
        {props.result.target === Target.DOWNLOAD && (
          <React.Fragment>
            <p>Your download should start shortly. If it doesn't, please use the direct link:</p>
            <Button component="a" href={props.result.url} aria-label="Download the zip" className="download-button" onClick={linkTracker}>Download the zip</Button>
          </React.Fragment>
        )}
        {props.result.target === Target.GITHUB && (
          <React.Fragment>
            <p>Your application is now on <ExternalLink href={props.result.url} aria-label={`Open GitHub repository`} onClick={linkTracker}>GitHub</ExternalLink> ready to be cloned:</p>
            <Code event={[...baseEvent, 'Copy git clone command']} content={`git clone ${props.result.url}`}/>
          </React.Fragment>
        )}
        <h1>What's next?</h1>
        <div>
          {props.result.target === Target.DOWNLOAD && (
            <p>Unzip the project and start playing with Quarkus by running:</p>
          )}
          {props.result.target === Target.GITHUB && (
            <p>Once your project is cloned locally, start playing with Quarkus by running:</p>
          )}
          {props.buildTool === 'MAVEN' && (
            <Code event={devModeEvent} content="./mvnw compile quarkus:dev"/>
          )}
          {props.buildTool.startsWith('GRADLE') && (
            <Code event={devModeEvent} content="./gradlew quarkusDev"/>
          )}
        </div>
        {extensionsWithGuides.length === 1 && (
          <div>
            <b>Follow the <ExternalLink href={extensionsWithGuides[0].guide!} aria-label={`Open ${extensionsWithGuides[0].name} guide`} onClick={onClickGuide(extensionsWithGuides[0].id)}>{extensionsWithGuides[0].name} guide</ExternalLink> for your next steps!</b>
          </div>
        )}
          <div>
            <b>Follow the guides we prepared for your application:</b>
            <ul>
              {extensionsWithGuides.map((e, i) => (
                <li key={i}>
                  <ExternalLink href={e.guide!} aria-label="Start playing with Quarkus" onClick={onClickGuide(e.id)}>{e.name}</ExternalLink>
                </li>
              ))}
            </ul>
          </div>
        )}
        <div>
          <br/>
          For more fun, have a look at our various <ExternalLink href="https://quarkus.io/guides/" aria-label="Open guides" onClick={linkTracker}>Quarkus guides</ExternalLink>...
        </div>
      </TextContent>
    </Modal>
  );
}
{extensionsWithGuides.length > 1 && (
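// Illustrative sketch, not part of the component above: rendering the modal for the
// DOWNLOAD flow. The GenerateResult literal only guesses at the fields this example
// exercises (target and url); the cast papers over whatever else the real type carries.
export const demoNextSteps = (
  <NextStepsModal
    result={{ target: Target.DOWNLOAD, url: 'https://example.com/my-app.zip' } as GenerateResult}
    buildTool="MAVEN"
    extensions={[]}
    onClose={(reset) => console.log('modal closed, reset =', reset)}
  />
);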
sum.rs
// Copyright 2017 The Synthesizer IO Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! A simple module that just sums the inputs. use module::{Module, Buffer}; pub struct
; impl Sum { pub fn new() -> Sum { Sum } } impl Module for Sum { fn n_bufs_out(&self) -> usize { 1 } fn process(&mut self, _control_in: &[f32], _control_out: &mut [f32], buf_in: &[&Buffer], buf_out: &mut [Buffer]) { let out = buf_out[0].get_mut(); for i in 0..out.len() { out[i] = 0.0; } for buf in buf_in { let buf = buf.get(); for i in 0..out.len() { out[i] += buf[i]; } } } }
Sum
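// Illustrative note, not from the crate: process() first zeroes the output buffer and
// then accumulates every input buffer element-wise. With inputs [1.0, 2.0] and
// [0.5, -2.0] the output becomes [1.5, 0.0]; with no inputs it stays all zeros. The
// control buffers are ignored, and n_bufs_out() == 1 means callers pass exactly one
// output buffer in buf_out.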
equiRectangularCubeTexture.ts
import { PanoramaToCubeMapTools } from '../../Misc/HighDynamicRange/panoramaToCubemap'; import { BaseTexture } from './baseTexture'; import { Texture } from './texture'; import { Scene } from "../../scene"; import { Nullable } from "../../types"; import { Tools } from '../../Misc/tools'; import "../../Engines/Extensions/engine.rawTexture"; import { Constants } from '../../Engines/constants'; import { FileTools } from '../../Misc/fileTools'; /** * This represents a texture coming from an equirectangular image supported by the web browser canvas. */ export class
extends BaseTexture { /** The six faces of the cube. */ private static _FacesMapping = ['right', 'left', 'up', 'down', 'front', 'back']; private _noMipmap: boolean; private _onLoad: Nullable<() => void> = null; private _onError: Nullable<() => void> = null; /** The size of the cubemap. */ private _size: number; /** The buffer of the image. */ private _buffer: ArrayBuffer; /** The width of the input image. */ private _width: number; /** The height of the input image. */ private _height: number; /** The URL to the image. */ public url: string; /** * Instantiates an EquiRectangularCubeTexture from the following parameters. * @param url The location of the image * @param scene The scene the texture will be used in * @param size The cubemap desired size (the more it increases the longer the generation will be) * @param noMipmap Forces to not generate the mipmap if true * @param gammaSpace Specifies if the texture will be used in gamma or linear space * (the PBR material requires those textures in linear space, but the standard material would require them in Gamma space) * @param onLoad — defines a callback called when texture is loaded * @param onError — defines a callback called if there is an error */ constructor( url: string, scene: Scene, size: number, noMipmap: boolean = false, gammaSpace: boolean = true, onLoad: Nullable<() => void> = null, onError: Nullable<(message?: string, exception?: any) => void> = null ) { super(scene); if (!url) { throw new Error('Image url is not set'); } this._coordinatesMode = Texture.CUBIC_MODE; this.name = url; this.url = url; this._size = size; this._noMipmap = noMipmap; this.gammaSpace = gammaSpace; this._onLoad = onLoad; this._onError = onError; this.hasAlpha = false; this.isCube = true; this._texture = this._getFromCache(url, this._noMipmap); if (!this._texture) { if (!scene.useDelayedTextureLoading) { this.loadImage(this.loadTexture.bind(this), this._onError); } else { this.delayLoadState = Constants.DELAYLOADSTATE_NOTLOADED; } } else if (onLoad) { if (this._texture.isReady) { Tools.SetImmediate(() => onLoad()); } else { this._texture.onLoadedObservable.add(onLoad); } } } /** * Load the image data, by putting the image on a canvas and extracting its buffer. */ private loadImage(loadTextureCallback: () => void, onError: Nullable<(message?: string, exception?: any) => void>): void { const canvas = document.createElement('canvas'); FileTools.LoadImage(this.url, (image) => { this._width = image.width; this._height = image.height; canvas.width = this._width; canvas.height = this._height; const ctx = canvas.getContext('2d') as CanvasRenderingContext2D; ctx.drawImage(image, 0, 0); const imageData = ctx.getImageData(0, 0, image.width, image.height); this._buffer = imageData.data.buffer as ArrayBuffer; canvas.remove(); loadTextureCallback(); }, (_, e) => { if (onError) { onError(`${this.getClassName()} could not be loaded`, e); } }, null); } /** * Convert the image buffer into a cubemap and create a CubeTexture. */ private loadTexture(): void { const scene = this.getScene(); const callback = (): Nullable<ArrayBufferView[]> => { const imageData = this.getFloat32ArrayFromArrayBuffer(this._buffer); // Extract the raw linear data. const data = PanoramaToCubeMapTools.ConvertPanoramaToCubemap(imageData, this._width, this._height, this._size); const results = []; // Push each faces. 
for (let i = 0; i < 6; i++) { const dataFace = (data as any)[EquiRectangularCubeTexture._FacesMapping[i]]; results.push(dataFace); } return results; }; if (!scene) { return; } this._texture = scene .getEngine() .createRawCubeTextureFromUrl( this.url, scene, this._size, Constants.TEXTUREFORMAT_RGB, scene.getEngine().getCaps().textureFloat ? Constants.TEXTURETYPE_FLOAT : Constants.TEXTURETYPE_UNSIGNED_INTEGER, this._noMipmap, callback, null, this._onLoad, this._onError ); } /** * Convert the ArrayBuffer into a Float32Array and drop the transparency channel. * @param buffer The ArrayBuffer that should be converted. * @returns The buffer as Float32Array. */ private getFloat32ArrayFromArrayBuffer(buffer: ArrayBuffer): Float32Array { const dataView = new DataView(buffer); const floatImageData = new Float32Array((buffer.byteLength * 3) / 4); let k = 0; for (let i = 0; i < buffer.byteLength; i++) { // We drop the transparency channel, because we do not need/want it if ((i + 1) % 4 !== 0) { floatImageData[k++] = dataView.getUint8(i) / 255; } } return floatImageData; } /** * Get the current class name of the texture useful for serialization or dynamic coding. * @returns "EquiRectangularCubeTexture" */ public getClassName(): string { return "EquiRectangularCubeTexture"; } /** * Create a clone of the current EquiRectangularCubeTexture and return it. * @returns A clone of the current EquiRectangularCubeTexture. */ public clone(): EquiRectangularCubeTexture { const scene = this.getScene(); if (!scene) { return this; } const newTexture = new EquiRectangularCubeTexture(this.url, scene, this._size, this._noMipmap, this.gammaSpace); // Base texture newTexture.level = this.level; newTexture.wrapU = this.wrapU; newTexture.wrapV = this.wrapV; newTexture.coordinatesIndex = this.coordinatesIndex; newTexture.coordinatesMode = this.coordinatesMode; return newTexture; } }
EquiRectangularCubeTexture
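// Illustrative note, not from the class above: getFloat32ArrayFromArrayBuffer keeps
// three of every four bytes, skipping each alpha byte ((i + 1) % 4 === 0), and rescales
// to [0, 1]. For example, the RGBA bytes [255, 128, 0, 255] become the three floats
// [1.0, ~0.502, 0.0], so a width x height RGBA image yields width * height * 3 floats
// for the panorama-to-cubemap conversion.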
panic_test.go
package fault import ( "bytes" "net/http" "net/http/httptest" "testing" "github.com/syronz/ozzo-routing" "github.com/stretchr/testify/assert" ) func TestPanicHandler(t *testing.T)
{ var buf bytes.Buffer h := PanicHandler(getLogger(&buf)) res := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/users/", nil) c := routing.NewContext(res, req, h, handler3, handler2) err := c.Next() if assert.NotNil(t, err) { assert.Equal(t, "xyz", err.Error()) } assert.NotEqual(t, "", buf.String()) buf.Reset() res = httptest.NewRecorder() req, _ = http.NewRequest("GET", "/users/", nil) c = routing.NewContext(res, req, h, handler2) assert.Nil(t, c.Next()) assert.Equal(t, "", buf.String()) buf.Reset() h2 := ErrorHandler(getLogger(&buf)) res = httptest.NewRecorder() req, _ = http.NewRequest("GET", "/users/", nil) c = routing.NewContext(res, req, h2, h, handler3, handler2) assert.Nil(t, c.Next()) assert.Equal(t, http.StatusInternalServerError, res.Code) assert.Equal(t, "xyz", res.Body.String()) assert.Contains(t, buf.String(), "recovery_test.go") assert.Contains(t, buf.String(), "xyz") }
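// Illustrative sketch: the helpers referenced by the test are defined elsewhere in the
// package; judging from the assertions (handler3 panics with "xyz", handler2 succeeds,
// getLogger captures log output in the buffer), they plausibly look like the following.
// The LogFunc signature is an assumption.
//
//	func handler2(c *routing.Context) error { return nil }
//	func handler3(c *routing.Context) error { panic("xyz") }
//	func getLogger(buf *bytes.Buffer) LogFunc {
//		return func(format string, a ...interface{}) { fmt.Fprintf(buf, format, a...) }
//	}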
stac.py
import os import pystac from pystac.utils import str_to_datetime import rasterio as rio from shapely.geometry import box, mapping, shape from stactools.cgls_lc100.constants import ( PROVIDER_NAME, ITEM_TIF_IMAGE_NAME, DISCRETE_CLASSIFICATION_CLASS_NAMES, DISCRETE_CLASSIFICATION_CLASS_PALETTE) def create_item(tif_href, additional_providers=None):
"""Creates a STAC Item from Copernicus Global Land Cover Layers data. Args: tif_href (str): The href to the metadata for this tif. This function will read the metadata file for information to place in the STAC item. Returns: pystac.Item: A STAC Item representing this Copernicus Global Land Cover Layers data. """ with rio.open(tif_href) as f: tags = f.tags() band_tags = f.tags(1) bounds = f.bounds # Item id item_id = os.path.basename(tif_href).replace('.tif', '') # Bounds geom = mapping(box(bounds.left, bounds.bottom, bounds.right, bounds.top)) bounds = shape(geom).bounds start_dt = str_to_datetime(tags.pop('time_coverage_start')) end_dt = str_to_datetime(tags.pop('time_coverage_end')) file_creation_dt = str_to_datetime(tags.pop('file_creation')) item = pystac.Item(id=item_id, geometry=geom, bbox=bounds, datetime=None, properties={ 'start_datetime': start_dt, 'end_datetime': end_dt, 'discrete_classification_class_names': DISCRETE_CLASSIFICATION_CLASS_NAMES, 'discrete_classification_class_palette': DISCRETE_CLASSIFICATION_CLASS_PALETTE }) # Common metadata copernicus_provider = pystac.Provider(name=PROVIDER_NAME, url=(tags.pop('doi')), roles=['producer', 'licensor']) item.common_metadata.providers = [copernicus_provider] if additional_providers is not None: item.common_metadata.providers.extend(additional_providers) item.common_metadata.start_datetime = start_dt item.common_metadata.end_datetime = end_dt item.common_metadata.created = file_creation_dt item.common_metadata.description = tags.pop('Info') item.common_metadata.platform = tags.pop('platform') item.common_metadata.title = tags.pop('title') # proj item.ext.enable('projection') item.ext.projection.epsg = int( tags.pop('delivered_product_crs').replace('WGS84 (EPSG:', '').replace(')', '')) # Extra fields for k, v in tags.items(): item.extra_fields[k] = v # Bands long_name = band_tags.pop('long_name') band = pystac.extensions.eo.Band.create( name=long_name, common_name=band_tags.pop('short_name'), description=long_name) item.ext.enable('eo') item.ext.eo.bands = [band] # Tif item.add_asset( ITEM_TIF_IMAGE_NAME, pystac.Asset(href=tif_href, media_type=pystac.MediaType.TIFF, roles=['data'], title="tif image")) return item
client.py
from __future__ import absolute_import from collections import defaultdict import copy import logging import socket from . import ConfigResourceType from kafka.vendor import six from kafka.client_async import KafkaClient, selectors import kafka.errors as Errors from kafka.errors import ( IncompatibleBrokerVersion, KafkaConfigurationError, NotControllerError, UnrecognizedBrokerVersion, IllegalArgumentError) from kafka.metrics import MetricConfig, Metrics from kafka.protocol.admin import ( CreateTopicsRequest, DeleteTopicsRequest, DescribeConfigsRequest, AlterConfigsRequest, CreatePartitionsRequest, ListGroupsRequest, DescribeGroupsRequest, DescribeAclsRequest, CreateAclsRequest, DeleteAclsRequest) from kafka.protocol.commit import GroupCoordinatorRequest, OffsetFetchRequest from kafka.protocol.metadata import MetadataRequest from kafka.structs import TopicPartition, OffsetAndMetadata from kafka.admin.acl_resource import ACLOperation, ACLPermissionType, ACLFilter, ACL, ResourcePattern, ResourceType, \ ACLResourcePatternType from kafka.version import __version__ log = logging.getLogger(__name__) class KafkaAdminClient(object): """A class for administering the Kafka cluster. Warning: This is an unstable interface that was recently added and is subject to change without warning. In particular, many methods currently return raw protocol tuples. In future releases, we plan to make these into nicer, more pythonic objects. Unfortunately, this will likely break those interfaces. The KafkaAdminClient class will negotiate for the latest version of each message protocol format supported by both the kafka-python client library and the Kafka broker. Usage of optional fields from protocol versions that are not supported by the broker will result in IncompatibleBrokerVersion exceptions. Use of this class requires a minimum broker version >= 0.10.0.0. Keyword Arguments: bootstrap_servers: 'host[:port]' string (or list of 'host[:port]' strings) that the consumer should contact to bootstrap initial cluster metadata. This does not have to be the full node list. It just needs to have at least one broker that will respond to a Metadata API Request. Default port is 9092. If no servers are specified, will default to localhost:9092. client_id (str): a name for this client. This string is passed in each request to servers and can be used to identify specific server-side log entries that correspond to this client. Also submitted to GroupCoordinator for logging with respect to consumer group administration. Default: 'kafka-python-{version}' reconnect_backoff_ms (int): The amount of time in milliseconds to wait before attempting to reconnect to a given host. Default: 50. reconnect_backoff_max_ms (int): The maximum amount of time in milliseconds to backoff/wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. Once the maximum is reached, reconnection attempts will continue periodically with this fixed rate. To avoid connection storms, a randomization factor of 0.2 will be applied to the backoff resulting in a random range between 20% below and 20% above the computed value. Default: 1000. request_timeout_ms (int): Client request timeout in milliseconds. Default: 30000. connections_max_idle_ms: Close idle connections after the number of milliseconds specified by this config. 
The broker closes idle connections after connections.max.idle.ms, so this avoids hitting unexpected socket disconnected errors on the client. Default: 540000 retry_backoff_ms (int): Milliseconds to backoff when retrying on errors. Default: 100. max_in_flight_requests_per_connection (int): Requests are pipelined to kafka brokers up to this number of maximum requests per broker connection. Default: 5. receive_buffer_bytes (int): The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. Default: None (relies on system defaults). Java client defaults to 32768. send_buffer_bytes (int): The size of the TCP send buffer (SO_SNDBUF) to use when sending data. Default: None (relies on system defaults). Java client defaults to 131072. socket_options (list): List of tuple-arguments to socket.setsockopt to apply to broker connection sockets. Default: [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)] metadata_max_age_ms (int): The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions. Default: 300000 security_protocol (str): Protocol used to communicate with brokers. Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL. Default: PLAINTEXT. ssl_context (ssl.SSLContext): Pre-configured SSLContext for wrapping socket connections. If provided, all other ssl_* configurations will be ignored. Default: None. ssl_check_hostname (bool): Flag to configure whether SSL handshake should verify that the certificate matches the broker's hostname. Default: True. ssl_cafile (str): Optional filename of CA file to use in certificate verification. Default: None. ssl_certfile (str): Optional filename of file in PEM format containing the client certificate, as well as any CA certificates needed to establish the certificate's authenticity. Default: None. ssl_keyfile (str): Optional filename containing the client private key. Default: None. ssl_password (str): Optional password to be used when loading the certificate chain. Default: None. ssl_crlfile (str): Optional filename containing the CRL to check for certificate expiration. By default, no CRL check is done. When providing a file, only the leaf certificate will be checked against this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+. Default: None. api_version (tuple): Specify which Kafka API version to use. If set to None, KafkaClient will attempt to infer the broker version by probing various APIs. Example: (0, 10, 2). Default: None api_version_auto_timeout_ms (int): number of milliseconds to throw a timeout exception from the constructor when checking the broker api version. Only applies if api_version is None selector (selectors.BaseSelector): Provide a specific selector implementation to use for I/O multiplexing. Default: selectors.DefaultSelector metrics (kafka.metrics.Metrics): Optionally provide a metrics instance for capturing network IO stats. Default: None. metric_group_prefix (str): Prefix for metric names. Default: '' sasl_mechanism (str): Authentication mechanism when security_protocol is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are: PLAIN, GSSAPI, OAUTHBEARER, SCRAM-SHA-256, SCRAM-SHA-512. sasl_plain_username (str): username for sasl PLAIN and SCRAM authentication. Required if sasl_mechanism is PLAIN or one of the SCRAM mechanisms. sasl_plain_password (str): password for sasl PLAIN and SCRAM authentication. Required if sasl_mechanism is PLAIN or one of the SCRAM mechanisms. 
sasl_kerberos_service_name (str): Service name to include in GSSAPI sasl mechanism handshake. Default: 'kafka' sasl_kerberos_domain_name (str): kerberos domain name to use in GSSAPI sasl mechanism handshake. Default: one of bootstrap servers sasl_oauth_token_provider (AbstractTokenProvider): OAuthBearer token provider instance. (See kafka.oauth.abstract). Default: None """ DEFAULT_CONFIG = { # client configs 'bootstrap_servers': 'localhost', 'client_id': 'kafka-python-' + __version__, 'request_timeout_ms': 30000, 'connections_max_idle_ms': 9 * 60 * 1000, 'reconnect_backoff_ms': 50, 'reconnect_backoff_max_ms': 1000, 'max_in_flight_requests_per_connection': 5, 'receive_buffer_bytes': None, 'send_buffer_bytes': None, 'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)], 'sock_chunk_bytes': 4096, # undocumented experimental option 'sock_chunk_buffer_count': 1000, # undocumented experimental option 'retry_backoff_ms': 100, 'metadata_max_age_ms': 300000, 'security_protocol': 'PLAINTEXT', 'ssl_context': None, 'ssl_check_hostname': True, 'ssl_cafile': None, 'ssl_certfile': None, 'ssl_keyfile': None, 'ssl_password': None, 'ssl_crlfile': None, 'api_version': None, 'api_version_auto_timeout_ms': 2000, 'selector': selectors.DefaultSelector, 'sasl_mechanism': None, 'sasl_plain_username': None, 'sasl_plain_password': None, 'sasl_kerberos_service_name': 'kafka', 'sasl_kerberos_domain_name': None, 'sasl_oauth_token_provider': None, # metrics configs 'metric_reporters': [], 'metrics_num_samples': 2, 'metrics_sample_window_ms': 30000, } def __init__(self, **configs): log.debug("Starting KafkaAdminClient with configuration: %s", configs) extra_configs = set(configs).difference(self.DEFAULT_CONFIG) if extra_configs: raise KafkaConfigurationError("Unrecognized configs: {}".format(extra_configs)) self.config = copy.copy(self.DEFAULT_CONFIG) self.config.update(configs) # Configure metrics metrics_tags = {'client-id': self.config['client_id']} metric_config = MetricConfig(samples=self.config['metrics_num_samples'], time_window_ms=self.config['metrics_sample_window_ms'], tags=metrics_tags) reporters = [reporter() for reporter in self.config['metric_reporters']] self._metrics = Metrics(metric_config, reporters) self._client = KafkaClient(metrics=self._metrics, metric_group_prefix='admin', **self.config) self._client.check_version() # Get auto-discovered version from client if necessary if self.config['api_version'] is None: self.config['api_version'] = self._client.config['api_version'] self._closed = False self._refresh_controller_id() log.debug("KafkaAdminClient started.") def close(self): """Close the KafkaAdminClient connection to the Kafka broker.""" if not hasattr(self, '_closed') or self._closed: log.info("KafkaAdminClient already closed.") return self._metrics.close() self._client.close() self._closed = True log.debug("KafkaAdminClient is now closed.") def _matching_api_version(self, operation): """Find the latest version of the protocol operation supported by both this library and the broker. This resolves to the lesser of either the latest api version this library supports, or the max version supported by the broker. :param operation: A list of protocol operation versions from kafka.protocol. :return: The max matching version number between client and broker. 
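        Example: if this library implements versions 0..3 of an operation
        (len(operation) - 1 == 3) and the broker reports (min=1, max=5) for
        that API key, the negotiated version is min(3, 5) == 3; if the
        broker's minimum were instead 4, IncompatibleBrokerVersion would be
        raised.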
""" broker_api_versions = self._client.get_api_versions() api_key = operation[0].API_KEY if broker_api_versions is None or api_key not in broker_api_versions: raise IncompatibleBrokerVersion( "Kafka broker does not support the '{}' Kafka protocol." .format(operation[0].__name__)) min_version, max_version = broker_api_versions[api_key] version = min(len(operation) - 1, max_version) if version < min_version: # max library version is less than min broker version. Currently, # no Kafka versions specify a min msg version. Maybe in the future? raise IncompatibleBrokerVersion( "No version of the '{}' Kafka protocol is supported by both the client and broker." .format(operation[0].__name__)) return version def _validate_timeout(self, timeout_ms): """Validate the timeout is set or use the configuration default. :param timeout_ms: The timeout provided by api call, in milliseconds. :return: The timeout to use for the operation. """ return timeout_ms or self.config['request_timeout_ms'] def _refresh_controller_id(self): """Determine the Kafka cluster controller.""" version = self._matching_api_version(MetadataRequest) if 1 <= version <= 6: request = MetadataRequest[version]() future = self._send_request_to_node(self._client.least_loaded_node(), request) self._wait_for_futures([future]) response = future.value controller_id = response.controller_id # verify the controller is new enough to support our requests controller_version = self._client.check_version(controller_id) if controller_version < (0, 10, 0): raise IncompatibleBrokerVersion( "The controller appears to be running Kafka {}. KafkaAdminClient requires brokers >= 0.10.0.0." .format(controller_version)) self._controller_id = controller_id else: raise UnrecognizedBrokerVersion( "Kafka Admin interface cannot determine the controller using MetadataRequest_v{}." .format(version)) def _find_coordinator_id_send_request(self, group_id): """Send a FindCoordinatorRequest to a broker. :param group_id: The consumer group ID. This is typically the group name as a string. :return: A message future """ # TODO add support for dynamically picking version of # GroupCoordinatorRequest which was renamed to FindCoordinatorRequest. # When I experimented with this, the coordinator value returned in # GroupCoordinatorResponse_v1 didn't match the value returned by # GroupCoordinatorResponse_v0 and I couldn't figure out why. version = 0 # version = self._matching_api_version(GroupCoordinatorRequest) if version <= 0: request = GroupCoordinatorRequest[version](group_id) else: raise NotImplementedError( "Support for GroupCoordinatorRequest_v{} has not yet been added to KafkaAdminClient." .format(version)) return self._send_request_to_node(self._client.least_loaded_node(), request) def _find_coordinator_id_process_response(self, response): """Process a FindCoordinatorResponse. :param response: a FindCoordinatorResponse. :return: The node_id of the broker that is the coordinator. """ if response.API_VERSION <= 0: error_type = Errors.for_code(response.error_code) if error_type is not Errors.NoError: # Note: When error_type.retriable, Java will retry... see # KafkaAdminClient's handleFindCoordinatorError method raise error_type( "FindCoordinatorRequest failed with response '{}'." .format(response)) else: raise NotImplementedError( "Support for FindCoordinatorRequest_v{} has not yet been added to KafkaAdminClient." 
.format(response.API_VERSION)) return response.coordinator_id def _find_coordinator_id(self, group_id): """Find the broker node_id of the coordinator of the given group. Sends a FindCoordinatorRequest message to the cluster. Will block until the FindCoordinatorResponse is received. Any errors are immediately raised. :param group_id: The consumer group ID. This is typically the group name as a string. :return: The node_id of the broker that is the coordinator. """ # Note: Java may change how this is implemented in KAFKA-6791. future = self._find_coordinator_id_send_request(group_id) self._wait_for_futures([future]) response = future.value return self._find_coordinator_id_process_response(response) def _send_request_to_node(self, node_id, request): """Send a Kafka protocol message to a specific broker. Returns a future that may be polled for status and results. :param node_id: The broker id to which to send the message. :param request: The message to send. :return: A future object that may be polled for status and results. :exception: The exception if the message could not be sent. """ while not self._client.ready(node_id): # poll until the connection to broker is ready, otherwise send() # will fail with NodeNotReadyError self._client.poll() return self._client.send(node_id, request) def _send_request_to_controller(self, request): """Send a Kafka protocol message to the cluster controller. Will block until the message result is received. :param request: The message to send. :return: The Kafka protocol response for the message. """ tries = 2 # in case our cached self._controller_id is outdated while tries: tries -= 1 future = self._send_request_to_node(self._controller_id, request) self._wait_for_futures([future]) response = future.value # In Java, the error fieldname is inconsistent: # - CreateTopicsResponse / CreatePartitionsResponse uses topic_errors # - DeleteTopicsResponse uses topic_error_codes # So this is a little brittle in that it assumes all responses have # one of these attributes and that they always unpack into # (topic, error_code) tuples. topic_error_tuples = (response.topic_errors if hasattr(response, 'topic_errors') else response.topic_error_codes) # Also small py2/py3 compatibility -- py3 can ignore extra values # during unpack via: for x, y, *rest in list_of_values. py2 cannot. # So for now we have to map across the list and explicitly drop any # extra values (usually the error_message) for topic, error_code in map(lambda e: e[:2], topic_error_tuples): error_type = Errors.for_code(error_code) if tries and error_type is NotControllerError: # No need to inspect the rest of the errors for # non-retriable errors because NotControllerError should # either be thrown for all errors or no errors. self._refresh_controller_id() break elif error_type is not Errors.NoError: raise error_type( "Request '{}' failed with response '{}'." .format(request, response)) else: return response raise RuntimeError("This should never happen, please file a bug with full stacktrace if encountered") @staticmethod def _convert_new_topic_request(new_topic):
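        # Hypothetical body (hedged sketch, mirroring the other _convert_*_request
        # helpers in this class; attribute names follow kafka.admin.NewTopic):
        #     return (
        #         new_topic.name,
        #         new_topic.num_partitions,
        #         new_topic.replication_factor,
        #         [(pid, replicas) for pid, replicas in new_topic.replica_assignments.items()],
        #         [(key, value) for key, value in new_topic.topic_configs.items()],
        #     )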
def create_topics(self, new_topics, timeout_ms=None, validate_only=False): """Create new topics in the cluster. :param new_topics: A list of NewTopic objects. :param timeout_ms: Milliseconds to wait for new topics to be created before the broker returns. :param validate_only: If True, don't actually create new topics. Not supported by all versions. Default: False :return: Appropriate version of CreateTopicResponse class. """ version = self._matching_api_version(CreateTopicsRequest) timeout_ms = self._validate_timeout(timeout_ms) if version == 0: if validate_only: raise IncompatibleBrokerVersion( "validate_only requires CreateTopicsRequest >= v1, which is not supported by Kafka {}." .format(self.config['api_version'])) request = CreateTopicsRequest[version]( create_topic_requests=[self._convert_new_topic_request(new_topic) for new_topic in new_topics], timeout=timeout_ms ) elif version <= 3: request = CreateTopicsRequest[version]( create_topic_requests=[self._convert_new_topic_request(new_topic) for new_topic in new_topics], timeout=timeout_ms, validate_only=validate_only ) else: raise NotImplementedError( "Support for CreateTopics v{} has not yet been added to KafkaAdminClient." .format(version)) # TODO convert structs to a more pythonic interface # TODO raise exceptions if errors return self._send_request_to_controller(request) def delete_topics(self, topics, timeout_ms=None): """Delete topics from the cluster. :param topics: A list of topic name strings. :param timeout_ms: Milliseconds to wait for topics to be deleted before the broker returns. :return: Appropriate version of DeleteTopicsResponse class. """ version = self._matching_api_version(DeleteTopicsRequest) timeout_ms = self._validate_timeout(timeout_ms) if version <= 3: request = DeleteTopicsRequest[version]( topics=topics, timeout=timeout_ms ) response = self._send_request_to_controller(request) else: raise NotImplementedError( "Support for DeleteTopics v{} has not yet been added to KafkaAdminClient." 
.format(version)) return response def _get_cluster_metadata(self, topics=None, auto_topic_creation=False, use_controller=False): """ topics == None means "get all topics" """ version = self._matching_api_version(MetadataRequest) if version <= 3: if auto_topic_creation: raise IncompatibleBrokerVersion( "auto_topic_creation requires MetadataRequest >= v4, which" " is not supported by Kafka {}" .format(self.config['api_version'])) request = MetadataRequest[version](topics=topics) elif version <= 5: request = MetadataRequest[version]( topics=topics, allow_auto_topic_creation=auto_topic_creation ) if use_controller: return self._send_request_to_controller(request) future = self._send_request_to_node( self._client.least_loaded_node(), request ) self._wait_for_futures([future]) return future.value def list_topics(self): metadata = self._get_cluster_metadata(topics=None) obj = metadata.to_object() return [t['topic'] for t in obj['topics']] def describe_topics(self, topics=None): metadata = self._get_cluster_metadata(topics=topics, use_controller=True) obj = metadata.to_object() return obj['topics'] def describe_cluster(self): metadata = self._get_cluster_metadata() obj = metadata.to_object() obj.pop('topics') # We have 'describe_topics' for this return obj @staticmethod def _convert_describe_acls_response_to_acls(describe_response): version = describe_response.API_VERSION error = Errors.for_code(describe_response.error_code) acl_list = [] for resources in describe_response.resources: if version == 0: resource_type, resource_name, acls = resources resource_pattern_type = ACLResourcePatternType.LITERAL.value elif version <= 1: resource_type, resource_name, resource_pattern_type, acls = resources else: raise NotImplementedError( "Support for DescribeAcls Response v{} has not yet been added to KafkaAdmin." .format(version) ) for acl in acls: principal, host, operation, permission_type = acl conv_acl = ACL( principal=principal, host=host, operation=ACLOperation(operation), permission_type=ACLPermissionType(permission_type), resource_pattern=ResourcePattern( ResourceType(resource_type), resource_name, ACLResourcePatternType(resource_pattern_type) ) ) acl_list.append(conv_acl) return (acl_list, error,) def describe_acls(self, acl_filter): """Describe a set of ACLs Used to return a set of ACLs matching the supplied ACLFilter. The cluster must be configured with an authorizer for this to work, or you will get a SecurityDisabledError :param acl_filter: an ACLFilter object :return: tuple of a list of matching ACL objects and a KafkaError (NoError if successful) """ version = self._matching_api_version(DescribeAclsRequest) if version == 0: request = DescribeAclsRequest[version]( resource_type=acl_filter.resource_pattern.resource_type, resource_name=acl_filter.resource_pattern.resource_name, principal=acl_filter.principal, host=acl_filter.host, operation=acl_filter.operation, permission_type=acl_filter.permission_type ) elif version <= 1: request = DescribeAclsRequest[version]( resource_type=acl_filter.resource_pattern.resource_type, resource_name=acl_filter.resource_pattern.resource_name, resource_pattern_type_filter=acl_filter.resource_pattern.pattern_type, principal=acl_filter.principal, host=acl_filter.host, operation=acl_filter.operation, permission_type=acl_filter.permission_type ) else: raise NotImplementedError( "Support for DescribeAcls v{} has not yet been added to KafkaAdmin." 
                .format(version)
            )

        future = self._send_request_to_node(self._client.least_loaded_node(), request)

        self._wait_for_futures([future])
        response = future.value

        error_type = Errors.for_code(response.error_code)
        if error_type is not Errors.NoError:
            # optionally we could retry if error_type.retriable
            raise error_type(
                "Request '{}' failed with response '{}'."
                .format(request, response))

        return self._convert_describe_acls_response_to_acls(response)

    @staticmethod
    def _convert_create_acls_resource_request_v0(acl):
        return (
            acl.resource_pattern.resource_type,
            acl.resource_pattern.resource_name,
            acl.principal,
            acl.host,
            acl.operation,
            acl.permission_type
        )

    @staticmethod
    def _convert_create_acls_resource_request_v1(acl):
        return (
            acl.resource_pattern.resource_type,
            acl.resource_pattern.resource_name,
            acl.resource_pattern.pattern_type,
            acl.principal,
            acl.host,
            acl.operation,
            acl.permission_type
        )

    @staticmethod
    def _convert_create_acls_response_to_acls(acls, create_response):
        version = create_response.API_VERSION

        creations_error = []
        creations_success = []
        for i, creations in enumerate(create_response.creation_responses):
            if version <= 1:
                error_code, error_message = creations
                acl = acls[i]
                error = Errors.for_code(error_code)
            else:
                raise NotImplementedError(
                    "Support for CreateAcls Response v{} has not yet been added to KafkaAdmin."
                    .format(version)
                )

            if error is Errors.NoError:
                creations_success.append(acl)
            else:
                creations_error.append((acl, error,))

        return {"succeeded": creations_success, "failed": creations_error}

    def create_acls(self, acls):
        """Create a list of ACLs

        This endpoint only accepts a list of concrete ACL objects, no ACLFilters.
        Raises IllegalArgumentError if any element of the list is not an ACL object.

        :param acls: a list of ACL objects
        :return: dict of successes and failures
        """
        for acl in acls:
            if not isinstance(acl, ACL):
                raise IllegalArgumentError("acls must contain ACL objects")

        version = self._matching_api_version(CreateAclsRequest)
        if version == 0:
            request = CreateAclsRequest[version](
                creations=[self._convert_create_acls_resource_request_v0(acl) for acl in acls]
            )
        elif version <= 1:
            request = CreateAclsRequest[version](
                creations=[self._convert_create_acls_resource_request_v1(acl) for acl in acls]
            )
        else:
            raise NotImplementedError(
                "Support for CreateAcls v{} has not yet been added to KafkaAdmin."
                .format(version)
            )

        future = self._send_request_to_node(self._client.least_loaded_node(), request)
        self._wait_for_futures([future])
        response = future.value
        return self._convert_create_acls_response_to_acls(acls, response)

    @staticmethod
    def _convert_delete_acls_resource_request_v0(acl):
        return (
            acl.resource_pattern.resource_type,
            acl.resource_pattern.resource_name,
            acl.principal,
            acl.host,
            acl.operation,
            acl.permission_type
        )

    @staticmethod
    def _convert_delete_acls_resource_request_v1(acl):
        return (
            acl.resource_pattern.resource_type,
            acl.resource_pattern.resource_name,
            acl.resource_pattern.pattern_type,
            acl.principal,
            acl.host,
            acl.operation,
            acl.permission_type
        )

    @staticmethod
    def _convert_delete_acls_response_to_matching_acls(acl_filters, delete_response):
        version = delete_response.API_VERSION
        filter_result_list = []
        for i, filter_responses in enumerate(delete_response.filter_responses):
            filter_error_code, filter_error_message, matching_acls = filter_responses
            filter_error = Errors.for_code(filter_error_code)
            acl_result_list = []
            for acl in matching_acls:
                if version == 0:
                    error_code, error_message, resource_type, resource_name, principal, host, operation, permission_type = acl
                    resource_pattern_type = ACLResourcePatternType.LITERAL.value
                elif version == 1:
                    error_code, error_message, resource_type, resource_name, resource_pattern_type, principal, host, operation, permission_type = acl
                else:
                    raise NotImplementedError(
                        "Support for DeleteAcls Response v{} has not yet been added to KafkaAdmin."
                        .format(version)
                    )
                acl_error = Errors.for_code(error_code)
                conv_acl = ACL(
                    principal=principal,
                    host=host,
                    operation=ACLOperation(operation),
                    permission_type=ACLPermissionType(permission_type),
                    resource_pattern=ResourcePattern(
                        ResourceType(resource_type),
                        resource_name,
                        ACLResourcePatternType(resource_pattern_type)
                    )
                )
                acl_result_list.append((conv_acl, acl_error,))
            filter_result_list.append((acl_filters[i], acl_result_list, filter_error,))
        return filter_result_list

    def delete_acls(self, acl_filters):
        """Delete a set of ACLs

        Deletes all ACLs matching the list of input ACLFilters.

        :param acl_filters: a list of ACLFilter
        :return: a list of 3-tuples corresponding to the list of input filters.
            The tuples hold (the input ACLFilter, list of affected ACLs, KafkaError instance)
        """
        for acl in acl_filters:
            if not isinstance(acl, ACLFilter):
                raise IllegalArgumentError("acl_filters must contain ACLFilter type objects")

        version = self._matching_api_version(DeleteAclsRequest)

        if version == 0:
            request = DeleteAclsRequest[version](
                filters=[self._convert_delete_acls_resource_request_v0(acl) for acl in acl_filters]
            )
        elif version <= 1:
            request = DeleteAclsRequest[version](
                filters=[self._convert_delete_acls_resource_request_v1(acl) for acl in acl_filters]
            )
        else:
            raise NotImplementedError(
                "Support for DeleteAcls v{} has not yet been added to KafkaAdmin."
                .format(version)
            )

        future = self._send_request_to_node(self._client.least_loaded_node(), request)
        self._wait_for_futures([future])
        response = future.value

        return self._convert_delete_acls_response_to_matching_acls(acl_filters, response)

    @staticmethod
    def _convert_describe_config_resource_request(config_resource):
        return (
            config_resource.resource_type,
            config_resource.name,
            [
                config_key for config_key, config_value in config_resource.configs.items()
            ] if config_resource.configs else None
        )

    def describe_configs(self, config_resources, include_synonyms=False):
        """Fetch configuration parameters for one or more Kafka resources.
        :param config_resources: A list of ConfigResource objects.
            Any keys in ConfigResource.configs dict will be used to filter the
            result. Setting the configs dict to None will get all values. An
            empty dict will get zero values (as per Kafka protocol).
        :param include_synonyms: If True, return synonyms in response. Not
            supported by all versions. Default: False.
        :return: Appropriate version of DescribeConfigsResponse class.
        """

        # Break up requests by type - a broker config request must be sent to the specific broker.
        # All other resources (currently just topic resources) can be sent to any broker.
        broker_resources = []
        topic_resources = []

        for config_resource in config_resources:
            if config_resource.resource_type == ConfigResourceType.BROKER:
                broker_resources.append(self._convert_describe_config_resource_request(config_resource))
            else:
                topic_resources.append(self._convert_describe_config_resource_request(config_resource))

        futures = []
        version = self._matching_api_version(DescribeConfigsRequest)
        if version == 0:
            if include_synonyms:
                raise IncompatibleBrokerVersion(
                    "include_synonyms requires DescribeConfigsRequest >= v1, which is not supported by Kafka {}."
                    .format(self.config['api_version']))

            if len(broker_resources) > 0:
                for broker_resource in broker_resources:
                    try:
                        broker_id = int(broker_resource[1])
                    except ValueError:
                        raise ValueError("Broker resource names must be an integer or a string representation of an integer")

                    futures.append(self._send_request_to_node(
                        broker_id,
                        DescribeConfigsRequest[version](resources=[broker_resource])
                    ))

            if len(topic_resources) > 0:
                futures.append(self._send_request_to_node(
                    self._client.least_loaded_node(),
                    DescribeConfigsRequest[version](resources=topic_resources)
                ))

        elif version <= 2:
            if len(broker_resources) > 0:
                for broker_resource in broker_resources:
                    try:
                        broker_id = int(broker_resource[1])
                    except ValueError:
                        raise ValueError("Broker resource names must be an integer or a string representation of an integer")

                    futures.append(self._send_request_to_node(
                        broker_id,
                        DescribeConfigsRequest[version](
                            resources=[broker_resource],
                            include_synonyms=include_synonyms)
                    ))

            if len(topic_resources) > 0:
                futures.append(self._send_request_to_node(
                    self._client.least_loaded_node(),
                    DescribeConfigsRequest[version](resources=topic_resources, include_synonyms=include_synonyms)
                ))
        else:
            raise NotImplementedError(
                "Support for DescribeConfigs v{} has not yet been added to KafkaAdminClient.".format(version))

        self._wait_for_futures(futures)
        return [f.value for f in futures]

    @staticmethod
    def _convert_alter_config_resource_request(config_resource):
        return (
            config_resource.resource_type,
            config_resource.name,
            [
                (config_key, config_value)
                for config_key, config_value in config_resource.configs.items()
            ]
        )

    def alter_configs(self, config_resources):
        """Alter configuration parameters of one or more Kafka resources.

        Warning:
            This is currently broken for BROKER resources because those must be
            sent to that specific broker, versus this always picks the
            least-loaded node. See the comment in the source code for details.
            We would happily accept a PR fixing this.

        :param config_resources: A list of ConfigResource objects.
        :return: Appropriate version of AlterConfigsResponse class.
        """
        version = self._matching_api_version(AlterConfigsRequest)
        if version <= 1:
            request = AlterConfigsRequest[version](
                resources=[self._convert_alter_config_resource_request(config_resource) for config_resource in config_resources]
            )
        else:
            raise NotImplementedError(
                "Support for AlterConfigs v{} has not yet been added to KafkaAdminClient."
                .format(version))

        # TODO the Java client has the note:
        # // We must make a separate AlterConfigs request for every BROKER resource we want to alter
        # // and send the request to that specific broker. Other resources are grouped together into
        # // a single request that may be sent to any broker.
        #
        # So this is currently broken as it always sends to the least_loaded_node()
        future = self._send_request_to_node(self._client.least_loaded_node(), request)

        self._wait_for_futures([future])
        response = future.value
        return response

    # alter replica logs dir protocol not yet implemented
    # Note: have to lookup the broker with the replica assignment and send the request to that broker

    # describe log dirs protocol not yet implemented
    # Note: have to lookup the broker with the replica assignment and send the request to that broker

    @staticmethod
    def _convert_create_partitions_request(topic_name, new_partitions):
        return (
            topic_name,
            (
                new_partitions.total_count,
                new_partitions.new_assignments
            )
        )

    def create_partitions(self, topic_partitions, timeout_ms=None, validate_only=False):
        """Create additional partitions for an existing topic.

        :param topic_partitions: A map of topic name strings to NewPartition objects.
        :param timeout_ms: Milliseconds to wait for new partitions to be
            created before the broker returns.
        :param validate_only: If True, don't actually create new partitions.
            Default: False
        :return: Appropriate version of CreatePartitionsResponse class.
        """
        version = self._matching_api_version(CreatePartitionsRequest)
        timeout_ms = self._validate_timeout(timeout_ms)
        if version <= 1:
            request = CreatePartitionsRequest[version](
                topic_partitions=[self._convert_create_partitions_request(topic_name, new_partitions)
                                  for topic_name, new_partitions in topic_partitions.items()],
                timeout=timeout_ms,
                validate_only=validate_only
            )
        else:
            raise NotImplementedError(
                "Support for CreatePartitions v{} has not yet been added to KafkaAdminClient."
                .format(version))
        return self._send_request_to_controller(request)

    # delete records protocol not yet implemented
    # Note: send the request to the partition leaders

    # create delegation token protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    # renew delegation token protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    # expire delegation_token protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    # describe delegation_token protocol not yet implemented
    # Note: send the request to the least_loaded_node()

    def _describe_consumer_groups_send_request(self, group_id, group_coordinator_id, include_authorized_operations=False):
        """Send a DescribeGroupsRequest to the group's coordinator.

        :param group_id: The group name as a string
        :param group_coordinator_id: The node_id of the groups' coordinator
            broker.
        :return: A message future.
        """
        version = self._matching_api_version(DescribeGroupsRequest)
        if version <= 2:
            if include_authorized_operations:
                raise IncompatibleBrokerVersion(
                    "include_authorized_operations requires "
                    "DescribeGroupsRequest >= v3, which is not "
                    "supported by Kafka {}".format(version)
                )
            # Note: KAFKA-6788 A potential optimization is to group the
            # request per coordinator and send one request with a list of
            # all consumer groups. Java still hasn't implemented this
            # because the error checking is hard to get right when some
            # groups error and others don't.
request = DescribeGroupsRequest[version](groups=(group_id,)) elif version <= 3: request = DescribeGroupsRequest[version]( groups=(group_id,), include_authorized_operations=include_authorized_operations ) else: raise NotImplementedError( "Support for DescribeGroupsRequest_v{} has not yet been added to KafkaAdminClient." .format(version)) return self._send_request_to_node(group_coordinator_id, request) def _describe_consumer_groups_process_response(self, response): """Process a DescribeGroupsResponse into a group description.""" if response.API_VERSION <= 3: assert len(response.groups) == 1 # TODO need to implement converting the response tuple into # a more accessible interface like a namedtuple and then stop # hardcoding tuple indices here. Several Java examples, # including KafkaAdminClient.java group_description = response.groups[0] error_code = group_description[0] error_type = Errors.for_code(error_code) # Java has the note: KAFKA-6789, we can retry based on the error code if error_type is not Errors.NoError: raise error_type( "DescribeGroupsResponse failed with response '{}'." .format(response)) # TODO Java checks the group protocol type, and if consumer # (ConsumerProtocol.PROTOCOL_TYPE) or empty string, it decodes # the members' partition assignments... that hasn't yet been # implemented here so just return the raw struct results else: raise NotImplementedError( "Support for DescribeGroupsResponse_v{} has not yet been added to KafkaAdminClient." .format(response.API_VERSION)) return group_description def describe_consumer_groups(self, group_ids, group_coordinator_id=None, include_authorized_operations=False): """Describe a set of consumer groups. Any errors are immediately raised. :param group_ids: A list of consumer group IDs. These are typically the group names as strings. :param group_coordinator_id: The node_id of the groups' coordinator broker. If set to None, it will query the cluster for each group to find that group's coordinator. Explicitly specifying this can be useful for avoiding extra network round trips if you already know the group coordinator. This is only useful when all the group_ids have the same coordinator, otherwise it will error. Default: None. :param include_authorized_operations: Whether or not to include information about the operations a group is allowed to perform. Only supported on API version >= v3. Default: False. :return: A list of group descriptions. For now the group descriptions are the raw results from the DescribeGroupsResponse. Long-term, we plan to change this to return namedtuples as well as decoding the partition assignments. """ group_descriptions = [] futures = [] for group_id in group_ids: if group_coordinator_id is not None: this_groups_coordinator_id = group_coordinator_id else: this_groups_coordinator_id = self._find_coordinator_id(group_id) f = self._describe_consumer_groups_send_request( group_id, this_groups_coordinator_id, include_authorized_operations) futures.append(f) self._wait_for_futures(futures) for future in futures: response = future.value group_description = self._describe_consumer_groups_process_response(response) group_descriptions.append(group_description) return group_descriptions def _list_consumer_groups_send_request(self, broker_id): """Send a ListGroupsRequest to a broker. :param broker_id: The broker's node_id. 
:return: A message future """ version = self._matching_api_version(ListGroupsRequest) if version <= 2: request = ListGroupsRequest[version]() else: raise NotImplementedError( "Support for ListGroupsRequest_v{} has not yet been added to KafkaAdminClient." .format(version)) return self._send_request_to_node(broker_id, request) def _list_consumer_groups_process_response(self, response): """Process a ListGroupsResponse into a list of groups.""" if response.API_VERSION <= 2: error_type = Errors.for_code(response.error_code) if error_type is not Errors.NoError: raise error_type( "ListGroupsRequest failed with response '{}'." .format(response)) else: raise NotImplementedError( "Support for ListGroupsResponse_v{} has not yet been added to KafkaAdminClient." .format(response.API_VERSION)) return response.groups def list_consumer_groups(self, broker_ids=None): """List all consumer groups known to the cluster. This returns a list of Consumer Group tuples. The tuples are composed of the consumer group name and the consumer group protocol type. Only consumer groups that store their offsets in Kafka are returned. The protocol type will be an empty string for groups created using Kafka < 0.9 APIs because, although they store their offsets in Kafka, they don't use Kafka for group coordination. For groups created using Kafka >= 0.9, the protocol type will typically be "consumer". As soon as any error is encountered, it is immediately raised. :param broker_ids: A list of broker node_ids to query for consumer groups. If set to None, will query all brokers in the cluster. Explicitly specifying broker(s) can be useful for determining which consumer groups are coordinated by those broker(s). Default: None :return list: List of tuples of Consumer Groups. :exception GroupCoordinatorNotAvailableError: The coordinator is not available, so cannot process requests. :exception GroupLoadInProgressError: The coordinator is loading and hence can't process requests. """ # While we return a list, internally use a set to prevent duplicates # because if a group coordinator fails after being queried, and its # consumer groups move to new brokers that haven't yet been queried, # then the same group could be returned by multiple brokers. consumer_groups = set() if broker_ids is None: broker_ids = [broker.nodeId for broker in self._client.cluster.brokers()] futures = [self._list_consumer_groups_send_request(b) for b in broker_ids] self._wait_for_futures(futures) for f in futures: response = f.value consumer_groups.update(self._list_consumer_groups_process_response(response)) return list(consumer_groups) def _list_consumer_group_offsets_send_request(self, group_id, group_coordinator_id, partitions=None): """Send an OffsetFetchRequest to a broker. :param group_id: The consumer group id name for which to fetch offsets. :param group_coordinator_id: The node_id of the group's coordinator broker. :return: A message future """ version = self._matching_api_version(OffsetFetchRequest) if version <= 3: if partitions is None: if version <= 1: raise ValueError( """OffsetFetchRequest_v{} requires specifying the partitions for which to fetch offsets. Omitting the partitions is only supported on brokers >= 0.10.2. 
For details, see KIP-88.""".format(version)) topics_partitions = None else: # transform from [TopicPartition("t1", 1), TopicPartition("t1", 2)] to [("t1", [1, 2])] topics_partitions_dict = defaultdict(set) for topic, partition in partitions: topics_partitions_dict[topic].add(partition) topics_partitions = list(six.iteritems(topics_partitions_dict)) request = OffsetFetchRequest[version](group_id, topics_partitions) else: raise NotImplementedError( "Support for OffsetFetchRequest_v{} has not yet been added to KafkaAdminClient." .format(version)) return self._send_request_to_node(group_coordinator_id, request) def _list_consumer_group_offsets_process_response(self, response): """Process an OffsetFetchResponse. :param response: an OffsetFetchResponse. :return: A dictionary composed of TopicPartition keys and OffsetAndMetadata values. """ if response.API_VERSION <= 3: # OffsetFetchResponse_v1 lacks a top-level error_code if response.API_VERSION > 1: error_type = Errors.for_code(response.error_code) if error_type is not Errors.NoError: # optionally we could retry if error_type.retriable raise error_type( "OffsetFetchResponse failed with response '{}'." .format(response)) # transform response into a dictionary with TopicPartition keys and # OffsetAndMetadata values--this is what the Java AdminClient returns offsets = {} for topic, partitions in response.topics: for partition, offset, metadata, error_code in partitions: error_type = Errors.for_code(error_code) if error_type is not Errors.NoError: raise error_type( "Unable to fetch consumer group offsets for topic {}, partition {}" .format(topic, partition)) offsets[TopicPartition(topic, partition)] = OffsetAndMetadata(offset, metadata) else: raise NotImplementedError( "Support for OffsetFetchResponse_v{} has not yet been added to KafkaAdminClient." .format(response.API_VERSION)) return offsets def list_consumer_group_offsets(self, group_id, group_coordinator_id=None, partitions=None): """Fetch Consumer Offsets for a single consumer group. Note: This does not verify that the group_id or partitions actually exist in the cluster. As soon as any error is encountered, it is immediately raised. :param group_id: The consumer group id name for which to fetch offsets. :param group_coordinator_id: The node_id of the group's coordinator broker. If set to None, will query the cluster to find the group coordinator. Explicitly specifying this can be useful to prevent that extra network round trip if you already know the group coordinator. Default: None. :param partitions: A list of TopicPartitions for which to fetch offsets. On brokers >= 0.10.2, this can be set to None to fetch all known offsets for the consumer group. Default: None. :return dictionary: A dictionary with TopicPartition keys and OffsetAndMetadata values. Partitions that are not specified and for which the group_id does not have a recorded offset are omitted. An offset value of `-1` indicates the group_id has no offset for that TopicPartition. A `-1` can only happen for partitions that are explicitly specified. """ if group_coordinator_id is None: group_coordinator_id = self._find_coordinator_id(group_id) future = self._list_consumer_group_offsets_send_request( group_id, group_coordinator_id, partitions) self._wait_for_futures([future]) response = future.value return self._list_consumer_group_offsets_process_response(response) # delete groups protocol not yet implemented # Note: send the request to the group's coordinator.
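# A minimal, self-contained sketch of the TopicPartition grouping performed in
# _list_consumer_group_offsets_send_request above. The local TopicPartition
# namedtuple and the sample values are stand-ins for illustration only; the
# real client receives kafka.structs.TopicPartition instances from the caller.
from collections import defaultdict, namedtuple

TopicPartition = namedtuple("TopicPartition", ["topic", "partition"])

def group_partitions_for_offset_fetch(partitions):
    # transform from [TopicPartition("t1", 1), TopicPartition("t1", 2)]
    # to [("t1", [1, 2])], the shape OffsetFetchRequest expects
    grouped = defaultdict(set)
    for topic, partition in partitions:
        grouped[topic].add(partition)
    return [(topic, sorted(ids)) for topic, ids in grouped.items()]

assert group_partitions_for_offset_fetch(
    [TopicPartition("t1", 1), TopicPartition("t1", 2), TopicPartition("t2", 0)]
) == [("t1", [1, 2]), ("t2", [0])]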
def _wait_for_futures(self, futures): while not all(future.succeeded() for future in futures): for future in futures: self._client.poll(future=future) if future.failed(): raise future.exception # pylint: disable-msg=raising-bad-type
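# Hedged sketch of the contract _wait_for_futures relies on: every future
# eventually reports succeeded() or failed(), and the loop keeps polling the
# client until all of them resolve. TinyFuture and the pre-resolved values
# below are inventions for illustration, not the real kafka Future class.
class TinyFuture:
    def __init__(self):
        self.value = None
        self.exception = None
        self._done = False

    def success(self, value):
        # resolve the future with a value
        self.value, self._done = value, True

    def failure(self, exc):
        # resolve the future with an error
        self.exception, self._done = exc, True

    def succeeded(self):
        return self._done and self.exception is None

    def failed(self):
        return self._done and self.exception is not None

f1, f2 = TinyFuture(), TinyFuture()
f1.success("response-1")
f2.success("response-2")
assert all(f.succeeded() for f in (f1, f2))
assert [f.value for f in (f1, f2)] == ["response-1", "response-2"]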
return ( new_topic.name, new_topic.num_partitions, new_topic.replication_factor, [ (partition_id, replicas) for partition_id, replicas in new_topic.replica_assignments.items() ], [ (config_key, config_value) for config_key, config_value in new_topic.topic_configs.items() ] )
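# What the conversion above produces for a concrete topic. The small NewTopic
# class here is a stand-in defined only so the sketch runs on its own; the
# real kafka.admin.NewTopic exposes the same attributes.
class NewTopic:
    def __init__(self, name, num_partitions, replication_factor,
                 replica_assignments=None, topic_configs=None):
        self.name = name
        self.num_partitions = num_partitions
        self.replication_factor = replication_factor
        self.replica_assignments = replica_assignments or {}
        self.topic_configs = topic_configs or {}

def convert_new_topic_request(new_topic):
    # mirrors the tuple layout built above
    return (
        new_topic.name,
        new_topic.num_partitions,
        new_topic.replication_factor,
        [(pid, replicas) for pid, replicas in new_topic.replica_assignments.items()],
        [(key, value) for key, value in new_topic.topic_configs.items()],
    )

topic = NewTopic("events", 6, 3, topic_configs={"retention.ms": "86400000"})
assert convert_new_topic_request(topic) == (
    "events", 6, 3, [], [("retention.ms", "86400000")])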
client.js
import Vue from 'vue' import App from './Client.vue' import VueRouter from 'vue-router' import FastClick from 'FastClick' import './assets/js/date' import routes from './extend/RouterMap' import ESlog from './extend/ESlog' import clientlib from './extend/client/' import {fetch} from './extend/fetch' import Util from './extend/Util' let config = require('./config/index') window.Truck = window.$GP = { Util, ESlog, Env: {}, Native: {}, Server: {fetch}, } window.Truck.Maliang = clientlib window.$GP.VueExtend = clientlib /** * Define the merge strategy for editor-configuration options */ var strategies = Vue.config.optionMergeStrategies strategies.editorMethods = strategies.methods strategies.editerMethods = strategies.methods Vue.use(VueRouter) FastClick.attach(document.body, {}) const router = new VueRouter(routes) const app = window.app = new Vue({ // eslint-disable-line router, render: h => h(App) }) function startApp () { if (startApp.started) return startApp.started = true mount() function mount () { Object.defineProperty(window, 'EventHub', { get () { return window.app } }) app.$mount('#app') window.addEventListener('pagehide', trackPVTime) sendPVTime() window.trackPVTime = trackPVTime } } startApp() function trackPVTime (from) { if (trackPVTime.called) return console.log(from) var end = Date.now() var start = window.loadingStartTime var route = window.location.pathname.match(new RegExp(`(?:/${config.VIEW_NAME || 'view'})?/(\\w+)`)) if (route && route[1]) { window.localStorage.setItem('ML_VIEW_TIME', route[1] + '|' + ((end - start) / 1000 | 0)) } trackPVTime.called = 1 } function sendPVTime () { if (proc
DE_ENV !== 'production') return var ML_VIEW_TIME = (window.localStorage.getItem('ML_VIEW_TIME') || '').split('|') if (ML_VIEW_TIME && ML_VIEW_TIME.length == 2) { ESlog.track({ app_id: 'tview', page_id: ML_VIEW_TIME[0], action: 'PV_TIME', duration: +ML_VIEW_TIME[1] || 0, }, 'no-fp') } }
ess.env.NO
workbox-8dc792ac.js
define("./workbox-8dc792ac.js",["exports"],(function(t){"use strict";try{self["workbox:core:6.2.4"]&&_()}catch(t){}const e=(t,...e)=>{let s=t;return e.length>0&&(s+=` :: ${JSON.stringify(e)}`),s};class s extends Error{constructor(t,s){super(e(t,s)),this.name=t,this.details=s}}try{self["workbox:routing:6.2.4"]&&_()}catch(t){}const n=t=>t&&"object"==typeof t?t:{handle:t};class r{constructor(t,e,s="GET"){this.handler=n(e),this.match=t,this.method=s}setCatchHandler(t){this.catchHandler=n(t)}}class
extends r{constructor(t,e,s){super((({url:e})=>{const s=t.exec(e.href);if(s&&(e.origin===location.origin||0===s.index))return s.slice(1)}),e,s)}}class o{constructor(){this.t=new Map,this.i=new Map}get routes(){return this.t}addFetchListener(){self.addEventListener("fetch",(t=>{const{request:e}=t,s=this.handleRequest({request:e,event:t});s&&t.respondWith(s)}))}addCacheListener(){self.addEventListener("message",(t=>{if(t.data&&"CACHE_URLS"===t.data.type){const{payload:e}=t.data,s=Promise.all(e.urlsToCache.map((e=>{"string"==typeof e&&(e=[e]);const s=new Request(...e);return this.handleRequest({request:s,event:t})})));t.waitUntil(s),t.ports&&t.ports[0]&&s.then((()=>t.ports[0].postMessage(!0)))}}))}handleRequest({request:t,event:e}){const s=new URL(t.url,location.href);if(!s.protocol.startsWith("http"))return;const n=s.origin===location.origin,{params:r,route:i}=this.findMatchingRoute({event:e,request:t,sameOrigin:n,url:s});let o=i&&i.handler;const c=t.method;if(!o&&this.i.has(c)&&(o=this.i.get(c)),!o)return;let a;try{a=o.handle({url:s,request:t,event:e,params:r})}catch(t){a=Promise.reject(t)}const h=i&&i.catchHandler;return a instanceof Promise&&(this.o||h)&&(a=a.catch((async n=>{if(h)try{return await h.handle({url:s,request:t,event:e,params:r})}catch(t){t instanceof Error&&(n=t)}if(this.o)return this.o.handle({url:s,request:t,event:e});throw n}))),a}findMatchingRoute({url:t,sameOrigin:e,request:s,event:n}){const r=this.t.get(s.method)||[];for(const i of r){let r;const o=i.match({url:t,sameOrigin:e,request:s,event:n});if(o)return r=o,(Array.isArray(r)&&0===r.length||o.constructor===Object&&0===Object.keys(o).length||"boolean"==typeof o)&&(r=void 0),{route:i,params:r}}return{}}setDefaultHandler(t,e="GET"){this.i.set(e,n(t))}setCatchHandler(t){this.o=n(t)}registerRoute(t){this.t.has(t.method)||this.t.set(t.method,[]),this.t.get(t.method).push(t)}unregisterRoute(t){if(!this.t.has(t.method))throw new s("unregister-route-but-not-found-with-method",{method:t.method});const e=this.t.get(t.method).indexOf(t);if(!(e>-1))throw new s("unregister-route-route-not-registered");this.t.get(t.method).splice(e,1)}}let c;const a=()=>(c||(c=new o,c.addFetchListener(),c.addCacheListener()),c);function h(t,e,n){let o;if("string"==typeof t){const s=new URL(t,location.href);o=new r((({url:t})=>t.href===s.href),e,n)}else if(t instanceof RegExp)o=new i(t,e,n);else if("function"==typeof t)o=new r(t,e,n);else{if(!(t instanceof r))throw new s("unsupported-route-type",{moduleName:"workbox-routing",funcName:"registerRoute",paramName:"capture"});o=t}return a().registerRoute(o),o}const u={googleAnalytics:"googleAnalytics",precache:"precache-v2",prefix:"workbox",runtime:"runtime",suffix:"undefined"!=typeof registration?registration.scope:""},l=t=>[u.prefix,t,u.suffix].filter((t=>t&&t.length>0)).join("-"),f=t=>t||l(u.precache),w=t=>t||l(u.runtime);function d(t,e){const s=e();return t.waitUntil(s),s}try{self["workbox:precaching:6.2.4"]&&_()}catch(t){}function p(t){if(!t)throw new s("add-to-cache-list-unexpected-type",{entry:t});if("string"==typeof t){const e=new URL(t,location.href);return{cacheKey:e.href,url:e.href}}const{revision:e,url:n}=t;if(!n)throw new s("add-to-cache-list-unexpected-type",{entry:t});if(!e){const t=new URL(n,location.href);return{cacheKey:t.href,url:t.href}}const r=new URL(n,location.href),i=new URL(n,location.href);return r.searchParams.set("__WB_REVISION__",e),{cacheKey:r.href,url:i.href}}class 
y{constructor(){this.updatedURLs=[],this.notUpdatedURLs=[],this.handlerWillStart=async({request:t,state:e})=>{e&&(e.originalRequest=t)},this.cachedResponseWillBeUsed=async({event:t,state:e,cachedResponse:s})=>{if("install"===t.type&&e&&e.originalRequest&&e.originalRequest instanceof Request){const t=e.originalRequest.url;s?this.notUpdatedURLs.push(t):this.updatedURLs.push(t)}return s}}}class g{constructor({precacheController:t}){this.cacheKeyWillBeUsed=async({request:t,params:e})=>{const s=e&&e.cacheKey||this.h.getCacheKeyForURL(t.url);return s?new Request(s):t},this.h=t}}let R;async function m(t,e){let n=null;if(t.url){n=new URL(t.url).origin}if(n!==self.location.origin)throw new s("cross-origin-copy-response",{origin:n});const r=t.clone(),i={headers:new Headers(r.headers),status:r.status,statusText:r.statusText},o=e?e(i):i,c=function(){if(void 0===R){const t=new Response("");if("body"in t)try{new Response(t.body),R=!0}catch(t){R=!1}R=!1}return R}()?r.body:await r.blob();return new Response(c,o)}function v(t,e){const s=new URL(t);for(const t of e)s.searchParams.delete(t);return s.href}class q{constructor(){this.promise=new Promise(((t,e)=>{this.resolve=t,this.reject=e}))}}const U=new Set;try{self["workbox:strategies:6.2.4"]&&_()}catch(t){}function b(t){return"string"==typeof t?new Request(t):t}class L{constructor(t,e){this.u={},Object.assign(this,e),this.event=e.event,this.l=t,this.p=new q,this.g=[],this.R=[...t.plugins],this.m=new Map;for(const t of this.R)this.m.set(t,{});this.event.waitUntil(this.p.promise)}async fetch(t){const{event:e}=this;let n=b(t);if("navigate"===n.mode&&e instanceof FetchEvent&&e.preloadResponse){const t=await e.preloadResponse;if(t)return t}const r=this.hasCallback("fetchDidFail")?n.clone():null;try{for(const t of this.iterateCallbacks("requestWillFetch"))n=await t({request:n.clone(),event:e})}catch(t){if(t instanceof Error)throw new s("plugin-error-request-will-fetch",{thrownErrorMessage:t.message})}const i=n.clone();try{let t;t=await fetch(n,"navigate"===n.mode?void 0:this.l.fetchOptions);for(const s of this.iterateCallbacks("fetchDidSucceed"))t=await s({event:e,request:i,response:t});return t}catch(t){throw r&&await this.runCallbacks("fetchDidFail",{error:t,event:e,originalRequest:r.clone(),request:i.clone()}),t}}async fetchAndCachePut(t){const e=await this.fetch(t),s=e.clone();return this.waitUntil(this.cachePut(t,s)),e}async cacheMatch(t){const e=b(t);let s;const{cacheName:n,matchOptions:r}=this.l,i=await this.getCacheKey(e,"read"),o=Object.assign(Object.assign({},r),{cacheName:n});s=await caches.match(i,o);for(const t of this.iterateCallbacks("cachedResponseWillBeUsed"))s=await t({cacheName:n,matchOptions:r,cachedResponse:s,request:i,event:this.event})||void 0;return s}async cachePut(t,e){const n=b(t);var r;await(r=0,new Promise((t=>setTimeout(t,r))));const i=await this.getCacheKey(n,"write");if(!e)throw new s("cache-put-with-no-response",{url:(o=i.url,new URL(String(o),location.href).href.replace(new RegExp(`^${location.origin}`),""))});var o;const c=await this.v(e);if(!c)return!1;const{cacheName:a,matchOptions:h}=this.l,u=await self.caches.open(a),l=this.hasCallback("cacheDidUpdate"),f=l?await async function(t,e,s,n){const r=v(e.url,s);if(e.url===r)return t.match(e,n);const i=Object.assign(Object.assign({},n),{ignoreSearch:!0}),o=await t.keys(e,i);for(const e of o)if(r===v(e.url,s))return t.match(e,n)}(u,i.clone(),["__WB_REVISION__"],h):null;try{await u.put(i,l?c.clone():c)}catch(t){if(t instanceof Error)throw"QuotaExceededError"===t.name&&await async 
function(){for(const t of U)await t()}(),t}for(const t of this.iterateCallbacks("cacheDidUpdate"))await t({cacheName:a,oldResponse:f,newResponse:c.clone(),request:i,event:this.event});return!0}async getCacheKey(t,e){if(!this.u[e]){let s=t;for(const t of this.iterateCallbacks("cacheKeyWillBeUsed"))s=b(await t({mode:e,request:s,event:this.event,params:this.params}));this.u[e]=s}return this.u[e]}hasCallback(t){for(const e of this.l.plugins)if(t in e)return!0;return!1}async runCallbacks(t,e){for(const s of this.iterateCallbacks(t))await s(e)}*iterateCallbacks(t){for(const e of this.l.plugins)if("function"==typeof e[t]){const s=this.m.get(e),n=n=>{const r=Object.assign(Object.assign({},n),{state:s});return e[t](r)};yield n}}waitUntil(t){return this.g.push(t),t}async doneWaiting(){let t;for(;t=this.g.shift();)await t}destroy(){this.p.resolve(null)}async v(t){let e=t,s=!1;for(const t of this.iterateCallbacks("cacheWillUpdate"))if(e=await t({request:this.request,response:e,event:this.event})||void 0,s=!0,!e)break;return s||e&&200!==e.status&&(e=void 0),e}}class E extends class{constructor(t={}){this.cacheName=w(t.cacheName),this.plugins=t.plugins||[],this.fetchOptions=t.fetchOptions,this.matchOptions=t.matchOptions}handle(t){const[e]=this.handleAll(t);return e}handleAll(t){t instanceof FetchEvent&&(t={event:t,request:t.request});const e=t.event,s="string"==typeof t.request?new Request(t.request):t.request,n="params"in t?t.params:void 0,r=new L(this,{event:e,request:s,params:n}),i=this.q(r,s,e);return[i,this.U(i,r,s,e)]}async q(t,e,n){let r;await t.runCallbacks("handlerWillStart",{event:n,request:e});try{if(r=await this.L(e,t),!r||"error"===r.type)throw new s("no-response",{url:e.url})}catch(s){if(s instanceof Error)for(const i of t.iterateCallbacks("handlerDidError"))if(r=await i({error:s,event:n,request:e}),r)break;if(!r)throw s}for(const s of t.iterateCallbacks("handlerWillRespond"))r=await s({event:n,request:e,response:r});return r}async U(t,e,s,n){let r,i;try{r=await t}catch(i){}try{await e.runCallbacks("handlerDidRespond",{event:n,request:s,response:r}),await e.doneWaiting()}catch(t){t instanceof Error&&(i=t)}if(await e.runCallbacks("handlerDidComplete",{event:n,request:s,response:r,error:i}),e.destroy(),i)throw i}}{constructor(t={}){t.cacheName=f(t.cacheName),super(t),this._=!1!==t.fallbackToNetwork,this.plugins.push(E.copyRedirectedCacheableResponsesPlugin)}async L(t,e){const s=await e.cacheMatch(t);return s||(e.event&&"install"===e.event.type?await this.C(t,e):await this.O(t,e))}async O(t,e){let n;if(!this._)throw new s("missing-precache-entry",{cacheName:this.cacheName,url:t.url});return n=await e.fetch(t),n}async C(t,e){this.N();const n=await e.fetch(t);if(!await e.cachePut(t,n.clone()))throw new s("bad-precaching-response",{url:t.url,status:n.status});return n}N(){let t=null,e=0;for(const[s,n]of this.plugins.entries())n!==E.copyRedirectedCacheableResponsesPlugin&&(n===E.defaultPrecacheCacheabilityPlugin&&(t=s),n.cacheWillUpdate&&e++);0===e?this.plugins.push(E.defaultPrecacheCacheabilityPlugin):e>1&&null!==t&&this.plugins.splice(t,1)}}E.defaultPrecacheCacheabilityPlugin={cacheWillUpdate:async({response:t})=>!t||t.status>=400?null:t},E.copyRedirectedCacheableResponsesPlugin={cacheWillUpdate:async({response:t})=>t.redirected?await m(t):t};class x{constructor({cacheName:t,plugins:e=[],fallbackToNetwork:s=!0}={}){this.k=new Map,this.j=new Map,this.T=new Map,this.l=new E({cacheName:f(t),plugins:[...e,new 
g({precacheController:this})],fallbackToNetwork:s}),this.install=this.install.bind(this),this.activate=this.activate.bind(this)}get strategy(){return this.l}precache(t){this.addToCacheList(t),this.W||(self.addEventListener("install",this.install),self.addEventListener("activate",this.activate),this.W=!0)}addToCacheList(t){const e=[];for(const n of t){"string"==typeof n?e.push(n):n&&void 0===n.revision&&e.push(n.url);const{cacheKey:t,url:r}=p(n),i="string"!=typeof n&&n.revision?"reload":"default";if(this.k.has(r)&&this.k.get(r)!==t)throw new s("add-to-cache-list-conflicting-entries",{firstEntry:this.k.get(r),secondEntry:t});if("string"!=typeof n&&n.integrity){if(this.T.has(t)&&this.T.get(t)!==n.integrity)throw new s("add-to-cache-list-conflicting-integrities",{url:r});this.T.set(t,n.integrity)}if(this.k.set(r,t),this.j.set(r,i),e.length>0){const t=`Workbox is precaching URLs without revision info: ${e.join(", ")}\nThis is generally NOT safe. Learn more at https://bit.ly/wb-precache`;console.warn(t)}}}install(t){return d(t,(async()=>{const e=new y;this.strategy.plugins.push(e);for(const[e,s]of this.k){const n=this.T.get(s),r=this.j.get(e),i=new Request(e,{integrity:n,cache:r,credentials:"same-origin"});await Promise.all(this.strategy.handleAll({params:{cacheKey:s},request:i,event:t}))}const{updatedURLs:s,notUpdatedURLs:n}=e;return{updatedURLs:s,notUpdatedURLs:n}}))}activate(t){return d(t,(async()=>{const t=await self.caches.open(this.strategy.cacheName),e=await t.keys(),s=new Set(this.k.values()),n=[];for(const r of e)s.has(r.url)||(await t.delete(r),n.push(r.url));return{deletedURLs:n}}))}getURLsToCacheKeys(){return this.k}getCachedURLs(){return[...this.k.keys()]}getCacheKeyForURL(t){const e=new URL(t,location.href);return this.k.get(e.href)}async matchPrecache(t){const e=t instanceof Request?t.url:t,s=this.getCacheKeyForURL(e);if(s){return(await self.caches.open(this.strategy.cacheName)).match(s)}}createHandlerBoundToURL(t){const e=this.getCacheKeyForURL(t);if(!e)throw new s("non-precached-url",{url:t});return s=>(s.request=new Request(t),s.params=Object.assign({cacheKey:e},s.params),this.strategy.handle(s))}}let C;const O=()=>(C||(C=new x),C);class N extends r{constructor(t,e){super((({request:s})=>{const n=t.getURLsToCacheKeys();for(const t of function*(t,{ignoreURLParametersMatching:e=[/^utm_/,/^fbclid$/],directoryIndex:s="index.html",cleanURLs:n=!0,urlManipulation:r}={}){const i=new URL(t,location.href);i.hash="",yield i.href;const o=function(t,e=[]){for(const s of[...t.searchParams.keys()])e.some((t=>t.test(s)))&&t.searchParams.delete(s);return t}(i,e);if(yield o.href,s&&o.pathname.endsWith("/")){const t=new URL(o.href);t.pathname+=s,yield t.href}if(n){const t=new URL(o.href);t.pathname+=".html",yield t.href}if(r){const t=r({url:i});for(const e of t)yield e.href}}(s.url,e)){const e=n.get(t);if(e)return{cacheKey:e}}}),t.strategy)}}t.NavigationRoute=class extends r{constructor(t,{allowlist:e=[/./],denylist:s=[]}={}){super((t=>this.K(t)),t),this.M=e,this.P=s}K({url:t,request:e}){if(e&&"navigate"!==e.mode)return!1;const s=t.pathname+t.search;for(const t of this.P)if(t.test(s))return!1;return!!this.M.some((t=>t.test(s)))}},t.clientsClaim=function(){self.addEventListener("activate",(()=>self.clients.claim()))},t.createHandlerBoundToURL=function(t){return O().createHandlerBoundToURL(t)},t.precacheAndRoute=function(t,e){!function(t){O().precache(t)}(t),function(t){const e=O();h(new N(e,t))}(e)},t.registerRoute=h}));
i
metasmoke.py
# coding=utf-8 import json import requests import importlib # for .reload() from globalvars import GlobalVars import threading # noinspection PyPackageRequirements import websocket try: from collections.abc import Iterable except ImportError: from collections import Iterable from datetime import datetime, timedelta from glob import glob from regex import sub import sys import traceback import time import os import subprocess as sp import datahandling import parsing import apigetpost import spamhandling import classes import chatcommunicate from helpers import log, exit_mode, only_blacklists_changed, \ only_modules_changed, blacklist_integrity_check, reload_modules, log_exception from gitmanager import GitManager import findspam from socketscience import SocketScience import metasmoke_cache MS_WEBSOCKET_LONG_INTERVAL = 60 MAX_MS_WEBSOCKET_RETRIES_TO_LONG_INTERVAL = 5 MAX_FAILURES = 10 # Conservative, 10 errors = MS down NO_ACTIVITY_PINGS_TO_REBOOT = 4 NO_ACTIVITY_PINGS_TO_STANDBY = 5 # This is effectively disabled NO_ACTIVITY_PINGS_TO_REPORT = 3 # noinspection PyClassHasNoInit,PyBroadException,PyUnresolvedReferences,PyProtectedMember class Metasmoke: status_pings_since_scan_activity = 0 scan_stat_snapshot = None class AutoSwitch: """ Automatically switch metasmoke status """ MAX_FAILURES = 10 # More than 10 failures == ms down MAX_SUCCESSES = 1 # More than 1 success == ms up ping_failure_counter = 0 # Negative values indicate consecutive successes autoswitch_is_on = True rw_lock = threading.Lock() @staticmethod def ping_failed(): """ Indicate a metasmoke status ping connection failure """ with Metasmoke.AutoSwitch.rw_lock: if Metasmoke.AutoSwitch.ping_failure_counter < 0: # Consecutive counter. Switch sign. Metasmoke.AutoSwitch.ping_failure_counter = 0 Metasmoke.AutoSwitch.ping_failure_counter += 1 current_counter = Metasmoke.AutoSwitch.ping_failure_counter current_auto = Metasmoke.AutoSwitch.autoswitch_is_on # MAX_FAILURES is constant so no lock. if current_counter > Metasmoke.AutoSwitch.MAX_FAILURES and\ GlobalVars.MSStatus.is_up() and current_auto: log("warning", "Last {} connection(s) to metasmoke failed.".format(current_counter) + " Setting metasmoke status to down.") chatcommunicate.tell_rooms_with("debug", "**Warning**: {}: ".format(GlobalVars.location) + "Last {} connection(s) to metasmoke".format(current_counter) + " failed. Setting metasmoke status to **down**.") Metasmoke.set_ms_down(tell=False) @staticmethod def ping_succeeded(): """ Indicate a metasmoke status ping connection success """ with Metasmoke.AutoSwitch.rw_lock: if Metasmoke.AutoSwitch.ping_failure_counter > 0: # Consecutive counter. Switch sign. Metasmoke.AutoSwitch.ping_failure_counter = 0 Metasmoke.AutoSwitch.ping_failure_counter -= 1 # Negative values for success current_counter = -Metasmoke.AutoSwitch.ping_failure_counter current_auto = Metasmoke.AutoSwitch.autoswitch_is_on # MAX_SUCCESSES is constant so no lock. if current_counter > Metasmoke.AutoSwitch.MAX_SUCCESSES and\ GlobalVars.MSStatus.is_down() and current_auto: # Why use warning? Because some action may be needed if people don't think metasmoke is up. log("warning", "Last {} connection(s) to metasmoke succeeded.".format(current_counter) + " Setting metasmoke status to up.") chatcommunicate.tell_rooms_with("debug", "**Notice**: {}: ".format(GlobalVars.location) + "Last {} connection(s) to metasmoke".format(current_counter) + " succeeded. 
Setting metasmoke status to **up**.") Metasmoke.set_ms_up(tell=False) @staticmethod def enable_autoswitch(to_enable): """ Enable or disable auto status switch """ switch_auto_msg = "" with Metasmoke.AutoSwitch.rw_lock: if Metasmoke.AutoSwitch.autoswitch_is_on is not to_enable: # Log and post chat message only if there really is a change. switch_auto_msg = "Metasmoke status autoswitch is now {}abled.".format("en" if to_enable else "dis") Metasmoke.AutoSwitch.autoswitch_is_on = to_enable if switch_auto_msg: log("info", switch_auto_msg) chatcommunicate.tell_rooms_with("debug", switch_auto_msg) @staticmethod def get_ping_failure(): """ Get ping failure count. Negative number is ping success count. """ with Metasmoke.AutoSwitch.rw_lock: return Metasmoke.AutoSwitch.ping_failure_counter @staticmethod def reset_switch(): """ Reset class Metasmoke.AutoSwitch to default values """ with Metasmoke.AutoSwitch.rw_lock: Metasmoke.AutoSwitch.ping_failure_counter = 0 Metasmoke.AutoSwitch.autoswitch_is_on = True @staticmethod def set_ms_up(tell=True): """ Switch metasmoke status to up """ # We must first set metasmoke to up, then say that metasmoke is up, not the other way around. ms_msg = "" if GlobalVars.MSStatus.is_down(): ms_msg = "Metasmoke status: set to up." GlobalVars.MSStatus.set_up() if ms_msg: log("info", ms_msg) if tell: chatcommunicate.tell_rooms_with("debug", "{}: {}".format(GlobalVars.location, ms_msg)) @staticmethod def set_ms_down(tell=True): """ Switch metasmoke status to down """ ms_msg = "" if GlobalVars.MSStatus.is_up(): ms_msg = "Metasmoke status: set to down." GlobalVars.MSStatus.set_down() if ms_msg: log("info", ms_msg) if tell: chatcommunicate.tell_rooms_with("debug", "{}: {}".format(GlobalVars.location, ms_msg)) @staticmethod def connect_websocket(): GlobalVars.metasmoke_ws = websocket.create_connection(GlobalVars.metasmoke_ws_host, origin=GlobalVars.metasmoke_host) payload = json.dumps({"command": "subscribe", "identifier": "{\"channel\":\"SmokeDetectorChannel\"," "\"key\":\"" + GlobalVars.metasmoke_key + "\"}"}) GlobalVars.metasmoke_ws.send(payload) GlobalVars.metasmoke_ws.settimeout(10) @staticmethod def init_websocket(): has_succeeded = False failed_connection_attempts = 0 while GlobalVars.metasmoke_key and GlobalVars.metasmoke_ws_host: try: Metasmoke.connect_websocket() has_succeeded = True while True: a = GlobalVars.metasmoke_ws.recv() try: data = json.loads(a) Metasmoke.handle_websocket_data(data) GlobalVars.MSStatus.succeeded() failed_connection_attempts = 0 except ConnectionError: raise except Exception as e: log('error', '{}: {}'.format(type(e).__name__, e)) log_exception(*sys.exc_info()) GlobalVars.MSStatus.failed() Metasmoke.connect_websocket() except Exception: GlobalVars.MSStatus.failed() log('error', "Couldn't bind to MS websocket") if not has_succeeded: failed_connection_attempts += 1 if failed_connection_attempts == MAX_MS_WEBSOCKET_RETRIES_TO_LONG_INTERVAL: chatcommunicate.tell_rooms_with("debug", "Cannot initiate MS websocket." " Continuing to retry at longer intervals.") log('warning', "Cannot initiate MS websocket." " Continuing to retry at longer intervals.") if failed_connection_attempts >= MAX_MS_WEBSOCKET_RETRIES_TO_LONG_INTERVAL: time.sleep(MS_WEBSOCKET_LONG_INTERVAL) else: # Wait and hopefully network issues will be solved time.sleep(10) else: time.sleep(10) @staticmethod def handle_websocket_data(data): if "message" not in data: if "type" in data and data['type'] == "reject_subscription": log('error', "MS WebSocket subscription was rejected. 
Check your MS key.") raise ConnectionError("MS WebSocket connection rejected") return message = data['message'] if not isinstance(message, Iterable): return if "message" in message:
", or the [SD wiki](//git.io/vyDZv)" " ([history](//github.com/Charcoal-SE/SmokeDetector/wiki/_history)): ", 1) from_ms = from_ms.replace("https:", "") chatcommunicate.tell_rooms_with("metasmoke", from_ms) elif "autoflag_fp" in message: event = message["autoflag_fp"] chatcommunicate.tell_rooms(event["message"], ("debug", "site-" + event["site"]), ("no-site-" + event["site"],), notify_site="/autoflag_fp") elif "exit" in message: os._exit(message["exit"]) elif "blacklist" in message: ids = (message['blacklist']['uid'], message['blacklist']['site']) datahandling.add_blacklisted_user(ids, "metasmoke", message['blacklist']['post']) datahandling.last_feedbacked = (ids, time.time() + 60) elif "unblacklist" in message: ids = (message['unblacklist']['uid'], message['unblacklist']['site']) datahandling.remove_blacklisted_user(ids) elif "naa" in message: post_site_id = parsing.fetch_post_id_and_site_from_url(message["naa"]["post_link"]) datahandling.add_ignored_post(post_site_id[0:2]) elif "fp" in message: post_site_id = parsing.fetch_post_id_and_site_from_url(message["fp"]["post_link"]) datahandling.add_false_positive(post_site_id[0:2]) elif "report" in message: import chatcommands # Do it here chatcommands.report_posts([message["report"]["post_link"]], message["report"]["user"], True, "the metasmoke API") elif "deploy_updated" in message: return # Disabled sha = message["deploy_updated"]["head_commit"]["id"] if sha != os.popen('git log -1 --pretty="%H"').read(): if "autopull" in message["deploy_updated"]["head_commit"]["message"]: if only_blacklists_changed(GitManager.get_remote_diff()): commit_md = "[`{0}`](https://github.com/{1}/commit/{0})" \ .format(sha[:7], GlobalVars.bot_repo_slug) integrity = blacklist_integrity_check() if len(integrity) == 0: # No issues GitManager.pull_remote() findspam.reload_blacklists() chatcommunicate.tell_rooms_with("debug", "No code modified in {0}, only blacklists" " reloaded.".format(commit_md)) else: integrity.append("please fix before pulling.") chatcommunicate.tell_rooms_with("debug", ", ".join(integrity)) elif "commit_status" in message: c = message["commit_status"] sha = c["commit_sha"][:7] recent_commits = sp.check_output(["git", "log", "-50", "--pretty=%H"]).decode('utf-8').strip().split('\n') if c["commit_sha"] in recent_commits: return # Same rev, or earlier rev (e.g. when watching things faster than CI completes), nothing to do if c["status"] == "success": if "autopull" in c["commit_message"] or c["commit_message"].startswith("!") or \ c["commit_message"].startswith("Auto "): s = "[CI]({ci_link}) on [`{commit_sha}`](https://github.com/{repo}/" \ "commit/{commit_sha}) succeeded. 
Message contains 'autopull', pulling...".format( ci_link=c["ci_url"], repo=GlobalVars.bot_repo_slug, commit_sha=sha) remote_diff = GitManager.get_remote_diff() if only_blacklists_changed(remote_diff): GitManager.pull_remote() if not GlobalVars.on_branch: # Restart if HEAD detached log('warning', "Pulling remote with HEAD detached, checkout deploy", f=True) exit_mode("checkout_deploy") GlobalVars.reload() findspam.FindSpam.reload_blacklists() chatcommunicate.tell_rooms_with('debug', GlobalVars.s_norestart_blacklists) elif only_modules_changed(remote_diff): GitManager.pull_remote() if not GlobalVars.on_branch: # Restart if HEAD detached log('warning', "Pulling remote with HEAD detached, checkout deploy", f=True) exit_mode("checkout_deploy") GlobalVars.reload() reload_modules() chatcommunicate.tell_rooms_with('debug', GlobalVars.s_norestart_findspam) else: chatcommunicate.tell_rooms_with('debug', s, notify_site="/ci") exit_mode("pull_update") else: s = "[CI]({ci_link}) on [`{commit_sha}`](https://github.com/{repo}/commit/{commit_sha}) " \ "succeeded.".format(ci_link=c["ci_url"], repo=GlobalVars.bot_repo_slug, commit_sha=sha) chatcommunicate.tell_rooms_with("debug", s, notify_site="/ci") elif c["status"] == "failure": s = "[CI]({ci_link}) on [`{commit_sha}`](https://github.com/{repo}/commit/{commit_sha}) " \ "failed.".format(ci_link=c["ci_url"], repo=GlobalVars.bot_repo_slug, commit_sha=sha) chatcommunicate.tell_rooms_with("debug", s, notify_site="/ci") elif "everything_is_broken" in message: if message["everything_is_broken"] is True: exit_mode("shutdown") elif "domain_whitelist" in message: if message["domain_whitelist"] == "refresh": metasmoke_cache.MetasmokeCache.delete('whitelisted-domains') @staticmethod def send_stats_on_post(title, link, reasons, body, markdown, username, user_link, why, owner_rep, post_score, up_vote_count, down_vote_count): if GlobalVars.metasmoke_host is None: log('info', 'Attempted to send stats but metasmoke_host is undefined. Ignoring.') return elif GlobalVars.MSStatus.is_down(): log('warning', "Metasmoke down, not sending stats.") return metasmoke_key = GlobalVars.metasmoke_key try: if len(why) > 4096: why = why[:2048] + ' ... ' + why[-2043:] # Basic maths post = {'title': title, 'link': link, 'reasons': reasons, 'body': body, 'markdown': markdown, 'username': username, 'user_link': user_link, 'why': why, 'user_reputation': owner_rep, 'score': post_score, 'upvote_count': up_vote_count, 'downvote_count': down_vote_count} # Remove None values (if they somehow manage to get through) post = {k: v for k, v in post.items() if v} payload = {'post': post, 'key': metasmoke_key} headers = {'Content-type': 'application/json'} Metasmoke.post("/posts.json", data=json.dumps(payload), headers=headers) except Exception as e: log('error', e) @staticmethod def send_feedback_for_post(post_link, feedback_type, user_name, user_id, chat_host): if GlobalVars.metasmoke_host is None: log('info', 'Received chat feedback but metasmoke_host is undefined. 
Ignoring.') return elif GlobalVars.MSStatus.is_down(): log('warning', "Metasmoke is down, not sending feedback") return metasmoke_key = GlobalVars.metasmoke_key try: payload = { 'feedback': { 'user_name': user_name, 'chat_user_id': user_id, 'chat_host': chat_host, 'feedback_type': feedback_type, 'post_link': post_link }, 'key': metasmoke_key } headers = {'Content-type': 'application/json'} Metasmoke.post("/feedbacks.json", data=json.dumps(payload), headers=headers) except Exception as e: log('error', e) @staticmethod def send_deletion_stats_for_post(post_link, is_deleted): if GlobalVars.metasmoke_host is None: log('info', 'Attempted to send deletion data but metasmoke_host is undefined. Ignoring.') return elif GlobalVars.MSStatus.is_down(): log('warning', "Metasmoke is down, not sending deletion stats") return metasmoke_key = GlobalVars.metasmoke_key try: payload = { 'deletion_log': { 'is_deleted': is_deleted, 'post_link': post_link }, 'key': metasmoke_key } headers = {'Content-type': 'application/json'} Metasmoke.post("/deletion_logs.json", data=json.dumps(payload), headers=headers) except Exception as e: log('error', e) @staticmethod def send_status_ping(): if GlobalVars.metasmoke_host is None: log('info', 'Attempted to send status ping but metasmoke_host is undefined. Not sent.') return elif GlobalVars.MSStatus.is_down(): payload = { "location": GlobalVars.location, "timestamp": time.time() } SocketScience.send(payload) metasmoke_key = GlobalVars.metasmoke_key try: payload = { 'location': GlobalVars.location, 'key': metasmoke_key, 'standby': GlobalVars.standby_mode or GlobalVars.no_se_activity_scan } headers = {'content-type': 'application/json'} response = Metasmoke.post("/status-update.json", data=json.dumps(payload), headers=headers, ignore_down=True) try: response = response.json() if response.get('pull_update', False): log('info', "Received pull command from MS ping response") exit_mode("pull_update") if ('failover' in response and GlobalVars.standby_mode and not GlobalVars.no_se_activity_scan): # If we're not scanning, then we don't want to become officially active due to failover. if response['failover']: GlobalVars.standby_mode = False chatcommunicate.tell_rooms_with("debug", GlobalVars.location + " received failover signal.", notify_site="/failover") if response.get('standby', False): chatcommunicate.tell_rooms_with("debug", GlobalVars.location + " entering metasmoke-forced standby.") time.sleep(2) exit_mode("standby") if response.get('shutdown', False): exit_mode("shutdown") except Exception: # TODO: What could happen here? 
pass except Exception as e: log('error', e) @staticmethod def update_code_privileged_users_list(): if GlobalVars.MSStatus.is_down(): log('warning', "Metasmoke is down, can't update blacklist manager privilege list") return payload = {'key': GlobalVars.metasmoke_key} headers = {'Content-type': 'application/json'} try: response = Metasmoke.get("/api/users/code_privileged", data=json.dumps(payload), headers=headers).json()['items'] except Exception as e: log('error', e) return GlobalVars.code_privileged_users = set() for id in response["stackexchange_chat_ids"]: GlobalVars.code_privileged_users.add(("stackexchange.com", id)) for id in response["meta_stackexchange_chat_ids"]: GlobalVars.code_privileged_users.add(("meta.stackexchange.com", id)) for id in response["stackoverflow_chat_ids"]: GlobalVars.code_privileged_users.add(("stackoverflow.com", id)) @staticmethod def determine_if_autoflagged(post_url): """ Given the URL for a post, determine whether or not it has been autoflagged. """ payload = { 'key': GlobalVars.metasmoke_key, 'filter': 'GFGJGHFMHGOLMMJMJJJGHIGOMKFKKILF', # id and autoflagged 'urls': post_url } try: response = Metasmoke.get("/api/v2.0/posts/urls", params=payload).json() except Exception as e: log('error', e) return False, [] # The first report of a URL is the only one that will be autoflagged. MS responses to the # /posts/urls endpoint have the oldest report last. if len(response["items"]) > 0 and response["items"][-1]["autoflagged"]: # get flagger names id = str(response["items"][-1]["id"]) payload = {'key': GlobalVars.metasmoke_key} flags = Metasmoke.get("/api/v2.0/posts/" + id + "/flags", params=payload).json() if len(flags["items"]) > 0: return True, [user["username"] for user in flags["items"][0]["autoflagged"]["users"]] return False, [] @staticmethod def stop_autoflagging(): payload = {'key': GlobalVars.metasmoke_key} headers = {'Content-type': 'application/json'} Metasmoke.post("/flagging/smokey_disable", data=json.dumps(payload), headers=headers) @staticmethod def send_statistics(): if GlobalVars.MSStatus.is_down(): log('warning', "Metasmoke is down, not sending statistics") return # Get current apiquota from globalvars GlobalVars.apiquota_rw_lock.acquire() current_apiquota = GlobalVars.apiquota GlobalVars.apiquota_rw_lock.release() posts_scanned, scan_time, posts_per_second = GlobalVars.PostScanStat.get_stat() payload = {'key': GlobalVars.metasmoke_key, 'statistic': {'posts_scanned': posts_scanned, 'api_quota': current_apiquota}} if posts_per_second: # Send scan rate as well, if applicable. 
payload['statistic']['post_scan_rate'] = posts_per_second GlobalVars.PostScanStat.reset_stat() headers = {'Content-type': 'application/json'} if GlobalVars.metasmoke_host is not None: log('info', 'Sent statistics to metasmoke: ', payload['statistic']) Metasmoke.post("/statistics.json", data=json.dumps(payload), headers=headers) @staticmethod def post_auto_comment(msg, user, url=None, ids=None): if not GlobalVars.metasmoke_key: return response = None if url is not None: params = {"key": GlobalVars.metasmoke_key, "urls": url, "filter": "GFGJGHFJNFGNHKNIKHGGOMILHKLJIFFN"} response = Metasmoke.get("/api/v2.0/posts/urls", params=params).json() elif ids is not None: post_id, site = ids site = parsing.api_parameter_from_link(site) params = {"key": GlobalVars.metasmoke_key, "filter": "GFGJGHFJNFGNHKNIKHGGOMILHKLJIFFN"} try: response = Metasmoke.get("/api/v2.0/posts/uid/{}/{}".format(site, post_id), params=params).json() except AttributeError: response = None if response and "items" in response and len(response["items"]) > 0: ms_id = response["items"][0]["id"] params = {"key": GlobalVars.metasmoke_key, "text": msg[:1].upper() + msg[1:], # Capitalise the first letter of the comment "chat_user_id": user.id, "chat_host": user._client.host} Metasmoke.post("/api/v2.0/comments/post/{}".format(ms_id), params=params) @staticmethod def get_post_bodies_from_ms(post_url): if not GlobalVars.metasmoke_key: return None payload = { 'key': GlobalVars.metasmoke_key, 'filter': 'GHOOIJGNLKHIIOIKGILKIJGHFMNKKGFJ', # posts.id, posts.body, posts.created_at 'urls': parsing.to_protocol_relative(post_url) } try: response = Metasmoke.get('/api/v2.0/posts/urls', params=payload).json() except AttributeError: return None except Exception as e: log('error', '{}: {}'.format(type(e).__name__, e)) log_exception(*sys.exc_info()) exception_only = ''.join(traceback.format_exception_only(type(e), e)).strip() chatcommunicate.tell_rooms_with("debug", "{}: In getting MS post information, recovered from `{}`" .format(GlobalVars.location, exception_only)) return None return response['items'] @staticmethod def get_reason_weights(): if not GlobalVars.metasmoke_key: return None payload = { 'key': GlobalVars.metasmoke_key, 'per_page': 100, 'page': 1, } items = [] try: while True: response = Metasmoke.get('/api/v2.0/reasons', params=payload).json() items.extend(response['items']) if not response['has_more']: break payload['page'] += 1 except AttributeError: return None return items # Some sniffy stuff @staticmethod def request_sender(method): def func(url, *args, ignore_down=False, **kwargs): if not GlobalVars.metasmoke_host or (GlobalVars.MSStatus.is_down() and not ignore_down): return None if 'timeout' not in kwargs: kwargs['timeout'] = 10.000 # Don't throttle by MS response = None # Should return None upon failure, if any try: response = method(GlobalVars.metasmoke_host + url, *args, **kwargs) except Exception: GlobalVars.MSStatus.failed() if ignore_down: # Means that this is a status ping Metasmoke.AutoSwitch.ping_failed() # No need to log here because it's re-raised raise # Maintain minimal difference to the original get/post methods else: GlobalVars.MSStatus.succeeded() if ignore_down: # Means that this is a status ping Metasmoke.AutoSwitch.ping_succeeded() return response return func get = request_sender.__func__(requests.get) post = request_sender.__func__(requests.post) @staticmethod def send_status_ping_and_verify_scanning_if_active(): def reboot_or_standby(action): error_message = "There's been no scan activity for {} status pings. 
Going to {}." \ .format(Metasmoke.status_pings_since_scan_activity, action) log('error', error_message) chatcommunicate.tell_rooms_with("debug", error_message) if action == "standby": GlobalVars.standby_mode = True # Let MS know immediately, to lessen potential wait time (e.g. if we fail to reboot). Metasmoke.send_status_ping() time.sleep(8) exit_mode(action) in_standby_mode = GlobalVars.standby_mode or GlobalVars.no_se_activity_scan if not in_standby_mode: # This is the active instance, so should be scanning. If it's not scanning, then report or go to standby. if GlobalVars.PostScanStat.get_stat() == Metasmoke.scan_stat_snapshot: # There's been no activity since the last ping. Metasmoke.status_pings_since_scan_activity += 1 if Metasmoke.status_pings_since_scan_activity >= NO_ACTIVITY_PINGS_TO_REBOOT: # Assume something is very wrong. Report to debug rooms and reboot. reboot_or_standby("reboot") elif Metasmoke.status_pings_since_scan_activity >= NO_ACTIVITY_PINGS_TO_STANDBY: # Assume something is very wrong. Report to debug rooms and go into standby mode. reboot_or_standby("standby") elif Metasmoke.status_pings_since_scan_activity >= NO_ACTIVITY_PINGS_TO_REPORT: # Something might be wrong. Let people in debug rooms know. status_message = "There's been no scan activity for {} status pings. There may be a problem." \ .format(Metasmoke.status_pings_since_scan_activity) log('warning', status_message) chatcommunicate.tell_rooms_with("debug", status_message) else: Metasmoke.status_pings_since_scan_activity = 0 Metasmoke.scan_stat_snapshot = GlobalVars.PostScanStat.get_stat() Metasmoke.send_status_ping()
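# Runnable sketch of the no-activity escalation implemented above. The
# decide_action helper is hypothetical; it just restates the threshold checks
# with the module's values inlined (report at 3 pings, reboot at 4, standby
# at 5). Because the reboot check runs first and its threshold is lower, the
# standby branch is effectively unreachable, matching the comment on
# NO_ACTIVITY_PINGS_TO_STANDBY.
def decide_action(pings_without_activity, to_report=3, to_reboot=4, to_standby=5):
    if pings_without_activity >= to_reboot:
        return "reboot"
    if pings_without_activity >= to_standby:
        return "standby"
    if pings_without_activity >= to_report:
        return "report"
    return "ok"

assert [decide_action(n) for n in range(6)] == \
    ["ok", "ok", "ok", "report", "reboot", "reboot"]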
from_ms = message['message'] if (from_ms.startswith("[ [charcoal-se.github.io](https://github.com/Charcoal-SE/charcoal-se.github.io) ]" " continuous-integration/travis-ci/push")): from_ms = from_ms.replace(": ",
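# Hedged illustration of the trimming done in handle_websocket_data above:
# the first ": " in a Travis CI status message is widened to link to the SD
# wiki, then "https:" is stripped to make links protocol-relative. The sample
# message text after "push: " is invented for the demonstration.
msg = ("[ [charcoal-se.github.io](https://github.com/Charcoal-SE/charcoal-se.github.io) ]"
       " continuous-integration/travis-ci/push: build passed")
msg = msg.replace(
    ": ",
    ", or the [SD wiki](//git.io/vyDZv)"
    " ([history](//github.com/Charcoal-SE/SmokeDetector/wiki/_history)): ",
    1)
msg = msg.replace("https:", "")
print(msg)  # "...travis-ci/push, or the [SD wiki](...) ([history](...)): build passed"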
progressbar.rs
use crate::utils::event::Event; use crate::utils::style::{inline_style, scss_to_css}; use crate::widgets::widget::Widget; /// # The state of a ProgressBar /// /// ## Fields /// /// ```text /// min: i32 /// max: i32 /// value: i32 /// stretched: bool /// style: String /// ``` pub struct ProgressBarState { min: i32, max: i32, value: i32, stretched: bool, style: String, } impl ProgressBarState { /// Get the min pub fn min(&self) -> i32 { self.min } /// Get the max pub fn max(&self) -> i32 { self.max } /// Get the value pub fn value(&self) -> i32 { self.value } /// Get the stretched flag pub fn stretched(&self) -> bool { self.stretched } /// Get the style pub fn style(&self) -> &str { &self.style } /// Set the min pub fn set_min(&mut self, min: i32) { self.min = min; } /// Set the max pub fn set_max(&mut self, max: i32) { self.max = max; } /// Set the value pub fn set_value(&mut self, value: i32) { self.value = if value > self.max { self.max } else if value < self.min { self.min } else { value }; } /// Set the stretched flag pub fn set_stretched(&mut self, stretched: bool) { self.stretched = stretched; } /// Set the style pub fn set_style(&mut self, style: &str) { self.style = style.to_string(); } } /// # The listener of a ProgressBar pub trait ProgressBarListener { /// Function triggered on update event fn on_update(&self, state: &mut ProgressBarState); } /// # A progress bar /// /// ## Fields /// /// ```text /// name: String /// state: ProgressBarState /// listener: Option<Box<dyn ProgressBarListener>> /// ``` /// /// ## Default values /// /// ```text /// name: name.to_string() /// state: /// min: 0 /// max: 100 /// value: 0 /// stretched: false /// style: "".to_string() /// listener: None /// ``` /// /// ## Style /// /// ```text /// div.progressbar /// div.background /// div.foreground /// ``` /// /// ## Example /// /// ``` /// use std::cell::RefCell; /// use std::rc::Rc; /// /// use neutrino::widgets::progressbar::{ /// ProgressBar, /// ProgressBarListener, /// ProgressBarState /// }; /// use neutrino::utils::theme::Theme; /// use neutrino::{App, Window}; /// /// /// struct Counter { /// value: i32, /// } /// /// impl Counter { /// fn new() -> Self { /// Self { value: 0 } /// } /// /// fn value(&self) -> i32 { /// self.value /// } /// } /// /// /// struct MyProgressBarListener { /// counter: Rc<RefCell<Counter>>, /// } /// /// impl MyProgressBarListener { /// pub fn new(counter: Rc<RefCell<Counter>>) -> Self { /// Self { counter } /// } /// } /// /// impl ProgressBarListener for MyProgressBarListener { /// fn on_update(&self, state: &mut ProgressBarState) { /// state.set_value(self.counter.borrow().value()); /// } /// } /// /// /// fn main() { /// let counter = Rc::new(RefCell::new(Counter::new())); /// /// let my_listener = MyProgressBarListener::new(Rc::clone(&counter)); /// /// let mut my_progressbar = ProgressBar::new("my_progressbar"); /// my_progressbar.set_listener(Box::new(my_listener)); /// } /// ``` pub struct ProgressBar { name: String, state: ProgressBarState, listener: Option<Box<dyn ProgressBarListener>>, } impl ProgressBar { /// Create a ProgressBar pub fn new(name: &str) -> Self { Self { name: name.to_string(), state: ProgressBarState { min: 0, max: 100, value: 0, stretched: false, style: "".to_string(), }, listener: None, } } /// Set the min pub fn set_min(&mut self, min: i32)
/// Set the max pub fn set_max(&mut self, max: i32) { self.state.set_max(max); } /// Set the value pub fn set_value(&mut self, value: i32) { self.state.set_value(value); } /// Set the stretched flag to true pub fn set_stretched(&mut self) { self.state.set_stretched(true); } /// Set the listener pub fn set_listener(&mut self, listener: Box<dyn ProgressBarListener>) { self.listener = Some(listener); } /// Set the style pub fn set_style(&mut self, style: &str) { self.state.set_style(style); } } impl Widget for ProgressBar { fn eval(&self) -> String { let stretched = if self.state.stretched() { "stretched" } else { "" }; let style = inline_style(&scss_to_css(&format!( r##"#{}{{{}}}"##, self.name, self.state.style(), ))); let html = format!( r#" <div id="{}" class="progressbar {}"> <div class="background"></div> <div class="foreground" style="width: {}%;"></div> </div> "#, self.name, stretched, f64::from(self.state.value() - self.state.min()) / f64::from(self.state.max() - self.state.min()) * 100.0, ); format!("{}{}", style, html) } fn trigger(&mut self, event: &Event) { match event { Event::Update => self.on_update(), Event::Change { source, value } => { if source == &self.name { self.on_change(value) } } _ => (), } } fn on_update(&mut self) { match &self.listener { None => (), Some(listener) => { listener.on_update(&mut self.state); } } } fn on_change(&mut self, _value: &str) {} }
{ self.state.set_min(min); }
magic_button_a.py
#!/usr/bin/env python
# magic_button_.py
"""
Copyright (c) 2015 ContinuumBridge Limited
Written by Peter Claydon
"""
import sys
import os.path
import time
import json
from twisted.internet import reactor
from cbcommslib import CbApp, CbClient
from cbconfig import *

configFile = CB_CONFIG_DIR + "magic_button.config"
CHECK_INTERVAL = 30
WATCHDOG_INTERVAL = 70
MAX_SEND_INTERVAL = 60*30  # Ensure we have sent a message to client within this time
CID = "CID157"  # Client ID

config = {
    "uuids": []
}

def nicetime(timeStamp):
    localtime = time.localtime(timeStamp)
    now = time.strftime('%H:%M:%S, %d-%m-%Y', localtime)
    return now

class App(CbApp):
    def __init__(self, argv):
        self.state = "stopped"
        self.devices = []
        self.idToName = {}
        self.buttonStates = {}
        self.beaconAdaptor = None
        self.lastSent = 0  # When a message was last sent to the client
        # Super-class init must be called
        CbApp.__init__(self, argv)

    def setState(self, action):
        self.state = action
        msg = {"id": self.id,
               "status": "state",
               "state": self.state}
        self.sendManagerMessage(msg)

    def onConcMessage(self, message):
        self.client.receive(message)

    def checkConnected(self):
        # Called every CHECK_INTERVAL
        now = time.time()
        if self.buttonStates != {}:
            delkeys = []
            for b in self.buttonStates:
                if now - self.buttonStates[b]["connectTime"] > WATCHDOG_INTERVAL:
                    self.buttonStates[b]["rssi"] = -200
                    toClient = {"b": b,
                                "p": self.buttonStates[b]["rssi"],
                                "c": False}
                    self.client.send(toClient)
                    self.cbLog("debug", "checkConnected, button no longer connected: " + str(json.dumps(toClient, indent=4)))
                    self.lastSent = now
                    delkeys.append(b)
            for d in delkeys:
                del self.buttonStates[d]
            if delkeys:
                self.cbLog("debug", "checkConnected, buttonStates after del: " + str(self.buttonStates))
        if now - self.lastSent > MAX_SEND_INTERVAL:
            self.cbLog("debug", "Exceeded MAX_SEND_INTERVAL")
            self.lastSent = now
            toClient = {"status": "init"}
            self.client.send(toClient)
        reactor.callLater(CHECK_INTERVAL, self.checkConnected)

    def onClientMessage(self, message):
        self.cbLog("debug", "onClientMessage, message: " + str(json.dumps(message, indent=4)))
        global config
        if "uuids" in message:
            config["uuids"] = message["uuids"]
            try:
                with open(configFile, 'w') as f:
                    json.dump(config, f)
            except Exception as ex:
                self.cbLog("warning", "onClientMessage, could not write to file. Type: " + str(type(ex)) + ", exception: " + str(ex.args))
            self.readLocalConfig()
            self.requestUUIDs(self.beaconAdaptor)

    def requestUUIDs(self, adaptor):
        req = {"id": self.id,
               "request": "service",
               "service": [{"characteristic": "ble_beacon",
                            "interval": 1.0,
                            "uuids": config["uuids"]}]
              }
        self.sendMessage(req, adaptor)

    def onAdaptorService(self, message):
        for p in message["service"]:
            if p["characteristic"] == "ble_beacon":
                self.beaconAdaptor = message["id"]
                self.requestUUIDs(self.beaconAdaptor)

    def onAdaptorData(self, message):
        if True:  #try:
            if self.state != "running":
                self.setState("running")
            if message["characteristic"] == "ble_beacon":
                if message["data"]["uuid"] in config["uuids"]:
                    changed = False
                    buttonID = message["data"]["major"]
                    buttonState = message["data"]["minor"] & 0x01
                    rxPower = message["data"]["rx_power"]
                    if buttonID in self.buttonStates:
                        self.buttonStates[buttonID]["connectTime"] = time.time()
                        self.cbLog("debug", "Seen: " + str(buttonID))
                    else:
                        self.buttonStates[buttonID] = {
                            "connectTime": time.time(),
                            "state": -1
                        }
                        self.cbLog("info", "New button: " + str(buttonID))
                    if buttonState != self.buttonStates[buttonID]["state"]:
                        self.buttonStates[buttonID]["state"] = buttonState
                        self.buttonStates[buttonID]["rssi"] = rxPower
                        self.buttonStates[buttonID]["rssi_time"] = time.time()
                        changed = True
                    elif (abs(self.buttonStates[buttonID]["rssi"] - rxPower) > 6 and rxPower < -80) \
                            or abs(self.buttonStates[buttonID]["rssi"] - rxPower) > 15:
                        self.buttonStates[buttonID]["rssi"] = rxPower
                        self.buttonStates[buttonID]["rssi_time"] = time.time()
                        changed = True
                    elif abs(self.buttonStates[buttonID]["rssi"] - rxPower) > 5:
                        if time.time() - self.buttonStates[buttonID]["rssi_time"] > 60 * 15:
                    if changed:
                        toClient = {"b": buttonID,
                                    "s": self.buttonStates[buttonID]["state"],
                                    "p": self.buttonStates[buttonID]["rssi"],
                                    "c": True}
                        self.client.send(toClient)
                        self.cbLog("debug", "Sent to client: " + str(json.dumps(toClient, indent=4)))
        #except Exception as ex:
        #    self.cbLog("warning", "onAdaptorData problem. Type: " + str(type(ex)) + ", exception: " + str(ex.args))

    def readLocalConfig(self):
        global config
        try:
            with open(configFile, 'r') as f:
                newConfig = json.load(f)
                self.cbLog("debug", "Read local config")
                config.update(newConfig)
        except Exception as ex:
            self.cbLog("warning", "Problem reading magic_button.config. Type: " + str(type(ex)) + ", exception: " + str(ex.args))
        # Normalise string booleans in the config values
        for c in config:
            if str(config[c]).lower() in ("true", "t", "1"):
                config[c] = True
            elif str(config[c]).lower() in ("false", "f", "0"):
                config[c] = False
        try:
            config["uuids"] = [u.upper() for u in config["uuids"]]
        except Exception as ex:
            self.cbLog("warning", "Problem upper-casing uuids. Type: " + str(type(ex)) + ", exception: " + str(ex.args))
        self.cbLog("debug", "Config: " + str(json.dumps(config, indent=4)))

    def onConfigureMessage(self, managerConfig):
        self.readLocalConfig()
        for adaptor in managerConfig["adaptors"]:
            adtID = adaptor["id"]
            if adtID not in self.devices:
                # Because managerConfigure may be re-called if devices are added
                friendly_name = adaptor["friendly_name"]
                self.idToName[adtID] = friendly_name.replace(" ", "_")
                self.devices.append(adtID)
        self.client = CbClient(self.id, CID, 3)
        self.client.onClientMessage = self.onClientMessage
        self.client.sendMessage = self.sendMessage
        self.client.cbLog = self.cbLog
        reactor.callLater(CHECK_INTERVAL, self.checkConnected)
        self.setState("starting")

if __name__ == '__main__':
    App(sys.argv)
self.buttonStates[buttonID]["rssi"] = rxPower self.buttonStates[buttonID]["rssi_time"] = time.time() changed = True
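# A minimal sketch (hypothetical helper, not part of the app): the RSSI
# hysteresis used in onAdaptorData, restated as a pure function. The
# thresholds mirror the code: report a swing of more than 6 dB while the
# signal is weak (below -80), more than 15 dB otherwise, and any swing of
# more than 5 dB once the previous report is over 15 minutes old.
def rssi_change_significant(prev_rssi, prev_time, rssi, now):
    delta = abs(prev_rssi - rssi)
    if delta > 6 and rssi < -80:
        return True
    if delta > 15:
        return True
    return delta > 5 and now - prev_time > 60 * 15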
embed.js
/* -*- Mode: js; js-indent-level: 2; indent-tabs-mode: nil; tab-width: 2 -*- */ /* vim: set shiftwidth=2 tabstop=2 autoindent cindent expandtab: */ /* * Copyright 2013 Mozilla Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /*global SWF, renderStage, rgbaObjToStr, ShumwayKeyboardListener */ SWF.embed = function(file, doc, container, options) { var canvas = doc.createElement('canvas'); var ctx = canvas.getContext('kanvas-2d'); var loader = new flash.display.Loader(); var loaderInfo = loader.contentLoaderInfo; var stage = new flash.display.Stage(); stage._loader = loader; loaderInfo._parameters = options.movieParams; loaderInfo._url = options.url || (typeof file === 'string' ? file : null); loaderInfo._loaderURL = options.loaderURL || loaderInfo._url; // HACK support of HiDPI displays var pixelRatio = 'devicePixelRatio' in window ? window.devicePixelRatio : 1; var canvasHolder = null; canvas._pixelRatio = pixelRatio; if (pixelRatio > 1) { var cssScale = 'scale(' + (1 / pixelRatio) + ', ' + (1 / pixelRatio) + ')'; canvas.setAttribute('style', '-moz-transform: ' + cssScale + ';' + '-webkit-transform: ' + cssScale + ';' + 'transform: ' + cssScale + ';' + '-moz-transform-origin: 0% 0%;' + '-webkit-transform-origin: 0% 0%;' + 'transform-origin: 0% 0%;'); canvasHolder = doc.createElement('div'); canvasHolder.setAttribute('style', 'display: inline-block; overflow: hidden;'); canvasHolder.appendChild(canvas); } stage._contentsScaleFactor = pixelRatio; loader._parent = stage; loader._stage = stage; function fitCanvas(container, canvas) { if (canvasHolder) { canvasHolder.style.width = container.clientWidth + 'px'; canvasHolder.style.height = container.clientHeight + 'px'; } canvas.width = container.clientWidth * pixelRatio; canvas.height = container.clientHeight * pixelRatio; stage._invalid = true; } loaderInfo._addEventListener('init', function () { if (container.clientHeight) { fitCanvas(container, canvas); window.addEventListener('resize', function () { fitCanvas(container, canvas); }); } else { if (canvasHolder) { canvasHolder.style.width = (stage._stageWidth / 20) + 'px'; canvasHolder.style.height = (stage._stageHeight / 20) + 'px'; } canvas.width = stage._stageWidth * pixelRatio / 20; canvas.height = stage._stageHeight * pixelRatio / 20; } container.setAttribute("style", "position: relative"); canvas.addEventListener('click', function () { ShumwayKeyboardListener.focus = stage; stage._clickTarget._dispatchEvent(new flash.events.MouseEvent('click')); }); canvas.addEventListener('dblclick', function () { if (stage._clickTarget._doubleClickEnabled) { stage._clickTarget._dispatchEvent(new flash.events.MouseEvent('doubleClick')); } }); canvas.addEventListener('mousedown', function () { if (stage._clickTarget._buttonMode) { stage._clickTarget._gotoButtonState('down'); } stage._clickTarget._dispatchEvent(new flash.events.MouseEvent('mouseDown')); }); canvas.addEventListener('mousemove', function (domEvt) { var node = this; var left = 0; var top = 0; if (node.offsetParent) { do { left += node.offsetLeft; 
top += node.offsetTop; } while ((node = node.offsetParent)); } var canvasState = stage._canvasState; var mouseX = ((domEvt.pageX - left) * pixelRatio - canvasState.offsetX) / canvasState.scaleX; var mouseY = ((domEvt.pageY - top) * pixelRatio - canvasState.offsetY) / canvasState.scaleY; if (mouseX !== stage._mouseX || mouseY !== stage._mouseY) { stage._mouseMoved = true; stage._mouseX = mouseX * 20; stage._mouseY = mouseY * 20; } }); canvas.addEventListener('mouseup', function () { if (stage._clickTarget._buttonMode) { stage._clickTarget._gotoButtonState('over'); } stage._clickTarget._dispatchEvent(new flash.events.MouseEvent('mouseUp')); }); canvas.addEventListener('mouseover', function () { stage._mouseMoved = true; stage._mouseOver = true; }); canvas.addEventListener('mouseout', function () { stage._mouseMoved = true; stage._mouseOver = false; }); var bgcolor = loaderInfo._backgroundColor; if (options.objectParams) { var m; if (options.objectParams.bgcolor && (m = /#([0-9A-F]{6})/i.exec(options.objectParams.bgcolor))) { var hexColor = parseInt(m[1], 16); bgcolor = { red: (hexColor >> 16) & 255, green: (hexColor >> 8) & 255, blue: hexColor & 255, alpha: 255 };
} if (options.objectParams.wmode === 'transparent') { bgcolor = {red: 0, green: 0, blue: 0, alpha: 0}; } } stage._color = bgcolor; ctx.fillStyle = rgbaObjToStr(bgcolor); ctx.fillRect(0, 0, canvas.width, canvas.height); var root = loader._content; stage._children[0] = root; root._dispatchEvent("added"); root._dispatchEvent("addedToStage"); container.appendChild(canvasHolder || canvas); if (options.onStageInitialized) { options.onStageInitialized(stage); } renderStage(stage, ctx, options); }); if (options.onComplete) { loaderInfo._addEventListener("complete", function () { options.onComplete(); }); } loader._load(typeof file === 'string' ? new flash.net.URLRequest(file) : file); return loader; };
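// A minimal sketch (not part of Shumway): the page-to-stage coordinate
// mapping performed by the mousemove handler above, factored out.
// `canvasState` stands for the offset/scale state that renderStage
// maintains; the final * 20 converts pixels to twips, matching how
// stage._mouseX/_mouseY are stored.
function pageToStage(pageX, pageY, left, top, pixelRatio, canvasState) {
  var x = ((pageX - left) * pixelRatio - canvasState.offsetX) / canvasState.scaleX;
  var y = ((pageY - top) * pixelRatio - canvasState.offsetY) / canvasState.scaleY;
  return { x: x * 20, y: y * 20 };
}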
score.go
package main import "./gomat" type score struct { e float32 c bool } type score_counter struct { R []score capacity int p int n int sum_c int sum_e float64 } func NewSC(cap int) *score_counter
// Add records one result in the ring buffer R, evicting the oldest entry
// once the buffer is full and keeping sum_c / sum_e in sync.
func (sc *score_counter) Add(c bool, e float32) {
	if sc.p == sc.n {
		// Buffer is still filling up: grow the logical size.
		sc.n++
	} else {
		// Buffer is full: subtract the entry about to be overwritten.
		if sc.n != sc.capacity {
			panic("n cap mismatch!")
		}
		if sc.R[sc.p].c {
			sc.sum_c--
		}
		sc.sum_e -= float64(sc.R[sc.p].e)
	}
	sc.R[sc.p].c = c
	sc.R[sc.p].e = e
	if c {
		sc.sum_c++
	}
	sc.sum_e += float64(e)
	sc.p++
	if sc.p == sc.capacity {
		sc.p = 0 // wrap around
	}
}

// Update_score compares predictions y against labels c row by row and feeds
// each row's correctness and error e into the rolling counter.
func (sc *score_counter) Update_score(y, c, e *gomat.Matrix) {
	if y.Row() != c.Row() || y.Row() != e.Row() {
		panic("row mismatch!")
	}
	for i := 0; i < y.Row(); i++ {
		classify := y.Get(i, 0) == c.Get(i, 0)
		sc.Add(classify, e.Get(i, 0))
	}
}
{ sc := &score_counter{} sc.R = make([]score, cap) sc.capacity = cap return sc }
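// A minimal usage sketch (hypothetical, not from the source): with a window
// of three, the fourth Add evicts the oldest entry, so sum_c and sum_e
// always describe the most recent `capacity` results.
func exampleScoreCounter() {
	sc := NewSC(3)
	sc.Add(true, 0.10)
	sc.Add(false, 0.90)
	sc.Add(true, 0.20) // window {T, F, T}: sum_c == 2, sum_e == 1.20
	sc.Add(true, 0.05) // evicts (T, 0.10): sum_c == 2, sum_e == 1.15
}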
detail.module_20191011101634.ts
import { NgModule } from '@angular/core'; import { CommonModule } from '@angular/common'; import { SharedModule } from '../../shared/share.module'; import { ThemeModule } from '../../@theme/theme.module'; import { DetailComponent } from './detail.component'; import { LightboxModule } from 'ngx-lightbox';
}) export class DetailModule {}
@NgModule({ imports: [LightboxModule, CommonModule, SharedModule, ThemeModule], exports: [DetailComponent], declarations: [DetailComponent],
serial.rs
//! YubiHSM 2 device serial numbers use super::error::{Error, ErrorKind}; use anomaly::{fail, format_err}; use serde::{Deserialize, Serialize}; use std::{ fmt::{self, Display}, str::FromStr, }; /// Length of a YubiHSM 2 serial number in base 10 digits (i.e. characters) const NUM_DIGITS: usize = 10; /// YubiHSM serial numbers #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, PartialOrd, Ord, Serialize)] pub struct Number(u32);
impl Display for Number { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:0width$}", self.0, width = NUM_DIGITS) } } impl FromStr for Number { type Err = Error; fn from_str(s: &str) -> Result<Number, Error> { if s.len() == NUM_DIGITS { let number = s.parse::<u32>().map_err(|_| { format_err!(ErrorKind::InvalidData, "error parsing serial number: {}", s) })?; Ok(Number(number)) } else { fail!( ErrorKind::WrongLength, "invalid serial number length (expected {}, got {}): '{}'", NUM_DIGITS, s.len(), s ); } } }
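// A minimal sketch (hypothetical, not part of the crate): serial numbers
// round-trip through FromStr/Display, with Display zero-padding back to
// the fixed ten digits.
fn serial_round_trip() {
    let serial: Number = "0123456789".parse().unwrap();
    assert_eq!(serial.to_string(), "0123456789");
    // Anything that is not exactly ten decimal digits is rejected.
    assert!("12345".parse::<Number>().is_err());
}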
main.go
package main

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"log"
	"os"

	svc "github.com/practicalgo/code/chap9/interceptors/service"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

func metadataUnaryInterceptor(
	ctx context.Context,
	method string,
	req, reply interface{},
	cc *grpc.ClientConn,
	invoker grpc.UnaryInvoker,
	opts ...grpc.CallOption,
) error {
	ctxWithMetadata := metadata.AppendToOutgoingContext(
		ctx,
		"Request-Id", "request-123",
	)
	return invoker(ctxWithMetadata, method, req, reply, cc, opts...)
}

func metadataStreamInterceptor(
	ctx context.Context,
	desc *grpc.StreamDesc,
	cc *grpc.ClientConn,
	method string,
	streamer grpc.Streamer,
	opts ...grpc.CallOption,
) (grpc.ClientStream, error) {
	ctxWithMetadata := metadata.AppendToOutgoingContext(
		ctx,
		"Request-Id", "request-123",
	)
	clientStream, err := streamer(
		ctxWithMetadata,
		desc,
		cc,
		method,
		opts...,
	)
	return clientStream, err
}

func setupGrpcConn(addr string) (*grpc.ClientConn, error) {
	return grpc.DialContext(
		context.Background(),
		addr,
		grpc.WithInsecure(),
		grpc.WithBlock(),
		grpc.WithUnaryInterceptor(metadataUnaryInterceptor),
		grpc.WithStreamInterceptor(metadataStreamInterceptor),
	)
}

func getUserServiceClient(conn *grpc.ClientConn) svc.UsersClient {
	return svc.NewUsersClient(conn)
}

func getUser(
	client svc.UsersClient,
	u *svc.UserGetRequest,
) (*svc.UserGetReply, error) {
	return client.GetUser(context.Background(), u)
}

func setupChat(r io.Reader, w io.Writer, c svc.UsersClient) error {
	stream, err := c.GetHelp(context.Background())
	if err != nil {
		return err
	}
	// Create the scanner once; constructing a new scanner on every loop
	// iteration can silently drop input that was already buffered.
	scanner := bufio.NewScanner(r)
	for {
		prompt := "Request: "
		fmt.Fprint(w, prompt)
		scanner.Scan()
		if err := scanner.Err(); err != nil {
			return err
		}
		msg := scanner.Text()
		if msg == "quit" {
			break
		}
		request := svc.UserHelpRequest{
			Request: msg,
		}
		err := stream.Send(&request)
		if err != nil
resp, err := stream.Recv() if err != nil { return err } fmt.Printf("Response: %s\n", resp.Response) } return stream.CloseSend() } func main() { if len(os.Args) != 3 { log.Fatal( "Specify a gRPC server and method to call", ) } serverAddr := os.Args[1] methodName := os.Args[2] conn, err := setupGrpcConn(serverAddr) if err != nil { log.Fatal(err) } defer conn.Close() c := getUserServiceClient(conn) switch methodName { case "GetUser": result, err := getUser( c, &svc.UserGetRequest{Email: "[email protected]"}, ) if err != nil { log.Fatal(err) } fmt.Fprintf( os.Stdout, "User: %s %s\n", result.User.FirstName, result.User.LastName, ) case "GetHelp": err = setupChat(os.Stdin, os.Stdout, c) if err != nil { log.Fatal(err) } default: log.Fatal("Unrecognized method name") } }
{ return err }
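// A minimal sketch (not from the source): client interceptors compose, so a
// second unary interceptor such as this hypothetical logging one could be
// installed alongside the metadata interceptor via
// grpc.WithChainUnaryInterceptor(metadataUnaryInterceptor, loggingUnaryInterceptor).
func loggingUnaryInterceptor(
	ctx context.Context,
	method string,
	req, reply interface{},
	cc *grpc.ClientConn,
	invoker grpc.UnaryInvoker,
	opts ...grpc.CallOption,
) error {
	log.Printf("calling method: %s", method)
	return invoker(ctx, method, req, reply, cc, opts...)
}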
cors.rs
//! Cross-origin resource sharing (CORS) for Actix applications
//!
//! CORS middleware can be used with both applications and resources.
//! The `Cors` middleware can be passed as a parameter to the `App::wrap()`,
//! `Resource::wrap()` or `Scope::wrap()` methods.
//!
//! # Example
//!
//! ```rust
//! use actix_web::middleware::cors::Cors;
//! use actix_web::{http, web, App, HttpRequest, HttpResponse, HttpServer};
//!
//! fn index(req: HttpRequest) -> &'static str {
//!     "Hello world"
//! }
//!
//! fn main() -> std::io::Result<()> {
//!     HttpServer::new(|| App::new()
//!         .wrap(
//!             Cors::new() // <- Construct CORS middleware builder
//!               .allowed_origin("https://www.rust-lang.org/")
//!               .allowed_methods(vec!["GET", "POST"])
//!               .allowed_headers(vec![http::header::AUTHORIZATION, http::header::ACCEPT])
//!               .allowed_header(http::header::CONTENT_TYPE)
//!               .max_age(3600))
//!         .service(
//!             web::resource("/index.html")
//!               .route(web::get().to(index))
//!               .route(web::head().to(|| HttpResponse::MethodNotAllowed()))
//!         ))
//!         .bind("127.0.0.1:8080")?;
//!
//!     Ok(())
//! }
//! ```
//! In this example a custom *CORS* middleware gets registered for the
//! "/index.html" endpoint.
//!
//! The Cors middleware automatically handles *OPTIONS* preflight requests.
use std::collections::HashSet;
use std::iter::FromIterator;
use std::rc::Rc;

use actix_service::{IntoTransform, Service, Transform};
use derive_more::Display;
use futures::future::{ok, Either, Future, FutureResult};
use futures::Poll;

use crate::dev::RequestHead;
use crate::error::{Error, ResponseError, Result};
use crate::http::header::{self, HeaderName, HeaderValue};
use crate::http::{self, HttpTryFrom, Method, StatusCode, Uri};
use crate::service::{ServiceRequest, ServiceResponse};
use crate::HttpResponse;

/// A set of errors that can occur during CORS processing
#[derive(Debug, Display)]
pub enum CorsError {
    /// The HTTP request header `Origin` is required but was not provided
    #[display(
        fmt = "The HTTP request header `Origin` is required but was not provided"
    )]
    MissingOrigin,
    /// The HTTP request header `Origin` could not be parsed correctly.
    #[display(fmt = "The HTTP request header `Origin` could not be parsed correctly.")]
    BadOrigin,
    /// The request header `Access-Control-Request-Method` is required but is
    /// missing
    #[display(
        fmt = "The request header `Access-Control-Request-Method` is required but is missing"
    )]
    MissingRequestMethod,
    /// The request header `Access-Control-Request-Method` has an invalid value
    #[display(
        fmt = "The request header `Access-Control-Request-Method` has an invalid value"
    )]
    BadRequestMethod,
    /// The request header `Access-Control-Request-Headers` has an invalid
    /// value
    #[display(
        fmt = "The request header `Access-Control-Request-Headers` has an invalid value"
    )]
    BadRequestHeaders,
    /// Origin is not allowed to make this request
    #[display(fmt = "Origin is not allowed to make this request")]
    OriginNotAllowed,
    /// Requested method is not allowed
    #[display(fmt = "Requested method is not allowed")]
    MethodNotAllowed,
    /// One or more headers requested are not allowed
    #[display(fmt = "One or more headers requested are not allowed")]
    HeadersNotAllowed,
}

impl ResponseError for CorsError {
    fn error_response(&self) -> HttpResponse {
        HttpResponse::with_body(StatusCode::BAD_REQUEST, format!("{}", self).into())
    }
}

/// An enum signifying that some of type T is allowed, or `All` (everything is
/// allowed).
///
/// `Default` is implemented for this enum and is `All`.
#[derive(Clone, Debug, Eq, PartialEq)] pub enum AllOrSome<T> { /// Everything is allowed. Usually equivalent to the "*" value. All, /// Only some of `T` is allowed Some(T), } impl<T> Default for AllOrSome<T> { fn default() -> Self { AllOrSome::All } } impl<T> AllOrSome<T> { /// Returns whether this is an `All` variant pub fn is_all(&self) -> bool { match *self { AllOrSome::All => true, AllOrSome::Some(_) => false, } } /// Returns whether this is a `Some` variant pub fn is_some(&self) -> bool
    /// Returns &T
    pub fn as_ref(&self) -> Option<&T> {
        match *self {
            AllOrSome::All => None,
            AllOrSome::Some(ref t) => Some(t),
        }
    }
}

/// Structure that follows the builder pattern for building `Cors` middleware
/// structs.
///
/// To construct a CORS middleware:
///
/// 1. Call [`Cors::new`](struct.Cors.html#method.new) to start building.
/// 2. Use any of the builder methods to set fields in the backend.
/// 3. Pass the constructed middleware to `App::wrap()`,
///    `Resource::wrap()` or `Scope::wrap()`.
///
/// # Example
///
/// ```rust
/// use actix_web::http::header;
/// use actix_web::middleware::cors;
///
/// # fn main() {
/// let cors = cors::Cors::new()
///     .allowed_origin("https://www.rust-lang.org/")
///     .allowed_methods(vec!["GET", "POST"])
///     .allowed_headers(vec![header::AUTHORIZATION, header::ACCEPT])
///     .allowed_header(header::CONTENT_TYPE)
///     .max_age(3600);
/// # }
/// ```
pub struct Cors {
    cors: Option<Inner>,
    methods: bool,
    error: Option<http::Error>,
    expose_hdrs: HashSet<HeaderName>,
}

impl Cors {
    /// Build a new CORS middleware instance
    pub fn new() -> Cors {
        Cors {
            cors: Some(Inner {
                origins: AllOrSome::All,
                origins_str: None,
                methods: HashSet::new(),
                headers: AllOrSome::All,
                expose_hdrs: None,
                max_age: None,
                preflight: true,
                send_wildcard: false,
                supports_credentials: false,
                vary_header: true,
            }),
            methods: false,
            error: None,
            expose_hdrs: HashSet::new(),
        }
    }

    /// Build a new CORS default middleware
    pub fn default() -> CorsFactory {
        let inner = Inner {
            origins: AllOrSome::default(),
            origins_str: None,
            methods: HashSet::from_iter(
                vec![
                    Method::GET,
                    Method::HEAD,
                    Method::POST,
                    Method::OPTIONS,
                    Method::PUT,
                    Method::PATCH,
                    Method::DELETE,
                ]
                .into_iter(),
            ),
            headers: AllOrSome::All,
            expose_hdrs: None,
            max_age: None,
            preflight: true,
            send_wildcard: false,
            supports_credentials: false,
            vary_header: true,
        };
        CorsFactory {
            inner: Rc::new(inner),
        }
    }

    /// Add an origin that is allowed to make requests.
    /// Will be verified against the `Origin` request header.
    ///
    /// When `All` is set, and `send_wildcard` is set, "*" will be sent in
    /// the `Access-Control-Allow-Origin` response header. Otherwise, the
    /// client's `Origin` request header will be echoed back in the
    /// `Access-Control-Allow-Origin` response header.
    ///
    /// When `Some` is set, the client's `Origin` request header will be
    /// checked in a case-sensitive manner.
    ///
    /// This is the `list of origins` in the
    /// [Resource Processing Model](https://www.w3.org/TR/cors/#resource-processing-model).
    ///
    /// Defaults to `All`.
    ///
    /// The builder panics if the supplied origin is not a valid URI.
    pub fn allowed_origin(mut self, origin: &str) -> Cors {
        if let Some(cors) = cors(&mut self.cors, &self.error) {
            match Uri::try_from(origin) {
                Ok(_) => {
                    if cors.origins.is_all() {
                        cors.origins = AllOrSome::Some(HashSet::new());
                    }
                    if let AllOrSome::Some(ref mut origins) = cors.origins {
                        origins.insert(origin.to_owned());
                    }
                }
                Err(e) => {
                    self.error = Some(e.into());
                }
            }
        }
        self
    }

    /// Set a list of methods which the allowed origins are allowed to access
    /// for requests.
    ///
    /// This is the `list of methods` in the
    /// [Resource Processing Model](https://www.w3.org/TR/cors/#resource-processing-model).
    ///
    /// Defaults to `[GET, HEAD, POST, OPTIONS, PUT, PATCH, DELETE]`
    pub fn allowed_methods<U, M>(mut self, methods: U) -> Cors
    where
        U: IntoIterator<Item = M>,
        Method: HttpTryFrom<M>,
    {
        self.methods = true;
        if let Some(cors) = cors(&mut self.cors, &self.error) {
            for m in methods {
                match Method::try_from(m) {
                    Ok(method) => {
                        cors.methods.insert(method);
                    }
                    Err(e) => {
                        self.error = Some(e.into());
                        break;
                    }
                }
            }
        }
        self
    }

    /// Set an allowed header
    pub fn allowed_header<H>(mut self, header: H) -> Cors
    where
        HeaderName: HttpTryFrom<H>,
    {
        if let Some(cors) = cors(&mut self.cors, &self.error) {
            match HeaderName::try_from(header) {
                Ok(method) => {
                    if cors.headers.is_all() {
                        cors.headers = AllOrSome::Some(HashSet::new());
                    }
                    if let AllOrSome::Some(ref mut headers) = cors.headers {
                        headers.insert(method);
                    }
                }
                Err(e) => self.error = Some(e.into()),
            }
        }
        self
    }

    /// Set a list of header field names which can be used when
    /// this resource is accessed by allowed origins.
    ///
    /// If `All` is set, whatever is requested by the client in
    /// `Access-Control-Request-Headers` will be echoed back in the
    /// `Access-Control-Allow-Headers` header.
    ///
    /// This is the `list of headers` in the
    /// [Resource Processing Model](https://www.w3.org/TR/cors/#resource-processing-model).
    ///
    /// Defaults to `All`.
    pub fn allowed_headers<U, H>(mut self, headers: U) -> Cors
    where
        U: IntoIterator<Item = H>,
        HeaderName: HttpTryFrom<H>,
    {
        if let Some(cors) = cors(&mut self.cors, &self.error) {
            for h in headers {
                match HeaderName::try_from(h) {
                    Ok(method) => {
                        if cors.headers.is_all() {
                            cors.headers = AllOrSome::Some(HashSet::new());
                        }
                        if let AllOrSome::Some(ref mut headers) = cors.headers {
                            headers.insert(method);
                        }
                    }
                    Err(e) => {
                        self.error = Some(e.into());
                        break;
                    }
                }
            }
        }
        self
    }

    /// Set a list of headers which are safe to expose to the API of a CORS API
    /// specification. This corresponds to the
    /// `Access-Control-Expose-Headers` response header.
    ///
    /// This is the `list of exposed headers` in the
    /// [Resource Processing Model](https://www.w3.org/TR/cors/#resource-processing-model).
    ///
    /// This defaults to an empty set.
    pub fn expose_headers<U, H>(mut self, headers: U) -> Cors
    where
        U: IntoIterator<Item = H>,
        HeaderName: HttpTryFrom<H>,
    {
        for h in headers {
            match HeaderName::try_from(h) {
                Ok(method) => {
                    self.expose_hdrs.insert(method);
                }
                Err(e) => {
                    self.error = Some(e.into());
                    break;
                }
            }
        }
        self
    }

    /// Set a maximum time for which this CORS request may be cached.
    /// This value is set as the `Access-Control-Max-Age` header.
    ///
    /// This defaults to `None` (unset).
    pub fn max_age(mut self, max_age: usize) -> Cors {
        if let Some(cors) = cors(&mut self.cors, &self.error) {
            cors.max_age = Some(max_age)
        }
        self
    }

    /// Send a wildcard origin
    ///
    /// If send wildcard is set and the `allowed_origins` parameter is `All`, a
    /// wildcard `Access-Control-Allow-Origin` response header is sent,
    /// rather than the request’s `Origin` header.
    ///
    /// This is the `supports credentials flag` in the
    /// [Resource Processing Model](https://www.w3.org/TR/cors/#resource-processing-model).
    ///
    /// This **CANNOT** be used in conjunction with `allowed_origins` set to
    /// `All` and `allow_credentials` set to `true`. Depending on the mode
    /// of usage, this will either result in an `Error::
    /// CredentialsWithWildcardOrigin` error during actix launch or runtime.
    ///
    /// Defaults to `false`.
    pub fn send_wildcard(mut self) -> Cors {
        if let Some(cors) = cors(&mut self.cors, &self.error) {
            cors.send_wildcard = true
        }
        self
    }

    /// Allows users to make authenticated requests
    ///
    /// If true, injects the `Access-Control-Allow-Credentials` header in
    /// responses. This allows cookies and credentials to be submitted
    /// across domains.
    ///
    /// This option cannot be used in conjunction with an `allowed_origin` set
    /// to `All` and `send_wildcards` set to `true`.
    ///
    /// Defaults to `false`.
    ///
    /// The builder panics if credentials are allowed but the origin is set to
    /// "*"; this is not allowed by the W3C specification.
    pub fn supports_credentials(mut self) -> Cors {
        if let Some(cors) = cors(&mut self.cors, &self.error) {
            cors.supports_credentials = true
        }
        self
    }

    /// Disable `Vary` header support.
    ///
    /// When enabled the header `Vary: Origin` will be returned as per the W3
    /// implementation guidelines.
    ///
    /// Setting this header when the `Access-Control-Allow-Origin` is
    /// dynamically generated (e.g. when there is more than one allowed
    /// origin, and an Origin other than '*' is returned) informs CDNs and
    /// other caches that the CORS headers are dynamic, and cannot be cached.
    ///
    /// By default `vary` header support is enabled.
    pub fn disable_vary_header(mut self) -> Cors {
        if let Some(cors) = cors(&mut self.cors, &self.error) {
            cors.vary_header = false
        }
        self
    }

    /// Disable *preflight* request support.
    ///
    /// When enabled, the CORS middleware automatically handles *OPTIONS*
    /// requests. This is useful for application-level middleware.
    ///
    /// By default *preflight* support is enabled.
    pub fn disable_preflight(mut self) -> Cors {
        if let Some(cors) = cors(&mut self.cors, &self.error) {
            cors.preflight = false
        }
        self
    }
}

fn cors<'a>(
    parts: &'a mut Option<Inner>,
    err: &Option<http::Error>,
) -> Option<&'a mut Inner> {
    if err.is_some() {
        return None;
    }
    parts.as_mut()
}

impl<S, B> IntoTransform<CorsFactory, S> for Cors
where
    S: Service<Request = ServiceRequest, Response = ServiceResponse<B>, Error = Error>,
    S::Future: 'static,
    B: 'static,
{
    fn into_transform(self) -> CorsFactory {
        let mut slf = if !self.methods {
            self.allowed_methods(vec![
                Method::GET,
                Method::HEAD,
                Method::POST,
                Method::OPTIONS,
                Method::PUT,
                Method::PATCH,
                Method::DELETE,
            ])
        } else {
            self
        };

        if let Some(e) = slf.error.take() {
            panic!("{}", e);
        }

        let mut cors = slf.cors.take().expect("cannot reuse CorsBuilder");

        if cors.supports_credentials && cors.send_wildcard && cors.origins.is_all() {
            panic!("Credentials are allowed, but the Origin is set to \"*\"");
        }

        if let AllOrSome::Some(ref origins) = cors.origins {
            let s = origins
                .iter()
                .fold(String::new(), |s, v| format!("{}, {}", s, v));
            cors.origins_str = Some(HeaderValue::try_from(&s[2..]).unwrap());
        }

        if !slf.expose_hdrs.is_empty() {
            cors.expose_hdrs = Some(
                slf.expose_hdrs
                    .iter()
                    .fold(String::new(), |s, v| format!("{}, {}", s, v.as_str()))[2..]
                    .to_owned(),
            );
        }

        CorsFactory {
            inner: Rc::new(cors),
        }
    }
}

/// `Middleware` for Cross-origin resource sharing support
///
/// The Cors struct contains the settings for CORS requests to be validated and
/// for responses to be generated.
pub struct CorsFactory { inner: Rc<Inner>, } impl<S, B> Transform<S> for CorsFactory where S: Service<Request = ServiceRequest, Response = ServiceResponse<B>, Error = Error>, S::Future: 'static, B: 'static, { type Request = ServiceRequest; type Response = ServiceResponse<B>; type Error = Error; type InitError = (); type Transform = CorsMiddleware<S>; type Future = FutureResult<Self::Transform, Self::InitError>; fn new_transform(&self, service: S) -> Self::Future { ok(CorsMiddleware { service, inner: self.inner.clone(), }) } } /// `Middleware` for Cross-origin resource sharing support /// /// The Cors struct contains the settings for CORS requests to be validated and /// for responses to be generated. #[derive(Clone)] pub struct CorsMiddleware<S> { service: S, inner: Rc<Inner>, } struct Inner { methods: HashSet<Method>, origins: AllOrSome<HashSet<String>>, origins_str: Option<HeaderValue>, headers: AllOrSome<HashSet<HeaderName>>, expose_hdrs: Option<String>, max_age: Option<usize>, preflight: bool, send_wildcard: bool, supports_credentials: bool, vary_header: bool, } impl Inner { fn validate_origin(&self, req: &RequestHead) -> Result<(), CorsError> { if let Some(hdr) = req.headers().get(&header::ORIGIN) { if let Ok(origin) = hdr.to_str() { return match self.origins { AllOrSome::All => Ok(()), AllOrSome::Some(ref allowed_origins) => allowed_origins .get(origin) .and_then(|_| Some(())) .ok_or_else(|| CorsError::OriginNotAllowed), }; } Err(CorsError::BadOrigin) } else { return match self.origins { AllOrSome::All => Ok(()), _ => Err(CorsError::MissingOrigin), }; } } fn access_control_allow_origin(&self, req: &RequestHead) -> Option<HeaderValue> { match self.origins { AllOrSome::All => { if self.send_wildcard { Some(HeaderValue::from_static("*")) } else if let Some(origin) = req.headers().get(&header::ORIGIN) { Some(origin.clone()) } else { None } } AllOrSome::Some(ref origins) => { if let Some(origin) = req.headers() .get(&header::ORIGIN) .filter(|o| match o.to_str() { Ok(os) => origins.contains(os), _ => false, }) { Some(origin.clone()) } else { Some(self.origins_str.as_ref().unwrap().clone()) } } } } fn validate_allowed_method(&self, req: &RequestHead) -> Result<(), CorsError> { if let Some(hdr) = req.headers().get(&header::ACCESS_CONTROL_REQUEST_METHOD) { if let Ok(meth) = hdr.to_str() { if let Ok(method) = Method::try_from(meth) { return self .methods .get(&method) .and_then(|_| Some(())) .ok_or_else(|| CorsError::MethodNotAllowed); } } Err(CorsError::BadRequestMethod) } else { Err(CorsError::MissingRequestMethod) } } fn validate_allowed_headers(&self, req: &RequestHead) -> Result<(), CorsError> { match self.headers { AllOrSome::All => Ok(()), AllOrSome::Some(ref allowed_headers) => { if let Some(hdr) = req.headers().get(&header::ACCESS_CONTROL_REQUEST_HEADERS) { if let Ok(headers) = hdr.to_str() { let mut hdrs = HashSet::new(); for hdr in headers.split(',') { match HeaderName::try_from(hdr.trim()) { Ok(hdr) => hdrs.insert(hdr), Err(_) => return Err(CorsError::BadRequestHeaders), }; } // `Access-Control-Request-Headers` must contain 1 or more // `field-name`. 
                        if !hdrs.is_empty() {
                            if !hdrs.is_subset(allowed_headers) {
                                return Err(CorsError::HeadersNotAllowed);
                            }
                            return Ok(());
                        }
                    }
                    Err(CorsError::BadRequestHeaders)
                } else {
                    return Ok(());
                }
            }
        }
    }
}

impl<S, B> Service for CorsMiddleware<S>
where
    S: Service<Request = ServiceRequest, Response = ServiceResponse<B>, Error = Error>,
    S::Future: 'static,
    B: 'static,
{
    type Request = ServiceRequest;
    type Response = ServiceResponse<B>;
    type Error = Error;
    type Future = Either<
        FutureResult<Self::Response, Error>,
        Either<S::Future, Box<Future<Item = Self::Response, Error = Error>>>,
    >;

    fn poll_ready(&mut self) -> Poll<(), Self::Error> {
        self.service.poll_ready()
    }

    fn call(&mut self, req: ServiceRequest) -> Self::Future {
        if self.inner.preflight && Method::OPTIONS == *req.method() {
            if let Err(e) = self
                .inner
                .validate_origin(req.head())
                .and_then(|_| self.inner.validate_allowed_method(req.head()))
                .and_then(|_| self.inner.validate_allowed_headers(req.head()))
            {
                return Either::A(ok(req.error_response(e)));
            }

            // allowed headers
            let headers = if let Some(headers) = self.inner.headers.as_ref() {
                Some(
                    HeaderValue::try_from(
                        &headers
                            .iter()
                            .fold(String::new(), |s, v| s + "," + v.as_str())
                            .as_str()[1..],
                    )
                    .unwrap(),
                )
            } else if let Some(hdr) =
                req.headers().get(&header::ACCESS_CONTROL_REQUEST_HEADERS)
            {
                Some(hdr.clone())
            } else {
                None
            };

            let res = HttpResponse::Ok()
                .if_some(self.inner.max_age.as_ref(), |max_age, resp| {
                    let _ = resp.header(
                        header::ACCESS_CONTROL_MAX_AGE,
                        format!("{}", max_age).as_str(),
                    );
                })
                .if_some(headers, |headers, resp| {
                    let _ = resp.header(header::ACCESS_CONTROL_ALLOW_HEADERS, headers);
                })
                .if_some(
                    self.inner.access_control_allow_origin(req.head()),
                    |origin, resp| {
                        let _ = resp.header(header::ACCESS_CONTROL_ALLOW_ORIGIN, origin);
                    },
                )
                .if_true(self.inner.supports_credentials, |resp| {
                    resp.header(header::ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
                })
                .header(
                    header::ACCESS_CONTROL_ALLOW_METHODS,
                    &self
                        .inner
                        .methods
                        .iter()
                        .fold(String::new(), |s, v| s + "," + v.as_str())
                        .as_str()[1..],
                )
                .finish()
                .into_body();

            Either::A(ok(req.into_response(res)))
        } else if req.headers().contains_key(&header::ORIGIN) {
            // Only check requests with an Origin header.
if let Err(e) = self.inner.validate_origin(req.head()) { return Either::A(ok(req.error_response(e))); } let inner = self.inner.clone(); Either::B(Either::B(Box::new(self.service.call(req).and_then( move |mut res| { if let Some(origin) = inner.access_control_allow_origin(res.request().head()) { res.headers_mut() .insert(header::ACCESS_CONTROL_ALLOW_ORIGIN, origin.clone()); }; if let Some(ref expose) = inner.expose_hdrs { res.headers_mut().insert( header::ACCESS_CONTROL_EXPOSE_HEADERS, HeaderValue::try_from(expose.as_str()).unwrap(), ); } if inner.supports_credentials { res.headers_mut().insert( header::ACCESS_CONTROL_ALLOW_CREDENTIALS, HeaderValue::from_static("true"), ); } if inner.vary_header { let value = if let Some(hdr) = res.headers_mut().get(&header::VARY) { let mut val: Vec<u8> = Vec::with_capacity(hdr.as_bytes().len() + 8); val.extend(hdr.as_bytes()); val.extend(b", Origin"); HeaderValue::try_from(&val[..]).unwrap() } else { HeaderValue::from_static("Origin") }; res.headers_mut().insert(header::VARY, value); } Ok(res) }, )))) } else { Either::B(Either::A(self.service.call(req))) } } } #[cfg(test)] mod tests { use actix_service::{IntoService, Transform}; use super::*; use crate::test::{self, block_on, TestRequest}; impl Cors { fn finish<F, S, B>(self, srv: F) -> CorsMiddleware<S> where F: IntoService<S>, S: Service< Request = ServiceRequest, Response = ServiceResponse<B>, Error = Error, > + 'static, S::Future: 'static, B: 'static, { block_on( IntoTransform::<CorsFactory, S>::into_transform(self) .new_transform(srv.into_service()), ) .unwrap() } } #[test] #[should_panic(expected = "Credentials are allowed, but the Origin is set to")] fn cors_validates_illegal_allow_credentials() { let _cors = Cors::new() .supports_credentials() .send_wildcard() .finish(test::ok_service()); } #[test] fn validate_origin_allows_all_origins() { let mut cors = Cors::new().finish(test::ok_service()); let req = TestRequest::with_header("Origin", "https://www.example.com") .to_srv_request(); let resp = test::call_service(&mut cors, req); assert_eq!(resp.status(), StatusCode::OK); } #[test] fn default() { let mut cors = block_on(Cors::default().new_transform(test::ok_service())).unwrap(); let req = TestRequest::with_header("Origin", "https://www.example.com") .to_srv_request(); let resp = test::call_service(&mut cors, req); assert_eq!(resp.status(), StatusCode::OK); } #[test] fn test_preflight() { let mut cors = Cors::new() .send_wildcard() .max_age(3600) .allowed_methods(vec![Method::GET, Method::OPTIONS, Method::POST]) .allowed_headers(vec![header::AUTHORIZATION, header::ACCEPT]) .allowed_header(header::CONTENT_TYPE) .finish(test::ok_service()); let req = TestRequest::with_header("Origin", "https://www.example.com") .method(Method::OPTIONS) .header(header::ACCESS_CONTROL_REQUEST_HEADERS, "X-Not-Allowed") .to_srv_request(); assert!(cors.inner.validate_allowed_method(req.head()).is_err()); assert!(cors.inner.validate_allowed_headers(req.head()).is_err()); let resp = test::call_service(&mut cors, req); assert_eq!(resp.status(), StatusCode::BAD_REQUEST); let req = TestRequest::with_header("Origin", "https://www.example.com") .header(header::ACCESS_CONTROL_REQUEST_METHOD, "put") .method(Method::OPTIONS) .to_srv_request(); assert!(cors.inner.validate_allowed_method(req.head()).is_err()); assert!(cors.inner.validate_allowed_headers(req.head()).is_ok()); let req = TestRequest::with_header("Origin", "https://www.example.com") .header(header::ACCESS_CONTROL_REQUEST_METHOD, "POST") .header( 
header::ACCESS_CONTROL_REQUEST_HEADERS, "AUTHORIZATION,ACCEPT", ) .method(Method::OPTIONS) .to_srv_request(); let resp = test::call_service(&mut cors, req); assert_eq!( &b"*"[..], resp.headers() .get(&header::ACCESS_CONTROL_ALLOW_ORIGIN) .unwrap() .as_bytes() ); assert_eq!( &b"3600"[..], resp.headers() .get(&header::ACCESS_CONTROL_MAX_AGE) .unwrap() .as_bytes() ); let hdr = resp .headers() .get(&header::ACCESS_CONTROL_ALLOW_HEADERS) .unwrap() .to_str() .unwrap(); assert!(hdr.contains("authorization")); assert!(hdr.contains("accept")); assert!(hdr.contains("content-type")); let methods = resp .headers() .get(header::ACCESS_CONTROL_ALLOW_METHODS) .unwrap() .to_str() .unwrap(); assert!(methods.contains("POST")); assert!(methods.contains("GET")); assert!(methods.contains("OPTIONS")); Rc::get_mut(&mut cors.inner).unwrap().preflight = false; let req = TestRequest::with_header("Origin", "https://www.example.com") .header(header::ACCESS_CONTROL_REQUEST_METHOD, "POST") .header( header::ACCESS_CONTROL_REQUEST_HEADERS, "AUTHORIZATION,ACCEPT", ) .method(Method::OPTIONS) .to_srv_request(); let resp = test::call_service(&mut cors, req); assert_eq!(resp.status(), StatusCode::OK); } // #[test] // #[should_panic(expected = "MissingOrigin")] // fn test_validate_missing_origin() { // let cors = Cors::build() // .allowed_origin("https://www.example.com") // .finish(); // let mut req = HttpRequest::default(); // cors.start(&req).unwrap(); // } #[test] #[should_panic(expected = "OriginNotAllowed")] fn test_validate_not_allowed_origin() { let cors = Cors::new() .allowed_origin("https://www.example.com") .finish(test::ok_service()); let req = TestRequest::with_header("Origin", "https://www.unknown.com") .method(Method::GET) .to_srv_request(); cors.inner.validate_origin(req.head()).unwrap(); cors.inner.validate_allowed_method(req.head()).unwrap(); cors.inner.validate_allowed_headers(req.head()).unwrap(); } #[test] fn test_validate_origin() { let mut cors = Cors::new() .allowed_origin("https://www.example.com") .finish(test::ok_service()); let req = TestRequest::with_header("Origin", "https://www.example.com") .method(Method::GET) .to_srv_request(); let resp = test::call_service(&mut cors, req); assert_eq!(resp.status(), StatusCode::OK); } #[test] fn test_no_origin_response() { let mut cors = Cors::new().disable_preflight().finish(test::ok_service()); let req = TestRequest::default().method(Method::GET).to_srv_request(); let resp = test::call_service(&mut cors, req); assert!(resp .headers() .get(header::ACCESS_CONTROL_ALLOW_ORIGIN) .is_none()); let req = TestRequest::with_header("Origin", "https://www.example.com") .method(Method::OPTIONS) .to_srv_request(); let resp = test::call_service(&mut cors, req); assert_eq!( &b"https://www.example.com"[..], resp.headers() .get(header::ACCESS_CONTROL_ALLOW_ORIGIN) .unwrap() .as_bytes() ); } #[test] fn test_response() { let exposed_headers = vec![header::AUTHORIZATION, header::ACCEPT]; let mut cors = Cors::new() .send_wildcard() .disable_preflight() .max_age(3600) .allowed_methods(vec![Method::GET, Method::OPTIONS, Method::POST]) .allowed_headers(exposed_headers.clone()) .expose_headers(exposed_headers.clone()) .allowed_header(header::CONTENT_TYPE) .finish(test::ok_service()); let req = TestRequest::with_header("Origin", "https://www.example.com") .method(Method::OPTIONS) .to_srv_request(); let resp = test::call_service(&mut cors, req); assert_eq!( &b"*"[..], resp.headers() .get(header::ACCESS_CONTROL_ALLOW_ORIGIN) .unwrap() .as_bytes() ); assert_eq!( &b"Origin"[..], 
resp.headers().get(header::VARY).unwrap().as_bytes() ); { let headers = resp .headers() .get(header::ACCESS_CONTROL_EXPOSE_HEADERS) .unwrap() .to_str() .unwrap() .split(',') .map(|s| s.trim()) .collect::<Vec<&str>>(); for h in exposed_headers { assert!(headers.contains(&h.as_str())); } } let exposed_headers = vec![header::AUTHORIZATION, header::ACCEPT]; let mut cors = Cors::new() .send_wildcard() .disable_preflight() .max_age(3600) .allowed_methods(vec![Method::GET, Method::OPTIONS, Method::POST]) .allowed_headers(exposed_headers.clone()) .expose_headers(exposed_headers.clone()) .allowed_header(header::CONTENT_TYPE) .finish(|req: ServiceRequest| { req.into_response( HttpResponse::Ok().header(header::VARY, "Accept").finish(), ) }); let req = TestRequest::with_header("Origin", "https://www.example.com") .method(Method::OPTIONS) .to_srv_request(); let resp = test::call_service(&mut cors, req); assert_eq!( &b"Accept, Origin"[..], resp.headers().get(header::VARY).unwrap().as_bytes() ); let mut cors = Cors::new() .disable_vary_header() .allowed_origin("https://www.example.com") .allowed_origin("https://www.google.com") .finish(test::ok_service()); let req = TestRequest::with_header("Origin", "https://www.example.com") .method(Method::OPTIONS) .header(header::ACCESS_CONTROL_REQUEST_METHOD, "POST") .to_srv_request(); let resp = test::call_service(&mut cors, req); let origins_str = resp .headers() .get(header::ACCESS_CONTROL_ALLOW_ORIGIN) .unwrap() .to_str() .unwrap(); assert_eq!("https://www.example.com", origins_str); } #[test] fn test_multiple_origins() { let mut cors = Cors::new() .allowed_origin("https://example.com") .allowed_origin("https://example.org") .allowed_methods(vec![Method::GET]) .finish(test::ok_service()); let req = TestRequest::with_header("Origin", "https://example.com") .method(Method::GET) .to_srv_request(); let resp = test::call_service(&mut cors, req); assert_eq!( &b"https://example.com"[..], resp.headers() .get(header::ACCESS_CONTROL_ALLOW_ORIGIN) .unwrap() .as_bytes() ); let req = TestRequest::with_header("Origin", "https://example.org") .method(Method::GET) .to_srv_request(); let resp = test::call_service(&mut cors, req); assert_eq!( &b"https://example.org"[..], resp.headers() .get(header::ACCESS_CONTROL_ALLOW_ORIGIN) .unwrap() .as_bytes() ); } #[test] fn test_multiple_origins_preflight() { let mut cors = Cors::new() .allowed_origin("https://example.com") .allowed_origin("https://example.org") .allowed_methods(vec![Method::GET]) .finish(test::ok_service()); let req = TestRequest::with_header("Origin", "https://example.com") .header(header::ACCESS_CONTROL_REQUEST_METHOD, "GET") .method(Method::OPTIONS) .to_srv_request(); let resp = test::call_service(&mut cors, req); assert_eq!( &b"https://example.com"[..], resp.headers() .get(header::ACCESS_CONTROL_ALLOW_ORIGIN) .unwrap() .as_bytes() ); let req = TestRequest::with_header("Origin", "https://example.org") .header(header::ACCESS_CONTROL_REQUEST_METHOD, "GET") .method(Method::OPTIONS) .to_srv_request(); let resp = test::call_service(&mut cors, req); assert_eq!( &b"https://example.org"[..], resp.headers() .get(header::ACCESS_CONTROL_ALLOW_ORIGIN) .unwrap() .as_bytes() ); } }
{ !self.is_all() }
smoke_test.go
// Package ais_test contains AIS integration tests. /* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */ package ais_test import ( "fmt" "math/rand" "os" "path" "sync" "testing" "time" "github.com/NVIDIA/aistore/api" "github.com/NVIDIA/aistore/cmn" "github.com/NVIDIA/aistore/memsys" "github.com/NVIDIA/aistore/tutils" ) var ( objSizes = [3]int64{128 * cmn.KiB, 192 * cmn.KiB, 256 * cmn.KiB} ratios = [5]float32{0, 0.25, 0.50, 0.75, 1} // #gets / #puts ) func Test_smoke(t *testing.T) { if testing.Short() { t.Skip(skipping) } proxyURL := getPrimaryURL(t, proxyURLReadOnly) if err := cmn.CreateDir(LocalDestDir); err != nil { t.Fatalf("Failed to create dir %s, err: %v", LocalDestDir, err) } if err := cmn.CreateDir(SmokeDir); err != nil { t.Fatalf("Failed to create dir %s, err: %v", SmokeDir, err) } created := createLocalBucketIfNotExists(t, proxyURL, clibucket) fp := make(chan string, len(objSizes)*len(ratios)*numops*numworkers) bs := int64(baseseed) for _, fs := range objSizes { for _, r := range ratios { s := fmt.Sprintf("size:%s,GET/PUT:%.0f%%", cmn.B2S(fs, 0), r*100) t.Run(s, func(t *testing.T) { oneSmoke(t, proxyURL, fs, r, bs, fp) }) bs += int64(numworkers + 1) } }
close(fp) // Clean up all the files from the test wg := &sync.WaitGroup{} errCh := make(chan error, len(objSizes)*len(ratios)*numops*numworkers) for file := range fp { if usingFile { err := os.Remove(path.Join(SmokeDir, file)) if err != nil { t.Error(err) } } wg.Add(1) go tutils.Del(proxyURL, clibucket, "smoke/"+file, "", wg, errCh, true) } wg.Wait() select { case err := <-errCh: t.Error(err) default: } if created { tutils.DestroyLocalBucket(t, proxyURL, clibucket) } } func oneSmoke(t *testing.T, proxyURL string, objSize int64, ratio float32, bseed int64, filesPutCh chan string) { // Start the worker pools errCh := make(chan error, 100) var wg = &sync.WaitGroup{} // Decide the number of each type var ( nGet = int(float32(numworkers) * ratio) nPut = numworkers - nGet sgls = make([]*memsys.SGL, numworkers) ) // Get the workers started if usingSG { for i := 0; i < numworkers; i++ { sgls[i] = tutils.Mem2.NewSGL(objSize) } defer func() { for _, sgl := range sgls { sgl.Free() } }() } for i := 0; i < numworkers; i++ { if (i%2 == 0 && nPut > 0) || nGet == 0 { wg.Add(1) go func(i int) { var sgl *memsys.SGL if usingSG { sgl = sgls[i] } tutils.PutRandObjs(proxyURL, clibucket, SmokeDir, readerType, SmokeStr, uint64(objSize), numops, errCh, filesPutCh, sgl) wg.Done() }(i) nPut-- } else { wg.Add(1) go func(i int) { getRandomFiles(proxyURL, numops, clibucket, SmokeStr+"/", t, nil, errCh) wg.Done() }(i) nGet-- } } wg.Wait() select { case err := <-errCh: t.Error(err) default: } } func getRandomFiles(proxyURL string, numGets int, bucket, prefix string, t *testing.T, wg *sync.WaitGroup, errCh chan error) { if wg != nil { defer wg.Done() } var ( src = rand.NewSource(time.Now().UnixNano()) random = rand.New(src) getsGroup = &sync.WaitGroup{} msg = &cmn.GetMsg{GetPrefix: prefix, GetPageSize: int(pagesize)} baseParams = tutils.BaseAPIParams(proxyURL) ) for i := 0; i < numGets; i++ { items, err := api.ListBucket(baseParams, bucket, msg, 0) if err != nil { errCh <- err t.Error(err) return } if items == nil { errCh <- fmt.Errorf("listbucket %s: is empty - no entries", bucket) // not considered as a failure, just can't do the test return } files := make([]string, 0) for _, it := range items.Entries { // directories show up as files with '/' endings - filter them out if it.Name[len(it.Name)-1] != '/' { files = append(files, it.Name) } } if len(files) == 0 { errCh <- fmt.Errorf("cannot retrieve from an empty bucket %s", bucket) // not considered as a failure, just can't do the test return } keyname := files[random.Intn(len(files))] getsGroup.Add(1) go func() { baseParams := tutils.BaseAPIParams(proxyURL) _, err := api.GetObject(baseParams, bucket, keyname) if err != nil { errCh <- err } getsGroup.Done() }() } getsGroup.Wait() }
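// A minimal sketch (hypothetical helper, not from the test): how the GET/PUT
// worker split in oneSmoke falls out of the ratio table; for example, with
// numworkers = 10 a ratio of 0.25 yields 2 GET workers and 8 PUT workers.
func workerSplit(numworkers int, ratio float32) (nGet, nPut int) {
	nGet = int(float32(numworkers) * ratio)
	nPut = numworkers - nGet
	return nGet, nPut
}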
index.js
// import { StyleSheet, Text, View, Alert, TouchableOpacity } from "react-native"; // import React, { Component } from "react"; // // import Video from "react-native-video"; // import {Video} from "expo-av" // import { DragResizeBlock } from "react-native-drag-resize"; // import Service from "../../../libs/service"; // import dateFormat from "../../../libs/dateformat"; // export default class livefeed extends Component { // constructor() { // super(); // this.state = { // videoIndex: 0, // videos: [], // screenStatus: "Getting videos from Api ...", // regions: { // x: 100, // y: 100 // } // }; // } // componentDidMount() { // this.getVideos(); // } // saveCoordinates = async () => { // let { regions } = this.state; // let pt_left_top = { x: regions.x, y: regions.y }; // let pt_right_top = { x: regions.x + regions.w, y: regions.y }; // let pt_left_bottom = { x: regions.x, y: regions.y + regions.h }; // let pt_right_bottom = { // x: regions.x + regions.x, // y: regions.y + regions.h // }; // let created_at = dateFormat(new Date(), "yyyy-mm-dd HH:MM:ss"); // console.log("test82 created_at:", created_at); // let jsonObj = { // lat: "22.6356", // lng: "54.358", // created_at, // virtual_coord: { // x_value: pt_left_top.x, // y_value: pt_left_top.y, // x2_value: pt_right_top.x, // y2_value: pt_right_top.y, // x3_value: pt_left_bottom.x, // y3_value: pt_left_bottom.y, // x4_value: pt_right_bottom.x, // y4_value: pt_right_bottom.y, // created_at // }, // realtime_coord: { // x_value: pt_left_top.x, // y_value: pt_left_top.y, // x2_value: pt_right_top.x, // y2_value: pt_right_top.y, // x3_value: pt_left_bottom.x, // y3_value: pt_left_bottom.y, // x4_value: pt_right_bottom.x, // y4_value: pt_right_bottom.y, // created_at // } // }; // let params = { // user_id: 1, // camera_id: 1, // json: JSON.stringify(jsonObj) // }; // let s = new Service(); // let response = await s.callPostWithParamsAndBody( // params, // jsonObj, // "upload-coordinate" // ); // Alert.alert(response.data.message); // }; // onPress = regions => { // //console.log("tets82 onPress: ", JSON.stringify(this.draggableBox.state)); // this.setState({ regions: this.draggableBox.state }); // }; // async getVideos() { // let time = dateFormat(new Date(), "yyyy-mm-dd HH:MM:ss"); // console.log(time," ", new Date()) // this.setState({ // screenStatus: "Getting videos from Api for " + time // }); // let params = { // user_id: 1, // camera_id: 1, // last_sync: '2021-09-07 03:15:00-03:20:00' // }; // let s = new Service(); // let response = await s.callPostWithParams(params, "get-videos"); // console.log("test82 videos:", JSON.stringify(response)); // if (response.status) { // if (response.data.length > 0) { // this.setState({ // videoIndex: 0, // videos: response.data, // screenStatus: "Loading video..." // }); // } else { // this.setState({ // videoIndex: 0, // videos: response.data, // screenStatus: "Videos list is empty, please upload videos" // }); // } // } else { // Alert.alert(response.message); // } // } // onVideoEnd() { // console.log("test82 video end"); // let { videoIndex, videos } = this.state; // if (videoIndex < videos.length - 1) { // this.setState({ // videoIndex: videoIndex + 1, // screenStatus: "Loading next video..." // }); // } else { // //get new videos // this.setState({ // videoIndex: 0, // videos: [], // screenStatus: "Getting videos from Api ..." 
// }); // this.getVideos(); // } // } // onError() { // console.log("test82 on Error"); // } // render() { // let { videoIndex, videos, screenStatus, regions } = this.state; // if (videos.length == 0) { // return ( // <View style={styles.container}> // <Text style={styles.status}>{screenStatus}</Text> // {screenStatus.includes("empty") && ( // <TouchableOpacity // style={styles.retyrBtn} // onPress={this.getVideos.bind(this)} // > // <Text style={styles.buttonText}>Retry</Text> // </TouchableOpacity> // )} // </View> // ); // } else { // //console.log("test82 video file: ", videos[videoIndex].video_file); // return ( // <View style={styles.container}> // <Text style={styles.status}>{screenStatus}</Text> // <Video // key={videoIndex.toString()} // repeat={false} // controls={true} // onEnd={this.onVideoEnd.bind(this)} // onError={this.onError.bind(this)} // source={{ // uri: encodeURI(videos[videoIndex].video_file) // }} // ref={ref => { // this.player = ref; // }} // style={styles.backgroundVideo} // resizeMode="contain" // /> // <DragResizeBlock // ref={ref => { // this.draggableBox = ref; // }} // x={regions.x} // y={regions.y} // w={regions.w} // h={regions.h} // onDragEnd={this.onPress} // onResizeEnd={this.onPress} // isDraggable={true} // isResizable={true} // zIndex={1} // ></DragResizeBlock> // <TouchableOpacity // style={[styles.buttonContainer, styles.loginButton]} // onPress={this.saveCoordinates.bind(this)} // > // <Text style={styles.buttonText}>Save Coordinates</Text> // </TouchableOpacity> // </View> // ); // } // } // } // const styles = StyleSheet.create({ // container: { // flex: 1, // backgroundColor: "#555", // justifyContent: "center" // }, // backgroundVideo: { // position: "absolute", // top: 0, // left: 0, // bottom: 0, // right: 0 // }, // status: { // textAlign: "center", // fontSize: 20, // color: "#fff", // width: "80%", // alignSelf: "center" // }, // buttonContainer: { // position: "absolute", // bottom: 100, // height: 45, // flexDirection: "row", // justifyContent: "center", // alignItems: "center", // alignSelf: "center", // marginTop: 30, // width: 250, // borderRadius: 30 // }, // retyrBtn: { // height: 45, // justifyContent: "center", // alignItems: "center", // marginTop: 30, // width: 250, // borderRadius: 30, // backgroundColor: "#272F58", // alignSelf: "center" // }, // loginButton: { // backgroundColor: "#272F58" // }, // buttonText: { // color: "#FFFFFF", // fontSize: 20 // } // }); import React, {useState,useStateWithCallback} from 'react'; import {View, Text,TouchableOpacity, Platform} from 'react-native'; import DateTimePicker from '@react-native-community/datetimepicker'; import Datetime from "./timedata"; import moment from "moment" let from=[]; let Datenow=new Date(); export default function
({ navigation }) {
  const [state, setState] = useState({
    date: Datenow,
    mode: 'date',
    show: false
  });
  const [from, setfrom] = useState(null);
  const [from1, setfrom1] = useState(null);
  const [from2, setfrom2] = useState(null);
  const [newstate, time_1] = useState({
    date: Datenow,
    mode: 'time',
    show: false
  });
  const [newnewstate, newnewState] = useState({
    date: Datenow,
    mode: 'time',
    show: false
  });
  const [appear, newshow] = useState(false);

  const onChange = (event, selectedDate) => {
    const currentDate = selectedDate || state.date;
    // Update the date and hide the picker in a single state update; chaining
    // several setState calls on the same stale `state` object loses changes.
    setState({...state, date: currentDate, show: false});
    setfrom(selectedDate);
  };

  const ontimeChange = (event, selectedDate) => {
    const currentDate = selectedDate || newstate.date;
    console.log("FROM", currentDate);
    time_1({...newstate, date: currentDate, show: false});
    setfrom1(selectedDate);
  };

  const ontimeChangeAgain = (event, selectedDate) => {
    const currentDate = selectedDate || newnewstate.date;
    console.log("TO", currentDate);
    newnewState({...newnewstate, date: currentDate, show: false});
    setfrom2(selectedDate);
  };

  const showPicker = currentMode => {
    setState({...state, show: true});
  };

  const showtimePicker = currentMode => {
    time_1({...newstate, show: true});
  };

  const showagaintimePicker = currentMode => {
    newnewState({...newnewstate, show: true});
  };

  return (
    <View style={{flex: 1}}>
      {!appear &&
      <View style={{marginTop: 10, flex: 1, paddingVertical: 20, justifyContent: "center", alignItems: 'center', backgroundColor: 'white'}}>
        <View style={{justifyContent: 'center', alignItems: "center"}}>
          <TouchableOpacity
            style={{width: 100, backgroundColor: '#272F58', borderRadius: 30, paddingVertical: 10}}
            onPress={() => showPicker('date')}>
            <Text style={{textAlign: "center", color: 'white'}}>PICK DATE</Text>
          </TouchableOpacity>
        </View>
        <View style={{flexDirection: "row", justifyContent: 'space-between'}}>
          <View style={{marginTop: 30, justifyContent: 'center', alignItems: "center"}}>
            <TouchableOpacity
              style={{width: 100, backgroundColor: '#272F58', borderRadius: 30, paddingVertical: 10}}
              onPress={() => showtimePicker('time')}>
              <Text style={{textAlign: "center", color: 'white'}}>FROM</Text>
            </TouchableOpacity>
          </View>
          <View style={{marginTop: 30, justifyContent: 'center', alignItems: "center"}}>
            <TouchableOpacity
              style={{width: 100, backgroundColor: '#272F58', borderRadius: 30, paddingVertical: 10}}
              onPress={() => showagaintimePicker('time')}>
              <Text style={{textAlign: "center", color: 'white'}}>TO</Text>
            </TouchableOpacity>
          </View>
        </View>
        <TouchableOpacity
          onPress={() => newshow(true)}
          style={{width: 120, backgroundColor: '#272F58', marginTop: 50, borderRadius: 30, paddingVertical: 10}}>
          <Text
style={{textAlign:"center",color:'white'}}>Continue</Text> </TouchableOpacity> { state.show && (<DateTimePicker testID="dateTimePicker" // timeZoneOffsetInMinutes={0} value={state.date} mode={state.mode} is24Hour={true} display="default" onChange={onChange} />) } { newstate.show && (<DateTimePicker testID="dateTimePicker" value={newstate.date} mode={newstate.mode} is24Hour={true} // timeZoneOffsetInMinutes={30000} display="default" onChange={ontimeChange} />) } { newnewstate.show && (<DateTimePicker testID="dateTimePicker" // timeZoneOffsetInMinutes={0} value={newnewstate.date} mode={newnewstate.mode} is24Hour={true} // timeZoneOffsetInMinutes={30000} display="default" onChange={ontimeChangeAgain} />) } </View> } { appear && ( <Datetime from={from} from1={from1} from2={from2} /> ) } </View> ) }
index
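Each record above lists a file name, then a prefix, a suffix, and finally the held-out middle, so the original file is recovered as prefix + middle + suffix. A minimal Python sketch of that reassembly; the dict shape of a record is an assumption for illustration:

def reassemble(record):
    # The original source is prefix + middle + suffix; e.g. for the record
    # above, the middle "index" names the function between the prefix's
    # trailing "export default function" and the suffix's "({ navigation })".
    return record["prefix"] + record["middle"] + record["suffix"]

# Hypothetical record shaped like the entries in this dump.
record = {
    "file_name": "index",
    "prefix": "export default function",
    "middle": " index",
    "suffix": "({ navigation }) { /* ... */ }",
}
print(reassemble(record))  # -> export default function index({ navigation }) { /* ... */ }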
write_literal.rs
// Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(unused_must_use)] #![warn(clippy::write_literal)] use std::io::Write; fn main()
{ let mut v = Vec::new(); // these should be fine write!(&mut v, "Hello"); writeln!(&mut v, "Hello"); let world = "world"; writeln!(&mut v, "Hello {}", world); writeln!(&mut v, "Hello {world}", world=world); writeln!(&mut v, "3 in hex is {:X}", 3); writeln!(&mut v, "2 + 1 = {:.4}", 3); writeln!(&mut v, "2 + 1 = {:5.4}", 3); writeln!(&mut v, "Debug test {:?}", "hello, world"); writeln!(&mut v, "{0:8} {1:>8}", "hello", "world"); writeln!(&mut v, "{1:8} {0:>8}", "hello", "world"); writeln!(&mut v, "{foo:8} {bar:>8}", foo="hello", bar="world"); writeln!(&mut v, "{bar:8} {foo:>8}", foo="hello", bar="world"); writeln!(&mut v, "{number:>width$}", number=1, width=6); writeln!(&mut v, "{number:>0width$}", number=1, width=6); // these should throw warnings writeln!(&mut v, "{} of {:b} people know binary, the other half doesn't", 1, 2); write!(&mut v, "Hello {}", "world"); writeln!(&mut v, "Hello {} {}", world, "world"); writeln!(&mut v, "Hello {}", "world"); writeln!(&mut v, "10 / 4 is {}", 2.5); writeln!(&mut v, "2 + 1 = {}", 3); // positional args don't change the fact // that we're using a literal -- this should // throw a warning writeln!(&mut v, "{0} {1}", "hello", "world"); writeln!(&mut v, "{1} {0}", "hello", "world"); // named args shouldn't change anything either writeln!(&mut v, "{foo} {bar}", foo="hello", bar="world"); writeln!(&mut v, "{bar} {foo}", foo="hello", bar="world"); }
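The clippy lint exercised above flags string literals routed through format placeholders. The same redundancy exists outside Rust; a small Python illustration of what the lint objects to (not part of the test file):

name = "world"
print("Hello {}".format(name))      # fine: a runtime value fills the hole
print("Hello {}".format("world"))   # redundant: a literal routed through a placeholder
print("Hello world")                # what the lint suggests instead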
neighborhood_graph_grammar.py
#!/usr/bin/env python """Provides scikit interface.""" import numpy as np import networkx as nx import random from ego.decompose import do_decompose from ego.decomposition.positive_and_negative import decompose_positive, decompose_negative from ego.decomposition.union import decompose_all_union from graphlearn.sample import LocalSubstitutionGraphGrammarSample as lsgg from graphlearn.lsgg_ego import lsgg_ego from ego.optimization.part_importance_estimator import PartImportanceEstimator import logging logger = logging.getLogger(__name__) class NeighborhoodGraphGrammar(object): def __init__(self, core=2, context=1, count=2, filter_max_num_substitutions=None, n_neighbors=None, perturbation_size=2, part_importance_estimator=None, objective_func=None): self.n_neighbors = n_neighbors self.graph_grammar = lsgg( radii=list(range(core)), thickness=context, filter_min_cip=count, filter_min_interface=2, filter_max_num_substitutions=filter_max_num_substitutions, nodelevel_radius_and_thickness=False) self.perturbation_size = perturbation_size self.part_importance_estimator = part_importance_estimator self.objective_func = objective_func def fit(self, graphs): self.graph_grammar.fit(graphs) return self def __repr__(self): return str(self.graph_grammar) def perturb(self, orig_graph): graph = nx.Graph(orig_graph) for i in range(self.perturbation_size): # select a node at random ns = [u for u in graph.nodes()] root = np.random.choice(ns, size=1)[0] # materialize the neighborhood graphs_it = self.graph_grammar.root_neighbors( graph, [root], n_neighbors=self.n_neighbors) graphs = list(graphs_it) + [graph] # select a neighbor at random ids = list(range(len(graphs))) id = np.random.choice(ids, size=1)[0] graph = nx.Graph(graphs[id]) return graph def _select_nodes_in_prob(self, graph, node_score_dict, edge_score_dict): # select root nodes with probability proportional to negative scores _eps_ = 1e-4 s = np.array([node_score_dict[u] + _eps_ for u in graph.nodes()]) # invert score and take exp p = np.exp(-s / np.max(np.absolute(s))) # normalize it to make it a prob p = p / p.sum() ns = [u for u in graph.nodes()] # sample up to half of the most negative nodes n = int(len(graph) / 2) sel = np.random.choice(ns, size=n, p=p, replace=False) selected_nodes = list(sel) return selected_nodes def _select_negative_nodes(self, graph, node_score_dict, edge_score_dict): selected_nodes = [u for u in graph.nodes() if node_score_dict[u] <= 0] return selected_nodes def _select_best(self, graphs): # select graph that maximizes the average objective score # (to contrast the tendency to produce large molecules) func = lambda g: self.objective_func(g) / float(len(g)) best_graph = max(graphs, key=func) return best_graph def neighbors(self, graph): if self.n_neighbors is None: return list(self.graph_grammar.neighbors(graph)) else: return list(self.graph_grammar.neighbors_sample(graph, self.n_neighbors)) def make_neighbors(self, graph, selected_nodes=None, include_original=True): if selected_nodes is None or len(selected_nodes) == 0: out_graphs = self.make_all_neighbors(graph, include_original) else: out_graphs = self.make_neighbors_from_selected_nodes( graph, selected_nodes, include_original) return out_graphs def make_all_neighbors(self, graph, include_original=True): if include_original: out_graphs = [graph] else: out_graphs = [] out_graphs += self.neighbors(graph) return out_graphs def make_neighbors_from_selected_nodes(self, graph, selected_nodes=None, include_original=True): # compute neighborhood, i.e. 
graphs that are obtained as a single core # substitution if include_original: out_graphs = [graph] else: out_graphs = [] for selected_node in selected_nodes: graphs_it = self.graph_grammar.root_neighbors( graph, [selected_node], n_neighbors=self.n_neighbors) out_graphs += list(graphs_it) return out_graphs def make_gradient_neighbors(self, graph): if self.part_importance_estimator is None: selected_nodes = None else: res = self.part_importance_estimator.predict(graph) node_score_dict, edge_score_dict = res selected_nodes = self._select_negative_nodes( graph, node_score_dict, edge_score_dict) out_graphs = self.make_neighbors(graph, selected_nodes) return out_graphs def gradient_descent(self, graph): out_graphs = self.make_gradient_neighbors(graph) best_graph = self._select_best(out_graphs) return best_graph
def __init__(self, decomposition_function=None, context=1, count=1, filter_max_num_substitutions=None, n_neighbors=None, perturbation_size=0, objective_func=None): self.n_neighbors = n_neighbors self.graph_grammar = lsgg_ego( decomposition_function=decomposition_function, thickness=context, filter_min_cip=count, filter_min_interface=2, filter_max_num_substitutions=filter_max_num_substitutions, nodelevel_radius_and_thickness=False) self.perturbation_size = perturbation_size self.objective_func = objective_func def fit(self, graphs): self.graph_grammar.fit(graphs) return self def __repr__(self): return str(self.graph_grammar) def neighbors(self, graph, n_neighbors=None): if n_neighbors is None: n_neighbors = self.n_neighbors if n_neighbors is None: ns = list(self.graph_grammar.neighbors(graph)) else: ns = list(self.graph_grammar.neighbors_sample(graph, n_neighbors)) return ns def perturb(self, orig_graph): graph = nx.Graph(orig_graph) for i in range(self.perturbation_size): # select a graph at random out_graphs = self.neighbors(graph) if len(out_graphs) > 2: graph = np.random.choice(out_graphs, size=1)[0] return graph def _select_best(self, graphs): # select graph that maximizes the average objective score # (to contrast the tendency to produce large molecules) func = lambda g: self.objective_func(g) / float(len(g)) best_graph = max(graphs, key=func) return best_graph def gradient_descent(self, graph): out_graphs = self.neighbors(graph) best_graph = self._select_best([graph] + out_graphs) return best_graph # ---------------------------------------------------------- class NeighborhoodPartImportanceGraphGrammar(object): def __init__(self, decomposition_function=None, context=1, count=1, filter_max_num_substitutions=None, n_neighbors=None, fit_at_each_iteration=False, frac_nodes_to_select=.5, enforce_connected=True, domain_graphs=[]): self.domain_graphs = domain_graphs self.decomposition_function = decomposition_function self.n_neighbors = n_neighbors self.fit_at_each_iteration = fit_at_each_iteration self.part_importance_estimator = PartImportanceEstimator( decompose_func=decomposition_function) self.graph_grammar = lsgg_ego( decomposition_function=decomposition_function, thickness=context, filter_min_cip=count, filter_min_interface=2, filter_max_num_substitutions=filter_max_num_substitutions, nodelevel_radius_and_thickness=False) self.frac_nodes_to_select = frac_nodes_to_select self.enforce_connected = enforce_connected def fit_grammar(self, graphs): self.graph_grammar.fit(graphs+self.domain_graphs) return self def fit_part_importance_estimator(self, graphs, targets): self.part_importance_estimator.fit(graphs, targets) return self def fit(self, graphs, targets): if self.fit_at_each_iteration: self.fit_grammar(graphs) print('%30s: %s' % ('Part Importance Graph Grammar', self)) return self.fit_part_importance_estimator(graphs, targets) def __repr__(self): return str(self.graph_grammar) def neighbors(self, graph, n_neighbors=None): res = self.part_importance_estimator.predict(graph) node_score_dict, edge_score_dict = res nodes = list(graph.nodes()) # select the nodes with lowest score selected_nodes = sorted(nodes, key=lambda u: node_score_dict[u]) selected_nodes = selected_nodes[ :int(len(selected_nodes) * self.frac_nodes_to_select)] if n_neighbors is None: n_neighbors = self.n_neighbors if n_neighbors is None: ns = list(self.graph_grammar.neighbors(graph)) else: ns = list(self.graph_grammar.root_neighbors( graph, selected_nodes, n_neighbors)) random.shuffle(ns) ns = ns[:n_neighbors] if 
self.enforce_connected: ns = [g for g in ns if nx.is_connected(g)] return ns # ---------------------------------------------------------- class NeighborhoodAdaptiveGraphGrammar(object): def __init__(self, base_decomposition_function=None, approximate_decomposition_function=None, context=1, count=1, filter_max_num_substitutions=None, n_neighbors=None, ktop=4, enforce_connected=True): self.ktop = ktop self.base_decomposition_function = base_decomposition_function self.approximate_decomposition_function = approximate_decomposition_function self.n_neighbors = n_neighbors self.part_importance_estimator = PartImportanceEstimator( decompose_func=self.base_decomposition_function) self.graph_grammar = lsgg_ego( decomposition_function=self.base_decomposition_function, thickness=context, filter_min_cip=count, filter_min_interface=2, filter_max_num_substitutions=filter_max_num_substitutions, nodelevel_radius_and_thickness=False) self.enforce_connected = enforce_connected def fit(self, graphs, targets): self.fit_part_importance_estimator(graphs, targets) pos_dec = do_decompose(decompose_positive(ktop=self.ktop, part_importance_estimator=self.part_importance_estimator), compose_function=decompose_all_union) neg_dec = do_decompose(decompose_negative(ktop=self.ktop, part_importance_estimator=self.part_importance_estimator), compose_function=decompose_all_union) frag_dec = do_decompose(pos_dec, neg_dec, compose_function=self.approximate_decomposition_function) self.adaptive_decomposition_function = do_decompose(pos_dec, neg_dec, frag_dec) self.graph_grammar.set_decomposition(self.adaptive_decomposition_function) self.fit_grammar(graphs) print('%30s: %s' % ('Adaptive Graph Grammar', self)) return self def fit_grammar(self, graphs): self.graph_grammar.fit(graphs) return self def fit_part_importance_estimator(self, graphs, targets): self.part_importance_estimator.fit(graphs, targets) return self def __repr__(self): return str(self.graph_grammar) def neighbors(self, graph, n_neighbors=None): if n_neighbors is None: n_neighbors = self.n_neighbors if n_neighbors is None: ns = list(self.graph_grammar.neighbors(graph)) else: ns = list(self.graph_grammar.neighbors_sample(graph, n_neighbors)) if self.enforce_connected: ns = [g for g in ns if nx.is_connected(g)] return ns
# ---------------------------------------------------------- class NeighborhoodEgoGraphGrammar(object):
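A minimal usage sketch for the NeighborhoodGraphGrammar above, assuming the graphlearn/ego dependencies are installed; `training_graphs` (a list of networkx graphs) and `score` (a graph-to-float objective) are hypothetical placeholders supplied by the caller:

grammar = NeighborhoodGraphGrammar(
    core=2, context=1, n_neighbors=10,
    perturbation_size=2, objective_func=score)
grammar.fit(training_graphs)        # induce the substitution grammar

graph = training_graphs[0]
for _ in range(5):                  # a few greedy improvement steps
    # gradient_descent picks the best-scoring graph among the sampled
    # single-substitution neighbors (score normalized by graph size)
    graph = grammar.gradient_descent(graph)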
repo.rs
use diesel;
#[allow(unused_imports)]
use diesel::prelude::*; // rustc can flag this glob as unused, but it brings Diesel's query-builder traits into scope

use models::*;

type Connection = diesel::pg::PgConnection;
type Res<T> = Result<T, diesel::result::Error>;

pub mod user {
    use super::*;
    use schema::users;

    pub fn register(username: String, conn: &Connection) -> Res<User> {
        let new_user = NewUser { username: username };
        diesel::insert(&new_user).into(users::table).get_result(conn)
    }

    pub fn find_by_id(id: i32, conn: &Connection) -> Res<User> {
        users::table.find(id).first(conn)
    }

    pub fn list(conn: &Connection) -> Res<Vec<User>> {
        users::table.load(conn)
    }
}

pub mod thread {
    use super::*;
    use schema::{comments, threads};

    pub fn create(slug: String, conn: &Connection) -> Res<Thread>
pub fn find_by_id(id: i32, conn: &Connection) -> Res<Thread> { threads::table.find(id).first(conn) } pub fn list(conn: &Connection) -> Res<Vec<Thread>> { threads::table.load(conn) } pub fn list_comments(thread_id: i32, conn: &Connection) -> Res<Vec<Comment>> { comments::table .filter(comments::thread.eq(thread_id)) .order(comments::id.desc()) .load(conn) } pub fn comment_post(thread_id: i32, author: i32, content: String, conn: &Connection) -> Res<Comment> { let new_comment = NewComment { thread: thread_id, author: author, content: content }; diesel::insert(&new_comment).into(comments::table).get_result(conn) } }
{ let new_thread = NewThread { slug: slug }; diesel::insert(&new_thread).into(threads::table).get_result(conn) }
bounding_sphere_convex_polygon.rs
use na::Real; use bounding_volume::{BoundingSphere, HasBoundingVolume}; use bounding_volume; use shape::ConvexPolygon; use math::Isometry; impl<N: Real> HasBoundingVolume<N, BoundingSphere<N>> for ConvexPolygon<N> { #[inline] fn bounding_volume(&self, m: &Isometry<N>) -> BoundingSphere<N>
}
{ let (center, radius) = bounding_volume::point_cloud_bounding_sphere(self.points()); BoundingSphere::new(m * center, radius) }
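A naive point-cloud bounding sphere in the spirit of the `point_cloud_bounding_sphere` call above, sketched in Python: center at the centroid, radius as the largest distance to any point. (Assumption: ncollide's actual routine may compute a tighter sphere; this only illustrates the idea.)

import numpy as np

def bounding_sphere(points):
    pts = np.asarray(points, dtype=float)
    center = pts.mean(axis=0)                          # centroid of the cloud
    radius = np.linalg.norm(pts - center, axis=1).max()  # farthest point sets the radius
    return center, radius

center, radius = bounding_sphere([(0.0, 0.0), (2.0, 0.0), (1.0, 1.0)])
print(center, radius)   # -> [1.  0.333...] and ~1.054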
client_reqrep.py
import asyncio import codecs import dataclasses import functools import io import re import sys import traceback import warnings from hashlib import md5, sha1, sha256 from http.cookies import CookieError, Morsel, SimpleCookie from types import MappingProxyType, TracebackType from typing import ( TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple, Type, Union, cast, ) from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy from yarl import URL from . import hdrs, helpers, http, multipart, payload from .abc import AbstractStreamWriter from .client_exceptions import ( ClientConnectionError, ClientOSError, ClientResponseError, ContentTypeError, InvalidURL, ServerFingerprintMismatch, ) from .formdata import FormData from .hdrs import CONTENT_TYPE from .helpers import ( BaseTimerContext, BasicAuth, HeadersMixin, TimerNoop, is_expected_content_type, noop, parse_mimetype, reify, set_result, ) from .http import SERVER_SOFTWARE, HttpVersion10, HttpVersion11, StreamWriter from .http_parser import HAS_BROTLI from .log import client_logger from .streams import StreamReader from .typedefs import ( DEFAULT_JSON_DECODER, JSONDecoder, LooseCookies, LooseHeaders, RawHeaders, ) try: import ssl from ssl import SSLContext except ImportError: # pragma: no cover ssl = None # type: ignore[assignment] SSLContext = object # type: ignore[misc,assignment] try: import cchardet as chardet except ImportError: # pragma: no cover import charset_normalizer as chardet # type: ignore[no-redef] __all__ = ("ClientRequest", "ClientResponse", "RequestInfo", "Fingerprint") if TYPE_CHECKING: # pragma: no cover from .client import ClientSession from .connector import Connection from .tracing import Trace def _gen_default_accept_encoding() -> str: return "gzip, deflate, br" if HAS_BROTLI else "gzip, deflate" @dataclasses.dataclass(frozen=True) class ContentDisposition: type: Optional[str] parameters: "MappingProxyType[str, str]" filename: Optional[str] @dataclasses.dataclass(frozen=True) class RequestInfo: url: URL method: str headers: "CIMultiDictProxy[str]" real_url: URL class Fingerprint: HASHFUNC_BY_DIGESTLEN = { 16: md5, 20: sha1, 32: sha256, } def __init__(self, fingerprint: bytes) -> None: digestlen = len(fingerprint) hashfunc = self.HASHFUNC_BY_DIGESTLEN.get(digestlen) if not hashfunc: raise ValueError("fingerprint has invalid length") elif hashfunc is md5 or hashfunc is sha1: raise ValueError( "md5 and sha1 are insecure and " "not supported. Use sha256." 
) self._hashfunc = hashfunc self._fingerprint = fingerprint @property def fingerprint(self) -> bytes: return self._fingerprint def check(self, transport: asyncio.Transport) -> None: if not transport.get_extra_info("sslcontext"): return sslobj = transport.get_extra_info("ssl_object") cert = sslobj.getpeercert(binary_form=True) got = self._hashfunc(cert).digest() if got != self._fingerprint: host, port, *_ = transport.get_extra_info("peername") raise ServerFingerprintMismatch(self._fingerprint, got, host, port) if ssl is not None: SSL_ALLOWED_TYPES = (ssl.SSLContext, bool, Fingerprint, type(None)) else: # pragma: no cover SSL_ALLOWED_TYPES = type(None) @dataclasses.dataclass(frozen=True) class ConnectionKey: # the key should contain an information about used proxy / TLS # to prevent reusing wrong connections from a pool host: str port: Optional[int] is_ssl: bool ssl: Union[SSLContext, None, bool, Fingerprint] proxy: Optional[URL] proxy_auth: Optional[BasicAuth] proxy_headers_hash: Optional[int] # hash(CIMultiDict) class ClientRequest: GET_METHODS = { hdrs.METH_GET, hdrs.METH_HEAD, hdrs.METH_OPTIONS, hdrs.METH_TRACE, } POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT} ALL_METHODS = GET_METHODS.union(POST_METHODS).union({hdrs.METH_DELETE}) DEFAULT_HEADERS = { hdrs.ACCEPT: "*/*", hdrs.ACCEPT_ENCODING: _gen_default_accept_encoding(), } body = b"" auth = None response = None _writer = None # async task for streaming data _continue = None # waiter future for '100 Continue' response # N.B. # Adding __del__ method with self._writer closing doesn't make sense # because _writer is instance method, thus it keeps a reference to self. # Until writer has finished finalizer will not be called. def __init__( self, method: str, url: URL, *, params: Optional[Mapping[str, str]] = None, headers: Optional[LooseHeaders] = None, skip_auto_headers: Iterable[str] = frozenset(), data: Any = None, cookies: Optional[LooseCookies] = None, auth: Optional[BasicAuth] = None, version: http.HttpVersion = http.HttpVersion11, compress: Optional[str] = None, chunked: Optional[bool] = None, expect100: bool = False, loop: asyncio.AbstractEventLoop, response_class: Optional[Type["ClientResponse"]] = None, proxy: Optional[URL] = None, proxy_auth: Optional[BasicAuth] = None, timer: Optional[BaseTimerContext] = None, session: Optional["ClientSession"] = None, ssl: Union[SSLContext, bool, Fingerprint, None] = None, proxy_headers: Optional[LooseHeaders] = None, traces: Optional[List["Trace"]] = None, ): assert isinstance(url, URL), url assert isinstance(proxy, (URL, type(None))), proxy # FIXME: session is None in tests only, need to fix tests # assert session is not None self._session = cast("ClientSession", session) if params: q = MultiDict(url.query) url2 = url.with_query(params) q.extend(url2.query) url = url.with_query(q) self.original_url = url self.url = url.with_fragment(None) self.method = method.upper() self.chunked = chunked self.compress = compress self.loop = loop self.length = None if response_class is None: real_response_class = ClientResponse else: real_response_class = response_class self.response_class = real_response_class # type: Type[ClientResponse] self._timer = timer if timer is not None else TimerNoop() self._ssl = ssl if loop.get_debug(): self._source_traceback = traceback.extract_stack(sys._getframe(1)) self.update_version(version) self.update_host(url) self.update_headers(headers) self.update_auto_headers(skip_auto_headers) self.update_cookies(cookies) self.update_content_encoding(data) 
self.update_auth(auth) self.update_proxy(proxy, proxy_auth, proxy_headers) self.update_body_from_data(data) if data is not None or self.method not in self.GET_METHODS: self.update_transfer_encoding() self.update_expect_continue(expect100) if traces is None: traces = [] self._traces = traces def is_ssl(self) -> bool: return self.url.scheme in ("https", "wss") @property def ssl(self) -> Union["SSLContext", None, bool, Fingerprint]: return self._ssl @property def connection_key(self) -> ConnectionKey: proxy_headers = self.proxy_headers if proxy_headers: h = hash( tuple((k, v) for k, v in proxy_headers.items()) ) # type: Optional[int] else: h = None return ConnectionKey( self.host, self.port, self.is_ssl(), self.ssl, self.proxy, self.proxy_auth, h, ) @property def host(self) -> str: ret = self.url.raw_host assert ret is not None return ret @property def port(self) -> Optional[int]: return self.url.port @property def request_info(self) -> RequestInfo: headers = CIMultiDictProxy(self.headers) # type: CIMultiDictProxy[str] return RequestInfo(self.url, self.method, headers, self.original_url) def update_host(self, url: URL) -> None: """Update destination host, port and connection type (ssl).""" # get host/port if not url.raw_host: raise InvalidURL(url) # basic auth info username, password = url.user, url.password if username: self.auth = helpers.BasicAuth(username, password or "") def update_version(self, version: Union[http.HttpVersion, str]) -> None: """Convert request version to two elements tuple. parser HTTP version '1.1' => (1, 1) """ if isinstance(version, str): v = [part.strip() for part in version.split(".", 1)] try: version = http.HttpVersion(int(v[0]), int(v[1])) except ValueError: raise ValueError( f"Can not parse http version number: {version}" ) from None self.version = version def update_headers(self, headers: Optional[LooseHeaders]) -> None: """Update request headers.""" self.headers = CIMultiDict() # type: CIMultiDict[str] # add host netloc = cast(str, self.url.raw_host) if helpers.is_ipv6_address(netloc): netloc = f"[{netloc}]" if self.url.port is not None and not self.url.is_default_port(): netloc += ":" + str(self.url.port) self.headers[hdrs.HOST] = netloc if headers: if isinstance(headers, (dict, MultiDictProxy, MultiDict)): headers = headers.items() # type: ignore[assignment] for key, value in headers: # type: ignore[misc] # A special case for Host header if key.lower() == "host": self.headers[key] = value else: self.headers.add(key, value) def update_auto_headers(self, skip_auto_headers: Iterable[str]) -> None: self.skip_auto_headers = CIMultiDict( (hdr, None) for hdr in sorted(skip_auto_headers) ) used_headers = self.headers.copy() used_headers.extend(self.skip_auto_headers) # type: ignore[arg-type] for hdr, val in self.DEFAULT_HEADERS.items(): if hdr not in used_headers: self.headers.add(hdr, val) if hdrs.USER_AGENT not in used_headers: self.headers[hdrs.USER_AGENT] = SERVER_SOFTWARE def update_cookies(self, cookies: Optional[LooseCookies]) -> None: """Update request cookies header.""" if not cookies: return c = SimpleCookie() # type: SimpleCookie[str] if hdrs.COOKIE in self.headers: c.load(self.headers.get(hdrs.COOKIE, "")) del self.headers[hdrs.COOKIE] if isinstance(cookies, Mapping): iter_cookies = cookies.items() else: iter_cookies = cookies # type: ignore[assignment] for name, value in iter_cookies: if isinstance(value, Morsel): # Preserve coded_value mrsl_val = value.get(value.key, Morsel()) mrsl_val.set(value.key, value.value, value.coded_value) c[name] = mrsl_val 
else: c[name] = value # type: ignore[assignment] self.headers[hdrs.COOKIE] = c.output(header="", sep=";").strip() def update_content_encoding(self, data: Any) -> None: """Set request content encoding.""" if data is None: return enc = self.headers.get(hdrs.CONTENT_ENCODING, "").lower() if enc: if self.compress: raise ValueError( "compress can not be set " "if Content-Encoding header is set" ) elif self.compress: if not isinstance(self.compress, str): self.compress = "deflate" self.headers[hdrs.CONTENT_ENCODING] = self.compress self.chunked = True # enable chunked, no need to deal with length def update_transfer_encoding(self) -> None: """Analyze transfer-encoding header.""" te = self.headers.get(hdrs.TRANSFER_ENCODING, "").lower() if "chunked" in te: if self.chunked: raise ValueError( "chunked can not be set " 'if "Transfer-Encoding: chunked" header is set' ) elif self.chunked: if hdrs.CONTENT_LENGTH in self.headers: raise ValueError( "chunked can not be set " "if Content-Length header is set" ) self.headers[hdrs.TRANSFER_ENCODING] = "chunked" else: if hdrs.CONTENT_LENGTH not in self.headers: self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body)) def update_auth(self, auth: Optional[BasicAuth]) -> None: """Set basic auth.""" if auth is None: auth = self.auth if auth is None: return if not isinstance(auth, helpers.BasicAuth): raise TypeError("BasicAuth() tuple is required instead") self.headers[hdrs.AUTHORIZATION] = auth.encode() def update_body_from_data(self, body: Any) -> None: if body is None: return # FormData if isinstance(body, FormData): body = body() try: body = payload.PAYLOAD_REGISTRY.get(body, disposition=None) except payload.LookupError: boundary = None if CONTENT_TYPE in self.headers: boundary = parse_mimetype(self.headers[CONTENT_TYPE]).parameters.get( "boundary" ) body = FormData(body, boundary=boundary)() self.body = body # enable chunked encoding if needed if not self.chunked: if hdrs.CONTENT_LENGTH not in self.headers: size = body.size if size is None: self.chunked = True else: if hdrs.CONTENT_LENGTH not in self.headers: self.headers[hdrs.CONTENT_LENGTH] = str(size) # copy payload headers assert body.headers for (key, value) in body.headers.items(): if key in self.headers: continue if key in self.skip_auto_headers: continue self.headers[key] = value def update_expect_continue(self, expect: bool = False) -> None: if expect: self.headers[hdrs.EXPECT] = "100-continue" elif self.headers.get(hdrs.EXPECT, "").lower() == "100-continue": expect = True if expect: self._continue = self.loop.create_future() def update_proxy( self, proxy: Optional[URL], proxy_auth: Optional[BasicAuth], proxy_headers: Optional[LooseHeaders], ) -> None: if proxy_auth and not isinstance(proxy_auth, helpers.BasicAuth): raise ValueError("proxy_auth must be None or BasicAuth() tuple") self.proxy = proxy self.proxy_auth = proxy_auth self.proxy_headers = proxy_headers def keep_alive(self) -> bool: if self.version < HttpVersion10: # keep alive not supported at all return False if self.version == HttpVersion10: if self.headers.get(hdrs.CONNECTION) == "keep-alive": return True else: # no headers means we close for Http 1.0 return False elif self.headers.get(hdrs.CONNECTION) == "close": return False return True async def write_bytes( self, writer: AbstractStreamWriter, conn: "Connection" ) -> None: """Support coroutines that yields bytes objects.""" # 100 response if self._continue is not None: await writer.drain() await self._continue protocol = conn.protocol assert protocol is not None try: if 
isinstance(self.body, payload.Payload): await self.body.write(writer) else: if isinstance(self.body, (bytes, bytearray)): self.body = (self.body,) # type: ignore[assignment] for chunk in self.body: await writer.write(chunk) # type: ignore[arg-type] await writer.write_eof() except OSError as exc: new_exc = ClientOSError( exc.errno, "Can not write request body for %s" % self.url ) new_exc.__context__ = exc new_exc.__cause__ = exc protocol.set_exception(new_exc) except asyncio.CancelledError as exc: if not conn.closed: protocol.set_exception(exc) except Exception as exc: protocol.set_exception(exc) finally: self._writer = None async def send(self, conn: "Connection") -> "ClientResponse": # Specify request target: # - CONNECT request must send authority form URI # - not CONNECT proxy must send absolute form URI # - most common is origin form URI if self.method == hdrs.METH_CONNECT: connect_host = self.url.raw_host assert connect_host is not None if helpers.is_ipv6_address(connect_host): connect_host = f"[{connect_host}]" path = f"{connect_host}:{self.url.port}" elif self.proxy and not self.is_ssl(): path = str(self.url) else: path = self.url.raw_path if self.url.raw_query_string: path += "?" + self.url.raw_query_string protocol = conn.protocol assert protocol is not None writer = StreamWriter( protocol, self.loop, on_chunk_sent=functools.partial( self._on_chunk_request_sent, self.method, self.url ), on_headers_sent=functools.partial( self._on_headers_request_sent, self.method, self.url ), ) if self.compress: writer.enable_compression(self.compress) if self.chunked is not None: writer.enable_chunking() # set default content-type if ( self.method in self.POST_METHODS and hdrs.CONTENT_TYPE not in self.skip_auto_headers and hdrs.CONTENT_TYPE not in self.headers ): self.headers[hdrs.CONTENT_TYPE] = "application/octet-stream" # set the connection header connection = self.headers.get(hdrs.CONNECTION) if not connection: if self.keep_alive(): if self.version == HttpVersion10: connection = "keep-alive" else: if self.version == HttpVersion11: connection = "close" if connection is not None: self.headers[hdrs.CONNECTION] = connection # status + headers status_line = "{0} {1} HTTP/{2[0]}.{2[1]}".format( self.method, path, self.version ) await writer.write_headers(status_line, self.headers) self._writer = self.loop.create_task(self.write_bytes(writer, conn)) response_class = self.response_class assert response_class is not None self.response = response_class( self.method, self.original_url, writer=self._writer, continue100=self._continue, timer=self._timer, request_info=self.request_info, traces=self._traces, loop=self.loop, session=self._session, ) return self.response async def close(self) -> None: if self._writer is not None: try: await self._writer finally: self._writer = None def terminate(self) -> None: if self._writer is not None: if not self.loop.is_closed(): self._writer.cancel() self._writer = None async def _on_chunk_request_sent(self, method: str, url: URL, chunk: bytes) -> None: for trace in self._traces: await trace.send_request_chunk_sent(method, url, chunk) async def _on_headers_request_sent( self, method: str, url: URL, headers: "CIMultiDict[str]" ) -> None: for trace in self._traces: await trace.send_request_headers(method, url, headers) class ClientResponse(HeadersMixin): # from the Status-Line of the response version = None # HTTP-Version status = None # type: int # Status-Code reason = None # Reason-Phrase content = None # type: StreamReader # Payload stream _headers = None # type: 
CIMultiDictProxy[str] # Response headers _raw_headers = None # type: RawHeaders # Response raw headers _connection = None # current connection _source_traceback = None # setted up by ClientRequest after ClientResponse object creation # post-init stage allows to not change ctor signature _closed = True # to allow __del__ for non-initialized properly response _released = False def __init__( self, method: str, url: URL, *, writer: "asyncio.Task[None]", continue100: Optional["asyncio.Future[bool]"], timer: BaseTimerContext, request_info: RequestInfo, traces: List["Trace"], loop: asyncio.AbstractEventLoop, session: "ClientSession", ) -> None: assert isinstance(url, URL) super().__init__() self.method = method self.cookies = SimpleCookie() # type: SimpleCookie[str] self._real_url = url self._url = url.with_fragment(None) self._body = None # type: Optional[bytes] self._writer = writer # type: Optional[asyncio.Task[None]] self._continue = continue100 # None by default self._closed = True self._history = () # type: Tuple[ClientResponse, ...] self._request_info = request_info self._timer = timer if timer is not None else TimerNoop() self._cache = {} # type: Dict[str, Any] self._traces = traces self._loop = loop # store a reference to session #1985 self._session = session # type: Optional[ClientSession] if loop.get_debug(): self._source_traceback = traceback.extract_stack(sys._getframe(1)) @reify def url(self) -> URL: return self._url @reify def real_url(self) -> URL: return self._real_url @reify def host(self) -> str: assert self._url.host is not None return self._url.host @reify def headers(self) -> "CIMultiDictProxy[str]": return self._headers @reify def raw_headers(self) -> RawHeaders: return self._raw_headers @reify def request_info(self) -> RequestInfo: return self._request_info @reify def content_disposition(self) -> Optional[ContentDisposition]: raw = self._headers.get(hdrs.CONTENT_DISPOSITION) if raw is None: return None disposition_type, params_dct = multipart.parse_content_disposition(raw) params = MappingProxyType(params_dct) filename = multipart.content_disposition_filename(params) return ContentDisposition(disposition_type, params, filename) def __del__(self, _warnings: Any = warnings) -> None: if self._closed: return if self._connection is not None: self._connection.release() self._cleanup_writer() if self._loop.get_debug(): _warnings.warn( f"Unclosed response {self!r}", ResourceWarning, source=self ) context = {"client_response": self, "message": "Unclosed response"} if self._source_traceback: context["source_traceback"] = self._source_traceback self._loop.call_exception_handler(context) def __repr__(self) -> str: out = io.StringIO() ascii_encodable_url = str(self.url) if self.reason: ascii_encodable_reason = self.reason.encode( "ascii", "backslashreplace" ).decode("ascii") else: ascii_encodable_reason = self.reason print( "<ClientResponse({}) [{} {}]>".format( ascii_encodable_url, self.status, ascii_encodable_reason ), file=out, ) print(self.headers, file=out) return out.getvalue() @property def connection(self) -> Optional["Connection"]: return self._connection @reify def history(self) -> Tuple["ClientResponse", ...]: """A sequence of responses, if redirects occurred.""" return self._history @reify def links(self) -> "MultiDictProxy[MultiDictProxy[Union[str, URL]]]": links_str = ", ".join(self.headers.getall("link", [])) if not links_str: return MultiDictProxy(MultiDict()) links = MultiDict() # type: MultiDict[MultiDictProxy[Union[str, URL]]] for val in re.split(r",(?=\s*<)", 
links_str): match = re.match(r"\s*<(.*)>(.*)", val) if match is None: # pragma: no cover # the check exists to suppress mypy error
link = MultiDict() # type: MultiDict[Union[str, URL]] for param in params: match = re.match(r"^\s*(\S*)\s*=\s*(['\"]?)(.*?)(\2)\s*$", param, re.M) if match is None: # pragma: no cover # the check exists to suppress mypy error continue key, _, value, _ = match.groups() link.add(key, value) key = link.get("rel", url) # type: ignore[assignment] link.add("url", self.url.join(URL(url))) links.add(key, MultiDictProxy(link)) return MultiDictProxy(links) async def start(self, connection: "Connection") -> "ClientResponse": """Start response processing.""" self._closed = False self._protocol = connection.protocol self._connection = connection with self._timer: while True: # read response try: protocol = self._protocol message, payload = await protocol.read() # type: ignore[union-attr] except http.HttpProcessingError as exc: raise ClientResponseError( self.request_info, self.history, status=exc.code, message=exc.message, headers=exc.headers, ) from exc if message.code < 100 or message.code > 199 or message.code == 101: break if self._continue is not None: set_result(self._continue, True) self._continue = None # payload eof handler payload.on_eof(self._response_eof) # response status self.version = message.version self.status = message.code self.reason = message.reason # headers self._headers = message.headers # type is CIMultiDictProxy self._raw_headers = message.raw_headers # type is Tuple[bytes, bytes] # payload self.content = payload # cookies for hdr in self.headers.getall(hdrs.SET_COOKIE, ()): try: self.cookies.load(hdr) except CookieError as exc: client_logger.warning("Can not load response cookies: %s", exc) return self def _response_eof(self) -> None: if self._closed: return if self._connection is not None: # websocket, protocol could be None because # connection could be detached if ( self._connection.protocol is not None and self._connection.protocol.upgraded ): return self._connection.release() self._connection = None self._closed = True self._cleanup_writer() @property def closed(self) -> bool: return self._closed def close(self) -> None: if not self._released: self._notify_content() if self._closed: return self._closed = True if self._loop is None or self._loop.is_closed(): return if self._connection is not None: self._connection.close() self._connection = None self._cleanup_writer() def release(self) -> Any: if not self._released: self._notify_content() if self._closed: return noop() self._closed = True if self._connection is not None: self._connection.release() self._connection = None self._cleanup_writer() return noop() @property def ok(self) -> bool: """Returns ``True`` if ``status`` is less than ``400``, ``False`` if not. This is **not** a check for ``200 OK`` but a check that the response status is under 400. 
""" return 400 > self.status def raise_for_status(self) -> None: if not self.ok: # reason should always be not None for a started response assert self.reason is not None self.release() raise ClientResponseError( self.request_info, self.history, status=self.status, message=self.reason, headers=self.headers, ) def _cleanup_writer(self) -> None: if self._writer is not None: self._writer.cancel() self._writer = None self._session = None def _notify_content(self) -> None: content = self.content if content and content.exception() is None: content.set_exception(ClientConnectionError("Connection closed")) self._released = True async def wait_for_close(self) -> None: if self._writer is not None: try: await self._writer finally: self._writer = None self.release() async def read(self) -> bytes: """Read response payload.""" if self._body is None: try: self._body = await self.content.read() for trace in self._traces: await trace.send_response_chunk_received( self.method, self.url, self._body ) except BaseException: self.close() raise elif self._released: raise ClientConnectionError("Connection closed") return self._body def get_encoding(self) -> str: ctype = self.headers.get(hdrs.CONTENT_TYPE, "").lower() mimetype = helpers.parse_mimetype(ctype) encoding = mimetype.parameters.get("charset") if encoding: try: codecs.lookup(encoding) except LookupError: encoding = None if not encoding: if mimetype.type == "application" and ( mimetype.subtype == "json" or mimetype.subtype == "rdap" ): # RFC 7159 states that the default encoding is UTF-8. # RFC 7483 defines application/rdap+json encoding = "utf-8" elif self._body is None: raise RuntimeError( "Cannot guess the encoding of " "a not yet read body" ) else: encoding = chardet.detect(self._body)["encoding"] if not encoding: encoding = "utf-8" return encoding async def text(self, encoding: Optional[str] = None, errors: str = "strict") -> str: """Read response payload and decode.""" if self._body is None: await self.read() if encoding is None: encoding = self.get_encoding() return self._body.decode(encoding, errors=errors) # type: ignore[union-attr] async def json( self, *, encoding: Optional[str] = None, loads: JSONDecoder = DEFAULT_JSON_DECODER, content_type: Optional[str] = "application/json", ) -> Any: """Read and decodes JSON response.""" if self._body is None: await self.read() if content_type: ctype = self.headers.get(hdrs.CONTENT_TYPE, "").lower() if not is_expected_content_type(ctype, content_type): raise ContentTypeError( self.request_info, self.history, message=( "Attempt to decode JSON with " "unexpected mimetype: %s" % ctype ), headers=self.headers, ) if encoding is None: encoding = self.get_encoding() return loads(self._body.decode(encoding)) # type: ignore[union-attr] async def __aenter__(self) -> "ClientResponse": return self async def __aexit__( self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType], ) -> None: # similar to _RequestContextManager, we do not need to check # for exceptions, response object can close connection # if state is broken self.release()
continue url, params_str = match.groups() params = params_str.split(";")[1:]
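The middle above is the heart of `ClientResponse.links`: each Link header entry is split on commas that precede a `<`, then separated into its `<target>` and `;`-delimited parameters. A standalone sketch with a made-up header value:

import re

links_str = '<https://example.com/?page=2>; rel="next", <https://example.com/?page=5>; rel="last"'
for val in re.split(r",(?=\s*<)", links_str):
    match = re.match(r"\s*<(.*)>(.*)", val)
    if match is None:
        continue
    url, params_str = match.groups()
    params = params_str.split(";")[1:]   # drop the empty chunk before the first ';'
    print(url, [p.strip() for p in params])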
estado.service.ts
import { API_CONFIG } from './../../config/api.config'; import { HttpClient, HttpErrorResponse } from "@angular/common/http"; import { Injectable } from "@angular/core"; import { EstadoDTO } from 'src/models/estado.dto'; import { Observable, throwError } from 'rxjs'; @Injectable() export class
{ constructor(public http:HttpClient){} findAll(): Observable<EstadoDTO[]> { return this.http.get<EstadoDTO[]>(`${API_CONFIG.baseURl}/estados`); } }
EstadoService
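For comparison, the same GET the Angular service performs, sketched from Python; the base URL stands in for `API_CONFIG.baseURl` and is a made-up placeholder:

import requests

BASE_URL = "https://localhost:8080"            # hypothetical stand-in for API_CONFIG.baseURl
estados = requests.get(f"{BASE_URL}/estados").json()
for estado in estados:
    print(estado)                              # each entry mirrors an EstadoDTO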
NRCGIList.go
// Copyright 2019 Communication Service/Software Laboratory, National Chiao Tung University (free5gc.org)
// SPDX-License-Identifier: Apache-2.0 package ngapType // Need to import "github.com/free5gc/aper" if it uses "aper" /* Sequence of = 35, FULL Name = struct NR_CGIList */ /* NRCGI */ type NRCGIList struct { List []NRCGI `aper:"valueExt,sizeLB:1,sizeUB:16384"` }
//
ons_mqtt_query_client_by_group_id.go
package ons //Licensed under the Apache License, Version 2.0 (the "License"); //you may not use this file except in compliance with the License. //You may obtain a copy of the License at // //http://www.apache.org/licenses/LICENSE-2.0 // //Unless required by applicable law or agreed to in writing, software //distributed under the License is distributed on an "AS IS" BASIS, //WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //See the License for the specific language governing permissions and //limitations under the License. // // Code generated by Alibaba Cloud SDK Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" ) // OnsMqttQueryClientByGroupId invokes the ons.OnsMqttQueryClientByGroupId API synchronously // api document: https://help.aliyun.com/api/ons/onsmqttqueryclientbygroupid.html func (client *Client) OnsMqttQueryClientByGroupId(request *OnsMqttQueryClientByGroupIdRequest) (response *OnsMqttQueryClientByGroupIdResponse, err error) { response = CreateOnsMqttQueryClientByGroupIdResponse() err = client.DoAction(request, response) return } // OnsMqttQueryClientByGroupIdWithChan invokes the ons.OnsMqttQueryClientByGroupId API asynchronously // api document: https://help.aliyun.com/api/ons/onsmqttqueryclientbygroupid.html // asynchronous document: https://help.aliyun.com/document_detail/66220.html func (client *Client) OnsMqttQueryClientByGroupIdWithChan(request *OnsMqttQueryClientByGroupIdRequest) (<-chan *OnsMqttQueryClientByGroupIdResponse, <-chan error) { responseChan := make(chan *OnsMqttQueryClientByGroupIdResponse, 1) errChan := make(chan error, 1) err := client.AddAsyncTask(func() { defer close(responseChan) defer close(errChan) response, err := client.OnsMqttQueryClientByGroupId(request) if err != nil { errChan <- err } else { responseChan <- response } }) if err != nil { errChan <- err close(responseChan) close(errChan) } return responseChan, errChan } // OnsMqttQueryClientByGroupIdWithCallback invokes the ons.OnsMqttQueryClientByGroupId API asynchronously // api document: https://help.aliyun.com/api/ons/onsmqttqueryclientbygroupid.html // asynchronous document: https://help.aliyun.com/document_detail/66220.html func (client *Client) OnsMqttQueryClientByGroupIdWithCallback(request *OnsMqttQueryClientByGroupIdRequest, callback func(response *OnsMqttQueryClientByGroupIdResponse, err error)) <-chan int { result := make(chan int, 1) err := client.AddAsyncTask(func() { var response *OnsMqttQueryClientByGroupIdResponse var err error defer close(result) response, err = client.OnsMqttQueryClientByGroupId(request) callback(response, err) result <- 1 }) if err != nil { defer close(result) callback(nil, err) result <- 0 } return result } // OnsMqttQueryClientByGroupIdRequest is the request struct for api OnsMqttQueryClientByGroupId type OnsMqttQueryClientByGroupIdRequest struct { *requests.RpcRequest GroupId string `position:"Query" name:"GroupId"` InstanceId string `position:"Query" name:"InstanceId"` } // OnsMqttQueryClientByGroupIdResponse is the response struct for api OnsMqttQueryClientByGroupId type OnsMqttQueryClientByGroupIdResponse struct { *responses.BaseResponse RequestId string `json:"RequestId" xml:"RequestId"` HelpUrl string `json:"HelpUrl" xml:"HelpUrl"` MqttClientSetDo MqttClientSetDo `json:"MqttClientSetDo" xml:"MqttClientSetDo"` } // CreateOnsMqttQueryClientByGroupIdRequest creates 
a request to invoke OnsMqttQueryClientByGroupId API func
() (request *OnsMqttQueryClientByGroupIdRequest) { request = &OnsMqttQueryClientByGroupIdRequest{ RpcRequest: &requests.RpcRequest{}, } request.InitWithApiInfo("Ons", "2019-02-14", "OnsMqttQueryClientByGroupId", "ons", "openAPI") request.Method = requests.POST return } // CreateOnsMqttQueryClientByGroupIdResponse creates a response to parse from OnsMqttQueryClientByGroupId response func CreateOnsMqttQueryClientByGroupIdResponse() (response *OnsMqttQueryClientByGroupIdResponse) { response = &OnsMqttQueryClientByGroupIdResponse{ BaseResponse: &responses.BaseResponse{}, } return }
CreateOnsMqttQueryClientByGroupIdRequest
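The `WithChan` variant above wraps the synchronous SDK call in an async task that reports its result or error over channels. A rough Python asyncio analogue of the same pattern, where `query_client_by_group_id` is a hypothetical stand-in for the blocking SDK call:

import asyncio

def query_client_by_group_id(group_id):
    # Placeholder for the blocking API call.
    return {"GroupId": group_id, "Online": 0}

async def query_async(group_id):
    # Run the blocking call off-thread and hand back an awaitable
    # instead of Go's response/error channel pair.
    return await asyncio.to_thread(query_client_by_group_id, group_id)

print(asyncio.run(query_async("GID_test")))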
TokensBundle.py
from dataclasses import dataclass
__all__ = [ "TokensBundle", ] @dataclass(frozen=True) class TokensBundle: access_token: str refresh_token: str expires_in: int
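Usage sketch for the frozen TokensBundle dataclass above: instances are immutable, so rotated credentials require building a new bundle (the token values below are made-up placeholders):

import dataclasses

bundle = TokensBundle(access_token="at-123", refresh_token="rt-456", expires_in=3600)
print(bundle.access_token)

# frozen=True makes attribute assignment raise FrozenInstanceError.
try:
    bundle.access_token = "other"
except dataclasses.FrozenInstanceError:
    pass

# dataclasses.replace builds an updated copy instead of mutating in place.
refreshed = dataclasses.replace(bundle, access_token="at-789")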
behaviour.rs
// Copyright 2019 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use crate::protocols_handler::{IntoProtocolsHandler, ProtocolsHandler};
use libp2p_core::{ConnectedPoint, Multiaddr, PeerId, connection::{ConnectionId, ListenerId}};
use std::{error, task::Context, task::Poll};

/// A behaviour for the network. Allows customizing the swarm.
///
/// This trait has been designed to be composable. Multiple implementations can be combined into
/// one that handles all the behaviours at once.
///
/// # Deriving `NetworkBehaviour`
///
/// Crate users can implement this trait by using the `#[derive(NetworkBehaviour)]`
/// proc macro re-exported by the `libp2p` crate. The macro generates a delegating `trait`
/// implementation for the `struct`, which delegates method calls to all trait members.
///
/// By default the derive sets the [`NetworkBehaviour::OutEvent`] as `()` but this can be overridden
/// with `#[behaviour(out_event = "AnotherType")]`.
///
/// Struct members that don't implement [`NetworkBehaviour`] must be annotated with `#[behaviour(ignore)]`.
///
/// By default, events generated by the remaining members are delegated to [`NetworkBehaviourEventProcess`]
/// implementations. Those must be provided by the user on the type that [`NetworkBehaviour`] is
/// derived on.
///
/// Alternatively, users can specify `#[behaviour(event_process = false)]`. In this case, users
/// should provide a custom `out_event` and implement [`From`] for each of the event types generated
/// by the struct members.
/// Not processing events within the derived [`NetworkBehaviour`] will cause them to be emitted as
/// part of polling the swarm in [`SwarmEvent::Behaviour`](crate::SwarmEvent::Behaviour).
///
/// Optionally one can provide a custom `poll` function through the `#[behaviour(poll_method = "poll")]`
/// attribute.
/// This function must have the same signature as the [`NetworkBehaviour#poll`] function and will
/// be called last within the generated [`NetworkBehaviour`] implementation.
pub trait NetworkBehaviour: Send + 'static {
    /// Handler for all the protocols the network behaviour supports.
    type ProtocolsHandler: IntoProtocolsHandler;

    /// Event generated by the `NetworkBehaviour` and that the swarm will report back.
    type OutEvent: Send + 'static;

    /// Creates a new `ProtocolsHandler` for a connection with a peer.
    ///
    /// Every time an incoming connection is opened, and every time we start dialing a node, this
    /// method is called.
    ///
    /// The returned object is a handler for that specific connection, and will be moved to a
    /// background task dedicated to that connection.
    ///
    /// The network behaviour (i.e. the implementation of this trait) and the handlers it has
    /// spawned (i.e. the objects returned by `new_handler`) can communicate by passing messages.
    /// Messages sent from the handler to the behaviour are injected with `inject_event`, and
    /// the behaviour can send a message to the handler by making `poll` return `SendEvent`.
    fn new_handler(&mut self) -> Self::ProtocolsHandler;

    /// Addresses that this behaviour is aware of for this specific peer, and that may allow
    /// reaching the peer.
    ///
    /// The addresses will be tried in the order returned by this function, which means that they
    /// should be ordered by decreasing likelihood of reachability. In other words, the first
    /// address should be the most likely to be reachable.
    fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec<Multiaddr>;

    /// Indicates to the behaviour that we connected to the node with the given peer id.
    ///
    /// This node now has a handler (as spawned by `new_handler`) running in the background.
    ///
    /// This method is only called when the connection to the peer is
    /// established, preceded by `inject_connection_established`.
    fn inject_connected(&mut self, peer_id: &PeerId);

    /// Indicates to the behaviour that we disconnected from the node with the given peer id.
    ///
    /// There is no handler running anymore for this node. Any event that has been sent to it may
    /// or may not have been processed by the handler.
    ///
    /// This method is only called when the last established connection to the peer
    /// is closed, preceded by `inject_connection_closed`.
    fn inject_disconnected(&mut self, peer_id: &PeerId);

    /// Informs the behaviour about a newly established connection to a peer.
    fn inject_connection_established(&mut self, _: &PeerId, _: &ConnectionId, _: &ConnectedPoint)
    {}

    /// Informs the behaviour about a closed connection to a peer.
    ///
    /// A call to this method is always paired with an earlier call to
    /// `inject_connection_established` with the same peer ID, connection ID and
    /// endpoint.
    fn inject_connection_closed(&mut self, _: &PeerId, _: &ConnectionId, _: &ConnectedPoint)
    {}

    /// Informs the behaviour that the [`ConnectedPoint`] of an existing connection has changed.
    fn inject_address_change(
        &mut self,
        _: &PeerId,
        _: &ConnectionId,
        _old: &ConnectedPoint,
        _new: &ConnectedPoint
    ) {}

    /// Informs the behaviour about an event generated by the handler dedicated to the peer
    /// identified by `peer_id`.
    ///
    /// The `peer_id` is guaranteed to be in a connected state. In other words, `inject_connected`
    /// has previously been called with this `PeerId`.
    fn inject_event(
        &mut self,
        peer_id: PeerId,
        connection: ConnectionId,
        event: <<Self::ProtocolsHandler as IntoProtocolsHandler>::Handler as ProtocolsHandler>::OutEvent
    );

    /// Indicates to the behaviour that we tried to reach an address, but failed.
    ///
    /// If we were trying to reach a specific node, its ID is passed as parameter. If this is the
    /// last address to attempt for the given node, then `inject_dial_failure` is called afterwards.
    fn inject_addr_reach_failure(&mut self, _peer_id: Option<&PeerId>, _addr: &Multiaddr, _error: &dyn error::Error) {
    }

    /// Indicates to the behaviour that we tried to dial all the addresses known for a node, but
    /// failed.
    ///
    /// The `peer_id` is guaranteed to be in a disconnected state. In other words,
    /// `inject_connected` has not been called, or `inject_disconnected` has been called since then.
    fn inject_dial_failure(&mut self, _peer_id: &PeerId) {
    }

    /// Indicates to the behaviour that we have started listening on a new multiaddr.
    fn inject_new_listen_addr(&mut self, _addr: &Multiaddr) {
    }

    /// Indicates to the behaviour that a new multiaddr we were listening on has expired,
    /// which means that we are no longer listening on it.
    fn inject_expired_listen_addr(&mut self, _addr: &Multiaddr) {
    }

    /// Indicates to the behaviour that we have discovered a new external address for us.
    fn inject_new_external_addr(&mut self, _addr: &Multiaddr) {
    }

    /// A listener experienced an error.
    fn inject_listener_error(&mut self, _id: ListenerId, _err: &(dyn std::error::Error + 'static)) {
    }

    /// A listener closed.
    fn
(&mut self, _id: ListenerId, _reason: Result<(), &std::io::Error>) { } /// Polls for things that swarm should do. /// /// This API mimics the API of the `Stream` trait. The method may register the current task in /// order to wake it up at a later point in time. fn poll(&mut self, cx: &mut Context, params: &mut impl PollParameters) -> Poll<NetworkBehaviourAction<<<Self::ProtocolsHandler as IntoProtocolsHandler>::Handler as ProtocolsHandler>::InEvent, Self::OutEvent>>; } /// Parameters passed to `poll()`, that the `NetworkBehaviour` has access to. pub trait PollParameters { /// Iterator returned by [`supported_protocols`](PollParameters::supported_protocols). type SupportedProtocolsIter: ExactSizeIterator<Item = Vec<u8>>; /// Iterator returned by [`listened_addresses`](PollParameters::listened_addresses). type ListenedAddressesIter: ExactSizeIterator<Item = Multiaddr>; /// Iterator returned by [`external_addresses`](PollParameters::external_addresses). type ExternalAddressesIter: ExactSizeIterator<Item = Multiaddr>; /// Returns the list of protocol the behaviour supports when a remote negotiates a protocol on /// an inbound substream. /// /// The iterator's elements are the ASCII names as reported on the wire. /// /// Note that the list is computed once at initialization and never refreshed. fn supported_protocols(&self) -> Self::SupportedProtocolsIter; /// Returns the list of the addresses we're listening on. fn listened_addresses(&self) -> Self::ListenedAddressesIter; /// Returns the list of the addresses nodes can use to reach us. fn external_addresses(&self) -> Self::ExternalAddressesIter; /// Returns the peer id of the local node. fn local_peer_id(&self) -> &PeerId; } /// When deriving [`NetworkBehaviour`] this trait must by default be implemented for all the /// possible event types generated by the inner behaviours. /// /// You can opt out of this behaviour through `#[behaviour(event_process = false)]`. See the /// documentation of [`NetworkBehaviour`] for details. pub trait NetworkBehaviourEventProcess<TEvent> { /// Called when one of the fields of the type you're deriving `NetworkBehaviour` on generates /// an event. fn inject_event(&mut self, event: TEvent); } /// An action that a [`NetworkBehaviour`] can trigger in the [`Swarm`] /// in whose context it is executing. /// /// [`Swarm`]: super::Swarm #[derive(Debug, Clone)] pub enum NetworkBehaviourAction<TInEvent, TOutEvent> { /// Instructs the `Swarm` to return an event when it is being polled. GenerateEvent(TOutEvent), /// Instructs the swarm to dial the given multiaddress, with no knowledge of the `PeerId` that /// may be reached. DialAddress { /// The address to dial. address: Multiaddr, }, /// Instructs the swarm to dial a known `PeerId`. /// /// The `addresses_of_peer` method is called to determine which addresses to attempt to reach. /// /// If we were already trying to dial this node, the addresses that are not yet in the queue of /// addresses to try are added back to this queue. /// /// On success, [`NetworkBehaviour::inject_connected`] is invoked. /// On failure, [`NetworkBehaviour::inject_dial_failure`] is invoked. DialPeer { /// The peer to try reach. peer_id: PeerId, /// The condition for initiating a new dialing attempt. condition: DialPeerCondition, }, /// Instructs the `Swarm` to send an event to the handler dedicated to a /// connection with a peer. /// /// If the `Swarm` is connected to the peer, the message is delivered to the /// `ProtocolsHandler` instance identified by the peer ID and connection ID. 
/// /// If the specified connection no longer exists, the event is silently dropped. /// /// Typically the connection ID given is the same as the one passed to /// [`NetworkBehaviour::inject_event`], i.e. whenever the behaviour wishes to /// respond to a request on the same connection (and possibly the same /// substream, as per the implementation of `ProtocolsHandler`). /// /// Note that even if the peer is currently connected, connections can get closed /// at any time and thus the event may not reach a handler. NotifyHandler { /// The peer for whom a `ProtocolsHandler` should be notified. peer_id: PeerId, /// The ID of the connection whose `ProtocolsHandler` to notify. handler: NotifyHandler, /// The event to send. event: TInEvent, }, /// Informs the `Swarm` about a multi-address observed by a remote for /// the local node. /// /// It is advisable to issue `ReportObservedAddr` actions at a fixed frequency /// per node. This way address information will be more accurate over time /// and individual outliers carry less weight. ReportObservedAddr { /// The observed address of the local node. address: Multiaddr, }, } /// The options w.r.t. which connection handlers to notify of an event. #[derive(Debug, Clone)] pub enum NotifyHandler { /// Notify a particular connection handler. One(ConnectionId), /// Notify an arbitrary connection handler. Any, /// Notify all connection handlers. All } /// The available conditions under which a new dialing attempt to /// a peer is initiated when requested by [`NetworkBehaviourAction::DialPeer`]. #[derive(Debug, Copy, Clone)] #[non_exhaustive] pub enum DialPeerCondition { /// A new dialing attempt is initiated _only if_ the peer is currently /// considered disconnected, i.e. there is no established connection /// and no ongoing dialing attempt. /// /// If there is an ongoing dialing attempt, the addresses reported by /// [`NetworkBehaviour::addresses_of_peer`] are added to the ongoing /// dialing attempt, ignoring duplicates. Disconnected, /// A new dialing attempt is initiated _only if_ there is currently /// no ongoing dialing attempt, i.e. the peer is either considered /// disconnected or connected but without an ongoing dialing attempt. /// /// If there is an ongoing dialing attempt, the addresses reported by /// [`NetworkBehaviour::addresses_of_peer`] are added to the ongoing /// dialing attempt, ignoring duplicates. NotDialing, /// A new dialing attempt is always initiated, only subject to the /// configured connection limits. Always, } impl Default for DialPeerCondition { fn default() -> Self { DialPeerCondition::Disconnected } }
inject_listener_closed
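The DialPeerCondition variants documented above boil down to a small predicate over the swarm's current view of a peer. The Go snippet below is a language-agnostic sketch of that predicate, under stated assumptions: none of its names are libp2p API, and the real swarm additionally applies connection limits and merges addresses into an ongoing dial, which this ignores.

package main

import "fmt"

type dialCondition int

const (
	disconnected dialCondition = iota // dial only if no connection and no ongoing dial
	notDialing                        // dial only if no ongoing dialing attempt
	always                            // always start a new attempt (subject to limits)
)

// shouldDial mirrors the documented semantics: given whether the peer is
// currently connected and whether a dialing attempt is in flight, decide
// whether a DialPeer request starts a new attempt.
func shouldDial(cond dialCondition, connected, dialing bool) bool {
	switch cond {
	case disconnected:
		return !connected && !dialing
	case notDialing:
		return !dialing
	default: // always
		return true
	}
}

func main() {
	fmt.Println(shouldDial(disconnected, true, false)) // false: already connected
	fmt.Println(shouldDial(notDialing, true, false))   // true: connected but not dialing
	fmt.Println(shouldDial(always, true, true))        // true: unconditional
}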
cloudhsmv2.d.ts
import {Request} from '../lib/request'; import {Response} from '../lib/response'; import {AWSError} from '../lib/error'; import {Service} from '../lib/service'; import {ServiceConfigurationOptions} from '../lib/service'; import {ConfigBase as Config} from '../lib/config-base'; interface Blob {} declare class CloudHSMV2 extends Service { /** * Constructs a service object. This object has one method for each API operation. */ constructor(options?: CloudHSMV2.Types.ClientConfiguration) config: Config & CloudHSMV2.Types.ClientConfiguration; /** * Copy an AWS CloudHSM cluster backup to a different region. */ copyBackupToRegion(params: CloudHSMV2.Types.CopyBackupToRegionRequest, callback?: (err: AWSError, data: CloudHSMV2.Types.CopyBackupToRegionResponse) => void): Request<CloudHSMV2.Types.CopyBackupToRegionResponse, AWSError>; /** * Copy an AWS CloudHSM cluster backup to a different region. */ copyBackupToRegion(callback?: (err: AWSError, data: CloudHSMV2.Types.CopyBackupToRegionResponse) => void): Request<CloudHSMV2.Types.CopyBackupToRegionResponse, AWSError>; /** * Creates a new AWS CloudHSM cluster. */ createCluster(params: CloudHSMV2.Types.CreateClusterRequest, callback?: (err: AWSError, data: CloudHSMV2.Types.CreateClusterResponse) => void): Request<CloudHSMV2.Types.CreateClusterResponse, AWSError>; /** * Creates a new AWS CloudHSM cluster. */ createCluster(callback?: (err: AWSError, data: CloudHSMV2.Types.CreateClusterResponse) => void): Request<CloudHSMV2.Types.CreateClusterResponse, AWSError>; /** * Creates a new hardware security module (HSM) in the specified AWS CloudHSM cluster. */ createHsm(params: CloudHSMV2.Types.CreateHsmRequest, callback?: (err: AWSError, data: CloudHSMV2.Types.CreateHsmResponse) => void): Request<CloudHSMV2.Types.CreateHsmResponse, AWSError>; /** * Creates a new hardware security module (HSM) in the specified AWS CloudHSM cluster. */ createHsm(callback?: (err: AWSError, data: CloudHSMV2.Types.CreateHsmResponse) => void): Request<CloudHSMV2.Types.CreateHsmResponse, AWSError>; /** * Deletes a specified AWS CloudHSM backup. A backup can be restored up to 7 days after the DeleteBackup request is made. For more information on restoring a backup, see RestoreBackup. */ deleteBackup(params: CloudHSMV2.Types.DeleteBackupRequest, callback?: (err: AWSError, data: CloudHSMV2.Types.DeleteBackupResponse) => void): Request<CloudHSMV2.Types.DeleteBackupResponse, AWSError>; /**
/** * Deletes the specified AWS CloudHSM cluster. Before you can delete a cluster, you must delete all HSMs in the cluster. To see if the cluster contains any HSMs, use DescribeClusters. To delete an HSM, use DeleteHsm. */ deleteCluster(params: CloudHSMV2.Types.DeleteClusterRequest, callback?: (err: AWSError, data: CloudHSMV2.Types.DeleteClusterResponse) => void): Request<CloudHSMV2.Types.DeleteClusterResponse, AWSError>; /** * Deletes the specified AWS CloudHSM cluster. Before you can delete a cluster, you must delete all HSMs in the cluster. To see if the cluster contains any HSMs, use DescribeClusters. To delete an HSM, use DeleteHsm. */ deleteCluster(callback?: (err: AWSError, data: CloudHSMV2.Types.DeleteClusterResponse) => void): Request<CloudHSMV2.Types.DeleteClusterResponse, AWSError>; /** * Deletes the specified HSM. To specify an HSM, you can use its identifier (ID), the IP address of the HSM's elastic network interface (ENI), or the ID of the HSM's ENI. You need to specify only one of these values. To find these values, use DescribeClusters. */ deleteHsm(params: CloudHSMV2.Types.DeleteHsmRequest, callback?: (err: AWSError, data: CloudHSMV2.Types.DeleteHsmResponse) => void): Request<CloudHSMV2.Types.DeleteHsmResponse, AWSError>; /** * Deletes the specified HSM. To specify an HSM, you can use its identifier (ID), the IP address of the HSM's elastic network interface (ENI), or the ID of the HSM's ENI. You need to specify only one of these values. To find these values, use DescribeClusters. */ deleteHsm(callback?: (err: AWSError, data: CloudHSMV2.Types.DeleteHsmResponse) => void): Request<CloudHSMV2.Types.DeleteHsmResponse, AWSError>; /** * Gets information about backups of AWS CloudHSM clusters. This is a paginated operation, which means that each response might contain only a subset of all the backups. When the response contains only a subset of backups, it includes a NextToken value. Use this value in a subsequent DescribeBackups request to get more backups. When you receive a response with no NextToken (or an empty or null value), that means there are no more backups to get. */ describeBackups(params: CloudHSMV2.Types.DescribeBackupsRequest, callback?: (err: AWSError, data: CloudHSMV2.Types.DescribeBackupsResponse) => void): Request<CloudHSMV2.Types.DescribeBackupsResponse, AWSError>; /** * Gets information about backups of AWS CloudHSM clusters. This is a paginated operation, which means that each response might contain only a subset of all the backups. When the response contains only a subset of backups, it includes a NextToken value. Use this value in a subsequent DescribeBackups request to get more backups. When you receive a response with no NextToken (or an empty or null value), that means there are no more backups to get. */ describeBackups(callback?: (err: AWSError, data: CloudHSMV2.Types.DescribeBackupsResponse) => void): Request<CloudHSMV2.Types.DescribeBackupsResponse, AWSError>; /** * Gets information about AWS CloudHSM clusters. This is a paginated operation, which means that each response might contain only a subset of all the clusters. When the response contains only a subset of clusters, it includes a NextToken value. Use this value in a subsequent DescribeClusters request to get more clusters. When you receive a response with no NextToken (or an empty or null value), that means there are no more clusters to get. 
*/ describeClusters(params: CloudHSMV2.Types.DescribeClustersRequest, callback?: (err: AWSError, data: CloudHSMV2.Types.DescribeClustersResponse) => void): Request<CloudHSMV2.Types.DescribeClustersResponse, AWSError>; /** * Gets information about AWS CloudHSM clusters. This is a paginated operation, which means that each response might contain only a subset of all the clusters. When the response contains only a subset of clusters, it includes a NextToken value. Use this value in a subsequent DescribeClusters request to get more clusters. When you receive a response with no NextToken (or an empty or null value), that means there are no more clusters to get. */ describeClusters(callback?: (err: AWSError, data: CloudHSMV2.Types.DescribeClustersResponse) => void): Request<CloudHSMV2.Types.DescribeClustersResponse, AWSError>; /** * Claims an AWS CloudHSM cluster by submitting the cluster certificate issued by your issuing certificate authority (CA) and the CA's root certificate. Before you can claim a cluster, you must sign the cluster's certificate signing request (CSR) with your issuing CA. To get the cluster's CSR, use DescribeClusters. */ initializeCluster(params: CloudHSMV2.Types.InitializeClusterRequest, callback?: (err: AWSError, data: CloudHSMV2.Types.InitializeClusterResponse) => void): Request<CloudHSMV2.Types.InitializeClusterResponse, AWSError>; /** * Claims an AWS CloudHSM cluster by submitting the cluster certificate issued by your issuing certificate authority (CA) and the CA's root certificate. Before you can claim a cluster, you must sign the cluster's certificate signing request (CSR) with your issuing CA. To get the cluster's CSR, use DescribeClusters. */ initializeCluster(callback?: (err: AWSError, data: CloudHSMV2.Types.InitializeClusterResponse) => void): Request<CloudHSMV2.Types.InitializeClusterResponse, AWSError>; /** * Gets a list of tags for the specified AWS CloudHSM cluster. This is a paginated operation, which means that each response might contain only a subset of all the tags. When the response contains only a subset of tags, it includes a NextToken value. Use this value in a subsequent ListTags request to get more tags. When you receive a response with no NextToken (or an empty or null value), that means there are no more tags to get. */ listTags(params: CloudHSMV2.Types.ListTagsRequest, callback?: (err: AWSError, data: CloudHSMV2.Types.ListTagsResponse) => void): Request<CloudHSMV2.Types.ListTagsResponse, AWSError>; /** * Gets a list of tags for the specified AWS CloudHSM cluster. This is a paginated operation, which means that each response might contain only a subset of all the tags. When the response contains only a subset of tags, it includes a NextToken value. Use this value in a subsequent ListTags request to get more tags. When you receive a response with no NextToken (or an empty or null value), that means there are no more tags to get. */ listTags(callback?: (err: AWSError, data: CloudHSMV2.Types.ListTagsResponse) => void): Request<CloudHSMV2.Types.ListTagsResponse, AWSError>; /** * Restores a specified AWS CloudHSM backup that is in the PENDING_DELETION state. For more information on deleting a backup, see DeleteBackup. */ restoreBackup(params: CloudHSMV2.Types.RestoreBackupRequest, callback?: (err: AWSError, data: CloudHSMV2.Types.RestoreBackupResponse) => void): Request<CloudHSMV2.Types.RestoreBackupResponse, AWSError>; /** * Restores a specified AWS CloudHSM backup that is in the PENDING_DELETION state.
For more information on deleting a backup, see DeleteBackup. */ restoreBackup(callback?: (err: AWSError, data: CloudHSMV2.Types.RestoreBackupResponse) => void): Request<CloudHSMV2.Types.RestoreBackupResponse, AWSError>; /** * Adds or overwrites one or more tags for the specified AWS CloudHSM cluster. */ tagResource(params: CloudHSMV2.Types.TagResourceRequest, callback?: (err: AWSError, data: CloudHSMV2.Types.TagResourceResponse) => void): Request<CloudHSMV2.Types.TagResourceResponse, AWSError>; /** * Adds or overwrites one or more tags for the specified AWS CloudHSM cluster. */ tagResource(callback?: (err: AWSError, data: CloudHSMV2.Types.TagResourceResponse) => void): Request<CloudHSMV2.Types.TagResourceResponse, AWSError>; /** * Removes the specified tag or tags from the specified AWS CloudHSM cluster. */ untagResource(params: CloudHSMV2.Types.UntagResourceRequest, callback?: (err: AWSError, data: CloudHSMV2.Types.UntagResourceResponse) => void): Request<CloudHSMV2.Types.UntagResourceResponse, AWSError>; /** * Removes the specified tag or tags from the specified AWS CloudHSM cluster. */ untagResource(callback?: (err: AWSError, data: CloudHSMV2.Types.UntagResourceResponse) => void): Request<CloudHSMV2.Types.UntagResourceResponse, AWSError>; } declare namespace CloudHSMV2 { export interface Backup { /** * The identifier (ID) of the backup. */ BackupId: BackupId; /** * The state of the backup. */ BackupState?: BackupState; /** * The identifier (ID) of the cluster that was backed up. */ ClusterId?: ClusterId; /** * The date and time when the backup was created. */ CreateTimestamp?: Timestamp; /** * The date and time when the backup was copied from a source backup. */ CopyTimestamp?: Timestamp; /** * The AWS Region that contains the source backup from which the new backup was copied. */ SourceRegion?: Region; /** * The identifier (ID) of the source backup from which the new backup was copied. */ SourceBackup?: BackupId; /** * The identifier (ID) of the cluster containing the source backup from which the new backup was copied. */ SourceCluster?: ClusterId; /** * The date and time when the backup will be permanently deleted. */ DeleteTimestamp?: Timestamp; /** * The list of tags for the backup. */ TagList?: TagList; } export type BackupId = string; export type BackupPolicy = "DEFAULT"|string; export type BackupState = "CREATE_IN_PROGRESS"|"READY"|"DELETED"|"PENDING_DELETION"|string; export type Backups = Backup[]; export type Boolean = boolean; export type Cert = string; export interface Certificates { /** * The cluster's certificate signing request (CSR). The CSR exists only when the cluster's state is UNINITIALIZED. */ ClusterCsr?: Cert; /** * The HSM certificate issued (signed) by the HSM hardware. */ HsmCertificate?: Cert; /** * The HSM hardware certificate issued (signed) by AWS CloudHSM. */ AwsHardwareCertificate?: Cert; /** * The HSM hardware certificate issued (signed) by the hardware manufacturer. */ ManufacturerHardwareCertificate?: Cert; /** * The cluster certificate issued (signed) by the issuing certificate authority (CA) of the cluster's owner. */ ClusterCertificate?: Cert; } export interface Cluster { /** * The cluster's backup policy. */ BackupPolicy?: BackupPolicy; /** * The cluster's identifier (ID). */ ClusterId?: ClusterId; /** * The date and time when the cluster was created. */ CreateTimestamp?: Timestamp; /** * Contains information about the HSMs in the cluster. */ Hsms?: Hsms; /** * The type of HSM that the cluster contains.
*/ HsmType?: HsmType; /** * The default password for the cluster's Pre-Crypto Officer (PRECO) user. */ PreCoPassword?: PreCoPassword; /** * The identifier (ID) of the cluster's security group. */ SecurityGroup?: SecurityGroup; /** * The identifier (ID) of the backup used to create the cluster. This value exists only when the cluster was created from a backup. */ SourceBackupId?: BackupId; /** * The cluster's state. */ State?: ClusterState; /** * A description of the cluster's state. */ StateMessage?: StateMessage; /** * A map from availability zone to the cluster’s subnet in that availability zone. */ SubnetMapping?: ExternalSubnetMapping; /** * The identifier (ID) of the virtual private cloud (VPC) that contains the cluster. */ VpcId?: VpcId; /** * Contains one or more certificates or a certificate signing request (CSR). */ Certificates?: Certificates; /** * The list of tags for the cluster. */ TagList?: TagList; } export type ClusterId = string; export type ClusterState = "CREATE_IN_PROGRESS"|"UNINITIALIZED"|"INITIALIZE_IN_PROGRESS"|"INITIALIZED"|"ACTIVE"|"UPDATE_IN_PROGRESS"|"DELETE_IN_PROGRESS"|"DELETED"|"DEGRADED"|string; export type Clusters = Cluster[]; export interface CopyBackupToRegionRequest { /** * The AWS region that will contain your copied CloudHSM cluster backup. */ DestinationRegion: Region; /** * The ID of the backup that will be copied to the destination region. */ BackupId: BackupId; /** * Tags to apply to the destination backup during creation. If you specify tags, only these tags will be applied to the destination backup. If you do not specify tags, the service copies tags from the source backup to the destination backup. */ TagList?: TagList; } export interface CopyBackupToRegionResponse { /** * Information on the backup that will be copied to the destination region, including CreateTimestamp, SourceBackup, SourceCluster, and Source Region. CreateTimestamp of the destination backup will be the same as that of the source backup. You will need to use the sourceBackupID returned in this operation to use the DescribeBackups operation on the backup that will be copied to the destination region. */ DestinationBackup?: DestinationBackup; } export interface CreateClusterRequest { /** * The identifiers (IDs) of the subnets where you are creating the cluster. You must specify at least one subnet. If you specify multiple subnets, they must meet the following criteria: All subnets must be in the same virtual private cloud (VPC). You can specify only one subnet per Availability Zone. */ SubnetIds: SubnetIds; /** * The type of HSM to use in the cluster. Currently the only allowed value is hsm1.medium. */ HsmType: HsmType; /** * The identifier (ID) of the cluster backup to restore. Use this value to restore the cluster from a backup instead of creating a new cluster. To find the backup ID, use DescribeBackups. */ SourceBackupId?: BackupId; /** * Tags to apply to the CloudHSM cluster during creation. */ TagList?: TagList; } export interface CreateClusterResponse { /** * Information about the cluster that was created. */ Cluster?: Cluster; } export interface CreateHsmRequest { /** * The identifier (ID) of the HSM's cluster. To find the cluster ID, use DescribeClusters. */ ClusterId: ClusterId; /** * The Availability Zone where you are creating the HSM. To find the cluster's Availability Zones, use DescribeClusters. */ AvailabilityZone: ExternalAz; /** * The HSM's IP address. 
If you specify an IP address, use an available address from the subnet that maps to the Availability Zone where you are creating the HSM. If you don't specify an IP address, one is chosen for you from that subnet. */ IpAddress?: IpAddress; } export interface CreateHsmResponse { /** * Information about the HSM that was created. */ Hsm?: Hsm; } export interface DeleteBackupRequest { /** * The ID of the backup to be deleted. To find the ID of a backup, use the DescribeBackups operation. */ BackupId: BackupId; } export interface DeleteBackupResponse { /** * Information on the Backup object deleted. */ Backup?: Backup; } export interface DeleteClusterRequest { /** * The identifier (ID) of the cluster that you are deleting. To find the cluster ID, use DescribeClusters. */ ClusterId: ClusterId; } export interface DeleteClusterResponse { /** * Information about the cluster that was deleted. */ Cluster?: Cluster; } export interface DeleteHsmRequest { /** * The identifier (ID) of the cluster that contains the HSM that you are deleting. */ ClusterId: ClusterId; /** * The identifier (ID) of the HSM that you are deleting. */ HsmId?: HsmId; /** * The identifier (ID) of the elastic network interface (ENI) of the HSM that you are deleting. */ EniId?: EniId; /** * The IP address of the elastic network interface (ENI) of the HSM that you are deleting. */ EniIp?: IpAddress; } export interface DeleteHsmResponse { /** * The identifier (ID) of the HSM that was deleted. */ HsmId?: HsmId; } export interface DescribeBackupsRequest { /** * The NextToken value that you received in the previous response. Use this value to get more backups. */ NextToken?: NextToken; /** * The maximum number of backups to return in the response. When there are more backups than the number you specify, the response contains a NextToken value. */ MaxResults?: MaxSize; /** * One or more filters to limit the items returned in the response. Use the backupIds filter to return only the specified backups. Specify backups by their backup identifier (ID). Use the sourceBackupIds filter to return only the backups created from a source backup. The sourceBackupID of a source backup is returned by the CopyBackupToRegion operation. Use the clusterIds filter to return only the backups for the specified clusters. Specify clusters by their cluster identifier (ID). Use the states filter to return only backups that match the specified state. */ Filters?: Filters; /** * Designates whether or not to sort the returned backups in ascending chronological order of generation. */ SortAscending?: Boolean; } export interface DescribeBackupsResponse { /** * A list of backups. */ Backups?: Backups; /** * An opaque string that indicates that the response contains only a subset of backups. Use this value in a subsequent DescribeBackups request to get more backups. */ NextToken?: NextToken; } export interface DescribeClustersRequest { /** * One or more filters to limit the items returned in the response. Use the clusterIds filter to return only the specified clusters. Specify clusters by their cluster identifier (ID). Use the vpcIds filter to return only the clusters in the specified virtual private clouds (VPCs). Specify VPCs by their VPC identifier (ID). Use the states filter to return only clusters that match the specified state. */ Filters?: Filters; /** * The NextToken value that you received in the previous response. Use this value to get more clusters. */ NextToken?: NextToken; /** * The maximum number of clusters to return in the response.
When there are more clusters than the number you specify, the response contains a NextToken value. */ MaxResults?: MaxSize; } export interface DescribeClustersResponse { /** * A list of clusters. */ Clusters?: Clusters; /** * An opaque string that indicates that the response contains only a subset of clusters. Use this value in a subsequent DescribeClusters request to get more clusters. */ NextToken?: NextToken; } export interface DestinationBackup { /** * The date and time when the source backup was created. */ CreateTimestamp?: Timestamp; /** * The AWS region that contains the source backup from which the new backup was copied. */ SourceRegion?: Region; /** * The identifier (ID) of the source backup from which the new backup was copied. */ SourceBackup?: BackupId; /** * The identifier (ID) of the cluster containing the source backup from which the new backup was copied. */ SourceCluster?: ClusterId; } export type EniId = string; export type ExternalAz = string; export type ExternalSubnetMapping = {[key: string]: SubnetId}; export type Field = string; export type Filters = {[key: string]: Strings}; export interface Hsm { /** * The Availability Zone that contains the HSM. */ AvailabilityZone?: ExternalAz; /** * The identifier (ID) of the cluster that contains the HSM. */ ClusterId?: ClusterId; /** * The subnet that contains the HSM's elastic network interface (ENI). */ SubnetId?: SubnetId; /** * The identifier (ID) of the HSM's elastic network interface (ENI). */ EniId?: EniId; /** * The IP address of the HSM's elastic network interface (ENI). */ EniIp?: IpAddress; /** * The HSM's identifier (ID). */ HsmId: HsmId; /** * The HSM's state. */ State?: HsmState; /** * A description of the HSM's state. */ StateMessage?: String; } export type HsmId = string; export type HsmState = "CREATE_IN_PROGRESS"|"ACTIVE"|"DEGRADED"|"DELETE_IN_PROGRESS"|"DELETED"|string; export type HsmType = string; export type Hsms = Hsm[]; export interface InitializeClusterRequest { /** * The identifier (ID) of the cluster that you are claiming. To find the cluster ID, use DescribeClusters. */ ClusterId: ClusterId; /** * The cluster certificate issued (signed) by your issuing certificate authority (CA). The certificate must be in PEM format and can contain a maximum of 5000 characters. */ SignedCert: Cert; /** * The issuing certificate of the issuing certificate authority (CA) that issued (signed) the cluster certificate. You must use a self-signed certificate. The certificate used to sign the HSM CSR must be directly available, and thus must be the root certificate. The certificate must be in PEM format and can contain a maximum of 5000 characters. */ TrustAnchor: Cert; } export interface InitializeClusterResponse { /** * The cluster's state. */ State?: ClusterState; /** * A description of the cluster's state. */ StateMessage?: StateMessage; } export type IpAddress = string; export interface ListTagsRequest { /** * The cluster identifier (ID) for the cluster whose tags you are getting. To find the cluster ID, use DescribeClusters. */ ResourceId: ResourceId; /** * The NextToken value that you received in the previous response. Use this value to get more tags. */ NextToken?: NextToken; /** * The maximum number of tags to return in the response. When there are more tags than the number you specify, the response contains a NextToken value. */ MaxResults?: MaxSize; } export interface ListTagsResponse { /** * A list of tags.
*/ TagList: TagList; /** * An opaque string that indicates that the response contains only a subset of tags. Use this value in a subsequent ListTags request to get more tags. */ NextToken?: NextToken; } export type MaxSize = number; export type NextToken = string; export type PreCoPassword = string; export type Region = string; export type ResourceId = string; export interface RestoreBackupRequest { /** * The ID of the backup to be restored. To find the ID of a backup, use the DescribeBackups operation. */ BackupId: BackupId; } export interface RestoreBackupResponse { /** * Information on the Backup object created. */ Backup?: Backup; } export type SecurityGroup = string; export type StateMessage = string; export type String = string; export type Strings = String[]; export type SubnetId = string; export type SubnetIds = SubnetId[]; export interface Tag { /** * The key of the tag. */ Key: TagKey; /** * The value of the tag. */ Value: TagValue; } export type TagKey = string; export type TagKeyList = TagKey[]; export type TagList = Tag[]; export interface TagResourceRequest { /** * The cluster identifier (ID) for the cluster that you are tagging. To find the cluster ID, use DescribeClusters. */ ResourceId: ResourceId; /** * A list of one or more tags. */ TagList: TagList; } export interface TagResourceResponse { } export type TagValue = string; export type Timestamp = Date; export interface UntagResourceRequest { /** * The cluster identifier (ID) for the cluster whose tags you are removing. To find the cluster ID, use DescribeClusters. */ ResourceId: ResourceId; /** * A list of one or more tag keys for the tags that you are removing. Specify only the tag keys, not the tag values. */ TagKeyList: TagKeyList; } export interface UntagResourceResponse { } export type VpcId = string; /** * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version. */ export type apiVersion = "2017-04-28"|"latest"|string; export interface ClientApiVersions { /** * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version. */ apiVersion?: apiVersion; } export type ClientConfiguration = ServiceConfigurationOptions & ClientApiVersions; /** * Contains interfaces for use with the CloudHSMV2 client. */ export import Types = CloudHSMV2; } export = CloudHSMV2;
* Deletes a specified AWS CloudHSM backup. A backup can be restored up to 7 days after the DeleteBackup request is made. For more information on restoring a backup, see RestoreBackup. */ deleteBackup(callback?: (err: AWSError, data: CloudHSMV2.Types.DeleteBackupResponse) => void): Request<CloudHSMV2.Types.DeleteBackupResponse, AWSError>;
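The DescribeBackups, DescribeClusters, and ListTags doc comments above all describe the same NextToken pagination contract. Below is a minimal Go sketch of the consuming side of that contract; the request/response shapes and client interface are hypothetical stand-ins, not the aws-sdk-js API shown above, and only the token round-tripping mirrors the documented behaviour.

package main

import "fmt"

// Hypothetical stand-ins for the SDK request/response shapes.
type describeBackupsInput struct {
	NextToken  string // empty on the first call
	MaxResults int
}

type describeBackupsOutput struct {
	Backups   []string
	NextToken string // empty when there are no more pages
}

type backupsClient interface {
	DescribeBackups(in describeBackupsInput) (describeBackupsOutput, error)
}

// listAllBackups feeds each response's NextToken into the next request
// until the service stops returning a token.
func listAllBackups(c backupsClient) ([]string, error) {
	var all []string
	token := ""
	for {
		out, err := c.DescribeBackups(describeBackupsInput{NextToken: token, MaxResults: 2})
		if err != nil {
			return nil, err
		}
		all = append(all, out.Backups...)
		if out.NextToken == "" {
			return all, nil
		}
		token = out.NextToken
	}
}

// fakeClient pages through a fixed list, MaxResults items at a time,
// encoding the next start index as the token.
type fakeClient struct{ items []string }

func (f fakeClient) DescribeBackups(in describeBackupsInput) (describeBackupsOutput, error) {
	start := 0
	if in.NextToken != "" {
		fmt.Sscanf(in.NextToken, "%d", &start)
	}
	end := start + in.MaxResults
	if end >= len(f.items) {
		return describeBackupsOutput{Backups: f.items[start:]}, nil
	}
	return describeBackupsOutput{Backups: f.items[start:end], NextToken: fmt.Sprint(end)}, nil
}

func main() {
	all, _ := listAllBackups(fakeClient{items: []string{"b1", "b2", "b3", "b4", "b5"}})
	fmt.Println(all) // [b1 b2 b3 b4 b5]
}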
multiInput.go
package sc // MultiInput is the interface of an input that causes // cascading multi-channel expansion. // See http://doc.sccode.org/Guides/Multichannel-Expansion.html type MultiInput interface { Input InputArray() []Input } // Inputs is a slice of Input. type Inputs []Input // Add adds an input to all the inputs. func (ins Inputs) Add(val Input) Input { ia := make([]Input, len(ins)) for i, in := range ins {
} // InputArray provides access to the list of inputs. func (ins Inputs) InputArray() []Input { return ins } // Max returns Inputs that contain the max of all the inputs and the provided Input. func (ins Inputs) Max(other Input) Input { im := make([]Input, len(ins)) for i, in := range ins { im[i] = in.Max(other) } return Inputs(im) } // Midicps converts MIDI note number to cycles per second. func (ins Inputs) Midicps() Input { converted := make([]Input, len(ins)) for i, in := range ins { converted[i] = in.Midicps() } return Inputs(converted) } // Mul multiplies all the inputs by another input. func (ins Inputs) Mul(val Input) Input { ia := make([]Input, len(ins)) for i, in := range ins { ia[i] = in.Mul(val) } return Inputs(ia) } // MulAdd performs a multiplication and addition on all the inputs. func (ins Inputs) MulAdd(mul, add Input) Input { ia := make([]Input, len(ins)) for i, in := range ins { ia[i] = in.MulAdd(mul, add) } return Inputs(ia) } // Neg is a convenience operator that multiplies a signal by -1. func (ins Inputs) Neg() Input { converted := make([]Input, len(ins)) for i, in := range ins { converted[i] = in.Neg() } return Inputs(converted) } // SoftClip adds distortion to the inputs. func (ins Inputs) SoftClip() Input { clipped := make([]Input, len(ins)) for i, in := range ins { clipped[i] = in.SoftClip() } return Inputs(clipped) } // Multi does multichannel expansion. // See http://doc.sccode.org/Guides/Multichannel-Expansion.html. func Multi(inputs ...Input) Inputs { return Inputs(inputs) }
ia[i] = in.Add(val) } return Inputs(ia)
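Inputs implements multichannel expansion as a mechanical fan-out: every operation maps element-wise over the channels and yields another multi-channel value. Here is a minimal standalone Go sketch of that pattern with a toy Input interface; it is a stand-in for the real sc package, and for brevity the constant type only knows how to multiply by another constant.

package main

import "fmt"

type Input interface {
	Mul(val Input) Input
}

// Const is a single-channel constant input.
type Const float64

// Mul only supports Const multipliers in this sketch; the real sc.Input
// dispatches on the concrete type of val.
func (c Const) Mul(val Input) Input { return Const(float64(c) * float64(val.(Const))) }

// Multi is a multi-channel input: operations cascade to every channel.
type Multi []Input

func (m Multi) Mul(val Input) Input {
	out := make(Multi, len(m))
	for i, in := range m {
		out[i] = in.Mul(val) // element-wise expansion, as in Inputs.Mul
	}
	return out
}

func main() {
	freqs := Multi{Const(220), Const(440), Const(880)}
	fmt.Println(freqs.Mul(Const(2))) // [440 880 1760]
}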
tasks.component.js
"use strict"; var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __metadata = (this && this.__metadata) || function (k, v) { if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v); }; Object.defineProperty(exports, "__esModule", { value: true }); var core_1 = require("@angular/core"); var task_icons_component_1 = require("./task-icons.component"); var task_tooltip_directive_1 = require("./task-tooltip.directive"); var shared_1 = require("../shared/shared"); var TasksComponent = (function () { function TasksComponent(tasksService, settingsService) { this.tasksService = tasksService; this.settingsService = settingsService; this.tasks = []; this.tasks = this.tasksService.taskStore; this.today = new Date(); this.queueHeaderMapping = settingsService.pluralsMap.tasks; this.timerMinutes = settingsService.timerMinutes; } TasksComponent.prototype.ngOnInit = function () { this.updateQueuedTomatoes(); }; TasksComponent.prototype.toggleTask = function (task) { task.queued = !task.queued; this.updateQueuedTomatoes();
this.queuedTomatoes = this.tasks .filter(function (task) { return task.queued; }) .reduce(function (tomatoes, queuedTask) { return tomatoes + queuedTask.tomatoRequired; }, 0); }; return TasksComponent; }()); TasksComponent = __decorate([ core_1.Component({ selector: 'tomato-tasks', entryComponents: [task_icons_component_1.default, task_tooltip_directive_1.default, shared_1.SHARED_PIPES], styleUrls: ['/app/tasks/tasks.component.css'], templateUrl: '/app/tasks/tasks.component.html' }), __metadata("design:paramtypes", [shared_1.TaskService, shared_1.SettingsService]) ], TasksComponent); exports.default = TasksComponent;
}; TasksComponent.prototype.updateQueuedTomatoes = function () {
utils.go
package consumergroup import ( "crypto/rand" "fmt" "io" "os" "time" "github.com/Shopify/sarama" ) var ( clientConfig = sarama.ClientConfig{MetadataRetries: 5, WaitForElection: 250 * time.Millisecond} consumerConfig = sarama.ConsumerConfig{MaxWaitTime: 500000, DefaultFetchSize: 256 * 1024, MinFetchSize: 1024, OffsetMethod: sarama.OffsetMethodOldest} ) // COMMON TYPES // Partition information type partitionLeader struct { id int32 leader string } // A sortable slice of Partition structs, ordered by leader and then by partition ID. type partitionSlice []partitionLeader func (s partitionSlice) Len() int { return len(s) } func (s partitionSlice) Less(i, j int) bool { if s[i].leader != s[j].leader { return s[i].leader < s[j].leader } return s[i].id < s[j].id } func (s partitionSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // A subscribable notification type Notification struct { Type uint8 Src *ConsumerGroup Err error } func generateUUID() (string, error) { uuid := make([]byte, 16) n, err := io.ReadFull(rand.Reader, uuid) if n != len(uuid) || err != nil { return "", err } // variant bits; see section 4.1.1 uuid[8] = uuid[8]&^0xc0 | 0x80 // version 4 (pseudo-random); see section 4.1.3 uuid[6] = uuid[6]&^0xf0 | 0x40 return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]), nil } func generateConsumerID() (consumerID string, err error) { var uuid, hostname string uuid, err = generateUUID() if err != nil
hostname, err = os.Hostname() if err != nil { return } consumerID = fmt.Sprintf("%s:%s", hostname, uuid) return }
{ return }
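generateUUID above builds a version 4 UUID by hand: 16 random bytes, with the RFC 4122 variant bits forced into byte 8 and the version nibble into byte 6, formatted as 8-4-4-4-12 hex groups. A self-contained, runnable restatement of that construction:

package main

import (
	"crypto/rand"
	"fmt"
	"io"
)

func newUUIDv4() (string, error) {
	b := make([]byte, 16)
	if _, err := io.ReadFull(rand.Reader, b); err != nil {
		return "", err
	}
	b[8] = b[8]&^0xc0 | 0x80 // variant: the two MSBs of byte 8 become 10
	b[6] = b[6]&^0xf0 | 0x40 // version: the high nibble of byte 6 becomes 0100 (v4)
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]), nil
}

func main() {
	id, err := newUUIDv4()
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // e.g. 3b241101-e2bb-4255-8caf-4136c566a962
}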
wasmi_impl.rs
// This file is part of Substrate. // Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Implementation of conversions between Substrate and wasmi types. use crate::{Signature, Value, ValueType}; impl From<Value> for wasmi::RuntimeValue { fn from(value: Value) -> Self { match value { Value::I32(val) => Self::I32(val), Value::I64(val) => Self::I64(val), Value::F32(val) => Self::F32(val.into()), Value::F64(val) => Self::F64(val.into()), } } } impl From<wasmi::RuntimeValue> for Value { fn from(value: wasmi::RuntimeValue) -> Self { match value { wasmi::RuntimeValue::I32(val) => Self::I32(val), wasmi::RuntimeValue::I64(val) => Self::I64(val), wasmi::RuntimeValue::F32(val) => Self::F32(val.into()), wasmi::RuntimeValue::F64(val) => Self::F64(val.into()), } } } impl From<ValueType> for wasmi::ValueType { fn from(value: ValueType) -> Self
} impl From<wasmi::ValueType> for ValueType { fn from(value: wasmi::ValueType) -> Self { match value { wasmi::ValueType::I32 => Self::I32, wasmi::ValueType::I64 => Self::I64, wasmi::ValueType::F32 => Self::F32, wasmi::ValueType::F64 => Self::F64, } } } impl From<Signature> for wasmi::Signature { fn from(sig: Signature) -> Self { let args = sig.args.iter().map(|a| (*a).into()).collect::<Vec<_>>(); wasmi::Signature::new(args, sig.return_value.map(Into::into)) } } impl From<&wasmi::Signature> for Signature { fn from(sig: &wasmi::Signature) -> Self { Signature::new( sig.params().into_iter().copied().map(Into::into).collect::<Vec<_>>(), sig.return_type().map(Into::into), ) } }
{ match value { ValueType::I32 => Self::I32, ValueType::I64 => Self::I64, ValueType::F32 => Self::F32, ValueType::F64 => Self::F64, } }
dfarmsc_test.go
package main /* import ( "testing" "github.com/hyperledger/fabric/core/chaincode/shim" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) var ( stub *shim.MockStub status200 int32 ) func TestGoproducesc(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Goproducesc Suite") } var _ = Describe("Unit Test Produce BlockChain Smart Contract", func() { stub = shim.NewMockStub("Testing Produce Blockchain", new(Dfarmsc)) BeforeSuite(func() { receivedStatus := stub.MockInit("000", nil).Status status200 = int32(200) Expect(receivedStatus).Should(Equal(status200)) PayableAsset := PaymentInvoiceAsset{} PayableAsset.BuyerOrderID = "001" PayableAsset.DocType = PAYMENT_INVOICE_RECORD_TYPE PayableAsset.ProduceID = "Pro001" PayableAsset.Variety = "Gala" PayableAsset.PaymentInvoiceID = "Inv001" PayableAsset.Qty = 200 PayableAsset.Status = "Pending" var payableinfo []ParticipantPaymentInfo farmerPayableInfo := ParticipantPaymentInfo{"Far001", 200.25, "Ordfar001", "", "pending"} casPayableInfo := ParticipantPaymentInfo{"Cas001", 20.25, "Ordcas001", "", "pending"} pcPayableInfo := ParticipantPaymentInfo{"Pc001", 21.25, "Ordpc001", "", "pending"} dcPayableInfo := ParticipantPaymentInfo{"Dc001", 29.25, "Orddc001", "", "pending"} dFarmPayableInfo := ParticipantPaymentInfo{"002", 25.25, "", "", "pending"} dFarmTransportPayableInfo := ParticipantPaymentInfo{"001", 20.29, "", "", "pending"} payableinfo = append(payableinfo, farmerPayableInfo) payableinfo = append(payableinfo, casPayableInfo) payableinfo = append(payableinfo, pcPayableInfo) payableinfo = append(payableinfo, dcPayableInfo) payableinfo = append(payableinfo, dFarmTransportPayableInfo) payableinfo = append(payableinfo, dFarmPayableInfo) PayableAsset.ParticipantPaymentInfos = payableinfo data01, _ := PaymentInvoiceAssettoJson(PayableAsset) var argTocreatePayableasset [][]byte argTocreatePayableasset = [][]byte{[]byte("CreatePaymentInvoiceAsset"), []byte(data01)} payload := stub.MockInvoke("000", argTocreatePayableasset) logger.Infof("Response : %s", string(payload.Payload)) Expect(payload.Status).Should(Equal(status200)) PayableAsset2 := PaymentInvoiceAsset{} PayableAsset2.BuyerOrderID = "001" PayableAsset2.DocType = PAYMENT_INVOICE_RECORD_TYPE PayableAsset2.ProduceID = "Pro001" PayableAsset2.Variety = "Gala" PayableAsset2.PaymentInvoiceID = "Inv002" PayableAsset2.Qty = 400 PayableAsset2.Status = "Paid" var payableinfo2 []ParticipantPaymentInfo farmerPayableInfo2 := ParticipantPaymentInfo{"Far001", 200.25, "Ordfar001", "", "paid"} casPayableInfo2 := ParticipantPaymentInfo{"Cas001", 20.25, "Ordcas001", "", "paid"} pcPayableInfo2 := ParticipantPaymentInfo{"Pc001", 21.25, "Ordpc001", "", "paid"} dcPayableInfo2 := ParticipantPaymentInfo{"Dc001", 29.25, "Orddc001", "", "paid"} dFarmPayableInfo2 := ParticipantPaymentInfo{"002", 25.25, "", "", "paid"} dFarmTransportPayableInfo2 := ParticipantPaymentInfo{"001", 20.29, "", "", "paid"} payableinfo2 = append(payableinfo2, farmerPayableInfo2) payableinfo2 = append(payableinfo2, casPayableInfo2) payableinfo2 = append(payableinfo2, pcPayableInfo2) payableinfo2 = append(payableinfo2, dcPayableInfo2) payableinfo2 = append(payableinfo2, dFarmTransportPayableInfo2) payableinfo2 = append(payableinfo2, dFarmPayableInfo2)
data02, _ := PaymentInvoiceAssettoJson(PayableAsset2) argTocreatePayableasset = [][]byte{[]byte("CreatePaymentInvoiceAsset"), []byte(data02)} payload = stub.MockInvoke("000", argTocreatePayableasset) logger.Infof("Response : %s", string(payload.Payload)) Expect(payload.Status).Should(Equal(status200)) }) Describe("Checking the Smart contract Running status and Function Call Error", func() { Context("Checking SmartContract running Status ", func() { It("Run CheckStatus Function and see if result is 200", func() { argToStatuscheck := [][]byte{[]byte("StatusCheck")} payload := stub.MockInvoke("000", argToStatuscheck) logger.Infof("Response : %s", string(payload.Payload)) Expect(payload.Status).Should(Equal(status200)) }) }) Context("Checking wrong function name logic ", func() { It("Check if called function exists", func() { argToStatuscheck := [][]byte{[]byte("TestFunction")} payload := stub.MockInvoke("000", argToStatuscheck) logger.Infof("Response : %s", string(payload.Payload)) Expect(payload.Status).Should(Equal(int32(500))) }) }) }) }) */
PayableAsset2.ParticipantPaymentInfos = payableinfo2
types.rs
use std::path::Path; use clap::arg_enum; use generic_error::Result; use structopt::StructOpt; use crate::input::prompts::{PROMPT_BB_PROJECT_ALL, PROMPT_BB_SERVER, PROMPT_BB_USERNAME}; use crate::input::{get_bool, get_password, get_with_default, password_from_env}; use crate::util::bail; use dialoguer::Confirm; use std::time::Duration; #[derive(StructOpt, Debug, Clone)] #[structopt( name = "BitBucket Server Cli", about = "Clone a thousand repos, and keep em up to date, no problem." )] pub enum Opts { #[structopt(about = "Clone projects and users combined")] Clone(CloneOpts), #[structopt(about = "Clone projects")] CloneProjects(CloneOpts), #[structopt(about = "Clone users")] CloneUsers(CloneOpts), #[structopt(about = "Generate shell completions")] Completions, } #[derive(StructOpt, Debug, Clone)] pub struct CloneOpts { #[structopt( short = "B", long = "batch", name = "batch_mode", help = "Run terminal in batch mode, with no interactions." )] pub batch_mode: bool, #[structopt(flatten)] pub bitbucket_opts: BitBucketOpts, #[structopt(flatten)] pub git_opts: GitOpts, } #[derive(StructOpt, Clone, Debug)] pub struct BitBucketOpts { #[structopt( short = "s", long = "server", name = "bitbucket_server", help = "BitBucket server base url, http://example.bitbucket.mycompany.com" )] pub server: Option<String>, #[structopt( short = "u", long = "username", name = "bitbucket_username", help = "BitBucket username" )] pub username: Option<String>, #[structopt( short = "w", long = "password", name = "bitbucket_password", help = "BitBucket password" )] pub password: Option<String>, #[structopt( short = "b", long = "concurrent-http", name = "bitbucket_concurrency", help = "Number of concurrent http requests towards bitbucket. Keep it sane, keep bitbucket alive for all. Max=100", default_value = "20" )] pub concurrency: usize, #[structopt( short = "H",
name = "bitbucket_verbose", help = "Output full http response on failed bitbucket requests." )] pub verbose: bool, #[structopt( short = "W", long = "env-password", name = "bitbucket_password_from_env", help = "Try get password from env variable BITBUCKET_PASSWORD.\nTry it out without showing your password:\nIFS= read -rs BITBUCKET_PASSWORD < /dev/tty && export BITBUCKET_PASSWORD\n" )] pub password_from_env: bool, #[structopt( long = "clone-type", name = "clone_type", possible_values = & CloneType::variants(), case_insensitive = true, default_value = "ssh" )] pub clone_type: CloneType, #[structopt( short = "k", long = "key", name = "git_project_keys", help = "BitBucket Project keys (applicable multiple times)" )] pub project_keys: Vec<String>, #[structopt( short = "A", long = "all", name = "bitbucket_all", help = "Clone all projects" )] pub all: bool, #[structopt( long = "http-timeout", help = "HTTP timout, 2min4sec6milli8micro3nano combine freely with or without abbreviations or spaces.", default_value = "2.5 sec", parse(try_from_str = parse_duration::parse), )] pub timeout: Duration, #[structopt( long, help = "Retries to attempt requesting on timeout from bitbucket.", default_value = "2" )] pub retries: u32, #[structopt( long = "http-backoff", help = "Linear backoff time per failed request.\nie. 10 timed out requests and backoff=10ms -> 100ms backoff on next timed out request\nor {prior_timeouts}*{backoff}={delay_on_next_timeout}", parse(try_from_str = parse_duration::parse), )] pub backoff: Option<Duration>, } #[derive(StructOpt, Clone, Debug)] pub struct GitOpts { #[structopt( short = "R", long = "reset", name = "git_reset_state", help = "Reset repos before updating, \ and switch to master branch" )] pub reset_state: bool, #[structopt( short = "g", long = "concurrent-git", name = "git_concurrency", help = "Number of concurrent git actions. Bitbucket might have a limited number of threads reserved for serving git requests - if you drive this value to high you might block your CI, colleagues or even crash bitbucket. Max=100", default_value = "5" )] pub concurrency: usize, #[structopt( short = "Q", long = "git-quiet", name = "git_quiet", help = "Suppress warnings from failed git actions." )] pub quiet: bool, #[structopt( long = "output-directory", help = "Suppress warnings from failed git actions.", default_value = "." )] pub output_directory: String, } arg_enum! 
{ #[derive(Clone, Debug)] pub enum CloneType { SSH, HTTP, HttpSavedLogin, } } impl CloneOpts { pub fn validate(&mut self) -> Result<()> { if self.interactive() { self.bitbucket_opts.server = match self.bitbucket_opts.server.clone() { None => get_with_default(&PROMPT_BB_SERVER, None, false), Some(s) => Some(s), }; self.bitbucket_opts.username = match self.bitbucket_opts.username.clone() { None => get_with_default(&PROMPT_BB_USERNAME, None, true), Some(s) => Some(s), }; self.bitbucket_opts.password = match self.bitbucket_opts.username { None => None, Some(_) if self.bitbucket_opts.password_from_env => None, Some(_) if self.bitbucket_opts.password.is_none() => get_password(), _ => None, }; self.bitbucket_opts.all = self.bitbucket_opts.all || (self.bitbucket_opts.project_keys().is_empty() && get_bool(&PROMPT_BB_PROJECT_ALL, false)); } self.do_create_output_dir()?; if self.bitbucket_opts.server.is_none() { bail("Server is required")?; } else if !self.bitbucket_opts.all && self.bitbucket_opts.project_keys().is_empty() && self.batch_mode { bail("project selection is required (all or keys)")?; } else if self.git_opts.concurrency > 100 || self.bitbucket_opts.concurrency > 100 { bail("Max concurrent actions = 100")?; } else if !Path::new(&self.git_opts.output_directory).exists() { bail("output_directory is not accessible, does it exist?")?; } if self.bitbucket_opts.password_from_env { match password_from_env() { Ok(pass) => self.bitbucket_opts.password = Some(pass), Err(e) => bail(&format!("Failed getting env password. {}", e.msg))?, } } Ok(()) } fn do_create_output_dir(&self) -> Result<()> { if !Path::new(&self.git_opts.output_directory).exists() { if self.batch_mode { bail(&format!( "Output directory {} doesn't exist", &self.git_opts.output_directory ))?; } match Confirm::new() .with_prompt(&format!( "Output dir {} does not exist, want me to create it?", &self.git_opts.output_directory )) .default(false) .interact() { Ok(true) => match std::fs::create_dir_all(&self.git_opts.output_directory) { Ok(_) => {} _ => bail("Failed creating output directory.")?, }, Ok(false) => bail("Unable to proceed without an output directory")?, Err(e) => bail(&format!("{:?}", e))?, } } Ok(()) } pub fn interactive(&self) -> bool { !self.batch_mode } } impl BitBucketOpts { pub fn project_keys(&self) -> Vec<String> { self.project_keys .iter() .map(|key| key.to_lowercase()) .collect() } } #[cfg(test)] mod test { use super::*; #[test] fn test_multi_time_parse() { assert_eq!( parse_duration::parse("2.5s500ms").unwrap(), Duration::from_secs(3) ) } }
long = "http-verbose",
cloudmon_report_server.go
package component import ( "fmt" "math" "regexp" "strconv" "strings" batchv1 "k8s.io/api/batch/v1beta1" corev1 "k8s.io/api/core/v1" "yunion.io/x/onecloud-operator/pkg/apis/onecloud/v1alpha1" "yunion.io/x/onecloud-operator/pkg/controller" "yunion.io/x/onecloud-operator/pkg/manager" ) type cloudmonReportServerManager struct { *ComponentManager } func (m *cloudmonReportServerManager) Sync(oc *v1alpha1.OnecloudCluster) error { if !IsEnterpriseEdition(oc) { // TODO: DELETE cronjob return nil } return syncComponent(m, oc, oc.Spec.CloudmonReportServer.Disable) } func
(man *ComponentManager) manager.Manager {
	return &cloudmonReportServerManager{man}
}

func (m *cloudmonReportServerManager) getCronJob(
	oc *v1alpha1.OnecloudCluster,
	cfg *v1alpha1.OnecloudClusterConfig,
) (*batchv1.CronJob, error) {
	return m.newCronJob(v1alpha1.CloudmonReportServerComponentType, oc, cfg)
}

func (m *cloudmonReportServerManager) newCronJob(
	cType v1alpha1.ComponentType,
	oc *v1alpha1.OnecloudCluster,
	cfg *v1alpha1.OnecloudClusterConfig,
) (*batchv1.CronJob, error) {
	spec := &oc.Spec.CloudmonReportServer
	// Derive the cloudmon monitoring interval from the "/N" step of the cron
	// schedule, falling back to a 4-minute schedule when none is configured.
	reg, _ := regexp.Compile(`/\d+`)
	fin := reg.Find([]byte(spec.Schedule))
	if spec.Schedule == "" {
		spec.Schedule = "*/4 * * * *"
		fin = []byte("/4")
	}
	interval := strings.Split(string(fin), "/")[1]
	period, _ := strconv.ParseFloat(interval, 64)
	monitorInterval := strconv.FormatFloat(math.Ceil(period*v1alpha1.CronjobMonitorExpand), 'f', -1, 64)
	configMapType := v1alpha1.APIGatewayComponentType
	// One container per cloud provider; every container runs the same cloudmon
	// command and differs only in its name suffix and --provider argument.
	providers := []string{"Aliyun", "Huawei", "Qcloud", "Google", "Aws", "Azure", "VMware", "ZStack"}
	containersF := func(volMounts []corev1.VolumeMount) []corev1.Container {
		containers := make([]corev1.Container, 0, len(providers))
		for _, provider := range providers {
			containers = append(containers, corev1.Container{
				Name:  cType.String() + "-" + strings.ToLower(provider),
				Image: spec.Image,
				Command: []string{
					"/opt/yunion/bin/cloudmon", "--config",
					fmt.Sprintf("/etc/yunion/%s.conf", configMapType),
					"report-server", "--interval", monitorInterval,
					"--provider", provider,
				},
				ImagePullPolicy: oc.Spec.CloudmonReportServer.ImagePullPolicy,
				VolumeMounts:    volMounts,
			})
		}
		return containers
	}
	volhelper := NewVolumeHelper(oc, controller.ComponentConfigMapName(oc, configMapType), configMapType)
	return m.newDefaultCronJob(cType, oc, volhelper, *spec, nil, containersF)
}
newCloudmonReportServerManager
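newCronJob derives the cloudmon --interval value by pulling the "/N" step out of the cron schedule, scaling it by an expansion factor, and rounding up. Here is a standalone Go sketch of just that derivation; the 1.5 factor is an arbitrary example standing in for v1alpha1.CronjobMonitorExpand, and like the original it assumes the schedule is either empty or contains a "/N" step.

package main

import (
	"fmt"
	"math"
	"regexp"
	"strconv"
	"strings"
)

func monitorInterval(schedule string, expand float64) string {
	reg := regexp.MustCompile(`/\d+`)
	fin := reg.Find([]byte(schedule)) // e.g. "/4" out of "*/4 * * * *"
	if schedule == "" {
		fin = []byte("/4") // default schedule "*/4 * * * *"
	}
	interval := strings.Split(string(fin), "/")[1]
	period, _ := strconv.ParseFloat(interval, 64)
	return strconv.FormatFloat(math.Ceil(period*expand), 'f', -1, 64)
}

func main() {
	fmt.Println(monitorInterval("*/4 * * * *", 1.5)) // "6": ceil(4 * 1.5)
}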
get.go
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // TODO: Dashboard upload package main import ( "fmt" "go/build" "os" "path/filepath" "regexp" "runtime" "strconv" "strings" ) var cmdGet = &Command{ UsageLine: "get [-a] [-d] [-fix] [-n] [-p n] [-u] [-v] [-x] [packages]", Short: "download and install packages and dependencies", Long: ` Get downloads and installs the packages named by the import paths, along with their dependencies. The -a, -n, -v, -x, and -p flags have the same meaning as in 'go build' and 'go install'. See 'go help build'. The -d flag instructs get to stop after downloading the packages; that is, it instructs get not to install the packages. The -fix flag instructs get to run the fix tool on the downloaded packages before resolving dependencies or building the code. The -u flag instructs get to use the network to update the named packages and their dependencies. By default, get uses the network to check out missing packages but does not use it to look for updates to existing packages. When checking out or updating a package, get looks for a branch or tag that matches the locally installed version of Go. If the local version is "release.rNN", it searches for "go.rNN". (For an installation using Go version "weekly.YYYY-MM-DD", it searches for a package version labeled "go.YYYY-MM-DD".) If the desired version cannot be found but others exist with labels in the correct format, get retrieves the most recent version before the desired label. Finally, if all else fails it retrieves the most recent version of the package. For more about specifying packages, see 'go help packages'. For more about how 'go get' finds source code to download, see 'go help remote'. See also: go build, go install, go clean. `, } var getD = cmdGet.Flag.Bool("d", false, "") var getU = cmdGet.Flag.Bool("u", false, "") var getFix = cmdGet.Flag.Bool("fix", false, "") func init() { addBuildFlags(cmdGet) cmdGet.Run = runGet // break init loop } func runGet(cmd *Command, args []string) { // Phase 1. Download/update. var stk importStack for _, arg := range downloadPaths(args) { download(arg, &stk) } exitIfErrors() // Phase 2. Rescan packages and reevaluate args list. // Code we downloaded and all code that depends on it // needs to be evicted from the package cache so that // the information will be recomputed. Instead of keeping // track of the reverse dependency information, evict // everything. for name := range packageCache { delete(packageCache, name) } args = importPaths(args) // Phase 3. Install. if *getD { // Download only. // Check delayed until now so that importPaths // has a chance to print errors. return } runInstall(cmd, args) } // downloadPaths prepares the list of paths to pass to download. // It expands ... patterns that can be expanded. If there is no match // for a particular pattern, downloadPaths leaves it in the result list, // in the hope that we can figure out the repository from the // initial ...-free prefix. func downloadPaths(args []string) []string { args = importPathsNoDotExpansion(args) var out []string for _, a := range args { if strings.Contains(a, "...") { var expand []string // Use matchPackagesInFS to avoid printing // warnings. They will be printed by the // eventual call to importPaths instead. if build.IsLocalImport(a) { expand = matchPackagesInFS(a) } else { expand = matchPackages(a) } if len(expand) > 0 { out = append(out, expand...)
continue } } out = append(out, a) } return out } // downloadCache records the import paths we have already // considered during the download, to avoid duplicate work when // there is more than one dependency sequence leading to // a particular package. var downloadCache = map[string]bool{} // downloadRootCache records the version control repository // root directories we have already considered during the download. // For example, all the packages in the code.google.com/p/codesearch repo // share the same root (the directory for that path), and we only need // to run the hg commands to consider each repository once. var downloadRootCache = map[string]bool{} // download runs the download half of the get command // for the package named by the argument. func download(arg string, stk *importStack) { p := loadPackage(arg, stk) // There's nothing to do if this is a package in the standard library. if p.Standard { return } // Only process each package once. if downloadCache[arg] { return } downloadCache[arg] = true pkgs := []*Package{p} wildcardOkay := len(*stk) == 0 // Download if the package is missing, or update if we're using -u. if p.Dir == "" || *getU { // The actual download. stk.push(p.ImportPath) err := downloadPackage(p) if err != nil { errorf("%s", &PackageError{ImportStack: stk.copy(), Err: err.Error()}) stk.pop() return } args := []string{arg} // If the argument has a wildcard in it, re-evaluate the wildcard. // We delay this until after reloadPackage so that the old entry // for p has been replaced in the package cache. if wildcardOkay && strings.Contains(arg, "...") { if build.IsLocalImport(arg) { args = matchPackagesInFS(arg) } else { args = matchPackages(arg) } } // Clear all relevant package cache entries before // doing any new loads. for _, arg := range args { p := packageCache[arg] if p != nil { delete(packageCache, p.Dir) delete(packageCache, p.ImportPath) } } pkgs = pkgs[:0] for _, arg := range args { stk.push(arg) p := loadPackage(arg, stk) stk.pop() if p.Error != nil { errorf("%s", p.Error) continue } pkgs = append(pkgs, p) } } // Process package, which might now be multiple packages // due to wildcard expansion. for _, p := range pkgs { if *getFix { run(stringList(tool("fix"), relPaths(p.gofiles))) // The imports might have changed, so reload again. p = reloadPackage(arg, stk) if p.Error != nil { errorf("%s", p.Error) return } } // Process dependencies, now that we know what they are. for _, dep := range p.deps { download(dep.ImportPath, stk) } } } // downloadPackage runs the create or download command // to make the first copy of or update a copy of the given package. func downloadPackage(p *Package) error { var ( vcs *vcsCmd repo, rootPath string err error ) if p.build.SrcRoot != "" { // Directory exists. Look for checkout along path to src. vcs, rootPath, err = vcsForDir(p) if err != nil { return err } repo = "<local>" // should be unused; make distinctive } else { // Analyze the import path to determine the version control system, // repository, and the import path for the root of the repository.
return err } vcs, repo, rootPath = rr.vcs, rr.repo, rr.root } if p.build.SrcRoot == "" { // Package not found. Put in first directory of $GOPATH or else $GOROOT. // Guard against people setting GOPATH=$GOROOT. We have to use // $GOROOT's directory hierarchy (src/pkg, not just src) in that case. if list := filepath.SplitList(buildContext.GOPATH); len(list) > 0 && list[0] != goroot { p.build.SrcRoot = filepath.Join(list[0], "src") p.build.PkgRoot = filepath.Join(list[0], "pkg") } else { p.build.SrcRoot = filepath.Join(goroot, "src", "pkg") p.build.PkgRoot = filepath.Join(goroot, "pkg") } } root := filepath.Join(p.build.SrcRoot, rootPath) // If we've considered this repository already, don't do it again. if downloadRootCache[root] { return nil } downloadRootCache[root] = true if buildV { fmt.Fprintf(os.Stderr, "%s (download)\n", rootPath) } // Check that this is an appropriate place for the repo to be checked out. // The target directory must either not exist or have a repo checked out already. meta := filepath.Join(root, "."+vcs.cmd) st, err := os.Stat(meta) if err == nil && !st.IsDir() { return fmt.Errorf("%s exists but is not a directory", meta) } if err != nil { // Metadata directory does not exist. Prepare to checkout new copy. // Some version control tools require the target directory not to exist. // We require that too, just to avoid stepping on existing work. if _, err := os.Stat(root); err == nil { return fmt.Errorf("%s exists but %s does not - stale checkout?", root, meta) } // Some version control tools require the parent of the target to exist. parent, _ := filepath.Split(root) if err := os.MkdirAll(parent, 0777); err != nil { return err } if err = vcs.create(root, repo); err != nil { return err } } else { // Metadata directory does exist; download incremental updates. if err = vcs.download(root); err != nil { return err } } if buildN { // Do not show tag sync in -n; it's noise more than anything, // and since we're not running commands, no tag will be found. // But avoid printing nothing. fmt.Fprintf(os.Stderr, "# cd %s; %s sync/update\n", root, vcs.cmd) return nil } // Select and sync to appropriate version of the repository. tags, err := vcs.tags(root) if err != nil { return err } vers := runtime.Version() if i := strings.Index(vers, " "); i >= 0 { vers = vers[:i] } if err := vcs.tagSync(root, selectTag(vers, tags)); err != nil { return err } return nil } // goTag matches go release tags such as go1 and go1.2.3. // The numbers involved must be small (at most 4 digits), // have no unnecessary leading zeros, and the version cannot // end in .0 - it is go1, not go1.0 or go1.0.0. var goTag = regexp.MustCompile( `^go((0|[1-9][0-9]{0,3})\.)*([1-9][0-9]{0,3})$`, ) // selectTag returns the closest matching tag for a given version. // Closest means the latest one that is not after the current release. // Version "goX" (or "goX.Y" or "goX.Y.Z") matches tags of the same form. // Version "release.rN" matches tags of the form "go.rN" (N being a floating-point number). // Version "weekly.YYYY-MM-DD" matches tags like "go.weekly.YYYY-MM-DD". 
func selectTag(goVersion string, tags []string) (match string) { const rPrefix = "release.r" if strings.HasPrefix(goVersion, rPrefix) { p := "go.r" v, err := strconv.ParseFloat(goVersion[len(rPrefix):], 64) if err != nil { return "" } var matchf float64 for _, t := range tags { if !strings.HasPrefix(t, p) { continue } tf, err := strconv.ParseFloat(t[len(p):], 64) if err != nil { continue } if matchf < tf && tf <= v { match, matchf = t, tf } } } const wPrefix = "weekly." if strings.HasPrefix(goVersion, wPrefix) { p := "go.weekly." v := goVersion[len(wPrefix):] for _, t := range tags { if !strings.HasPrefix(t, p) { continue } if match < t && t[len(p):] <= v { match = t } } } if goTag.MatchString(goVersion) { v := goVersion for _, t := range tags { if !goTag.MatchString(t) { continue } if cmpGoVersion(match, t) < 0 && cmpGoVersion(t, v) <= 0 { match = t } } } return match } // cmpGoVersion returns -1, 0, +1 reporting whether // x < y, x == y, or x > y. func cmpGoVersion(x, y string) int { // Malformed strings compare less than well-formed strings. if !goTag.MatchString(x) { return -1 } if !goTag.MatchString(y) { return +1 } // Compare numbers in sequence. xx := strings.Split(x[len("go"):], ".") yy := strings.Split(y[len("go"):], ".") for i := 0; i < len(xx) && i < len(yy); i++ { // The Atoi are guaranteed to succeed // because the versions match goTag. xi, _ := strconv.Atoi(xx[i]) yi, _ := strconv.Atoi(yy[i]) if xi < yi { return -1 } else if xi > yi { return +1 } } if len(xx) < len(yy) { return -1 } if len(xx) > len(yy) { return +1 } return 0 }
rr, err := repoRootForImportPath(p.ImportPath) if err != nil {
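// A minimal, self-contained sketch (not part of the original file) showing
// which strings the goTag pattern above accepts. Note that "go1.0" is
// rejected by design: release tags never end in ".0", so it is go1, not
// go1.0 or go1.0.0.
//
//	package main
//
//	import (
//		"fmt"
//		"regexp"
//	)
//
//	func main() {
//		goTag := regexp.MustCompile(`^go((0|[1-9][0-9]{0,3})\.)*([1-9][0-9]{0,3})$`)
//		for _, s := range []string{"go1", "go1.2.3", "go1.0", "weekly.2012-01-15"} {
//			fmt.Println(s, goTag.MatchString(s)) // true, true, false, false
//		}
//	}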
git_commit_test.go
/* Copyright 2018 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package tag import ( "io/ioutil" "os" "path/filepath" "testing" "time" "github.com/GoogleContainerTools/skaffold/testutil" git "gopkg.in/src-d/go-git.v4" "gopkg.in/src-d/go-git.v4/plumbing/object" ) func TestGitCommit_GenerateFullyQualifiedImageName(t *testing.T) { tests := []struct { description string expectedName string createGitRepo func(string) opts *TagOptions shouldErr bool }{ { description: "success", opts: &TagOptions{ ImageName: "test", Digest: "sha256:12345abcde", }, expectedName: "test:eefe1b9", createGitRepo: func(dir string) { gitInit(t, dir). write("source.go", []byte("code")). add("source.go"). commit("initial") }, }, { description: "dirty", opts: &TagOptions{ ImageName: "test", }, expectedName: "test:eefe1b9-dirty-af8de1fde8be4367", createGitRepo: func(dir string) { gitInit(t, dir). write("source.go", []byte("code")). add("source.go"). commit("initial"). write("source.go", []byte("updated code")) }, }, { description: "untracked", opts: &TagOptions{ ImageName: "test", }, expectedName: "test:eefe1b9-dirty-bfe9b4566c9d3fec", createGitRepo: func(dir string) { gitInit(t, dir). write("source.go", []byte("code")). add("source.go"). commit("initial"). write("new.go", []byte("new code")) }, }, { description: "one file deleted", opts: &TagOptions{ ImageName: "test", }, expectedName: "test:279d53f-dirty-6a3ce511c689eda7", createGitRepo: func(dir string) { gitInit(t, dir). write("source1.go", []byte("code1")). write("source2.go", []byte("code2")). add("source1.go", "source2.go"). commit("initial"). delete("source1.go") }, }, { description: "two files deleted", opts: &TagOptions{ ImageName: "test", }, expectedName: "test:279d53f-dirty-d48c11ed65c37a09", // Must be <> than when only one file is deleted createGitRepo: func(dir string) { gitInit(t, dir). write("source1.go", []byte("code1")). write("source2.go", []byte("code2")). add("source1.go", "source2.go"). commit("initial"). delete("source1.go", "source2.go") }, }, { description: "rename", opts: &TagOptions{ ImageName: "test", }, expectedName: "test:eefe1b9-dirty-9c858d88cc0bf792", createGitRepo: func(dir string) { gitInit(t, dir). write("source.go", []byte("code")). add("source.go"). commit("initial"). rename("source.go", "source2.go") }, }, { description: "rename to different name", opts: &TagOptions{ ImageName: "test", }, expectedName: "test:eefe1b9-dirty-6534adc17ccd1cf4", // Must be <> each time a new name is used createGitRepo: func(dir string) { gitInit(t, dir). write("source.go", []byte("code")). add("source.go"). commit("initial"). 
rename("source.go", "source3.go") }, }, { description: "failure", createGitRepo: func(dir string) {}, shouldErr: true, }, } for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { tmpDir, cleanup := testutil.TempDir(t) defer cleanup() tt.createGitRepo(tmpDir) c := &GitCommit{} name, err := c.GenerateFullyQualifiedImageName(tmpDir, tt.opts) testutil.CheckErrorAndDeepEqual(t, tt.shouldErr, err, tt.expectedName, name) }) } } func TestGenerateFullyQualifiedImageNameFromSubDirectory(t *testing.T)
// gitRepo deals with test git repositories type gitRepo struct { dir string workTree *git.Worktree t *testing.T } func gitInit(t *testing.T, dir string) *gitRepo { repo, err := git.PlainInit(dir, false) failNowIfError(t, err) w, err := repo.Worktree() failNowIfError(t, err) return &gitRepo{ dir: dir, workTree: w, t: t, } } func (g *gitRepo) mkdir(folder string) *gitRepo { err := os.MkdirAll(filepath.Join(g.dir, folder), os.ModePerm) failNowIfError(g.t, err) return g } func (g *gitRepo) write(file string, content []byte) *gitRepo { err := ioutil.WriteFile(filepath.Join(g.dir, file), content, os.ModePerm) failNowIfError(g.t, err) return g } func (g *gitRepo) rename(file, to string) *gitRepo { err := os.Rename(filepath.Join(g.dir, file), filepath.Join(g.dir, to)) failNowIfError(g.t, err) return g } func (g *gitRepo) delete(files ...string) *gitRepo { for _, file := range files { err := os.Remove(filepath.Join(g.dir, file)) failNowIfError(g.t, err) } return g } func (g *gitRepo) add(files ...string) *gitRepo { for _, file := range files { _, err := g.workTree.Add(file) failNowIfError(g.t, err) } return g } func (g *gitRepo) commit(msg string) *gitRepo { now, err := time.Parse("Jan 2, 2006 at 15:04:05 -0700 MST", "Feb 3, 2013 at 19:54:00 -0700 MST") failNowIfError(g.t, err) _, err = g.workTree.Commit(msg, &git.CommitOptions{ Author: &object.Signature{ Name: "John Doe", Email: "[email protected]", When: now, }, }) failNowIfError(g.t, err) return g } func failNowIfError(t *testing.T, err error) { if err != nil { t.Fatal(err) } }
{ tmpDir, cleanup := testutil.TempDir(t) defer cleanup() gitInit(t, tmpDir). mkdir("sub/sub"). commit("initial") opts := &TagOptions{ImageName: "test"} c := &GitCommit{} name1, err := c.GenerateFullyQualifiedImageName(tmpDir, opts) failNowIfError(t, err) subDir := filepath.Join(tmpDir, "sub", "sub") name2, err := c.GenerateFullyQualifiedImageName(subDir, opts) failNowIfError(t, err) if name1 != name2 || name1 != "test:a7b32a6" { t.Errorf("Invalid names found: %s and %s", name1, name2) } }
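// Note on determinism (a sketch of the reasoning, not code from this file):
// gitRepo.commit pins the author date to a fixed instant, so the resulting
// commit hash, and therefore the expected tags like "test:eefe1b9" in the
// table above, is reproducible across test runs and machines.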
domain.go
package utils import ( "strings" ) const dot = "." // Dotted adds a trailing dot to a domain if it doesn't exist. func Dotted(domain string) string { if strings.HasSuffix(domain, dot) { return domain } return domain + dot } // Undotted removes the trailing dot from a domain if it exists. func Undotted(domain string) string
{ if !strings.HasSuffix(domain, dot) { return domain } return domain[:len(domain)-1] }
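// Illustrative behavior (a sketch, not part of the original file):
//
//	Dotted("example.com")    == "example.com."
//	Dotted("example.com.")   == "example.com."
//	Undotted("example.com.") == "example.com"
//	Undotted("example.com")  == "example.com"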
artifact_targets.py
#!/usr/bin/env python # Copyright 2016 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Definition of targets to build artifacts.""" import os.path import random import string import sys sys.path.insert(0, os.path.abspath('..')) import python_utils.jobset as jobset def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={}, flake_retries=0, timeout_retries=0, timeout_seconds=30 * 60, docker_base_image=None, extra_docker_args=None, verbose_success=False): """Creates jobspec for a task running under docker.""" environ = environ.copy() environ['RUN_COMMAND'] = shell_command environ['ARTIFACTS_OUT'] = 'artifacts/%s' % name docker_args = [] for k, v in environ.items(): docker_args += ['-e', '%s=%s' % (k, v)] docker_env = { 'DOCKERFILE_DIR': dockerfile_dir, 'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh', 'OUTPUT_DIR': 'artifacts' } if docker_base_image is not None: docker_env['DOCKER_BASE_IMAGE'] = docker_base_image if extra_docker_args is not None: docker_env['EXTRA_DOCKER_ARGS'] = extra_docker_args jobspec = jobset.JobSpec( cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args, environ=docker_env, shortname='build_artifact.%s' % (name), timeout_seconds=timeout_seconds, flake_retries=flake_retries, timeout_retries=timeout_retries, verbose_success=verbose_success) return jobspec def create_jobspec(name, cmdline, environ={}, shell=False, flake_retries=0, timeout_retries=0, timeout_seconds=30 * 60, use_workspace=False, cpu_cost=1.0, verbose_success=False): """Creates jobspec.""" environ = environ.copy() if use_workspace: environ['WORKSPACE_NAME'] = 'workspace_%s' % name environ['ARTIFACTS_OUT'] = os.path.join('..', 'artifacts', name) cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh' ] + cmdline else: environ['ARTIFACTS_OUT'] = os.path.join('artifacts', name) jobspec = jobset.JobSpec(cmdline=cmdline, environ=environ, shortname='build_artifact.%s' % (name), timeout_seconds=timeout_seconds, flake_retries=flake_retries, timeout_retries=timeout_retries, shell=shell, cpu_cost=cpu_cost, verbose_success=verbose_success) return jobspec _MACOS_COMPAT_FLAG = '-mmacosx-version-min=10.7' _ARCH_FLAG_MAP = {'x86': '-m32', 'x64': '-m64'} class PythonArtifact: """Builds Python artifacts.""" def __init__(self, platform, arch, py_version): self.name = 'python_%s_%s_%s' % (platform, arch, py_version) self.platform = platform self.arch = arch self.labels = ['artifact', 'python', platform, arch, py_version] self.py_version = py_version if 'manylinux' in platform: self.labels.append('linux') def pre_build_jobspecs(self): return [] def build_jobspec(self): environ = {} if self.platform == 'linux_extra': # Raspberry Pi build environ['PYTHON'] = '/usr/local/bin/python{}'.format( self.py_version) environ['PIP'] = '/usr/local/bin/pip{}'.format(self.py_version) # https://github.com/resin-io-projects/armv7hf-debian-qemu/issues/9 # A QEMU bug causes submodule update to hang, so we copy directly environ['RELATIVE_COPY_PATH'] = '.' 
# Parallel builds are counterproductive in emulated environment environ['GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS'] = '1' extra_args = ' --entrypoint=/usr/bin/qemu-arm-static ' return create_docker_jobspec( self.name, 'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch), 'tools/run_tests/artifacts/build_artifact_python.sh', environ=environ, timeout_seconds=60 * 60 * 5, docker_base_image='quay.io/grpc/raspbian_{}'.format(self.arch), extra_docker_args=extra_args) elif 'manylinux' in self.platform: if self.arch == 'x86': environ['SETARCH_CMD'] = 'linux32' # Inside the manylinux container, the python installations are located in # special places... environ['PYTHON'] = '/opt/python/{}/bin/python'.format( self.py_version) environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.py_version) # Platform autodetection for the manylinux1 image breaks so we set the # defines ourselves. # TODO(atash) get better platform-detection support in core so we don't # need to do this manually... environ['CFLAGS'] = '-DGPR_MANYLINUX1=1' environ['GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS'] = 'TRUE' environ['GRPC_BUILD_MANYLINUX_WHEEL'] = 'TRUE' return create_docker_jobspec( self.name, # NOTE(rbellevi): Do *not* update this without also ensuring the # base_docker_image attribute is accurate. 'tools/dockerfile/grpc_artifact_python_%s_%s' % (self.platform, self.arch), 'tools/run_tests/artifacts/build_artifact_python.sh', environ=environ, timeout_seconds=60 * 60, docker_base_image='quay.io/pypa/manylinux1_i686' if self.arch == 'x86' else 'quay.io/pypa/manylinux1_x86_64') elif self.platform == 'windows': if 'Python27' in self.py_version: environ['EXT_COMPILER'] = 'mingw32' else: environ['EXT_COMPILER'] = 'msvc' # For some reason, the batch script %random% always runs with the same # seed. We create a random temp-dir here dir = ''.join( random.choice(string.ascii_uppercase) for _ in range(10)) return create_jobspec(self.name, [ 'tools\\run_tests\\artifacts\\build_artifact_python.bat', self.py_version, '32' if self.arch == 'x86' else '64' ], environ=environ, timeout_seconds=45 * 60, use_workspace=True) else: environ['PYTHON'] = self.py_version environ['SKIP_PIP_INSTALL'] = 'TRUE' return create_jobspec( self.name, ['tools/run_tests/artifacts/build_artifact_python.sh'], environ=environ, timeout_seconds=60 * 60 * 2, use_workspace=True) def __str__(self): return self.name class RubyArtifact: """Builds ruby native gem.""" def __init__(self, platform, arch): self.name = 'ruby_native_gem_%s_%s' % (platform, arch) self.platform = platform self.arch = arch self.labels = ['artifact', 'ruby', platform, arch] def pre_build_jobspecs(self): return [] def build_jobspec(self): # Ruby build uses docker internally and docker cannot be nested. # We are using a custom workspace instead. 
return create_jobspec( self.name, ['tools/run_tests/artifacts/build_artifact_ruby.sh'], use_workspace=True, timeout_seconds=45 * 60) class CSharpExtArtifact: """Builds C# native extension library""" def __init__(self, platform, arch, arch_abi=None): self.name = 'csharp_ext_%s_%s' % (platform, arch) self.platform = platform self.arch = arch self.arch_abi = arch_abi self.labels = ['artifact', 'csharp', platform, arch] if arch_abi: self.name += '_%s' % arch_abi self.labels.append(arch_abi) def pre_build_jobspecs(self): return [] def build_jobspec(self): if self.arch == 'android': return create_docker_jobspec( self.name, 'tools/dockerfile/grpc_artifact_android_ndk', 'tools/run_tests/artifacts/build_artifact_csharp_android.sh', environ={'ANDROID_ABI': self.arch_abi}) elif self.arch == 'ios': return create_jobspec( self.name, ['tools/run_tests/artifacts/build_artifact_csharp_ios.sh'], use_workspace=True) elif self.platform == 'windows': return create_jobspec(self.name, [ 'tools\\run_tests\\artifacts\\build_artifact_csharp.bat', self.arch ], use_workspace=True) else: if self.platform == 'linux': cmake_arch_option = '' # x64 is the default architecture if self.arch == 'x86': # TODO(jtattermusch): more work needed to enable # boringssl assembly optimizations for 32-bit linux. # Problem: currently we are building the artifact under # 32-bit docker image, but CMAKE_SYSTEM_PROCESSOR is still # set to x86_64, so the resulting boringssl binary # would have undefined symbols. cmake_arch_option = '-DOPENSSL_NO_ASM=ON' return create_docker_jobspec( self.name, 'tools/dockerfile/grpc_artifact_centos6_%s' % self.arch, 'tools/run_tests/artifacts/build_artifact_csharp.sh', environ={'CMAKE_ARCH_OPTION': cmake_arch_option}) else: cmake_arch_option = '' # x64 is the default architecture if self.arch == 'x86': cmake_arch_option = '-DCMAKE_OSX_ARCHITECTURES=i386' return create_jobspec( self.name, ['tools/run_tests/artifacts/build_artifact_csharp.sh'], environ={'CMAKE_ARCH_OPTION': cmake_arch_option}, use_workspace=True) def __str__(self): return self.name class PHPArtifact: """Builds PHP PECL package""" def __init__(self, platform, arch): self.name = 'php_pecl_package_{0}_{1}'.format(platform, arch) self.platform = platform self.arch = arch self.labels = ['artifact', 'php', platform, arch] def pre_build_jobspecs(self): return [] def build_jobspec(self): return create_docker_jobspec( self.name, 'tools/dockerfile/grpc_artifact_centos6_{}'.format(self.arch), 'tools/run_tests/artifacts/build_artifact_php.sh') class ProtocArtifact: """Builds protoc and protoc-plugin artifacts""" def __init__(self, platform, arch): self.name = 'protoc_%s_%s' % (platform, arch) self.platform = platform self.arch = arch self.labels = ['artifact', 'protoc', platform, arch] def pre_build_jobspecs(self): return [] def build_jobspec(self): if self.platform != 'windows': cxxflags = '-DNDEBUG %s' % _ARCH_FLAG_MAP[self.arch] ldflags = '%s' % _ARCH_FLAG_MAP[self.arch] if self.platform != 'macos': ldflags += ' -static-libgcc -static-libstdc++ -s' environ = { 'CONFIG': 'opt', 'CXXFLAGS': cxxflags, 'LDFLAGS': ldflags, 'PROTOBUF_LDFLAGS_EXTRA': ldflags } if self.platform == 'linux': return create_docker_jobspec( self.name, 'tools/dockerfile/grpc_artifact_protoc', 'tools/run_tests/artifacts/build_artifact_protoc.sh', environ=environ) else: environ[ 'CXXFLAGS'] += ' -std=c++11 -stdlib=libc++ %s' % _MACOS_COMPAT_FLAG return create_jobspec( self.name, ['tools/run_tests/artifacts/build_artifact_protoc.sh'], environ=environ, timeout_seconds=60 * 60, 
use_workspace=True) else: generator = 'Visual Studio 14 2015 Win64' if self.arch == 'x64' else 'Visual Studio 14 2015' return create_jobspec( self.name, ['tools\\run_tests\\artifacts\\build_artifact_protoc.bat'], environ={'generator': generator}, use_workspace=True) def
(self): return self.name def targets(): """Gets list of supported targets""" return ([ Cls(platform, arch) for Cls in (CSharpExtArtifact, ProtocArtifact) for platform in ('linux', 'macos', 'windows') for arch in ('x86', 'x64') ] + [ CSharpExtArtifact('linux', 'android', arch_abi='arm64-v8a'), CSharpExtArtifact('linux', 'android', arch_abi='armeabi-v7a'), CSharpExtArtifact('linux', 'android', arch_abi='x86'), CSharpExtArtifact('macos', 'ios'), # TODO(https://github.com/grpc/grpc/issues/20283) # Add manylinux2010_x86 targets once this issue is resolved. PythonArtifact('manylinux2010', 'x86', 'cp27-cp27m'), PythonArtifact('manylinux2010', 'x86', 'cp27-cp27mu'), PythonArtifact('manylinux2010', 'x86', 'cp35-cp35m'), PythonArtifact('manylinux2010', 'x86', 'cp36-cp36m'), PythonArtifact('manylinux2010', 'x86', 'cp37-cp37m'), PythonArtifact('manylinux2010', 'x86', 'cp38-cp38'), PythonArtifact('linux_extra', 'armv7', '2.7'), PythonArtifact('linux_extra', 'armv7', '3.5'), PythonArtifact('linux_extra', 'armv7', '3.6'), PythonArtifact('linux_extra', 'armv6', '2.7'), PythonArtifact('linux_extra', 'armv6', '3.5'), PythonArtifact('linux_extra', 'armv6', '3.6'), PythonArtifact('manylinux2010', 'x64', 'cp27-cp27m'), PythonArtifact('manylinux2010', 'x64', 'cp27-cp27mu'), PythonArtifact('manylinux2010', 'x64', 'cp35-cp35m'), PythonArtifact('manylinux2010', 'x64', 'cp36-cp36m'), PythonArtifact('manylinux2010', 'x64', 'cp37-cp37m'), PythonArtifact('manylinux2010', 'x64', 'cp38-cp38'), PythonArtifact('macos', 'x64', 'python2.7'), PythonArtifact('macos', 'x64', 'python3.5'), PythonArtifact('macos', 'x64', 'python3.6'), PythonArtifact('macos', 'x64', 'python3.7'), PythonArtifact('macos', 'x64', 'python3.8'), PythonArtifact('windows', 'x86', 'Python27_32bit'), PythonArtifact('windows', 'x86', 'Python35_32bit'), PythonArtifact('windows', 'x86', 'Python36_32bit'), PythonArtifact('windows', 'x86', 'Python37_32bit'), PythonArtifact('windows', 'x86', 'Python38_32bit'), PythonArtifact('windows', 'x64', 'Python27'), PythonArtifact('windows', 'x64', 'Python35'), PythonArtifact('windows', 'x64', 'Python36'), PythonArtifact('windows', 'x64', 'Python37'), PythonArtifact('windows', 'x64', 'Python38'), RubyArtifact('linux', 'x64'), RubyArtifact('macos', 'x64'), PHPArtifact('linux', 'x64') ])
__str__
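# A hedged usage sketch (not part of the original file): every target class
# above exposes the same small interface (name, labels, pre_build_jobspecs,
# build_jobspec), so callers can filter the output of targets() by label
# before building jobspecs.
#
#   for target in targets():
#       if 'python' in target.labels and 'linux' in target.labels:
#           print(target.name, target.labels)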
CollarSweater.tsx
import * as React from 'react' import { uniqueId } from 'lodash' import Colors from './Colors' export default class CollarSweater extends React.Component { static optionValue = 'CollarSweater' private path1 = uniqueId('react-path-') private mask1 = uniqueId('react-mask-') render () { const { path1, mask1 } = this return ( <g id='Clothing/Collar-+-Sweater' transform='translate(0.000000, 170.000000)'> <defs> <path d='M105.192402,29.0517235 L104,29.0517235 L104,29.0517235 C64.235498,29.0517235 32,61.2872215 32,101.051724 L32,110 L232,110 L232,101.051724 C232,61.2872215 199.764502,29.0517235 160,29.0517235 L160,29.0517235 L158.807598,29.0517235 C158.934638,30.0353144 159,31.0364513 159,32.0517235 C159,45.8588423 146.911688,57.0517235 132,57.0517235 C117.088312,57.0517235 105,45.8588423 105,32.0517235 C105,31.0364513 105.065362,30.0353144 105.192402,29.0517235 Z' id={path1} /> </defs>
<use xlinkHref={'#' + path1} /> </mask> <use id='Clothes' fill='#E6E6E6' fillRule='evenodd' xlinkHref={'#' + path1} /> <Colors maskID={mask1} /> <path d='M156,22.2794906 C162.181647,26.8351858 166,33.1057265 166,40.027915 C166,47.2334941 161.862605,53.7329769 155.228997,58.3271669 L149.57933,53.8764929 L145,54.207887 L146,51.0567821 L145.922229,50.995516 C152.022491,47.8530505 156,42.7003578 156,36.8768102 L156,22.2794906 Z M108,21.5714994 C101.232748,26.1740081 97,32.7397769 97,40.027915 C97,47.4261549 101.361602,54.080035 108.308428,58.6915723 L114.42067,53.8764929 L119,54.207887 L118,51.0567821 L118.077771,50.995516 C111.977509,47.8530505 108,42.7003578 108,36.8768102 L108,21.5714994 Z' id='Collar' fill='#F2F2F2' fillRule='evenodd' /> {this.props.children} </g> ) } }
<mask id={mask1} fill='white'>
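// Hypothetical usage sketch (the surrounding SVG composition is an
// assumption, not part of this file): the component renders a translated
// <g> into an <svg> parent, and option pickers identify it via the static
// `optionValue` field.
//
//   <svg>{/* other avatar pieces */}<CollarSweater /></svg>
//   CollarSweater.optionValue === 'CollarSweater'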
lgbt4scraper.py
import scrapy import pandas as pd import time import os category_name = "LGBT" category_num = 4 class
(scrapy.Spider):
    name = category_name.lower() + str(category_num) + "spider"

    def start_requests(self):
        # Read the previously collected review-page URLs for this category.
        parent_dir = "./reviewpages"
        link_file = parent_dir + "/" + category_name + str(category_num) + '_reviewpages.csv'
        df1 = pd.read_csv(link_file)
        urls = [df1.iat[i, 0] for i in range(df1.shape[0])]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        parent_dir1 = "./user_profiles"
        parent_dir2 = "./user_reviews"
        fn1 = parent_dir1 + "/" + category_name + str(category_num) + '_profiles.csv'
        fn2 = parent_dir2 + "/" + category_name + str(category_num) + '_reviews.csv'

        # Collect absolute profile links from the review page.
        profile_links = []
        for block in response.xpath('//a[@class="a-profile"]'):
            profile_links.append('https://www.amazon.com' + block.attrib['href'])

        # Collect the review bodies.
        reviews = []
        for block in response.xpath('//div[@class="a-row a-spacing-small review-data"]'):
            reviews.append(block.xpath('string(.)').get().strip())

        df1 = pd.DataFrame({'Profile': profile_links})
        df2 = pd.DataFrame({'Review': reviews})

        # Append to the CSVs, writing the header only when the file is created.
        if not os.path.exists(fn1):
            df1.to_csv(fn1, mode='w', index=False)
        else:
            df1.to_csv(fn1, mode='a', index=False, header=False)
        if not os.path.exists(fn2):
            df2.to_csv(fn2, mode='w', index=False)
        else:
            df2.to_csv(fn2, mode='a', index=False, header=False)

        # Crude rate limiting; note this blocks the reactor, and Scrapy's
        # DOWNLOAD_DELAY setting is the usual non-blocking alternative.
        time.sleep(10)
QuotesSpider
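# Assumed invocation (hypothetical; this project may wire Scrapy differently):
#
#   scrapy runspider lgbt4scraper.py
#
# or programmatically:
#
#   from scrapy.crawler import CrawlerProcess
#   process = CrawlerProcess()
#   process.crawl(QuotesSpider)
#   process.start()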
unit_arg.rs
use clippy_utils::diagnostics::span_lint_and_then; use clippy_utils::source::{indent_of, reindent_multiline, snippet_opt}; use if_chain::if_chain; use rustc_errors::Applicability; use rustc_hir::{self as hir, Block, Expr, ExprKind, MatchSource, Node, StmtKind}; use rustc_lint::LateContext; use super::{utils, UNIT_ARG}; pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>) { if expr.span.from_expansion() { return; } // apparently stuff in the desugaring of `?` can trigger this // so check for that here // only the calls to `Try::from_error` is marked as desugared, // so we need to check both the current Expr and its parent. if is_questionmark_desugar_marked_call(expr) { return; } let map = &cx.tcx.hir(); let opt_parent_node = map.find(map.get_parent_node(expr.hir_id)); if_chain! { if let Some(hir::Node::Expr(parent_expr)) = opt_parent_node; if is_questionmark_desugar_marked_call(parent_expr); then { return; } } match expr.kind { ExprKind::Call(_, args) | ExprKind::MethodCall(_, _, args, _) => { let args_to_recover = args .iter() .filter(|arg| { if cx.typeck_results().expr_ty(arg).is_unit() && !utils::is_unit_literal(arg) { !matches!( &arg.kind, ExprKind::Match(.., MatchSource::TryDesugar) | ExprKind::Path(..) ) } else { false } }) .collect::<Vec<_>>(); if !args_to_recover.is_empty() { lint_unit_args(cx, expr, &args_to_recover); } }, _ => (), } } fn is_questionmark_desugar_marked_call(expr: &Expr<'_>) -> bool { use rustc_span::hygiene::DesugaringKind; if let ExprKind::Call(ref callee, _) = expr.kind { callee.span.is_desugaring(DesugaringKind::QuestionMark) } else { false } } fn lint_unit_args(cx: &LateContext<'_>, expr: &Expr<'_>, args_to_recover: &[&Expr<'_>]) { let mut applicability = Applicability::MachineApplicable; let (singular, plural) = if args_to_recover.len() > 1 { ("", "s") } else { ("a ", "") }; span_lint_and_then( cx, UNIT_ARG, expr.span, &format!("passing {}unit value{} to a function", singular, plural), |db| { let mut or = ""; args_to_recover .iter() .filter_map(|arg| { if_chain! 
{ if let ExprKind::Block(block, _) = arg.kind; if block.expr.is_none(); if let Some(last_stmt) = block.stmts.iter().last(); if let StmtKind::Semi(last_expr) = last_stmt.kind; if let Some(snip) = snippet_opt(cx, last_expr.span); then { Some(( last_stmt.span, snip, )) } else { None } } }) .for_each(|(span, sugg)| { db.span_suggestion( span, "remove the semicolon from the last statement in the block", sugg, Applicability::MaybeIncorrect, ); or = "or "; applicability = Applicability::MaybeIncorrect; }); let arg_snippets: Vec<String> = args_to_recover .iter() .filter_map(|arg| snippet_opt(cx, arg.span)) .collect(); let arg_snippets_without_empty_blocks: Vec<String> = args_to_recover .iter() .filter(|arg| !is_empty_block(arg)) .filter_map(|arg| snippet_opt(cx, arg.span)) .collect(); if let Some(call_snippet) = snippet_opt(cx, expr.span) { let sugg = fmt_stmts_and_call( cx, expr, &call_snippet, &arg_snippets, &arg_snippets_without_empty_blocks, ); if arg_snippets_without_empty_blocks.is_empty() { db.multipart_suggestion( &format!("use {}unit literal{} instead", singular, plural), args_to_recover .iter() .map(|arg| (arg.span, "()".to_string())) .collect::<Vec<_>>(), applicability, ); } else { let plural = arg_snippets_without_empty_blocks.len() > 1; let empty_or_s = if plural { "s" } else { "" }; let it_or_them = if plural { "them" } else { "it" }; db.span_suggestion( expr.span, &format!( "{}move the expression{} in front of the call and replace {} with the unit literal `()`", or, empty_or_s, it_or_them ), sugg, applicability, ); } } }, ); } fn
(expr: &Expr<'_>) -> bool { matches!( expr.kind, ExprKind::Block( Block { stmts: &[], expr: None, .. }, _, ) ) } fn fmt_stmts_and_call( cx: &LateContext<'_>, call_expr: &Expr<'_>, call_snippet: &str, args_snippets: &[impl AsRef<str>], non_empty_block_args_snippets: &[impl AsRef<str>], ) -> String { let call_expr_indent = indent_of(cx, call_expr.span).unwrap_or(0); let call_snippet_with_replacements = args_snippets .iter() .fold(call_snippet.to_owned(), |acc, arg| acc.replacen(arg.as_ref(), "()", 1)); let mut stmts_and_call = non_empty_block_args_snippets .iter() .map(|it| it.as_ref().to_owned()) .collect::<Vec<_>>(); stmts_and_call.push(call_snippet_with_replacements); stmts_and_call = stmts_and_call .into_iter() .map(|v| reindent_multiline(v.into(), true, Some(call_expr_indent)).into_owned()) .collect(); let mut stmts_and_call_snippet = stmts_and_call.join(&format!("{}{}", ";\n", " ".repeat(call_expr_indent))); // expr is not in a block statement or result expression position, wrap in a block let parent_node = cx.tcx.hir().find(cx.tcx.hir().get_parent_node(call_expr.hir_id)); if !matches!(parent_node, Some(Node::Block(_))) && !matches!(parent_node, Some(Node::Stmt(_))) { let block_indent = call_expr_indent + 4; stmts_and_call_snippet = reindent_multiline(stmts_and_call_snippet.into(), true, Some(block_indent)).into_owned(); stmts_and_call_snippet = format!( "{{\n{}{}\n{}}}", " ".repeat(block_indent), &stmts_and_call_snippet, " ".repeat(call_expr_indent) ); } stmts_and_call_snippet }
is_empty_block
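// Illustrative input and suggested rewrite for this lint (a sketch, not a
// test from this file):
//
//   fn foo(_: ()) {}
//   fn bar() {}
//
//   foo({ bar(); });   // lint: passing a unit value to a function
//
//   // suggested: move the expression in front of the call and pass `()`
//   bar();
//   foo(());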
memdump.rs
//! Pretend to dump some random memory locations use async_trait::async_trait; use rand::prelude::*; use std::io::stdout; use std::io::Write; use crate::args::AppConfig; use crate::generators::gen_hex_string; use crate::io::{csleep, is_printable_ascii, newline, print}; use crate::modules::Module; pub struct Memdump; #[async_trait(?Send)] impl Module for Memdump { fn name(&self) -> &'static str { "memdump" } fn signature(&self) -> String
async fn run(&self, appconfig: &AppConfig) { let mut rng = thread_rng(); let mut current_loc = (rng.gen_range(0..2u64.pow(63)) / 16) * 16; let num_lines = rng.gen_range(50..200); for _ in 1..num_lines { print(format!("{loc:016x} ", loc = current_loc)).await; current_loc += 0x10; let values = (0..16) .map(|_| gen_hex_string(&mut rng, 2)) .collect::<Vec<String>>(); // Print the values in two columns. for (n, val) in values.iter().enumerate() { if n == 8 { print(" ").await; } print(format!("{} ", val)).await; let val_delay = rng.gen_range(0..2); stdout().flush().unwrap(); csleep(val_delay).await; } // Print the ascii values. let mut ascii_repr = String::with_capacity(values.len()); for val in values { let ascii_val = u8::from_str_radix(&val, 16).unwrap_or(b'.') as char; if is_printable_ascii(ascii_val as u64) { ascii_repr.push(ascii_val); } else { ascii_repr.push('.'); } } print(format!(" |{ascii_repr}|", ascii_repr = ascii_repr)).await; let row_delay = rng.gen_range(10..200); csleep(row_delay).await; if appconfig.should_exit() { return; } newline().await; } } }
{ "memdump -k -v".to_string() }
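// Illustrative line format produced by `run` above (the bytes are random at
// runtime; this sample is not real output):
//
//   00007f3a9c2d4e10  de ad be ef 00 11 22 33  41 42 43 44 99 aa bb cc  |........ABCD....|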
starcraft.py
# coding=utf-8 # Copyright 2019 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tool for preparing test example of Starcraft dataset. ./starcraft --resolution=64 --output_file=test.tfrecords ./starcraft --resolution=64 --output_file=train_0.tfrecords ./starcraft --resolution=64 --output_file=train_1.tfrecords ./starcraft --resolution=64 --output_file=valid.tfrecords """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl import app from absl import flags import numpy as np import png import six import tensorflow as tf FLAGS = flags.FLAGS flags.DEFINE_integer("resolution", 64, "Resolution of the video.") flags.DEFINE_string("output_file", None, "Path to the output file.") def
(argv): if len(argv) > 1: raise tf.app.UsageError("Too many command-line arguments.") writer = tf.io.TFRecordWriter(FLAGS.output_file) feature_list = {} frame_list = [] for _ in range(20): # generate 20 frames. png_image = six.StringIO() png.from_array( np.random.randint( low=0, high=255, size=(FLAGS.resolution, FLAGS.resolution, 3), dtype=np.uint8), "RGB").save(png_image) frame_list.append( tf.train.Feature( bytes_list=tf.train.BytesList(value=[png_image.getvalue()]))) png_image.close() feature_list["rgb_screen"] = tf.train.FeatureList(feature=frame_list) context_feature = {} context_feature["game_duration_loops"] = tf.train.Feature( int64_list=tf.train.Int64List(value=[20])) context_feature["game_duration_seconds"] = tf.train.Feature( float_list=tf.train.FloatList(value=[20.0])) context_feature["n_steps"] = tf.train.Feature( int64_list=tf.train.Int64List(value=[20])) context_feature["screen_size"] = tf.train.Feature( int64_list=tf.train.Int64List(value=[FLAGS.resolution, FLAGS.resolution])) example = tf.train.SequenceExample( feature_lists=tf.train.FeatureLists(feature_list=feature_list), context=tf.train.Features(feature=context_feature)) writer.write(example.SerializeToString()) writer.close() if __name__ == "__main__": app.run(main)
main
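# A hedged sketch of reading one generated record back (standard tf.data and
# protobuf APIs; the feature names match those written in main above):
#
#   dataset = tf.data.TFRecordDataset("test.tfrecords")
#   for raw in dataset.take(1):
#       ex = tf.train.SequenceExample.FromString(raw.numpy())
#       print(ex.context.feature["n_steps"])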
fc_test.go
/* Copyright 2015 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package fc import ( "fmt" "os" "testing" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/testing/fake" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/mount" utiltesting "k8s.io/kubernetes/pkg/util/testing" "k8s.io/kubernetes/pkg/volume" ) func TestCanSupport(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("fc_test") if err != nil { t.Fatalf("error creating temp dir: %v", err) } defer os.RemoveAll(tmpDir) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, nil, nil)) plug, err := plugMgr.FindPluginByName("kubernetes.io/fc") if err != nil { t.Errorf("Can't find the plugin by name") } if plug.Name() != "kubernetes.io/fc" { t.Errorf("Wrong name: %s", plug.Name()) } if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) { t.Errorf("Expected false") } } func TestGetAccessModes(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("fc_test") if err != nil { t.Fatalf("error creating temp dir: %v", err) } defer os.RemoveAll(tmpDir) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, nil, nil)) plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/fc") if err != nil { t.Errorf("Can't find the plugin by name") } if !contains(plug.GetAccessModes(), api.ReadWriteOnce) || !contains(plug.GetAccessModes(), api.ReadOnlyMany) { t.Errorf("Expected two AccessModeTypes: %s and %s", api.ReadWriteOnce, api.ReadOnlyMany) } } func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool { for _, m := range modes { if m == mode { return true } } return false } type fakeDiskManager struct { tmpDir string attachCalled bool detachCalled bool } func NewFakeDiskManager() *fakeDiskManager { return &fakeDiskManager{ tmpDir: utiltesting.MkTmpdirOrDie("fc_test"), } } func (fake *fakeDiskManager) Cleanup() { os.RemoveAll(fake.tmpDir) } func (fake *fakeDiskManager) MakeGlobalPDName(disk fcDisk) string { return fake.tmpDir } func (fake *fakeDiskManager) AttachDisk(b fcDiskBuilder) error { globalPath := b.manager.MakeGlobalPDName(*b.fcDisk) err := os.MkdirAll(globalPath, 0750) if err != nil { return err } // Simulate the global mount so that the fakeMounter returns the // expected number of mounts for the attached disk. 
b.mounter.Mount(globalPath, globalPath, b.fsType, nil)
	fake.attachCalled = true
	return nil
}

func (fake *fakeDiskManager) DetachDisk(c fcDiskCleaner, mntPath string) error {
	globalPath := c.manager.MakeGlobalPDName(*c.fcDisk)
	err := os.RemoveAll(globalPath)
	if err != nil {
		return err
	}
	fake.detachCalled = true
	return nil
}

func doTestPlugin(t *testing.T, spec *volume.Spec) {
	tmpDir, err := utiltesting.MkTmpdir("fc_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/fc")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	fakeManager := NewFakeDiskManager()
	defer fakeManager.Cleanup()
	fakeMounter := &mount.FakeMounter{}
	builder, err := plug.(*fcPlugin).newBuilderInternal(spec, types.UID("poduid"), fakeManager, fakeMounter)
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder: %v", err)
	}

	path := builder.GetPath()
	expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~fc/vol1", tmpDir)
	if path != expectedPath {
		t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
	}

	if err := builder.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	if !fakeManager.attachCalled {
		t.Errorf("Attach was not called")
	}

	fakeManager2 := NewFakeDiskManager()
	defer fakeManager2.Cleanup()
	cleaner, err := plug.(*fcPlugin).newCleanerInternal("vol1", types.UID("poduid"), fakeManager2, fakeMounter)
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		t.Errorf("Got a nil Cleaner: %v", err)
	}

	if err := cleaner.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
	if !fakeManager2.detachCalled {
		t.Errorf("Detach was not called")
	}
}

func TestPluginVolume(t *testing.T) {
	lun := 0
	vol := &api.Volume{
		Name: "vol1",
		VolumeSource: api.VolumeSource{
			FC: &api.FCVolumeSource{
				TargetWWNs: []string{"some_wwn"},
				FSType:     "ext4",
				Lun:        &lun,
			},
		},
	}
	doTestPlugin(t, volume.NewSpecFromVolume(vol))
}

func TestPluginPersistentVolume(t *testing.T) {
	lun := 0
	vol := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: "vol1",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{
				FC: &api.FCVolumeSource{
					TargetWWNs: []string{"some_wwn"},
					FSType:     "ext4",
					Lun:        &lun,
				},
			},
		},
	}
	doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}

func TestPersistentClaimReadOnlyFlag(t *testing.T)
{ tmpDir, err := utiltesting.MkTmpdir("fc_test") if err != nil { t.Fatalf("error creating temp dir: %v", err) } defer os.RemoveAll(tmpDir) lun := 0 pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ FC: &api.FCVolumeSource{ TargetWWNs: []string{"some_wwn"}, FSType: "ext4", Lun: &lun, }, }, ClaimRef: &api.ObjectReference{ Name: "claimA", }, }, } claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: api.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: api.PersistentVolumeClaimStatus{ Phase: api.ClaimBound, }, } client := fake.NewSimpleClientset(pv, claim) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, client, nil)) plug, _ := plugMgr.FindPluginByName(fcPluginName) // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}) if !builder.GetAttributes().ReadOnly { t.Errorf("Expected true for builder.IsReadOnly") } }
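// Assumed invocation (the package path is inferred from the imports above,
// not stated in this file):
//
//	go test k8s.io/kubernetes/pkg/volume/fc -run 'TestPlugin|TestPersistentClaim'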
reactor.rs
#![feature(async_await)] #![warn(rust_2018_idioms)] #![cfg(feature = "default")] use tokio_net::Reactor; use tokio_tcp::TcpListener; use tokio_test::{assert_ok, assert_pending}; use futures_util::task::{waker_ref, ArcWake}; use std::future::Future; use std::net::TcpStream; use std::pin::Pin; use std::sync::{mpsc, Arc, Mutex}; use std::task::Context; struct Task<T> { future: Mutex<Pin<Box<T>>>, } impl<T: Send> ArcWake for Task<T> { fn wake_by_ref(_: &Arc<Self>) { // Do nothing... } } impl<T> Task<T> { fn new(future: T) -> Task<T>
} #[test] fn test_drop_on_notify() { // When the reactor receives a kernel notification, it notifies the // task that holds the associated socket. If this notification results in // the task being dropped, the socket will also be dropped. // // Previously, there was a deadlock scenario where the reactor, while // notifying, held a lock and the task being dropped attempted to acquire // that same lock in order to clean up state. // // To simulate this case, we create a fake executor that does nothing when // the task is notified. This simulates an executor in the process of // shutting down. Then, when the task handle is dropped, the task itself is // dropped. let mut reactor = assert_ok!(Reactor::new()); let (addr_tx, addr_rx) = mpsc::channel(); // Define a task that just drains the listener let task = Arc::new(Task::new(async move { let addr = assert_ok!("127.0.0.1:0".parse()); // Create a listener let mut listener = assert_ok!(TcpListener::bind(&addr)); // Send the address let addr = listener.local_addr().unwrap(); addr_tx.send(addr).unwrap(); loop { let _ = listener.accept().await; } })); let _enter = tokio_executor::enter().unwrap(); tokio_net::with_default(&reactor.handle(), || { let waker = waker_ref(&task); let mut cx = Context::from_waker(&waker); assert_pending!(task.future.lock().unwrap().as_mut().poll(&mut cx)); }); // Get the address let addr = addr_rx.recv().unwrap(); drop(task); // Establish a connection to the acceptor let _s = TcpStream::connect(&addr).unwrap(); reactor.turn(None).unwrap(); }
{ Task { future: Mutex::new(Box::pin(future)), } }
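// The manual-polling pattern used above, summarized (names come from
// futures_util; this is a sketch, not additional test code):
//
//   let waker = waker_ref(&task);             // Waker backed by the ArcWake impl
//   let mut cx = Context::from_waker(&waker);
//   task.future.lock().unwrap().as_mut().poll(&mut cx);
//
// Because `wake_by_ref` is a no-op here, a Pending future is never
// rescheduled, which is exactly the "executor shutting down" scenario the
// test wants to simulate.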
webrtc_internal.rs
extern crate log; use log::info; use std::{collections::VecDeque, net::SocketAddr}; use crate::{error::NaiaClientSocketError, Packet}; use naia_socket_shared::Ref; use wasm_bindgen::{prelude::*, JsCast, JsValue}; use web_sys::{ ErrorEvent, MessageEvent, ProgressEvent, RtcDataChannel, RtcDataChannelInit, RtcDataChannelType, RtcIceCandidate, RtcIceCandidateInit, RtcPeerConnection, RtcSdpType, RtcSessionDescriptionInit, XmlHttpRequest, }; #[derive(Deserialize, Debug, Clone)] pub struct SessionAnswer { pub sdp: String, #[serde(rename = "type")] pub _type: String, } #[derive(Deserialize, Debug)] pub struct SessionCandidate { pub candidate: String, #[serde(rename = "sdpMLineIndex")] pub sdp_m_line_index: u16, #[serde(rename = "sdpMid")] pub sdp_mid: String, } #[derive(Deserialize, Debug)] pub struct JsSessionResponse { pub answer: SessionAnswer, pub candidate: SessionCandidate, } #[derive(Serialize)] pub struct IceServerConfig { pub urls: [String; 1], } #[allow(unused_must_use)] pub fn
( socket_address: SocketAddr, msg_queue: Ref<VecDeque<Result<Option<Packet>, NaiaClientSocketError>>>, ) -> RtcDataChannel { let server_url_str = format!("http://{}/new_rtc_session", socket_address); let peer: RtcPeerConnection = RtcPeerConnection::new().unwrap(); let mut data_channel_config: RtcDataChannelInit = RtcDataChannelInit::new(); data_channel_config.ordered(false); data_channel_config.max_retransmits(0); let channel: RtcDataChannel = peer.create_data_channel_with_data_channel_dict("data", &data_channel_config); channel.set_binary_type(RtcDataChannelType::Arraybuffer); let cloned_channel = channel.clone(); let msg_queue_clone = msg_queue.clone(); let channel_onopen_func: Box<dyn FnMut(JsValue)> = Box::new(move |_| { let msg_queue_clone_2 = msg_queue_clone.clone(); let channel_onmsg_func: Box<dyn FnMut(MessageEvent)> = Box::new(move |evt: MessageEvent| { if let Ok(arraybuf) = evt.data().dyn_into::<js_sys::ArrayBuffer>() { let uarray: js_sys::Uint8Array = js_sys::Uint8Array::new(&arraybuf); let mut body = vec![0; uarray.length() as usize]; uarray.copy_to(&mut body[..]); msg_queue_clone_2 .borrow_mut() .push_back(Ok(Some(Packet::new(body)))); } }); let channel_onmsg_closure = Closure::wrap(channel_onmsg_func); cloned_channel.set_onmessage(Some(channel_onmsg_closure.as_ref().unchecked_ref())); channel_onmsg_closure.forget(); }); let channel_onopen_closure = Closure::wrap(channel_onopen_func); channel.set_onopen(Some(channel_onopen_closure.as_ref().unchecked_ref())); channel_onopen_closure.forget(); let onerror_func: Box<dyn FnMut(ErrorEvent)> = Box::new(move |e: ErrorEvent| { info!("data channel error event: {:?}", e); }); let onerror_callback = Closure::wrap(onerror_func); channel.set_onerror(Some(onerror_callback.as_ref().unchecked_ref())); onerror_callback.forget(); let peer_clone = peer.clone(); let server_url_msg = Ref::new(server_url_str); let peer_offer_func: Box<dyn FnMut(JsValue)> = Box::new(move |e: JsValue| { let session_description = e.into(); let peer_clone_2 = peer_clone.clone(); let server_url_msg_clone = server_url_msg.clone(); let peer_desc_func: Box<dyn FnMut(JsValue)> = Box::new(move |_: JsValue| { let request = XmlHttpRequest::new().expect("can't create new XmlHttpRequest"); request .open("POST", &server_url_msg_clone.borrow()) .unwrap_or_else(|err| { info!( "WebSys, can't POST to server url. 
Original Error: {:?}", err ) }); let request_2 = request.clone(); let peer_clone_3 = peer_clone_2.clone(); let request_func: Box<dyn FnMut(ProgressEvent)> = Box::new(move |_: ProgressEvent| { if request_2.status().unwrap() == 200 { let response_string = request_2.response_text().unwrap().unwrap(); let response_js_value = js_sys::JSON::parse(response_string.as_str()).unwrap(); let session_response: JsSessionResponse = response_js_value.into_serde().unwrap(); let session_response_answer: SessionAnswer = session_response.answer.clone(); let peer_clone_4 = peer_clone_3.clone(); let remote_desc_success_func: Box<dyn FnMut(JsValue)> = Box::new( move |e: JsValue| { let mut candidate_init_dict: RtcIceCandidateInit = RtcIceCandidateInit::new( session_response.candidate.candidate.as_str(), ); candidate_init_dict.sdp_m_line_index(Some( session_response.candidate.sdp_m_line_index, )); candidate_init_dict .sdp_mid(Some(session_response.candidate.sdp_mid.as_str())); let candidate: RtcIceCandidate = RtcIceCandidate::new(&candidate_init_dict).unwrap(); let peer_add_success_func: Box<dyn FnMut(JsValue)> = Box::new(move |_: JsValue| { //Client add ice candidate success }); let peer_add_success_callback = Closure::wrap(peer_add_success_func); let peer_add_failure_func: Box<dyn FnMut(JsValue)> = Box::new(move |_: JsValue| { info!("Client error during 'addIceCandidate': {:?}", e); }); let peer_add_failure_callback = Closure::wrap(peer_add_failure_func); peer_clone_4.add_ice_candidate_with_rtc_ice_candidate_and_success_callback_and_failure_callback( &candidate, peer_add_success_callback.as_ref().unchecked_ref(), peer_add_failure_callback.as_ref().unchecked_ref()); peer_add_success_callback.forget(); peer_add_failure_callback.forget(); }, ); let remote_desc_success_callback = Closure::wrap(remote_desc_success_func); let remote_desc_failure_func: Box<dyn FnMut(JsValue)> = Box::new(move |_: JsValue| { info!( "Client error during 'setRemoteDescription': TODO, put value here" ); }); let remote_desc_failure_callback = Closure::wrap(remote_desc_failure_func); let mut rtc_session_desc_init_dict: RtcSessionDescriptionInit = RtcSessionDescriptionInit::new(RtcSdpType::Answer); rtc_session_desc_init_dict.sdp(session_response_answer.sdp.as_str()); peer_clone_3.set_remote_description_with_success_callback_and_failure_callback( &rtc_session_desc_init_dict, remote_desc_success_callback.as_ref().unchecked_ref(), remote_desc_failure_callback.as_ref().unchecked_ref(), ); remote_desc_success_callback.forget(); remote_desc_failure_callback.forget(); } }); let request_callback = Closure::wrap(request_func); request.set_onload(Some(request_callback.as_ref().unchecked_ref())); request_callback.forget(); request .send_with_opt_str(Some( peer_clone_2.local_description().unwrap().sdp().as_str(), )) .unwrap_or_else(|err| { info!("WebSys, can't sent request str. Original Error: {:?}", err) }); }); let peer_desc_callback = Closure::wrap(peer_desc_func); peer_clone .set_local_description(&session_description) .then(&peer_desc_callback); peer_desc_callback.forget(); }); let peer_offer_callback = Closure::wrap(peer_offer_func); let peer_error_func: Box<dyn FnMut(JsValue)> = Box::new(move |_: JsValue| { info!("Client error during 'createOffer': e value here? TODO"); }); let peer_error_callback = Closure::wrap(peer_error_func); peer.create_offer().then(&peer_offer_callback); peer_offer_callback.forget(); peer_error_callback.forget(); return channel; }
webrtc_initialize
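// The signaling flow implemented above, summarized as a sketch:
//
//   1. peer.create_offer() -> peer.set_local_description(offer)
//   2. POST the offer SDP to http://<socket_address>/new_rtc_session
//   3. parse the JSON response into JsSessionResponse { answer, candidate }
//   4. peer.set_remote_description(answer), then add the ICE candidate
//
// Application data then flows over the unordered, zero-retransmit "data"
// channel created at the top of the function.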
providers.go
package envexpander import "github.com/mrosales/envexpander/providers" type ParameterProvider interface { Get(keys []string) (map[string]string, error) } // Creates a ParameterProvider using AWS SSM Parameter Store as a backend. // Supports both KMS-encrypted secrets as well as unencrypted strings. func NewSSMProvider(client providers.SSMAPI, withDecryption bool) ParameterProvider { return &providers.SSMProvider{
WithDecryption: withDecryption,
		BatchSize:      10, // SSM supports batching up to 10 parameter requests at once
	}
}

// Creates a ParameterProvider using AWS Secrets Manager as a backend.
// Supports both KMS-encrypted secrets as well as unencrypted strings.
func NewSecretsManagerProvider(client providers.SecretsManagerAPI) ParameterProvider {
	return &providers.SecretsManagerProvider{
		Client: client,
	}
}
Client: client,
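// Hypothetical usage sketch (the aws-sdk-go session/ssm setup is an
// assumption drawn from that SDK's conventions, not shown in this package):
//
//	sess := session.Must(session.NewSession())
//	provider := NewSSMProvider(ssm.New(sess), true)
//	values, err := provider.Get([]string{"/app/DB_PASSWORD", "/app/API_KEY"})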
painel_cria_tribo.py
# --------------------------
# UFSC - CTC - INE - INE5663
# Tribe Exercise
# --------------------------
# Class responsible for creating a tribe
#
from model.tribo import Tribo
from view.paineis.painel_abstrato import PainelAbstrato


class Pa
ainelAbstrato):
    def __init__(self, iu):
        super().__init__('Create Tribe', iu)

    def _interaja(self):
        nome = input('Tribe name: ')
        qtd_guerreiros = int(input('Maximum number of warriors: '))
        qtd_vidas = int(input('Maximum number of lives per warrior: '))
        tribo = Tribo(nome, qtd_guerreiros, qtd_vidas)
        self._iu.armazene_tribo(tribo)
        print('Tribe created!')
inelCriaTribo(P
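# Hypothetical usage sketch (the `iu` controller object and its
# armazene_tribo method come from the surrounding project, not this file):
#
#   painel = PainelCriaTribo(iu)
#   painel._interaja()  # prompts for name/limits and stores the new Tribo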
assistant.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- r''' Usage: python3 assistant.py service ''' import sys def check_version():
if not check_version():
    exit(1)

import os, io, json, subprocess, tempfile
from urllib import parse
from wsgiref.simple_server import make_server

EXEC = sys.executable
# print(EXEC)
PORT = 39093
HOST = 'local.liaoxuefeng.com:%d' % PORT
TEMP = tempfile.mkdtemp(suffix='_py', prefix='learn_python_')
INDEX = 0

def main():
    httpd = make_server('127.0.0.1', PORT, application)
    print('Ready for Python code on port %d...' % PORT)
    httpd.serve_forever()

def get_name():
    global INDEX
    INDEX = INDEX + 1
    return 'test_%d' % INDEX

def write_py(name, code):
    fpath = os.path.join(TEMP, '%s.py' % name)
    with open(fpath, 'w', encoding='utf-8') as f:
        f.write(code)
    print('Code written to: %s' % fpath)
    return fpath

def decode(s):
    try:
        return s.decode('utf-8')
    except UnicodeDecodeError:
        return s.decode('gbk')

def application(environ, start_response):
    host = environ.get('HTTP_HOST')
    method = environ.get('REQUEST_METHOD')
    path = environ.get('PATH_INFO')
    if method == 'GET' and path == '/':
        start_response('200 OK', [('Content-Type', 'text/html')])
        return [b'<html><head><title>Learning Python</title></head><body><form method="post" action="/run"><textarea name="code" style="width:90%;height: 600px"></textarea><p><button type="submit">Run</button></p></form></body></html>']
    if method == 'GET' and path == '/env':
        start_response('200 OK', [('Content-Type', 'text/html')])
        L = [b'<html><head><title>ENV</title></head><body>']
        for k, v in environ.items():
            p = '<p>%s = %s' % (k, str(v))
            L.append(p.encode('utf-8'))
        L.append(b'</html>')
        return L
    if host != HOST or method != 'POST' or path != '/run' or not environ.get('CONTENT_TYPE', '').lower().startswith('application/x-www-form-urlencoded'):
        start_response('400 Bad Request', [('Content-Type', 'application/json')])
        return [b'{"error":"bad_request"}']
    s = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH']))
    qs = parse.parse_qs(s.decode('utf-8'))
    if 'code' not in qs:
        start_response('400 Bad Request', [('Content-Type', 'application/json')])
        return [b'{"error":"invalid_params"}']
    name = qs['name'][0] if 'name' in qs else get_name()
    code = qs['code'][0]
    headers = [('Content-Type', 'application/json')]
    origin = environ.get('HTTP_ORIGIN', '')
    if origin.find('.liaoxuefeng.com') == -1:
        start_response('400 Bad Request', [('Content-Type', 'application/json')])
        return [b'{"error":"invalid_origin"}']
    headers.append(('Access-Control-Allow-Origin', origin))
    start_response('200 OK', headers)
    r = dict()
    try:
        fpath = write_py(name, code)
        print('Execute: %s %s' % (EXEC, fpath))
        r['output'] = decode(subprocess.check_output([EXEC, fpath], stderr=subprocess.STDOUT, timeout=5))
    except subprocess.CalledProcessError as e:
        r = dict(error='Exception', output=decode(e.output))
    except subprocess.TimeoutExpired:
        r = dict(error='Timeout', output='Execution timed out')
    except Exception:
        r = dict(error='Error', output='Execution error')
    print('Execute done.')
    return [json.dumps(r).encode('utf-8')]

if __name__ == '__main__':
    main()
v = sys.version_info # print(v) if v.major == 3 and v.minor >= 4: return True print('Your current python is %d.%d. Please use Python 3.4.' % (v.major, v.minor)) return False
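# Assumed manual test (the endpoint, form field, and origin/host checks are
# defined in application above; header values are the module constants):
#
#   curl -d 'code=print(123)' \
#        -H 'Origin: http://www.liaoxuefeng.com' \
#        -H 'Host: local.liaoxuefeng.com:39093' \
#        http://127.0.0.1:39093/run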
idex.py
# -*- coding: utf-8 -*- # PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN: # https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code from ccxt.base.exchange import Exchange import math from ccxt.base.errors import ExchangeError from ccxt.base.errors import AuthenticationError from ccxt.base.errors import ArgumentsRequired from ccxt.base.errors import InsufficientFunds from ccxt.base.errors import InvalidOrder class idex(Exchange): def describe(self): return self.deep_extend(super(idex, self).describe(), { 'id': 'idex', 'name': 'IDEX', 'countries': ['US'], 'rateLimit': 1500, 'certified': True, 'requiresWeb3': True, 'has': { 'fetchOrderBook': True, 'fetchTicker': True, 'fetchTickers': True, 'fetchMarkets': True, 'fetchBalance': True, 'createOrder': True, 'cancelOrder': True, 'fetchOpenOrders': True, 'fetchTransactions': True, 'fetchTrades': True, 'fetchMyTrades': True, 'withdraw': True, 'fetchOHLCV': False, }, 'timeframes': { '1m': 'M1', '3m': 'M3', '5m': 'M5', '15m': 'M15', '30m': 'M30', # default '1h': 'H1', '4h': 'H4', '1d': 'D1', '1w': 'D7', '1M': '1M', }, 'urls': { 'test': 'https://api.idex.market', 'logo': 'https://user-images.githubusercontent.com/1294454/63693236-3415e380-c81c-11e9-8600-ba1634f1407d.jpg', 'api': 'https://api.idex.market', 'www': 'https://idex.market', 'doc': [ 'https://docs.idex.market/', ], }, 'api': { 'public': { 'post': [ 'returnTicker', 'returnCurrenciesWithPairs', # undocumented 'returnCurrencies', 'return24Volume', 'returnBalances', 'returnCompleteBalances', # shows amount in orders as well as total 'returnDepositsWithdrawals', 'returnOpenOrders', 'returnOrderBook', 'returnOrderStatus', 'returnOrderTrades', 'returnTradeHistory', 'returnTradeHistoryMeta', # not documented 'returnContractAddress', 'returnNextNonce', ], }, 'private': { 'post': [ 'order', 'cancel', 'trade', 'withdraw', ], }, }, 'options': { 'contractAddress': None, # 0x2a0c0DBEcC7E4D658f48E01e3fA353F44050c208 'orderNonce': None, }, 'exceptions': { 'Invalid order signature. Please try again.': AuthenticationError, 'You have insufficient funds to match self order. 
If you believe self is a mistake please refresh and try again.': InsufficientFunds, 'Order no longer available.': InvalidOrder, }, 'requiredCredentials': { 'walletAddress': True, 'privateKey': True, 'apiKey': False, 'secret': False, }, 'commonCurrencies': { 'FT': 'Fabric Token', 'MT': 'Monarch', 'ONE': 'Menlo One', 'PLA': 'PlayChip', 'WAX': 'WAXP', }, }) def fetch_markets(self, params={}): # idex does not have an endpoint for markets # instead we generate the markets from the endpoint for currencies request = { 'includeDelisted': True, } markets = self.publicPostReturnCurrenciesWithPairs(self.extend(request, params)) currenciesById = {} currencies = markets['tokens'] for i in range(0, len(currencies)): currency = currencies[i] currenciesById[currency['symbol']] = currency result = [] limits = { 'amount': { 'min': None, 'max': None, }, 'price': { 'min': None, 'max': None, }, 'cost': { 'min': None, 'max': None, }, } quotes = markets['pairs'] keys = list(quotes.keys()) for i in range(0, len(keys)): quoteId = keys[i] bases = quotes[quoteId] quote = self.safe_currency_code(quoteId) quoteCurrency = currenciesById[quoteId] for j in range(0, len(bases)): baseId = bases[j] id = quoteId + '_' + baseId base = self.safe_currency_code(baseId) symbol = base + '/' + quote baseCurrency = currenciesById[baseId] baseAddress = baseCurrency['address'] quoteAddress = quoteCurrency['address'] precision = { 'price': self.safe_integer(quoteCurrency, 'decimals'), 'amount': self.safe_integer(baseCurrency, 'decimals'), } result.append({ 'symbol': symbol, 'precision': precision, 'base': base, 'quote': quote, 'baseId': baseAddress, 'quoteId': quoteAddress, 'limits': limits, 'id': id, 'info': baseCurrency, 'tierBased': False, }) return result def parse_ticker(self, ticker, market=None): # # { # last: '0.0016550916', # high: 'N/A', # low: 'N/A', # lowestAsk: '0.0016743368', # highestBid: '0.001163726270773897', # percentChange: '0', # baseVolume: '0', # quoteVolume: '0' # } # symbol = None if market: symbol = market['symbol'] baseVolume = self.safe_float(ticker, 'baseVolume') quoteVolume = self.safe_float(ticker, 'quoteVolume') last = self.safe_float(ticker, 'last') percentage = self.safe_float(ticker, 'percentChange') return { 'symbol': symbol, 'timestamp': None, 'datetime': None, 'high': self.safe_float(ticker, 'high'), 'low': self.safe_float(ticker, 'low'), 'bid': self.safe_float(ticker, 'highestBid'), 'bidVolume': None, 'ask': self.safe_float(ticker, 'lowestAsk'), 'askVolume': None, 'vwap': None, 'open': None, 'close': last, 'last': last, 'previousClose': None, 'change': None, 'percentage': percentage, 'average': None, 'baseVolume': quoteVolume, 'quoteVolume': baseVolume, 'info': ticker, } def fetch_tickers(self, symbols=None, params={}): self.load_markets() response = self.publicPostReturnTicker(params) # {ETH_BOUNCY: # {last: '0.000000004000088005', # high: 'N/A', # low: 'N/A', # lowestAsk: '0.00000000599885995', # highestBid: '0.000000001400500103', # percentChange: '0', # baseVolume: '0', # quoteVolume: '0'}, # ETH_NBAI: # {last: '0.0000032', # high: 'N/A', # low: 'N/A', # lowestAsk: '0.000004000199999502', # highestBid: '0.0000016002', # percentChange: '0', # baseVolume: '0', # quoteVolume: '0'},} ids = list(response.keys()) result = {} for i in range(0, len(ids)): id = ids[i] symbol = None market = None if id in self.markets_by_id: market = self.markets_by_id[id] symbol = market['symbol'] else: quoteId, baseId = id.split('_') base = self.safe_currency_code(baseId) quote = self.safe_currency_code(quoteId) 
symbol = base + '/' + quote market = {'symbol': symbol} ticker = response[id] result[symbol] = self.parse_ticker(ticker, market) return self.filter_by_array(result, 'symbol', symbols) def fetch_ticker(self, symbol, params={}): self.load_markets() market = self.market(symbol) request = { 'market': market['id'], } response = self.publicPostReturnTicker(self.extend(request, params)) # {last: '0.0016550916', # high: 'N/A', # low: 'N/A', # lowestAsk: '0.0016743368', # highestBid: '0.001163726270773897', # percentChange: '0', # baseVolume: '0', # quoteVolume: '0'} return self.parse_ticker(response, market) def fetch_order_book(self, symbol, limit=None, params={}): self.load_markets() market = self.market(symbol) id = market['quote'] + '_' + market['base'] request = { 'market': id, 'count': 100, # the default will only return one trade } if limit is not None: request['count'] = limit response = self.publicPostReturnOrderBook(self.extend(request, params)) # # { # "asks": [ # { # "price": "0.001675282799999", # "amount": "206.163978911921061732", # "total": "0.345382967850497906", # "orderHash": "0xfdf12c124a6a7fa4a8e1866b324da888c8e1b3ad209f5050d3a23df3397a5cb7", # "params": { # "tokenBuy": "0x0000000000000000000000000000000000000000", # "buySymbol": "ETH", # "buyPrecision": 18, # "amountBuy": "345382967850497906", # "tokenSell": "0xb98d4c97425d9908e66e53a6fdf673acca0be986", # "sellSymbol": "ABT", # "sellPrecision": 18, # "amountSell": "206163978911921061732", # "expires": 10000, # "nonce": 13489307413, # "user": "0x9e8ef79316a4a79bbf55a5f9c16b3e068fff65c6" # } # } # ], # "bids": [ # { # "price": "0.001161865193232242", # "amount": "854.393661648355", # "total": "0.992690256787469029", # "orderHash": "0x2f2baaf982085e4096f9e23e376214885fa74b2939497968e92222716fc2c86d", # "params": { # "tokenBuy": "0xb98d4c97425d9908e66e53a6fdf673acca0be986", # "buySymbol": "ABT", # "buyPrecision": 18, # "amountBuy": "854393661648355000000", # "tokenSell": "0x0000000000000000000000000000000000000000", # "sellSymbol": "ETH", # "sellPrecision": 18, # "amountSell": "992690256787469029", # "expires": 10000, # "nonce": 18155189676, # "user": "0xb631284dd7b74a846af5b37766ceb1f85d53eca4" # } # } # ] # } # return self.parse_order_book(response, None, 'bids', 'asks', 'price', 'amount') def parse_bid_ask(self, bidAsk, priceKey=0, amountKey=1): price = self.safe_float(bidAsk, priceKey) amount = self.safe_float(bidAsk, amountKey) info = bidAsk return [price, amount, info] def fetch_balance(self, params={}): request = { 'address': self.walletAddress, } response = self.publicPostReturnCompleteBalances(self.extend(request, params)) # # { # ETH: {available: '0.0167', onOrders: '0.1533'} # } # result = { 'info': response, } keys = list(response.keys()) for i in range(0, len(keys)): currency = keys[i] balance = response[currency] code = self.safe_currency_code(currency) result[code] = { 'free': self.safe_float(balance, 'available'), 'used': self.safe_float(balance, 'onOrders'), } return self.parse_balance(result) def create_order(self, symbol, type, side, amount, price=None, params={}): self.check_required_dependencies() self.load_markets() market = self.market(symbol) if type == 'limit': expires = 100000 contractAddress = self.get_contract_address() tokenBuy = None tokenSell = None amountBuy = None amountSell = None quoteAmount = float(price) * float(amount) if side == 'buy': tokenBuy = market['baseId'] tokenSell = market['quoteId'] amountBuy = self.to_wei(amount, market['precision']['amount']) amountSell = self.to_wei(quoteAmount, 
18) else: tokenBuy = market['quoteId'] tokenSell = market['baseId'] amountBuy = self.to_wei(quoteAmount, 18) amountSell = self.to_wei(amount, market['precision']['amount']) nonce = self.get_nonce() orderToHash = { 'contractAddress': contractAddress, 'tokenBuy': tokenBuy, 'amountBuy': amountBuy, 'tokenSell': tokenSell, 'amountSell': amountSell, 'expires': expires, 'nonce': nonce, 'address': self.walletAddress, } orderHash = self.get_idex_create_order_hash(orderToHash) signature = self.sign_message(orderHash, self.privateKey) request = { 'tokenBuy': tokenBuy, 'amountBuy': amountBuy, 'tokenSell': tokenSell, 'amountSell': amountSell, 'address': self.walletAddress, 'nonce': nonce, 'expires': expires, } response = self.privatePostOrder(self.extend(request, signature)) # self.extend(request, params) will cause invalid signature # {orderNumber: 1562323021, # orderHash: # '0x31c42154a8421425a18d076df400d9ec1ef64d5251285384a71ba3c0ab31beb4', # timestamp: 1564041428, # price: '0.00073', # amount: '210', # total: '0.1533', # type: 'buy', # params: # {tokenBuy: '0x763fa6806e1acf68130d2d0f0df754c93cc546b2', # buyPrecision: 18, # amountBuy: '210000000000000000000', # tokenSell: '0x0000000000000000000000000000000000000000', # sellPrecision: 18, # amountSell: '153300000000000000', # expires: 100000, # nonce: 1, # user: '0x0ab991497116f7f5532a4c2f4f7b1784488628e1'}} return self.parse_order(response, market) elif type == 'market': if not ('orderHash' in params): raise ArgumentsRequired(self.id + ' market order requires an order structure such as that in fetchOrderBook()[\'bids\'][0][2], fetchOrder()[\'info\'], or fetchOpenOrders()[0][\'info\']') # {price: '0.000132247803328924', # amount: '19980', # total: '2.6423111105119', # orderHash: # '0x5fb3452b3d13fc013585b51c91c43a0fbe4298c211243763c49437848c274749', # params: # {tokenBuy: '0x0000000000000000000000000000000000000000', # buySymbol: 'ETH', # buyPrecision: 18, # amountBuy: '2642311110511900000', # tokenSell: '0xb705268213d593b8fd88d3fdeff93aff5cbdcfae', # sellSymbol: 'IDEX', # sellPrecision: 18, # amountSell: '19980000000000000000000', # expires: 10000, # nonce: 1564656561510, # user: '0xc3f8304270e49b8e8197bfcfd8567b83d9e4479b'}} orderToSign = { 'orderHash': params['orderHash'], 'amount': params['params']['amountBuy'], 'address': params['params']['user'], 'nonce': params['params']['nonce'], } orderHash = self.get_idex_market_order_hash(orderToSign) signature = self.sign_message(orderHash, self.privateKey) signedOrder = self.extend(orderToSign, signature) signedOrder['address'] = self.walletAddress signedOrder['nonce'] = self.get_nonce() # [{ # "amount": "0.07", # "date": "2017-10-13 16:25:36", # "total": "0.49", # "market": "ETH_DVIP", # "type": "buy", # "price": "7", # "orderHash": "0xcfe4018c59e50e0e1964c979e6213ce5eb8c751cbc98a44251eb48a0985adc52", # "uuid": "250d51a0-b033-11e7-9984-a9ab79bb8f35" # }] response = self.privatePostTrade(signedOrder) return self.parse_orders(response, market) def get_nonce(self): if self.options['orderNonce'] is None: response = self.publicPostReturnNextNonce({ 'address': self.walletAddress, }) return self.safe_integer(response, 'nonce') else: result = self.options['orderNonce'] self.options['orderNonce'] = self.sum(self.options['orderNonce'], 1) return result def get_contract_address(self): if self.options['contractAddress'] is not None: return self.options['contractAddress'] response = self.publicPostReturnContractAddress() self.options['contractAddress'] = self.safe_string(response, 'address') return 
self.options['contractAddress'] def cancel_order(self, orderId, symbol=None, params={}): nonce = self.get_nonce() orderToHash = { 'orderHash': orderId, 'nonce': nonce, } orderHash = self.get_idex_cancel_order_hash(orderToHash) signature = self.sign_message(orderHash, self.privateKey) request = { 'orderHash': orderId, 'address': self.walletAddress, 'nonce': nonce, } response = self.privatePostCancel(self.extend(request, signature)) # {success: 1} if 'success' in response: return { 'info': response, } else: raise ExchangeError(self.id + ' cancel order failed ' + self.json(response)) def fetch_transactions(self, code=None, since=None, limit=None, params={}): self.load_markets() currency = self.currency(code) request = { 'address': self.walletAddress, } if since is not None: request['start'] = int(int(math.floor(since / 1000))) response = self.publicPostReturnDepositsWithdrawals(self.extend(request, params)) # {deposits: # [{currency: 'ETH', # amount: '0.05', # timestamp: 1563953513, # transactionHash: # '0xd6eefd81c7efc9beeb35b924d6db3c93a78bf7eac082ba87e107ad4e94bccdcf', # depositNumber: 1586430}, # {currency: 'ETH', # amount: '0.12', # timestamp: 1564040359, # transactionHash: # '0x2ecbb3ab72b6f79fc7a9058c39dce28f913152748c1507d13ab1759e965da3ca', # depositNumber: 1587341}], # withdrawals: # [{currency: 'ETH', # amount: '0.149', # timestamp: 1564060001, # transactionHash: # '0xab555fc301779dd92fd41ccd143b1d72776ae7b5acfc59ca44a1d376f68fda15', # withdrawalNumber: 1444070, # status: 'COMPLETE'}]} deposits = self.parse_transactions(response['deposits'], currency, since, limit) withdrawals = self.parse_transactions(response['withdrawals'], currency, since, limit) return self.array_concat(deposits, withdrawals) def parse_transaction(self, item, currency=None): # {currency: 'ETH', # amount: '0.05', # timestamp: 1563953513, # transactionHash: # '0xd6eefd81c7efc9beeb35b924d6db3c93a78bf7eac082ba87e107ad4e94bccdcf', # depositNumber: 1586430} amount = self.safe_float(item, 'amount') timestamp = self.safe_timestamp(item, 'timestamp') txhash = self.safe_string(item, 'transactionHash') id = None type = None status = None addressFrom = None addressTo = None if 'depositNumber' in item: id = self.safe_string(item, 'depositNumber') type = 'deposit' addressFrom = self.walletAddress addressTo = self.options['contractAddress'] elif 'withdrawalNumber' in item: id = self.safe_string(item, 'withdrawalNumber') type = 'withdrawal' status = self.parse_transaction_status(self.safe_string(item, 'status')) addressFrom = self.options['contractAddress'] addressTo = self.walletAddress code = self.safe_currency_code(self.safe_string(item, 'currency')) return { 'info': item, 'id': id, 'txid': txhash, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'currency': code, 'amount': amount, 'status': status, 'type': type, 'updated': None, 'comment': None, 'addressFrom': addressFrom, 'tagFrom': None, 'addressTo': addressTo, 'tagTo': None, 'fee': { 'currency': code, 'cost': None, 'rate': None, }, } def parse_transaction_status(self, status): statuses = { 'COMPLETE': 'ok', } return self.safe_string(statuses, status) def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}): if self.walletAddress is None: raise ArgumentsRequired(self.id + ' fetchOpenOrders requires a walletAddress') self.load_markets() request = { 'address': self.walletAddress, } market = None if symbol is not None: market = self.market(symbol) request['market'] = market['id'] response = self.publicPostReturnOpenOrders(self.extend(request, 
params)) # [{timestamp: 1564041428, # orderHash: # '0x31c42154a8421425a18d076df400d9ec1ef64d5251285384a71ba3c0ab31beb4', # orderNumber: 1562323021, # market: 'ETH_LIT', # type: 'buy', # params: # {tokenBuy: '0x763fa6806e1acf68130d2d0f0df754c93cc546b2', # buySymbol: 'LIT', # buyPrecision: 18, # amountBuy: '210000000000000000000', # tokenSell: '0x0000000000000000000000000000000000000000', # sellSymbol: 'ETH', # sellPrecision: 18, # amountSell: '153300000000000000', # expires: 100000, # nonce: 1, # user: '0x0ab991497116f7f5532a4c2f4f7b1784488628e1'}, # price: '0.00073', # amount: '210', # status: 'open', # total: '0.1533'}] return self.parse_orders(response, market, since, limit) def fetch_order(self, id, symbol=None, params={}): self.load_markets() market = None if symbol is not None: market = self.market(symbol) request = { 'orderHash': id, } response = self.publicPostReturnOrderStatus(self.extend(request, params)) # {filled: '0', # initialAmount: '210', # timestamp: 1564041428, # orderHash: # '0x31c42154a8421425a18d076df400d9ec1ef64d5251285384a71ba3c0ab31beb4', # orderNumber: 1562323021, # market: 'ETH_LIT', # type: 'buy', # params: # {tokenBuy: '0x763fa6806e1acf68130d2d0f0df754c93cc546b2', # buySymbol: 'LIT', # buyPrecision: 18, # amountBuy: '210000000000000000000', # tokenSell: '0x0000000000000000000000000000000000000000', # sellSymbol: 'ETH', # sellPrecision: 18, # amountSell: '153300000000000000', # expires: 100000, # nonce: 1, # user: '0x0ab991497116f7f5532a4c2f4f7b1784488628e1'}, # price: '0.00073', # amount: '210', # status: 'open', # total: '0.1533'} return self.parse_order(response, market) def parse_order(self, order, market=None): # {filled: '0', # initialAmount: '210', # timestamp: 1564041428, # orderHash: # '0x31c42154a8421425a18d076df400d9ec1ef64d5251285384a71ba3c0ab31beb4', # orderNumber: 1562323021, # market: 'ETH_LIT', # type: 'buy', # params: # {tokenBuy: '0x763fa6806e1acf68130d2d0f0df754c93cc546b2', # buySymbol: 'LIT', # buyPrecision: 18, # amountBuy: '210000000000000000000', # tokenSell: '0x0000000000000000000000000000000000000000', # sellSymbol: 'ETH', # sellPrecision: 18, # amountSell: '153300000000000000', # expires: 100000, # nonce: 1, # user: '0x0ab991497116f7f5532a4c2f4f7b1784488628e1'}, # price: '0.00073', # amount: '210', # status: 'open', # total: '0.1533'} timestamp = self.safe_timestamp(order, 'timestamp') side = self.safe_string(order, 'type') symbol = None amount = None remaining = None if 'initialAmount' in order: amount = self.safe_float(order, 'initialAmount') remaining = self.safe_float(order, 'amount') else: amount = self.safe_float(order, 'amount') filled = self.safe_float(order, 'filled') price = self.safe_float(order, 'price') cost = self.safe_float(order, 'total') if (cost is not None) and (filled is not None) and not cost: cost = filled * price if 'market' in order: marketId = order['market'] symbol = self.markets_by_id[marketId]['symbol'] elif (side is not None) and ('params' in order): params = order['params'] buy = self.safe_currency_code(self.safe_string(params, 'tokenBuy')) sell = self.safe_currency_code(self.safe_string(params, 'tokenSell')) if buy is not None and sell is not None: symbol = (buy + '/' + sell) if (side == 'buy') else (sell + '/' + buy) if symbol is None and market is not None: symbol = market['symbol'] id = self.safe_string(order, 'orderHash') status = self.parse_order_status(self.safe_string(order, 'status')) return { 'info': order, 'id': id, 'clientOrderId': None, 'symbol': symbol, 'timestamp': timestamp, 'datetime': 
self.iso8601(timestamp), 'side': side, 'amount': amount, 'price': price, 'type': 'limit', 'filled': filled, 'remaining': remaining, 'cost': cost, 'status': status, 'lastTradeTimestamp': None, 'average': None, 'trades': None, 'fee': None, } def parse_order_status(self, status): statuses = { 'open': 'open', } return self.safe_string(statuses, status, status) def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}): if self.walletAddress is None: raise ArgumentsRequired(self.id + ' fetchOpenOrders requires a walletAddress') self.load_markets() request = { 'address': self.walletAddress, } market = None if symbol is not None: market = self.market(symbol) request['market'] = market['id'] if limit is not None: request['start'] = int(int(math.floor(limit))) response = self.publicPostReturnTradeHistory(self.extend(request, params)) # {ETH_IDEX: # [{type: 'buy', # date: '2019-07-25 11:24:41', # amount: '347.833140025692348611', # total: '0.050998794333719943', # uuid: 'cbdff960-aece-11e9-b566-c5d69c3be671', # tid: 4320867, # timestamp: 1564053881, # price: '0.000146618560640751', # taker: '0x0ab991497116f7f5532a4c2f4f7b1784488628e1', # maker: '0x1a961bc2e0d619d101f5f92a6be752132d7606e6', # orderHash: # '0xbec6485613a15be619c04c1425e8e821ebae42b88fa95ac4dfe8ba2beb363ee4', # transactionHash: # '0xf094e07b329ac8046e8f34db358415863c41daa36765c05516f4cf4f5b403ad1', # tokenBuy: '0x0000000000000000000000000000000000000000', # buyerFee: '0.695666280051384697', # gasFee: '28.986780264563232993', # sellerFee: '0.00005099879433372', # tokenSell: '0xb705268213d593b8fd88d3fdeff93aff5cbdcfae', # usdValue: '11.336926687304238214'}]} # # if a symbol is specified in the request: # # [{type: 'buy', # date: '2019-07-25 11:24:41', # amount: '347.833140025692348611', # total: '0.050998794333719943', # uuid: 'cbdff960-aece-11e9-b566-c5d69c3be671', # tid: 4320867, # timestamp: 1564053881, # price: '0.000146618560640751', # taker: '0x0ab991497116f7f5532a4c2f4f7b1784488628e1', # maker: '0x1a961bc2e0d619d101f5f92a6be752132d7606e6', # orderHash: # '0xbec6485613a15be619c04c1425e8e821ebae42b88fa95ac4dfe8ba2beb363ee4', # transactionHash: # '0xf094e07b329ac8046e8f34db358415863c41daa36765c05516f4cf4f5b403ad1', # tokenBuy: '0x0000000000000000000000000000000000000000', # buyerFee: '0.695666280051384697', # gasFee: '28.986780264563232993', # sellerFee: '0.00005099879433372', # tokenSell: '0xb705268213d593b8fd88d3fdeff93aff5cbdcfae', # usdValue: '11.336926687304238214'}] if isinstance(response, list): return self.parse_trades(response, market, since, limit) else: result = [] marketIds = list(response.keys()) for i in range(0, len(marketIds)): marketId = marketIds[i] trades = response[marketId] parsed = self.parse_trades(trades, market, since, limit) result = self.array_concat(result, parsed) return result def fetch_trades(self, symbol, since=None, limit=None, params={}): self.load_markets() market = self.market(symbol) request = { 'market': market['id'], } if limit is not None: request['start'] = int(int(math.floor(limit))) response = self.publicPostReturnTradeHistory(self.extend(request, params)) # [{type: 'buy', # date: '2019-07-25 11:24:41', # amount: '347.833140025692348611', # total: '0.050998794333719943', # uuid: 'cbdff960-aece-11e9-b566-c5d69c3be671', # tid: 4320867, # timestamp: 1564053881, # price: '0.000146618560640751', # taker: '0x0ab991497116f7f5532a4c2f4f7b1784488628e1', # maker: '0x1a961bc2e0d619d101f5f92a6be752132d7606e6', # orderHash: # 
'0xbec6485613a15be619c04c1425e8e821ebae42b88fa95ac4dfe8ba2beb363ee4', # transactionHash: # '0xf094e07b329ac8046e8f34db358415863c41daa36765c05516f4cf4f5b403ad1', # tokenBuy: '0x0000000000000000000000000000000000000000', # buyerFee: '0.695666280051384697', # gasFee: '28.986780264563232993', # sellerFee: '0.00005099879433372', # tokenSell: '0xb705268213d593b8fd88d3fdeff93aff5cbdcfae', # usdValue: '11.336926687304238214'}] return self.parse_trades(response, market, since, limit) def parse_trade(self, trade, market=None): # {type: 'buy', # date: '2019-07-25 11:24:41', # amount: '347.833140025692348611', # total: '0.050998794333719943', # uuid: 'cbdff960-aece-11e9-b566-c5d69c3be671', # tid: 4320867, # timestamp: 1564053881, # price: '0.000146618560640751', # taker: '0x0ab991497116f7f5532a4c2f4f7b1784488628e1', # maker: '0x1a961bc2e0d619d101f5f92a6be752132d7606e6', # orderHash: # '0xbec6485613a15be619c04c1425e8e821ebae42b88fa95ac4dfe8ba2beb363ee4', # transactionHash: # '0xf094e07b329ac8046e8f34db358415863c41daa36765c05516f4cf4f5b403ad1', # tokenBuy: '0x0000000000000000000000000000000000000000', # buyerFee: '0.695666280051384697', # gasFee: '28.986780264563232993', # sellerFee: '0.00005099879433372', # tokenSell: '0xb705268213d593b8fd88d3fdeff93aff5cbdcfae', # usdValue: '11.336926687304238214'} side = self.safe_string(trade, 'type') feeCurrency = None symbol = None maker = self.safe_string(trade, 'maker') takerOrMaker = None if maker is not None and self.walletAddress is not None: if maker.lower() == self.walletAddress.lower(): takerOrMaker = 'maker' else: takerOrMaker = 'taker' buy = self.safe_currency_code(self.safe_string(trade, 'tokenBuy')) sell = self.safe_currency_code(self.safe_string(trade, 'tokenSell')) # get ready to be mind-boggled feeSide = None if buy is not None and sell is not None: if side == 'buy': feeSide = 'buyerFee' if takerOrMaker == 'maker': symbol = buy + '/' + sell feeCurrency = buy else: symbol = sell + '/' + buy feeCurrency = sell else: feeSide = 'sellerFee' if takerOrMaker == 'maker': symbol = sell + '/' + buy feeCurrency = buy else: symbol = buy + '/' + sell feeCurrency = sell if symbol is None and market is not None: symbol = market['symbol'] timestamp = self.safe_timestamp(trade, 'timestamp') id = self.safe_string(trade, 'tid') amount = self.safe_float(trade, 'amount') price = self.safe_float(trade, 'price') cost = self.safe_float(trade, 'total') feeCost = self.safe_float(trade, feeSide) if feeCost < 0: gasFee = self.safe_float(trade, 'gasFee') feeCost = self.sum(gasFee, feeCost) fee = { 'currency': feeCurrency, 'cost': feeCost, } if feeCost is not None and amount is not None: feeCurrencyAmount = cost if (feeCurrency == 'ETH') else amount fee['rate'] = feeCost / feeCurrencyAmount orderId = self.safe_string(trade, 'orderHash') return { 'info': trade, 'id': id, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'fee': fee, 'price': price, 'amount': amount, 'cost': cost, 'takerOrMaker': takerOrMaker, 'side': side, 'order': orderId, 'symbol': symbol, 'type': 'limit', } def withdraw(self, code, amount, address, tag=None, params={}): self.check_required_dependencies() self.check_address(address) self.load_markets() currency = self.currency(code) tokenAddress = currency['id'] nonce = self.get_nonce() amount = self.to_wei(amount, currency['precision']) requestToHash = { 'contractAddress': self.get_contract_address(), 'token': tokenAddress, 'amount': amount, 'address': address, 'nonce': nonce, } hash = self.get_idex_withdraw_hash(requestToHash) signature = 
self.sign_message(hash, self.privateKey) request = { 'address': address, 'amount': amount, 'token': tokenAddress, 'nonce': nonce, } response = self.privatePostWithdraw(self.extend(request, signature)) # {amount: '0'} return { 'info': response, 'id': None, } def sign(self, path, api='public', method='GET', params={}, headers=None, body=None): body = self.json(params) # all methods are POST url = self.urls['api'] + '/' + path headers = { 'Content-Type': 'application/json', 'Accept': 'application/json', } if api == 'private': self.check_required_credentials() headers['API-Key'] = self.apiKey return {'url': url, 'method': method, 'body': body, 'headers': headers} def get_idex_create_order_hash(self, order): return self.solidity_sha3([ order['contractAddress'], # address order['tokenBuy'], # address order['amountBuy'], # uint256 order['tokenSell'], # address order['amountSell'], # uint256 order['expires'], # uint256 order['nonce'], # uint256 order['address'], # address ]) def get_idex_cancel_order_hash(self, order): return self.solidity_sha3([ order['orderHash'], # address order['nonce'], # uint256 ]) def get_idex_market_order_hash(self, order): return self.solidity_sha3([ order['orderHash'], # address order['amount'], # uint256 order['address'], # address order['nonce'], # uint256 ]) def
(self, request): return self.solidity_sha3([ request['contractAddress'], # address request['token'], # uint256 request['amount'], # uint256 request['address'], # address request['nonce'], # uint256 ]) def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody): if response is None: return if 'error' in response: if response['error'] in self.exceptions: raise self.exceptions[response['error']](self.id + ' ' + response['error']) raise ExchangeError(self.id + ' ' + body)
get_idex_withdraw_hash
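For orientation, a sketch of how a generated ccxt exchange class like this is typically driven, assuming a ccxt build that still ships the idex class. The credentials below are placeholders; public endpoints work without real values, while private endpoints (order, cancel, withdraw) need a genuine wallet address and private key:

import ccxt

exchange = ccxt.idex({
    'walletAddress': '0x...',  # placeholder credential
    'privateKey': '0x...',     # placeholder credential
})
markets = exchange.load_markets()   # built from publicPostReturnCurrenciesWithPairs
symbol = next(iter(markets))        # some 'BASE/ETH' style pair
print(exchange.fetch_ticker(symbol)['last'])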
util.rs
use datatypes::*;

pub fn read_u16(data: &[u8]) -> u16 {
    (data[0] as u16) | ((data[1] as u16) << 8)
}

pub fn read_i16(data: &[u8]) -> i16 {
    read_u16(data) as i16
}

pub fn read_imm16(data: &[u8]) -> ImmediateValue {
    ImmediateValue::Imm16(read_i16(data))
}

pub fn read_u32(data: &[u8]) -> u32 {
    (data[0] as u32) | ((data[1] as u32) << 8) | ((data[2] as u32) << 16) | ((data[3] as u32) << 24)
}

pub fn
(data: &[u8]) -> i32 {
    read_u32(data) as i32
}

pub fn read_imm32(data: &[u8]) -> ImmediateValue {
    ImmediateValue::Imm32(read_i32(data))
}

pub fn get_override_size(mode: BitMode, prefix: u16) -> (OperandSize, OperandSize) {
    let is_op_overridden = (prefix & Prefix::OverrideOpSize as u16) != 0;
    let is_addr_overridden = (prefix & Prefix::OverrideAddrSize as u16) != 0;

    let op_size = if mode == BitMode::X86_32 {
        if is_op_overridden { OperandSize::Bit16 } else { OperandSize::Bit32 }
    } else {
        unimplemented!("64 bit mode");
    };

    let addr_size = if mode == BitMode::X86_32 {
        if is_addr_overridden { OperandSize::Bit16 } else { OperandSize::Bit32 }
    } else {
        unimplemented!("64 bit mode");
    };

    (op_size, addr_size)
}
read_i32
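The Rust helpers above assemble little-endian integers byte by byte. A quick cross-check of the same layout using Python's struct module; the byte values are arbitrary test data:

import struct

data = bytes([0x34, 0x12, 0x00, 0x80])
assert struct.unpack_from('<H', data)[0] == 0x1234       # read_u16
assert struct.unpack_from('<h', data)[0] == 0x1234       # read_i16
assert struct.unpack_from('<I', data)[0] == 0x80001234   # read_u32
assert struct.unpack_from('<i', data)[0] == -0x7FFFEDCC  # read_i32 (same bits, signed)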
check_bandwidth.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Puru
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

'''
Nagios (NRPE) plugin for checking bandwidth speed limit.
'''

from argparse import ArgumentParser
from time import sleep
from sys import exit
from os.path import isdir
import re

__description__ = 'Nagios (NRPE) plugin for checking bandwidth speed limit.'
# See: /usr/lib64/nagios/plugins/utils.sh
STATE_OK = 0
STATE_WARNING = 1
STATE_CRITICAL = 2
STATE_UNKNOWN = 3
STATE_DEPENDENT = 4

PLUGIN_NAME = "BANDWIDTH"
# Note: the character class was originally '[K|M|G]', which also matched '|'.
THRESHOLD_REGEX = re.compile(r'(\d+)([KMG])$')

def exit_ok(msg):
    print "{0} OK - {1}".format(PLUGIN_NAME, msg)
    exit(STATE_OK)

def exit_warning(msg):
    print "{0} WARNING - {1}".format(PLUGIN_NAME, msg)
    exit(STATE_WARNING)

def exit_critical(msg):
    print "{0} CRITICAL - {1}".format(PLUGIN_NAME, msg)
    exit(STATE_CRITICAL)

def exit_unknown(msg):
    print "{0} UNKNOWN - {1}".format(PLUGIN_NAME, msg)
    exit(STATE_UNKNOWN)

def get_network_bytes(interface):
    dev_path = '/sys/class/net/' + interface
    if not isdir(dev_path):
        msg = "no such interface: " + interface
        exit_unknown(msg)
    rx_bytes_path = dev_path + "/statistics/rx_bytes"
    tx_bytes_path = dev_path + "/statistics/tx_bytes"
    rx_bytes = open(rx_bytes_path, 'r').read()
    tx_bytes = open(tx_bytes_path, 'r').read()
    return int(rx_bytes), int(tx_bytes)

def convert_unit(value, unit):
    if unit == 'K':
        v = int(value) * 10**3
    if unit == 'M':
        v = int(value) * 10**6
    if unit == 'G':
        v = int(value) * 10**9
    return v

def main():
    ap = ArgumentParser(description=__description__)
    ap.add_argument('-v', '--version', action='version', version=__version__)
    ap.add_argument('-i', '--interface', metavar='name', dest='interface',
                    help='interface to use (default: eth0)', default='eth0')
    ap.add_argument('-w', '--warning', metavar='threshold', dest='warning',
                    help="threshold in bits. Appending 'K' will count the number as Kilobits, 'M' as Megabits, 'G' as Gigabits. Examples: 200K, 3M and 1G")
    ap.add_argument('-c', '--critical', metavar='threshold', dest='critical',
                    help="threshold in bits. Appending 'K' will count the number as Kilobits, 'M' as Megabits and 'G' as Gigabits. Examples: 200K, 3M and 1G")
    args = ap.parse_args()

    interface = args.interface
    if not args.warning or not args.critical:
        ap.error('required options: --warning, --critical')

    # parse and convert to bits
    m1, m2 = THRESHOLD_REGEX.match(args.warning), THRESHOLD_REGEX.match(args.critical)
    if not m1 or not m2:
        ap.error("supported threshold units: K, M, G")
    warning_bits = convert_unit(*m1.groups())
    critical_bits = convert_unit(*m2.groups())
    warning_rx_bits = warning_tx_bits = warning_bits
    critical_rx_bits = critical_tx_bits = critical_bits

    # get network transfer bytes and convert to bits
    o_rx_bytes, o_tx_bytes = get_network_bytes(interface)
    sleep(1.0)
    n_rx_bytes, n_tx_bytes = get_network_bytes(interface)
    rx_bits, tx_bits = (n_rx_bytes - o_rx_bytes) * 8, (n_tx_bytes - o_tx_bytes) * 8

    # convert bandwidth bits back to unit
    down = up = None
    kbits, mbits, gbits = float(10**3), float(10**6), float(10**9)
    if rx_bits < kbits:
        down = "{0:.2f} bps".format(rx_bits)
    if rx_bits >= kbits:
        down = "{0:.2f} Kbps".format(rx_bits / kbits)
    if rx_bits >= mbits:
        down = "{0:.2f} Mbps".format(rx_bits / mbits)
    if rx_bits >= gbits:
        down = "{0:.2f} Gbps".format(rx_bits / gbits)
    if tx_bits < kbits:
        up = "{0:.2f} bps".format(tx_bits)
    if tx_bits >= kbits:
        up = "{0:.2f} Kbps".format(tx_bits / kbits)
    if tx_bits >= mbits:
        up = "{0:.2f} Mbps".format(tx_bits / mbits)
    if tx_bits >= gbits:
        up = "{0:.2f} Gbps".format(tx_bits / gbits)

    # check threshold and exit appropriately
    msg = "{0}: DOWN: {1}, UP: {2}".format(interface, down, up)
    if rx_bits > critical_rx_bits or tx_bits > critical_tx_bits:
        exit_critical(msg)
    elif rx_bits > warning_rx_bits or tx_bits > warning_tx_bits:
        exit_warning(msg)
    else:
        exit_ok(msg)

if __name__ == '__main__':
    main()
__author__ = "Puru Tuladhar <[email protected]>" __github__ = "https://github.com/tuladhar/check_bandwidth" __version__ = "%(prog)s v1.0 by {0} ({1})".format(__author__, __github__)
GpioData.py
#! /usr/bin/python
# -*- coding: utf-8 -*-

class GpioData:
    _count = 0
    _modNum = 8
    _specMap = {}
    _freqMap = {}
    _mapList = []
    _modeMap = {}
    _smtMap = {}
    _map_table = {}

    def __init__(self):
        self.__defMode = 0
        self.__eintMode = False
        self.__modeVec = ['0', '0', '0', '0', '0', '0', '0', '0']
        self.__inPullEn = True
        self.__inPullSelHigh = False
        self.__defDirInt = 0
        self.__defDir = 'IN'
        self.__inEn = True
        self.__outEn = False
        self.__outHigh = False
        self.__varNames = []
        self.__smtNum = -1
        self.__smtEn = False
        self.__iesEn = True
        self.__drvCur = ""

    def get_defMode(self):
        return self.__defMode

    def set_defMode(self, mode):
        self.__defMode = mode

    def get_eintMode(self):
        return self.__eintMode

    def set_eintMode(self, flag):
        self.__eintMode = flag

    def get_modeVec(self):
        return self.__modeVec

    def set_modeVec(self, vec):
        self.__modeVec = vec

    def get_inPullEn(self):
        return self.__inPullEn

    def set_inpullEn(self, flag):
        self.__inPullEn = flag

    def get_inPullSelHigh(self):
        return self.__inPullSelHigh

    def set_inpullSelHigh(self, flag):
        self.__inPullSelHigh = flag

    def get_defDir(self):
        return self.__defDir

    def set_defDir(self, dir):
        self.__defDir = dir

    def get_inEn(self):
        return self.__inEn

    def set_inEn(self, flag):
        self.__inEn = flag

    def get_outEn(self):
        return self.__outEn

    def set_outEn(self, flag):
        self.__outEn = flag

    def get_outHigh(self):
        return self.__outHigh

    def set_outHigh(self, outHigh):
        self.__outHigh = outHigh

    def get_varNames(self):
        return self.__varNames

    def set_varNames(self, names):
        self.__varNames = names

    def set_smtEn(self, flag):
        self.__smtEn = flag

    def
(self):
        return self.__smtEn

    def get_iesEn(self):
        return self.__iesEn

    def set_iesEn(self, flag):
        self.__iesEn = flag

    def set_drvCur(self, val):
        self.__drvCur = val

    def get_drvCur(self):
        return self.__drvCur

    def set_smtNum(self, num):
        self.__smtNum = num

    def get_smtNum(self):
        return self.__smtNum

    def get_defDirInt(self):
        # Note: originally misspelled "ge_defDirInt".
        if self.__defDir == 'IN':
            return 0
        else:
            return 1

    @staticmethod
    def set_eint_map_table(map_table):
        GpioData._map_table = map_table

    @staticmethod
    def get_modeName(key, idx):
        if key in GpioData._modeMap.keys():
            value = GpioData._modeMap[key]
            return value[idx]
get_smtEn
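A small usage sketch of the accessors above; nothing here touches hardware, it only exercises the plain data holder:

pin = GpioData()
pin.set_defDir('OUT')
pin.set_outEn(True)
pin.set_outHigh(True)
print(pin.get_defDir(), pin.get_defDirInt())  # -> OUT 1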
test_series_json.py
""" Copyright 2022 Objectiv B.V. """ import pytest from bach.series.series_json import JsonBigQueryAccessorImpl from tests.unit.bach.util import get_fake_df @pytest.mark.skip_postgres def
(dialect):
    # Here we test the _get_slice_partial_expr function of the BigQuery specific
    # JsonBigQueryAccessor. So skipping all other dialects.
    df = get_fake_df(
        dialect=dialect,
        index_names=['i'],
        data_names=['a'],
        dtype='json'
    )
    jbqa = JsonBigQueryAccessorImpl(df.a)
    assert jbqa._get_slice_partial_expr(None, True).to_sql(dialect) == '0'
    assert jbqa._get_slice_partial_expr(None, False).to_sql(dialect) == '9223372036854775807'
    assert jbqa._get_slice_partial_expr(5, False).to_sql(dialect) == '5'
    assert jbqa._get_slice_partial_expr(5, True).to_sql(dialect) == '5'
    assert jbqa._get_slice_partial_expr(-5, False).to_sql(dialect) == \
        '(ARRAY_LENGTH(JSON_QUERY_ARRAY(`a`)) -5)'
    assert jbqa._get_slice_partial_expr(-5, True).to_sql(dialect) == \
        '(ARRAY_LENGTH(JSON_QUERY_ARRAY(`a`)) -5)'
test_bq_get_slice_partial_expr
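For reference, the large literal asserted above is the signed 64-bit maximum, used here as a "no upper bound" sentinel:

assert 9223372036854775807 == 2**63 - 1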
variables_4.js
var searchData= [ ['fenetre_0',['fenetre',['../namespaceinterface.html#a996178d6009a1daae7e61486aa5ba351',1,'interface']]], ['frame1_1',['Frame1',['../namespaceinterface.html#a084ba399a58d3cb5ab805d79e3bb8cf7',1,'interface']]], ['frame2_2',['Frame2',['../namespaceinterface.html#a2b5f9a79cbd2a245615bff843b2cd9f8',1,'interface']]], ['frame2_5f1_3',['Frame2_1',['../namespaceinterface.html#a75b1e1c48821da8f1dd510b79c810413',1,'interface']]], ['frame2_5f1_5fa_4',['Frame2_1_a',['../namespaceinterface.html#a84f64476a2c373aa1aad05c51e86de73',1,'interface']]], ['frame2_5f1_5fb_5',['Frame2_1_b',['../namespaceinterface.html#a39b70ef28b4b811c3258bb21f496b3ff',1,'interface']]], ['frame2_5f2_6',['Frame2_2',['../namespaceinterface.html#a7f0b35ae9fbe0ab1e955b191d7ffa7a4',1,'interface']]],
['frame2_5f3_5fa_10',['Frame2_3_a',['../namespaceinterface.html#ae17251bebdbe50ec016eb67778d41a0b',1,'interface']]], ['frame2_5f3_5fb_11',['Frame2_3_b',['../namespaceinterface.html#a18faf8103098966fc163877b09692a2e',1,'interface']]], ['frame3_12',['Frame3',['../namespaceinterface.html#a637b9eb75a679431e17eabbab827d546',1,'interface']]] ];
['frame2_5f2_5fa_7',['Frame2_2_a',['../namespaceinterface.html#a928228caf95ebda54df33cdb4014dc1a',1,'interface']]], ['frame2_5f2_5fb_8',['Frame2_2_b',['../namespaceinterface.html#aef41dbd3c31aa00b582f656d774482ec',1,'interface']]], ['frame2_5f3_9',['Frame2_3',['../namespaceinterface.html#a91782fa020c7cf8691f68f5d3bc30f30',1,'interface']]],
errors.js
'use strict';

var spec = {
  name: 'P2P',
  message: 'Internal Error on zclassic-bitcore-p2p Module {0}'
};

module.exports = require('zclassic-bitcore-lib').errors.extend(spec);
_gfile.py
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """File processing utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import errno import functools import glob as _glob import os import shutil import threading import six class _GFileBase(six.Iterator): """Base I/O wrapper class. Similar semantics to Python's file object.""" # pylint: disable=protected-access def _synchronized(fn): """Synchronizes file I/O for methods in GFileBase.""" @functools.wraps(fn) def sync(self, *args, **kwargs): # Sometimes a GFileBase method is called before the instance # has been properly initialized. Check that _locker is available. if hasattr(self, '_locker'): self._locker.lock() try: return fn(self, *args, **kwargs) finally: if hasattr(self, '_locker'): self._locker.unlock() return sync # pylint: enable=protected-access def __init__(self, name, mode, locker): """Create the GFileBase object with the given filename, mode, and locker. Args: name: string, the filename. mode: string, the mode to open the file with (e.g. "r", "w", "a+"). locker: the thread locking object (e.g. _PythonLocker) for controlling thread access to the I/O methods of this class. """ self._name = name self._mode = mode self._locker = locker self._fp = open(name, mode) def __enter__(self): """Make GFileBase usable with "with" statement.""" return self def __exit__(self, unused_type, unused_value, unused_traceback): """Make GFileBase usable with "with" statement.""" self.close() @_synchronized def __del__(self): # __del__ is sometimes called before initialization, in which # case the object is not fully constructed. Check for this here # before trying to close the file handle. if hasattr(self, '_fp'): self._fp.close() @_synchronized def flush(self): """Flush the underlying file handle.""" return self._fp.flush() @property @_synchronized def closed(self): """Returns "True" if the file handle is closed. Otherwise False.""" return self._fp.closed @_synchronized def write(self, data): """Write data to the underlying file handle. Args: data: The string to write to the file handle. """ self._fp.write(data) @_synchronized def writelines(self, seq): """Write a sequence of strings to the underlying file handle.""" self._fp.writelines(seq) @_synchronized def tell(self): """Return the location from the underlying file handle. Returns: An integer location (which can be used in e.g., seek). """ return self._fp.tell() @_synchronized def seek(self, offset, whence=0):
@_synchronized def truncate(self, new_size=None): """Truncate the underlying file handle to new_size. Args: new_size: Size after truncation. If None, the file handle is truncated to 0 bytes. """ self._fp.truncate(new_size) @_synchronized def readline(self, max_length=-1): """Read a single line (up to max_length) from the underlying file handle. Args: max_length: The maximum number of chsaracters to read. Returns: A string, including any newline at the end, or empty string if at EOF. """ return self._fp.readline(max_length) @_synchronized def readlines(self, sizehint=None): """Read lines from the underlying file handle. Args: sizehint: See the python file.readlines() documentation. Returns: A list of strings from the underlying file handle. """ if sizehint is not None: return self._fp.readlines(sizehint) else: return self._fp.readlines() def __iter__(self): """Enable line iteration on the underlying handle (not synchronized).""" return self # Not synchronized def __next__(self): """Enable line iteration on the underlying handle (not synchronized). Returns: An line iterator from the underlying handle. Example: # read a file's lines by consuming the iterator with a list with open("filename", "r") as fp: lines = list(fp) """ return next(self._fp) @_synchronized def Size(self): # pylint: disable=invalid-name """Get byte size of the file from the underlying file handle.""" cur = self.tell() try: self.seek(0, 2) size = self.tell() finally: self.seek(cur) return size @_synchronized def read(self, n=-1): """Read n bytes from the underlying file handle. Args: n: Number of bytes to read (if negative, read to end of file handle.) Returns: A string of the bytes read, up to the end of file. """ return self._fp.read(n) @_synchronized def close(self): """Close the underlying file handle.""" self._fp.close() # Declare wrappers as staticmethods at the end so that we can # use them as decorators. _synchronized = staticmethod(_synchronized) class GFile(_GFileBase): """File I/O wrappers with thread locking.""" def __init__(self, name, mode='r'): super(GFile, self).__init__(name, mode, _Pythonlocker()) class FastGFile(_GFileBase): """File I/O wrappers without thread locking.""" def __init__(self, name, mode='r'): super(FastGFile, self).__init__(name, mode, _Nulllocker()) # locker classes. Note that locks must be reentrant, so that multiple # lock() calls by the owning thread will not block. class _Pythonlocker(object): """A locking strategy that uses standard locks from the thread module.""" def __init__(self): self._lock = threading.RLock() def lock(self): self._lock.acquire() def unlock(self): self._lock.release() class _Nulllocker(object): """A locking strategy where lock() and unlock() methods are no-ops.""" def lock(self): pass def unlock(self): pass def Exists(path): # pylint: disable=invalid-name """Returns True iff "path" exists (as a dir, file, non-broken symlink).""" return os.path.exists(path) def IsDirectory(path): # pylint: disable=invalid-name """Return True iff "path" exists and is a directory.""" return os.path.isdir(path) def Glob(glob): # pylint: disable=invalid-name """Return a list of filenames matching the glob "glob".""" return _glob.glob(glob) def MkDir(path, mode=0o755): # pylint: disable=invalid-name """Create the directory "path" with the given mode. 
Args: path: The directory path mode: The file mode for the directory Returns: None Raises: OSError: if the path already exists """ os.mkdir(path, mode) def MakeDirs(path, mode=0o755): # pylint: disable=invalid-name """Recursively create the directory "path" with the given mode. Args: path: The directory path. mode: The file mode for the created directories Raises: OSError: if the path already exists """ # NOTE(mrry): MakeDirs("") should be a no-op to match other # implementations of tf.gfile. if path: os.makedirs(path, mode) def RmDir(directory): # pylint: disable=invalid-name """Removes the directory "directory" iff the directory is empty. Args: directory: The directory to remove. Raises: OSError: If the directory does not exist or is not empty. """ os.rmdir(directory) def Remove(path): # pylint: disable=invalid-name """Delete the (non-directory) file "path". Args: path: The file to remove. Raises: OSError: If "path" does not exist, is a directory, or cannot be deleted. """ os.remove(path) def Rename(oldpath, newpath, overwrite=False): """Rename or move a file, or a local directory. Args: oldpath: string; a pathname of a file. newpath: string; a pathname to which the file will be moved. overwrite: boolean; if false, it is an error for newpath to be occupied by an existing file. Raises: OSError: If "newpath" is occupied by an existing file and overwrite=False. """ if not overwrite and Exists(newpath) and not IsDirectory(newpath): raise OSError(errno.EEXIST, os.strerror(errno.EEXIST), newpath) os.rename(oldpath, newpath) def DeleteRecursively(path): # pylint: disable=invalid-name """Delete the file or directory "path" recursively. Args: path: The path to remove (may be a non-empty directory). Raises: OSError: If the path does not exist or cannot be deleted. """ if IsDirectory(path): shutil.rmtree(path) else: Remove(path) def ListDirectory(directory, return_dotfiles=False): # pylint: disable=invalid-name """Returns a list of files in dir. As with the standard os.listdir(), the filenames in the returned list will be the basenames of the files in dir (not absolute paths). To get a list of absolute paths of files in a directory, a client could do: file_list = gfile.ListDir(my_dir) file_list = [os.path.join(my_dir, f) for f in file_list] (assuming that my_dir itself specified an absolute path to a directory). Args: directory: the directory to list return_dotfiles: if True, dotfiles will be returned as well. Even if this arg is True, '.' and '..' will not be returned. Returns: ['list', 'of', 'files']. The entries '.' and '..' are never returned. Other entries starting with a dot will only be returned if return_dotfiles is True. Raises: OSError: if there is an error retrieving the directory listing. """ files = os.listdir(directory) if not return_dotfiles: files = [f for f in files if not f.startswith('.')] return files def Walk(top, topdown=1, onerror=None): """Recursive directory tree generator. Args: top: string, a pathname. topdown: bool, should traversal be pre-order (True) or post-order (False) onerror: function, optional callback for errors. By default, errors that occur when listing a directory are ignored. (This is the same semantics as Python's os.walk() generator.) If the optional argument "onerror" is specified, it should be a function. It will be called with one argument, an os.error instance. It can return to continue with the walk, or reraise the exception to abort the walk. 
Yields: # Each yield is a 3-tuple: the pathname of a directory, followed # by lists of all its subdirectories and leaf files. (dirname, [subdirname, subdirname, ...], [filename, filename, ...]) """ return os.walk(top, topdown=topdown, onerror=onerror) def Stat(path): # pylint: disable=invalid-name """Gets the status of a file. Args: path: The file to call Stat() on. Does the equivalent of Stat() on the specified "path" and return file properties. Returns: An object whose attributes give information on the file. Raises: OSError: If "path" does not exist. """ statinfo = os.stat(path) filestat = collections.namedtuple('FileStat', ['mtime']) filestat.mtime = statinfo.st_mtime return filestat def Copy(oldpath, newpath, overwrite=False): """Copy a file. Args: oldpath: string; a pathname of a file. newpath: string; a pathname to which the file will be copied. overwrite: boolean; if false, it is an error for newpath to be occupied by an existing file. Raises: OSError: If "newpath" is occupied by an existing file and overwrite=False, or any error thrown by shutil.copy. """ if not overwrite and Exists(newpath): raise OSError(errno.EEXIST, os.strerror(errno.EEXIST), newpath) shutil.copy(oldpath, newpath) def Open(name, mode='r'): """Exact API match to the standard open. Args: name: a file name, either local or a gfile compatible. mode: for example "w" to open the file for writing. Returns: A threadsafe gfile.GFile object. """ return GFile(name, mode=mode)
"""Seek to offset (conditioned on whence) in the underlying file handle. Args: offset: int, the offset within the file to seek to. whence: 0, 1, or 2. See python's seek() documentation for details. """ self._fp.seek(offset, whence)
browser.go
package functions

import (
	"fmt"
	"os/exec"
	"runtime"
)

func OpenBrowser(url string) {
	var err error

	switch runtime.GOOS {
	case "linux":
		err = exec.Command("xdg-open", url).Start()
	case "windows":
		err = exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start()
	case "darwin":
		err = exec.Command("open", url).Start()
	default:
		err = fmt.Errorf("unsupported platform")
	}
	CheckErrors(err, "Code 2", "Error in the support of the platform", "Use a supported os ")
}
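For comparison only: Python's standard library hides the same per-platform dispatch (xdg-open / rundll32 / open) behind a single call. This is an analogue, not part of the Go package:

import webbrowser

webbrowser.open('https://example.com')  # picks the platform's default opener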
mod.go
package management

import (
	"net/http"
	"seniors50plus/internal/models"
	"seniors50plus/internal/utils"
	"strconv"

	"github.com/labstack/echo"
)

func ModUserHandler(c echo.Context) error {
	var token models.Token
	if err := utils.GetTokenFromContext(c, &token); err != nil {
		return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
	}
	userId, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, "Invalid ID")
	}
	dbc := models.NewDatabaseConnection()
	if err := dbc.Open(); err != nil {
		return echo.NewHTTPError(http.StatusInternalServerError, "Error connecting to database")
	}
	defer dbc.Close()
	if token.Admin != models.AdminLevelSusan {
		return echo.NewHTTPError(http.StatusUnauthorized, "Not authorized")
	}
	user := models.User{
		ID: uint(userId),
	}
	if err := dbc.GetUser(&user); err != nil {
		return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
	}
	user.AdminLevel = models.AdminLevelModerator
	if err := dbc.UpdateUser(&user); err != nil {
		return echo.NewHTTPError(http.StatusInternalServerError, "Error updating user")
	}
	return c.JSON(http.StatusOK, user)
}

func UnmodUserHandler(c echo.Context) error {
	var token models.Token
	if err := utils.GetTokenFromContext(c, &token); err != nil {
		return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
	}
	userId, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, "Invalid ID")
	}
	dbc := models.NewDatabaseConnection()
	if err := dbc.Open(); err != nil {
		return echo.NewHTTPError(http.StatusInternalServerError, "Error connecting to database")
	}
	defer dbc.Close()
	if token.Admin != models.AdminLevelSusan {
		return echo.NewHTTPError(http.StatusUnauthorized, "Not authorized")
	}
	user := models.User{
		ID: uint(userId),
	}
	if err := dbc.GetUser(&user); err != nil {
		return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
	}
	user.AdminLevel = models.AdminLevelUser
	if err := dbc.UpdateUser(&user); err != nil {
		return echo.NewHTTPError(http.StatusInternalServerError, "Error updating user")
	}
	return c.JSON(http.StatusOK, user)
}

func GetModsHandler(c echo.Context) error {
	var token models.Token
	if err := utils.GetTokenFromContext(c, &token); err != nil {
		return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
	}
	dbc := models.NewDatabaseConnection()
	if err := dbc.Open(); err != nil {
		return echo.NewHTTPError(http.StatusInternalServerError, "Error connecting to database")
	}
	defer dbc.Close()
	if token.Admin != models.AdminLevelSusan {
		return echo.NewHTTPError(http.StatusUnauthorized, "Not authorized")
	}
	var mods []models.User
	if err := dbc.GetMods(&mods); err != nil {
		return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
	}
	return c.JSON(http.StatusOK, mods)
}
dirutils.py
import filecmp
import os
from contextlib import redirect_stdout
from io import StringIO
from typing import Optional

from tests.utils.filters import ldcontext_metadata_filter


def
(dirbase: str) -> None:
    """ Make dirbase if necessary and then clear generated files """
    import shutil
    safety_file = os.path.join(dirbase, "generated")
    if os.path.exists(dirbase):
        if not os.path.exists(safety_file):
            raise FileNotFoundError("'generated' guard file not found in {}".format(safety_file))
        shutil.rmtree(dirbase)
    os.makedirs(dirbase)
    with open(os.path.join(dirbase, "generated"), "w") as f:
        f.write("Generated for safety. Directory will not be cleared if this file is not present")


def file_text(txt_or_fname: str) -> str:
    """
    Determine whether txt_or_fname is a file name or a string and, if a file name, read it
    :param txt_or_fname:
    :return:
    """
    if len(txt_or_fname) > 4 and '\n' not in txt_or_fname:
        with open(txt_or_fname) as ef:
            return ef.read()
    return txt_or_fname


class dircmp(filecmp.dircmp):
    """
    Compare the content of dir1 and dir2. In contrast with filecmp.dircmp, this
    subclass compares the content of files with the same path.
    """
    def phase3(self):
        """
        Find out differences between common files.
        Ensure we are using content comparison with shallow=False.
        """
        fcomp = filecmp.cmpfiles(self.left, self.right, self.common_files, shallow=False)
        self.same_files, self.diff_files, self.funny_files = fcomp

    filecmp.dircmp.methodmap['same_files'] = phase3
    filecmp.dircmp.methodmap['diff_files'] = phase3
    filecmp.dircmp.methodmap['funny_files'] = phase3


def _do_cmp(f1, f2):
    bufsize = filecmp.BUFSIZE
    with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
        while True:
            b1 = fp1.read(bufsize)
            b2 = fp2.read(bufsize)
            if f1.endswith('.context.jsonld'):
                b1 = ldcontext_metadata_filter(b1.decode())
                b2 = ldcontext_metadata_filter(b2.decode())
            if b1 != b2:
                return False
            if not b1:
                return True


filecmp._do_cmp = _do_cmp


def are_dir_trees_equal(dir1: str, dir2: str) -> Optional[str]:
    """
    Compare two directories recursively. Files in each directory are assumed to be
    equal if their names and contents are equal.

    @param dir1: First directory path
    @param dir2: Second directory path
    @return: None if directories match, else summary of differences
    """
    def has_local_diffs(dc: dircmp) -> bool:
        return bool(dc.diff_files or dc.funny_files or dc.left_only or dc.right_only)

    def has_diffs(dc: dircmp) -> bool:
        return has_local_diffs(dc) or any(has_diffs(sd) for sd in dc.subdirs.values())

    dirs_cmp = dircmp(dir1, dir2, ignore=['generated'])
    if has_diffs(dirs_cmp):
        output = StringIO()
        with redirect_stdout(output):
            dirs_cmp.report_full_closure()
        return output.getvalue()
    return None
make_and_clear_directory
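
A short usage sketch of the two helpers above (the import path and directory names are illustrative assumptions, not part of the original file):

# Hypothetical usage in a test; paths and the import location are assumptions.
from tests.utils.dirutils import make_and_clear_directory, are_dir_trees_equal

make_and_clear_directory("output/docs")  # refuses to wipe a directory lacking the guard file
# ... code under test writes into output/docs ...
diffs = are_dir_trees_equal("output/docs", "expected/docs")
assert diffs is None, diffs  # on mismatch, diffs holds the full dircmp report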
handler.go
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package vulnerable

import (
	"net/http"
	"net/http/httptest"

	"github.com/goharbor/harbor/src/common/models"
	"github.com/goharbor/harbor/src/common/utils/log"
	"github.com/goharbor/harbor/src/core/middlewares/util"
	sc "github.com/goharbor/harbor/src/pkg/scan/api/scan"
	"github.com/goharbor/harbor/src/pkg/scan/report"
	v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1"
	"github.com/goharbor/harbor/src/pkg/scan/vuln"
	"github.com/pkg/errors"
)

type vulnerableHandler struct {
	next http.Handler
}

// New ...
func New(next http.Handler) http.Handler 
// ServeHTTP ...
func (vh vulnerableHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
	doVulCheck, img, projectVulnerableSeverity, wl := validate(req)
	if !doVulCheck {
		vh.next.ServeHTTP(rw, req)
		return
	}

	rec := httptest.NewRecorder()
	vh.next.ServeHTTP(rec, req)

	// Only enforce the vulnerability policy when the response status is 200
	if rec.Result().StatusCode == http.StatusOK {
		// Invalid project ID
		if wl.ProjectID == 0 {
			err := errors.Errorf("project verification error: project %s", img.ProjectName)
			vh.sendError(err, rw)
			return
		}

		// Get the vulnerability summary
		artifact := &v1.Artifact{
			NamespaceID: wl.ProjectID,
			Repository:  img.Repository,
			Tag:         img.Reference,
			Digest:      img.Digest,
			MimeType:    v1.MimeTypeDockerArtifact,
		}

		cve := report.CVESet(wl.CVESet())
		summaries, err := sc.DefaultController.GetSummary(
			artifact,
			[]string{v1.MimeTypeNativeReport},
			report.WithCVEWhitelist(&cve),
		)
		if err != nil {
			err = errors.Wrap(err, "middleware: vulnerable handler")
			vh.sendError(err, rw)
			return
		}

		rawSummary, ok := summaries[v1.MimeTypeNativeReport]
		// No report yet?
		if !ok {
			err = errors.Errorf("no scan report exists for the artifact: %s:%s@%s", img.Repository, img.Reference, img.Digest)
			vh.sendError(err, rw)
			return
		}

		summary := rawSummary.(*vuln.NativeReportSummary)

		// Compare the report severity against the project's threshold
		if summary.Severity.Code() >= projectVulnerableSeverity.Code() {
			err = errors.Errorf("the severity of the pulled image %q is higher than or equal to the project setting %q; rejecting the response", summary.Severity, projectVulnerableSeverity)
			vh.sendError(err, rw)
			return
		}

		// Log the bypassed CVE list
		if len(summary.CVEBypassed) > 0 {
			for _, cve := range summary.CVEBypassed {
				log.Infof("Vulnerable policy check: bypassed CVE %s", cve)
			}
		}
	}

	util.CopyResp(rec, rw)
}

func validate(req *http.Request) (bool, util.ImageInfo, vuln.Severity, models.CVEWhitelist) {
	var vs vuln.Severity
	var wl models.CVEWhitelist
	var img util.ImageInfo

	imgRaw := req.Context().Value(util.ImageInfoCtxKey)
	if imgRaw == nil {
		return false, img, vs, wl
	}

	// Expected artifact specified?
	img, ok := imgRaw.(util.ImageInfo)
	if !ok || len(img.Digest) == 0 {
		return false, img, vs, wl
	}

	// Scanner pulls bypass the policy check
	if scannerPull, ok := util.ScannerPullFromContext(req.Context()); ok && scannerPull {
		return false, img, vs, wl
	}

	// Is the vulnerability policy set for this project?
	projectVulnerableEnabled, projectVulnerableSeverity, wl := util.GetPolicyChecker().VulnerablePolicy(img.ProjectName)
	if !projectVulnerableEnabled {
		return false, img, vs, wl
	}

	return true, img, projectVulnerableSeverity, wl
}

func (vh vulnerableHandler) sendError(err error, rw http.ResponseWriter) {
	log.Error(err)
	http.Error(rw, util.MarshalError("PROJECT_POLICY_VIOLATION", err.Error()), http.StatusPreconditionFailed)
}
{
	return &vulnerableHandler{
		next: next,
	}
}
secret.py
# 1. Install the package
# 2. Import it
from django.conf import settings
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer


# 3. Instantiate
# 4. Encrypt / decrypt
class SecretOauth(object):
    # Encrypt
    def dumps(self, data):
        s = Serializer(secret_key=settings.SECRET_KEY, expires_in=3600)
        result = s.dumps(data)
        return result.decode()
    def loads(self, data):
        s = Serializer(secret_key=settings.SECRET_KEY, expires_in=3600)
        result = s.loads(data)
        return result
# Decrypt
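
A minimal round-trip sketch (assumes Django settings are already configured; the payload is illustrative):

# Hypothetical usage; SECRET_KEY comes from the Django settings module.
oauth = SecretOauth()
token = oauth.dumps({"openid": "abc123"})  # signed, time-limited string
payload = oauth.loads(token)               # raises BadSignature/SignatureExpired on bad input
assert payload["openid"] == "abc123"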
gitlab.py
import gitlab
import dateutil.parser
import reader.cache
import hashlib
import logging

from pandas import DataFrame, NaT
from datetime import datetime


class Gitlab:
    def __init__(self, gitlab_config: dict, workflow: dict):
        self.gitlab_config = gitlab_config
        self.workflow = workflow
        # Build the cache once the config is known; the name is derived below.
        self.cache = reader.cache.Cache(self.cache_name())

    def cache_name(self):
        token = self.gitlab_config["token"]
        workflow = str(self.workflow)
        url = self.gitlab_config["url"]
        project_id = (
            self.gitlab_config.get("project_id")
            if self.gitlab_config.get("project_id")
            else self.gitlab_config.get("group_id")
        )
        name_hashed = hashlib.md5(
            (token + url + workflow + str(project_id)).encode("utf-8")
        )
        return name_hashed.hexdigest()

    def get_gitlab_instance(self):
        gl = gitlab.Gitlab(
            self.gitlab_config["url"], private_token=self.gitlab_config["token"]
        )
        gl.auth()
        return gl

    def 
(self, issue):
        issue_data = {
            "Key": issue.id,
            "Type": "issue",
            "Creator": issue.author["name"],
            "Created": dateutil.parser.parse(issue.created_at).replace(tzinfo=None),
            # "Done" uses closed_at; it stays NaT while the issue is still open.
            "Done": (
                dateutil.parser.parse(issue.closed_at).replace(tzinfo=None)
                if issue.closed_at
                else NaT
            ),
        }
        return issue_data

    def get_issues(self):
        gl = self.get_gitlab_instance()
        if self.gitlab_config.get("project_id"):
            project = gl.projects.get(self.gitlab_config["project_id"])
            issues = project.issues.list()
        elif self.gitlab_config.get("group_id"):
            group = gl.groups.get(self.gitlab_config["group_id"])
            issues = group.issues.list()
        else:
            raise Exception("No valid project_id or group_id found!")
        return issues

    def get_data(self) -> DataFrame:
        if self.gitlab_config["cache"] and self.cache.is_valid():
            logging.debug("Getting gitlab data from cache")
            df_issue_data = self.cache.read()
            return df_issue_data

        issues = self.get_issues()
        issues_data = [self.get_issue_data(issue) for issue in issues]
        df_issues_data = DataFrame(issues_data)

        if self.gitlab_config["cache"]:
            logging.debug("Storing gitlab issue data in cache")
            self.cache.write(df_issues_data)

        return df_issues_data

    def refresh_data(self, date: datetime) -> DataFrame:
        if self.gitlab_config["cache"] and self.cache.is_valid():
            self.cache.clean()
        return self.get_data()
get_issue_data
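
A brief driver sketch for the reader above (URL, token, and IDs are placeholders, not real values):

# Hypothetical configuration; keys mirror the ones the class reads.
config = {
    "url": "https://gitlab.example.com",
    "token": "<private-token>",
    "project_id": 42,
    "cache": False,
}
workflow = {}  # only contributes to the cache key for issues
df = Gitlab(config, workflow).get_data()
print(df[["Created", "Done"]].head())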
MessageFilteringOutput_test.go
package simtest

import (
	"strings"
	"testing"

. "github.com/FactomProject/factomd/engine" . "github.com/FactomProject/factomd/testHelper" ) func TestFilterAPIOutput(t *testing.T) { state0 := SetupSim("LLLLLAAF", map[string]string{}, 25, 1, 1, t) RunCmd("1") RunCmd("w") RunCmd("s") apiRegex := "EOM.*5.*minute +1" // It has two spaces. SetOutputFilter(apiRegex) WaitBlocks(state0, 5) // The message-filter call we did above should have caused an election and SO, Node01 should not be a leader anymore. if GetFnodes()[1].State.Leader { t.Fatalf("Node01 should not be leader!") } CheckAuthoritySet(t) // Check Node01 Network Output logs to make sure there are no Dropped messaged besides the ones for our Regex out := SystemCall(`grep "Drop, matched filter Regex" fnode01_networkoutputs.txt | grep -Ev "` + apiRegex + `" | wc -l`) if strings.TrimSuffix(strings.Trim(string(out), " "), "\n") != string("0") { t.Fatalf("Filter missed let a message pass 1.") } // Checks Node01 Network Outputs to make sure there are no Sent broadcast including our Regex out2 := SystemCall(`grep "Send broadcast" fnode01_networkoutputs.txt | grep "` + apiRegex + `" | grep -v "EmbeddedMsg" | wc -l`) if strings.TrimSuffix(strings.Trim(string(out2), " "), "\n") != string("0") { t.Fatalf("Filter missed let a message pass 2.") } ShutDownEverything(t) }
describepolicies.py
# Copyright (c) 2013-2016 Hewlett Packard Enterprise Development LP
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
#   Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
#
#   Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from requestbuilder import Arg
from requestbuilder.mixins import TabifyingMixin
from requestbuilder.response import PaginatedResponse

from euca2ools.commands.autoscaling import AutoScalingRequest
class DescribePolicies(AutoScalingRequest, TabifyingMixin):
    DESCRIPTION = 'Describe auto-scaling policies'
    ARGS = [Arg('PolicyNames.member', metavar='POLICY', nargs='*',
                help='limit results to specific auto-scaling policies'),
            Arg('-g', '--auto-scaling-group', dest='AutoScalingGroupName',
                metavar='ASGROUP'),
            Arg('--show-long', action='store_true', route_to=None,
                help="show all of the policies' info")]
    LIST_TAGS = ['ScalingPolicies', 'Alarms']

    def main(self):
        return PaginatedResponse(self, (None,), ('ScalingPolicies',))

    def prepare_for_page(self, page):
        # Pages are defined by NextToken
        self.params['NextToken'] = page

    # pylint: disable=no-self-use
    def get_next_page(self, response):
        return response.get('NextToken') or None
    # pylint: enable=no-self-use

    def print_result(self, result):
        for policy in result.get('ScalingPolicies', []):
            bits = ['SCALING-POLICY',
                    policy.get('AutoScalingGroupName'),
                    policy.get('PolicyName'),
                    policy.get('ScalingAdjustment')]
            if self.args['show_long']:
                bits.append(policy.get('MinAdjustmentStep'))
            bits.append(policy.get('AdjustmentType'))
            if self.args['show_long']:
                bits.append(policy.get('Cooldown'))
            bits.append(policy.get('PolicyARN'))
            # euca2ools targets Python 2, hence the print statement.
            print self.tabify(bits)
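
The two pagination hooks above are consumed by a simple driver loop; here is a sketch of that loop under assumed names (fetch_all_pages and request.send are illustrative, not requestbuilder's real internals):

# Simplified model of how a paginated driver consumes the two hooks above.
def fetch_all_pages(request, first_page=None):
    items, page = [], first_page
    while True:
        request.prepare_for_page(page)           # sets params['NextToken']
        response = request.send()                # hypothetical single API call
        items.extend(response.get('ScalingPolicies', []))
        page = request.get_next_page(response)   # None when there are no more pages
        if page is None:
            return items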
index.js
module.exports = require('./acl.js');

module.exports.__defineGetter__('memoryBackend', () => require('./memory-backend.js'));
module.exports.__defineGetter__('mongodbBackend', () => require('./mongodb-backend.js'));