file_name (string, lengths 3–137) | prefix (string, lengths 0–918k) | suffix (string, lengths 0–962k) | middle (string, lengths 0–812k)
---|---|---|---|
legacy.rs | pub(crate) fn main(
_attr: proc_macro::TokenStream,
item: proc_macro::TokenStream,
) -> proc_macro::TokenStream | {
let input = syn::parse_macro_input!(item as syn::ItemFn);
let attrs = &input.attrs;
let vis = &input.vis;
let sig = &input.sig;
let block = &input.block;
let name = &sig.ident;
quote::quote!(
#(#attrs) *
#vis #sig {
// Mark this function as a global constructor (like C++).
#[allow(improper_ctypes)]
#[cfg_attr(target_os = "linux", link_section = ".ctors")]
#[cfg_attr(target_os = "android", link_section = ".ctors")]
#[cfg_attr(target_os = "macos", link_section = "__DATA,__mod_init_func")]
#[cfg_attr(target_os = "ios", link_section = "__DATA,__mod_init_func")]
#[cfg_attr(target_os = "windows", link_section = ".CRT$XCU")]
#[used]
static __LOAD_NEON_MODULE: extern "C" fn() = {
extern "C" fn __load_neon_module() {
// Put everything else in the ctor fn so the user fn can't see it.
#[repr(C)]
struct __NodeModule {
version: i32,
flags: u32,
dso_handle: *mut u8,
filename: *const u8,
register_func: Option<extern "C" fn(
::neon::handle::Handle<::neon::types::JsObject>, *mut u8, *mut u8)>,
context_register_func: Option<extern "C" fn(
::neon::handle::Handle<::neon::types::JsObject>, *mut u8, *mut u8, *mut u8)>,
modname: *const u8,
priv_data: *mut u8,
link: *mut __NodeModule
}
// Mark as used during tests to suppress warnings
#[cfg_attr(test, used)]
static mut __NODE_MODULE: __NodeModule = __NodeModule {
version: 0,
flags: 0,
dso_handle: 0 as *mut _,
filename: b"neon_source.rs\0" as *const u8,
register_func: Some(__register_neon_module),
context_register_func: None,
modname: b"neon_module\0" as *const u8,
priv_data: 0 as *mut _,
link: 0 as *mut _
};
extern "C" fn __register_neon_module(
m: ::neon::handle::Handle<::neon::types::JsObject>, _: *mut u8, _: *mut u8) {
::neon::macro_internal::initialize_module(m, #name);
}
extern "C" {
fn node_module_register(module: *mut __NodeModule);
}
// During tests, node is not available. Skip module registration.
#[cfg(not(test))]
unsafe {
// Set the ABI version based on the NODE_MODULE_VERSION constant provided by the current node headers.
__NODE_MODULE.version = ::neon::macro_internal::runtime::module::get_version();
node_module_register(&mut __NODE_MODULE);
}
}
__load_neon_module
};
#block
}
)
.into()
} |
|
main.py | from flask import Flask, json
import pyodbc
conn = pyodbc.connect('DRIVER={PostgreSQL Unicode};SERVER=10.4.28.183;DATABASE=postgres;UID=postgres;PWD=developer2020')
app = Flask(__name__)
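# NOTE: despite its name, this helper returns the top-level categories
# (rows with no parent), not random products.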
def random_products(conn):
    cnxn = conn.cursor()
    cnxn.execute('select categoryid, name from categories c where parentid is null')
    rows = cnxn.fetchall()
    cnxn.commit()
    return rows
@app.route('/')
def | ():
    show_data = random_products(conn)
    return str(show_data)
if __name__ == '__main__':
    app.run()
| hello |
ironic.go | package ironic
import (
"fmt"
"os"
"strconv"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/go-logr/logr"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack/baremetal/v1/nodes"
"github.com/gophercloud/gophercloud/openstack/baremetal/v1/ports"
"github.com/gophercloud/gophercloud/openstack/baremetalintrospection/v1/introspection"
"github.com/pkg/errors"
logz "sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/yaml"
metal3v1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
"github.com/metal3-io/baremetal-operator/pkg/bmc"
"github.com/metal3-io/baremetal-operator/pkg/provisioner"
"github.com/metal3-io/baremetal-operator/pkg/provisioner/ironic/clients"
"github.com/metal3-io/baremetal-operator/pkg/provisioner/ironic/devicehints"
"github.com/metal3-io/baremetal-operator/pkg/provisioner/ironic/hardwaredetails"
)
var (
log = logz.New().WithName("provisioner").WithName("ironic")
deprovisionRequeueDelay = time.Second * 10
provisionRequeueDelay = time.Second * 10
powerRequeueDelay = time.Second * 10
introspectionRequeueDelay = time.Second * 15
softPowerOffTimeout = time.Second * 180
deployKernelURL string
deployRamdiskURL string
ironicEndpoint string
inspectorEndpoint string
ironicTrustedCAFile string
ironicClientCertFile string
ironicClientPrivKeyFile string
ironicInsecure bool
ironicSkipClientSANVerify bool
ironicAuth clients.AuthConfig
inspectorAuth clients.AuthConfig
maxBusyHosts int = 20
// Keep pointers to ironic and inspector clients configured with
// the global auth settings to reuse the connection between
// reconcilers.
clientIronicSingleton *gophercloud.ServiceClient
clientInspectorSingleton *gophercloud.ServiceClient
)
const (
// See nodes.Node.PowerState for details
powerOn = "power on"
powerOff = "power off"
softPowerOff = "soft power off"
powerNone = "None"
nameSeparator = "~"
)
var bootModeCapabilities = map[metal3v1alpha1.BootMode]string{
metal3v1alpha1.UEFI: "boot_mode:uefi",
metal3v1alpha1.UEFISecureBoot: "boot_mode:uefi,secure_boot:true",
metal3v1alpha1.Legacy: "boot_mode:bios",
}
type macAddressConflictError struct {
Address string
ExistingNode string
}
func (e macAddressConflictError) Error() string {
return fmt.Sprintf("MAC address %s conflicts with existing node %s", e.Address, e.ExistingNode)
}
// NewMacAddressConflictError is a wrap for macAddressConflictError error
func NewMacAddressConflictError(address, node string) error {
return macAddressConflictError{Address: address, ExistingNode: node}
}
func init() {
// NOTE(dhellmann): Use Fprintf() to report errors instead of
// logging, because logging is not configured yet in init().
var authErr error
ironicAuth, inspectorAuth, authErr = clients.LoadAuth()
if authErr != nil {
fmt.Fprintf(os.Stderr, "Cannot start: %s\n", authErr)
os.Exit(1)
}
deployKernelURL = os.Getenv("DEPLOY_KERNEL_URL")
if deployKernelURL == "" {
fmt.Fprintf(os.Stderr, "Cannot start: No DEPLOY_KERNEL_URL variable set\n")
os.Exit(1)
}
deployRamdiskURL = os.Getenv("DEPLOY_RAMDISK_URL")
if deployRamdiskURL == "" {
fmt.Fprintf(os.Stderr, "Cannot start: No DEPLOY_RAMDISK_URL variable set\n")
os.Exit(1)
}
ironicEndpoint = os.Getenv("IRONIC_ENDPOINT")
if ironicEndpoint == "" {
fmt.Fprintf(os.Stderr, "Cannot start: No IRONIC_ENDPOINT variable set\n")
os.Exit(1)
}
inspectorEndpoint = os.Getenv("IRONIC_INSPECTOR_ENDPOINT")
if inspectorEndpoint == "" {
fmt.Fprintf(os.Stderr, "Cannot start: No IRONIC_INSPECTOR_ENDPOINT variable set\n")
os.Exit(1)
}
ironicTrustedCAFile = os.Getenv("IRONIC_CACERT_FILE")
if ironicTrustedCAFile == "" {
ironicTrustedCAFile = "/opt/metal3/certs/ca/crt"
}
ironicClientCertFile = os.Getenv("IRONIC_CLIENT_CERT_FILE")
if ironicClientCertFile == "" {
ironicClientCertFile = "/opt/metal3/certs/client/crt"
}
ironicClientPrivKeyFile = os.Getenv("IRONIC_CLIENT_PRIVATE_KEY_FILE")
if ironicClientPrivKeyFile == "" {
ironicClientPrivKeyFile = "/opt/metal3/certs/client/key"
}
ironicInsecureStr := os.Getenv("IRONIC_INSECURE")
if strings.ToLower(ironicInsecureStr) == "true" {
ironicInsecure = true
}
ironicSkipClientSANVerifyStr := os.Getenv("IRONIC_SKIP_CLIENT_SAN_VERIFY")
if strings.ToLower(ironicSkipClientSANVerifyStr) == "true" {
ironicSkipClientSANVerify = true
}
if maxHostsStr := os.Getenv("PROVISIONING_LIMIT"); maxHostsStr != "" {
value, err := strconv.Atoi(maxHostsStr)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot start: Invalid value set for variable PROVISIONING_LIMIT=%s", maxHostsStr)
os.Exit(1)
}
maxBusyHosts = value
}
}
// Provisioner implements the provisioning.Provisioner interface
// and uses Ironic to manage the host.
type ironicProvisioner struct {
// the object metadata of the BareMetalHost resource
objectMeta metav1.ObjectMeta
// the UUID of the node in Ironic
nodeID string
// the address of the BMC
bmcAddress string
// whether to disable SSL certificate verification
disableCertVerification bool
// credentials to log in to the BMC
bmcCreds bmc.Credentials
// the MAC address of the PXE boot interface
bootMACAddress string
// a client for talking to ironic
client *gophercloud.ServiceClient
// a client for talking to ironic-inspector
inspector *gophercloud.ServiceClient
// a logger configured for this host
log logr.Logger
// a debug logger configured for this host
debugLog logr.Logger
// an event publisher for recording significant events
publisher provisioner.EventPublisher
}
// LogStartup produces useful logging information that we only want to
// emit once on startup but that is internal to this package.
func LogStartup() {
log.Info("ironic settings",
"endpoint", ironicEndpoint,
"ironicAuthType", ironicAuth.Type,
"inspectorEndpoint", inspectorEndpoint,
"inspectorAuthType", inspectorAuth.Type,
"deployKernelURL", deployKernelURL,
"deployRamdiskURL", deployRamdiskURL,
)
}
// A private function to construct an ironicProvisioner (rather than a
// Provisioner interface) in a consistent way for tests.
func newProvisionerWithSettings(host metal3v1alpha1.BareMetalHost, bmcCreds bmc.Credentials, publisher provisioner.EventPublisher, ironicURL string, ironicAuthSettings clients.AuthConfig, inspectorURL string, inspectorAuthSettings clients.AuthConfig) (*ironicProvisioner, error) {
hostData := provisioner.BuildHostData(host, bmcCreds)
tlsConf := clients.TLSConfig{
TrustedCAFile: ironicTrustedCAFile,
ClientCertificateFile: ironicClientCertFile,
ClientPrivateKeyFile: ironicClientPrivKeyFile,
InsecureSkipVerify: ironicInsecure,
SkipClientSANVerify: ironicSkipClientSANVerify,
}
clientIronic, err := clients.IronicClient(ironicURL, ironicAuthSettings, tlsConf)
if err != nil {
return nil, err
}
clientInspector, err := clients.InspectorClient(inspectorURL, inspectorAuthSettings, tlsConf)
if err != nil {
return nil, err
}
return newProvisionerWithIronicClients(hostData, publisher,
clientIronic, clientInspector)
}
func newProvisionerWithIronicClients(hostData provisioner.HostData, publisher provisioner.EventPublisher, clientIronic *gophercloud.ServiceClient, clientInspector *gophercloud.ServiceClient) (*ironicProvisioner, error) {
// Ensure we have a microversion high enough to get the features
// we need.
clientIronic.Microversion = "1.56"
provisionerLogger := log.WithValues("host", ironicNodeName(hostData.ObjectMeta))
p := &ironicProvisioner{
objectMeta: hostData.ObjectMeta,
nodeID: hostData.ProvisionerID,
bmcCreds: hostData.BMCCredentials,
bmcAddress: hostData.BMCAddress,
disableCertVerification: hostData.DisableCertificateVerification,
bootMACAddress: hostData.BootMACAddress,
client: clientIronic,
inspector: clientInspector,
log: provisionerLogger,
debugLog: provisionerLogger.V(1),
publisher: publisher,
}
return p, nil
}
// New returns a new Ironic Provisioner using the global configuration
// for finding the Ironic services.
func New(hostData provisioner.HostData, publisher provisioner.EventPublisher) (provisioner.Provisioner, error) {
var err error
if clientIronicSingleton == nil || clientInspectorSingleton == nil {
tlsConf := clients.TLSConfig{
TrustedCAFile: ironicTrustedCAFile,
ClientCertificateFile: ironicClientCertFile,
ClientPrivateKeyFile: ironicClientPrivKeyFile,
InsecureSkipVerify: ironicInsecure,
SkipClientSANVerify: ironicSkipClientSANVerify,
}
clientIronicSingleton, err = clients.IronicClient(
ironicEndpoint, ironicAuth, tlsConf)
if err != nil {
return nil, err
}
clientInspectorSingleton, err = clients.InspectorClient(
inspectorEndpoint, inspectorAuth, tlsConf)
if err != nil {
return nil, err
}
}
return newProvisionerWithIronicClients(hostData, publisher,
clientIronicSingleton, clientInspectorSingleton)
}
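// bmcAccess parses the host's BMC address and returns the matching
// access details for its BMC driver.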
func (p *ironicProvisioner) bmcAccess() (bmc.AccessDetails, error) {
bmcAccess, err := bmc.NewAccessDetails(p.bmcAddress, p.disableCertVerification)
if err != nil {
return nil, errors.Wrap(err, "failed to parse BMC address information")
}
return bmcAccess, nil
}
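// validateNode asks Ironic to validate the node's boot and deploy
// settings, returning expected validation failures as a message
// rather than an error.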
func (p *ironicProvisioner) validateNode(ironicNode *nodes.Node) (errorMessage string, err error) {
var validationErrors []string
p.log.Info("validating node settings in ironic")
validateResult, err := nodes.Validate(p.client, ironicNode.UUID).Extract()
if err != nil {
return "", err // do not wrap error so we can check type in caller
}
if !validateResult.Boot.Result {
validationErrors = append(validationErrors, validateResult.Boot.Reason)
}
if !validateResult.Deploy.Result {
validationErrors = append(validationErrors, validateResult.Deploy.Reason)
}
if len(validationErrors) > 0 {
// We expect to see errors of this nature sometimes, so rather
// than reporting it as a reconcile error we record the error
// status on the host and return.
errorMessage = fmt.Sprintf("host validation error: %s",
strings.Join(validationErrors, "; "))
return errorMessage, nil
}
return "", nil
}
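// listAllPorts returns all Ironic ports, optionally filtered by MAC
// address.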
func (p *ironicProvisioner) listAllPorts(address string) ([]ports.Port, error) {
var allPorts []ports.Port
opts := ports.ListOpts{
Fields: []string{"node_uuid"},
}
if address != "" {
opts.Address = address
}
pager := ports.List(p.client, opts)
allPages, err := pager.AllPages()
if err != nil {
return allPorts, err
}
return ports.ExtractPorts(allPages)
}
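// getNode loads the Ironic node with the stored provisioner ID,
// mapping a missing ID or a 404 response to ErrNeedsRegistration.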
func (p *ironicProvisioner) getNode() (*nodes.Node, error) {
if p.nodeID == "" {
return nil, provisioner.ErrNeedsRegistration
}
ironicNode, err := nodes.Get(p.client, p.nodeID).Extract()
switch err.(type) {
case nil:
p.debugLog.Info("found existing node by ID")
return ironicNode, nil
case gophercloud.ErrDefault404:
// Lookup by ID failed; try to look up by hostname in case the node
// was previously created
return nil, provisioner.ErrNeedsRegistration
default:
return nil, errors.Wrap(err,
fmt.Sprintf("failed to find node by ID %s", p.nodeID))
}
}
// nodeHasAssignedPort verifies that the node has a port assigned to it by Ironic.
func (p *ironicProvisioner) nodeHasAssignedPort(ironicNode *nodes.Node) (bool, error) {
opts := ports.ListOpts{
Fields: []string{"node_uuid"},
NodeUUID: ironicNode.UUID,
}
pager := ports.List(p.client, opts)
allPages, err := pager.AllPages()
if err != nil {
return false, errors.Wrap(err, "failed to page over list of ports")
}
empty, err := allPages.IsEmpty()
if err != nil {
return false, errors.Wrap(err, "failed to check port list status")
}
if empty {
p.debugLog.Info("node has no assigned port")
return false, nil
}
p.debugLog.Info("node has assigned port")
return true, nil
}
// isAddressAllocatedToPort verifies whether the MAC address is already allocated to some node's port.
func (p *ironicProvisioner) isAddressAllocatedToPort(address string) (bool, error) {
allPorts, err := p.listAllPorts(address)
if err != nil {
return false, errors.Wrap(err, fmt.Sprintf("failed to list ports for %s", address))
}
if len(allPorts) == 0 {
p.debugLog.Info("address does not have allocated ports", "address", address)
return false, nil
}
p.debugLog.Info("address is allocated to port", "address", address)
return true, nil
}
// Look for an existing registration for the host in Ironic.
func (p *ironicProvisioner) findExistingHost(bootMACAddress string) (ironicNode *nodes.Node, err error) {
// Try to load the node by UUID
ironicNode, err = p.getNode()
if !errors.Is(err, provisioner.ErrNeedsRegistration) {
return
}
// Try to load the node by name
nodeSearchList := []string{ironicNodeName(p.objectMeta)}
if !strings.Contains(p.objectMeta.Name, nameSeparator) {
nodeSearchList = append(nodeSearchList, p.objectMeta.Name)
}
for _, nodeName := range nodeSearchList {
p.debugLog.Info("looking for existing node by name", "name", nodeName)
ironicNode, err = nodes.Get(p.client, nodeName).Extract()
switch err.(type) {
case nil:
p.debugLog.Info("found existing node by name")
return ironicNode, nil
case gophercloud.ErrDefault404:
p.log.Info(
fmt.Sprintf("node with name %s doesn't exist", nodeName))
default:
return nil, errors.Wrap(err,
fmt.Sprintf("failed to find node by name %s", nodeName))
}
}
// Try to load the node by port address
p.log.Info("looking for existing node by MAC", "MAC", bootMACAddress)
allPorts, err := p.listAllPorts(bootMACAddress)
if err != nil {
p.log.Info("failed to find an existing port with address", "MAC", bootMACAddress)
return nil, nil
}
if len(allPorts) > 0 {
nodeUUID := allPorts[0].NodeUUID
ironicNode, err = nodes.Get(p.client, nodeUUID).Extract()
switch err.(type) {
case nil:
p.debugLog.Info("found existing node by ID")
// If the node has a name, this means we didn't find it above.
if ironicNode.Name != "" {
return nil, NewMacAddressConflictError(bootMACAddress, ironicNode.Name)
}
return ironicNode, nil
case gophercloud.ErrDefault404:
return nil, errors.Wrap(err,
fmt.Sprintf("port exists but linked node doesn't %s", nodeUUID))
default:
return nil, errors.Wrap(err,
fmt.Sprintf("port exists but failed to find linked node by ID %s", nodeUUID))
}
} else {
p.log.Info("port with address doesn't exist", "MAC", bootMACAddress)
}
// Either the node was never created or the Ironic database has
// been dropped.
return nil, nil
}
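// createPXEEnabledNodePort creates a PXE-enabled Ironic port linking
// the given MAC address to the node.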
func (p *ironicProvisioner) createPXEEnabledNodePort(uuid, macAddress string) error {
p.log.Info("creating PXE enabled ironic port for node", "NodeUUID", uuid, "MAC", macAddress)
enable := true
_, err := ports.Create(
p.client,
ports.CreateOpts{
NodeUUID: uuid,
Address: macAddress,
PXEEnabled: &enable,
}).Extract()
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to create ironic port for node: %s, MAC: %s", uuid, macAddress))
}
return nil
}
// ValidateManagementAccess registers the host with the provisioning
// system and tests the connection information for the host to verify
// that the location and credentials work.
//
// FIXME(dhellmann): We should rename this method to describe what it
// actually does.
func (p *ironicProvisioner) ValidateManagementAccess(data provisioner.ManagementAccessData, credentialsChanged, force bool) (result provisioner.Result, provID string, err error) {
bmcAccess, err := p.bmcAccess()
if err != nil {
result, err = operationFailed(err.Error())
return
}
var ironicNode *nodes.Node
updater := updateOptsBuilder(p.debugLog)
p.debugLog.Info("validating management access")
ironicNode, err = p.findExistingHost(p.bootMACAddress)
if err != nil {
switch err.(type) {
case macAddressConflictError:
result, err = operationFailed(err.Error())
default:
result, err = transientError(errors.Wrap(err, "failed to find existing host"))
}
return
}
// Some BMC types require a MAC address, so ensure we have one
// when we need it. If not, place the host in an error state.
if bmcAccess.NeedsMAC() && p.bootMACAddress == "" {
msg := fmt.Sprintf("BMC driver %s requires a BootMACAddress value", bmcAccess.Type())
p.log.Info(msg)
result, err = operationFailed(msg)
return
}
driverInfo := bmcAccess.DriverInfo(p.bmcCreds)
// FIXME(dhellmann): We need to get our IP on the
// provisioning network from somewhere.
driverInfo["deploy_kernel"] = deployKernelURL
driverInfo["deploy_ramdisk"] = deployRamdiskURL
// If we have not found a node yet, we need to create one
if ironicNode == nil {
p.log.Info("registering host in ironic")
if data.BootMode == metal3v1alpha1.UEFISecureBoot && !bmcAccess.SupportsSecureBoot() {
msg := fmt.Sprintf("BMC driver %s does not support secure boot", bmcAccess.Type())
p.log.Info(msg)
result, err = operationFailed(msg)
return
}
ironicNode, err = nodes.Create(
p.client,
nodes.CreateOpts{
Driver: bmcAccess.Driver(),
BootInterface: bmcAccess.BootInterface(),
Name: p.objectMeta.Name,
DriverInfo: driverInfo,
DeployInterface: p.deployInterface(data.CurrentImage),
InspectInterface: "inspector",
ManagementInterface: bmcAccess.ManagementInterface(),
PowerInterface: bmcAccess.PowerInterface(),
RAIDInterface: bmcAccess.RAIDInterface(),
VendorInterface: bmcAccess.VendorInterface(),
Properties: map[string]interface{}{
"capabilities": bootModeCapabilities[data.BootMode],
},
}).Extract()
// FIXME(dhellmann): Handle 409 and 503? errors here.
if err != nil {
result, err = transientError(errors.Wrap(err, "failed to register host in ironic"))
return
}
p.publisher("Registered", "Registered new host")
// Store the ID so other methods can assume it is set and so
// we can find the node again later.
provID = ironicNode.UUID
// If we know the MAC, create a port. Otherwise we will have
// to do this after we run the introspection step.
if p.bootMACAddress != "" {
err = p.createPXEEnabledNodePort(ironicNode.UUID, p.bootMACAddress)
if err != nil {
result, err = transientError(err)
return
}
}
} else {
// FIXME(dhellmann): At this point we have found an existing
// node in ironic by looking it up. We need to check its
// settings against what we have in the host, and change them
// if there are differences.
provID = ironicNode.UUID
updater.SetTopLevelOpt("name", ironicNodeName(p.objectMeta), ironicNode.Name)
// When the node exists but Ironic has not assigned a port to it, and the actual address (MAC)
// is present in the host config and not allocated to a different node, try to create a port for this node.
if p.bootMACAddress != "" {
var nodeHasAssignedPort, addressIsAllocatedToPort bool
nodeHasAssignedPort, err = p.nodeHasAssignedPort(ironicNode)
if err != nil {
result, err = transientError(err)
return
}
if !nodeHasAssignedPort {
addressIsAllocatedToPort, err = p.isAddressAllocatedToPort(p.bootMACAddress)
if err != nil {
result, err = transientError(err)
return
}
if !addressIsAllocatedToPort {
err = p.createPXEEnabledNodePort(ironicNode.UUID, p.bootMACAddress)
if err != nil {
result, err = transientError(err)
return
}
}
}
}
// Look for the case where we previously enrolled this node
// and now the credentials have changed.
if credentialsChanged {
updater.SetTopLevelOpt("driver_info", driverInfo, nil)
}
// We don't return here because we also have to set the
// target provision state to manageable, which happens
// below.
}
if data.CurrentImage != nil {
p.getImageUpdateOptsForNode(ironicNode, data.CurrentImage, data.BootMode, updater)
}
updater.SetTopLevelOpt("automated_clean",
data.AutomatedCleaningMode != metal3v1alpha1.CleaningModeDisabled,
ironicNode.AutomatedClean)
var success bool
success, result, err = p.tryUpdateNode(ironicNode, updater)
if !success {
return
}
// ironicNode, err = nodes.Get(p.client, p.status.ID).Extract()
// if err != nil {
// return result, errors.Wrap(err, "failed to get provisioning state in ironic")
// }
p.log.Info("current provision state",
"lastError", ironicNode.LastError,
"current", ironicNode.ProvisionState,
"target", ironicNode.TargetProvisionState,
)
result, err = operationComplete()
// Ensure the node is marked manageable.
switch nodes.ProvisionState(ironicNode.ProvisionState) {
case nodes.Enroll:
// If ironic is reporting an error, stop working on the node.
if ironicNode.LastError != "" && !(credentialsChanged || force) {
result, err = operationFailed(ironicNode.LastError)
return
}
if ironicNode.TargetProvisionState == string(nodes.TargetManage) {
// We have already tried to manage the node and did not
// get an error, so do nothing and keep trying.
result, err = operationContinuing(provisionRequeueDelay)
return
}
result, err = p.changeNodeProvisionState(
ironicNode,
nodes.ProvisionStateOpts{Target: nodes.TargetManage},
)
return
case nodes.Verifying:
// If we're still waiting for the state to change in Ironic,
// return true to indicate that we're dirty and need to be
// reconciled again.
result, err = operationContinuing(provisionRequeueDelay)
return
case nodes.Manageable:
return
case nodes.Available:
// The host is fully registered (and probably wasn't cleanly
// deleted previously)
return
case nodes.Active:
// The host is already running, maybe it's a master?
p.debugLog.Info("have active host", "image_source", ironicNode.InstanceInfo["image_source"])
return
default:
return
}
}
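// tryUpdateNode applies the accumulated updates to the node,
// scheduling a retry if Ironic reports the node as busy (409).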
func (p *ironicProvisioner) tryUpdateNode(ironicNode *nodes.Node, updater *nodeUpdater) (success bool, result provisioner.Result, err error) {
if len(updater.Updates) == 0 {
success = true
return
}
p.log.Info("updating node settings in ironic")
_, err = nodes.Update(p.client, ironicNode.UUID, updater.Updates).Extract()
switch err.(type) {
case nil:
success = true
case gophercloud.ErrDefault409:
p.log.Info("could not update node settings in ironic, busy")
result, err = retryAfterDelay(provisionRequeueDelay)
default:
result, err = transientError(errors.Wrap(err, "failed to update host settings in ironic"))
}
return
}
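// tryChangeNodeProvisionState asks Ironic to move the node toward the
// given target provision state, scheduling a retry if the node is busy.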
func (p *ironicProvisioner) tryChangeNodeProvisionState(ironicNode *nodes.Node, opts nodes.ProvisionStateOpts) (success bool, result provisioner.Result, err error) {
p.log.Info("changing provisioning state",
"current", ironicNode.ProvisionState,
"existing target", ironicNode.TargetProvisionState,
"new target", opts.Target,
)
changeResult := nodes.ChangeProvisionState(p.client, ironicNode.UUID, opts)
switch changeResult.Err.(type) {
case nil:
success = true
case gophercloud.ErrDefault409:
p.log.Info("could not change state of host, busy")
result, err = retryAfterDelay(provisionRequeueDelay)
return
default:
result, err = transientError(errors.Wrap(changeResult.Err,
fmt.Sprintf("failed to change provisioning state to %q", opts.Target)))
return
}
result, err = operationContinuing(provisionRequeueDelay)
return
}
func (p *ironicProvisioner) changeNodeProvisionState(ironicNode *nodes.Node, opts nodes.ProvisionStateOpts) (result provisioner.Result, err error) {
_, result, err = p.tryChangeNodeProvisionState(ironicNode, opts)
return
}
// InspectHardware updates the HardwareDetails field of the host with
// details of devices discovered on the hardware. It may be called
// multiple times, and should return true for its dirty flag until the
// inspection is completed.
func (p *ironicProvisioner) InspectHardware(data provisioner.InspectData, force, refresh bool) (result provisioner.Result, details *metal3v1alpha1.HardwareDetails, err error) {
p.log.Info("inspecting hardware")
ironicNode, err := p.getNode()
if err != nil {
result, err = transientError(err)
return
}
status, err := introspection.GetIntrospectionStatus(p.inspector, ironicNode.UUID).Extract()
if err != nil || refresh {
if _, isNotFound := err.(gophercloud.ErrDefault404); isNotFound || refresh {
switch nodes.ProvisionState(ironicNode.ProvisionState) {
case nodes.Inspecting, nodes.InspectWait:
p.log.Info("inspection already started")
result, err = operationContinuing(introspectionRequeueDelay)
return
case nodes.InspectFail:
if !force {
p.log.Info("starting inspection failed", "error", status.Error)
failure := ironicNode.LastError
if failure == "" {
failure = "Inspection failed"
}
result, err = operationFailed(failure)
return
}
fallthrough
default:
var success bool
success, result, err = p.tryUpdateNode(
ironicNode,
updateOptsBuilder(p.debugLog).
SetPropertiesOpts(optionsData{
"capabilities": buildCapabilitiesValue(ironicNode, data.BootMode),
}, ironicNode),
)
if !success {
return
}
p.log.Info("starting new hardware inspection")
success, result, err = p.tryChangeNodeProvisionState(
ironicNode,
nodes.ProvisionStateOpts{Target: nodes.TargetInspect},
)
if success {
p.publisher("InspectionStarted", "Hardware inspection started")
}
return
}
}
result, err = transientError(errors.Wrap(err, "failed to extract hardware inspection status"))
return
}
if status.Error != "" {
p.log.Info("inspection failed", "error", status.Error)
result, err = operationFailed(status.Error)
return
}
if !status.Finished || (nodes.ProvisionState(ironicNode.ProvisionState) == nodes.Inspecting || nodes.ProvisionState(ironicNode.ProvisionState) == nodes.InspectWait) {
p.log.Info("inspection in progress", "started_at", status.StartedAt)
result, err = operationContinuing(introspectionRequeueDelay)
return
}
// Introspection is done
p.log.Info("getting hardware details from inspection")
response := introspection.GetIntrospectionData(p.inspector, ironicNode.UUID)
introData, err := response.Extract()
if err != nil {
result, err = transientError(errors.Wrap(err, "failed to retrieve hardware introspection data"))
return
}
p.log.Info("received introspection data", "data", response.Body)
details = hardwaredetails.GetHardwareDetails(introData)
p.publisher("InspectionComplete", "Hardware inspection completed")
result, err = operationComplete()
return
}
// UpdateHardwareState fetches the latest hardware state of the server
// and updates the HardwareDetails field of the host with details. It
// is expected to do this in the least expensive way possible, such as
// reading from a cache.
func (p *ironicProvisioner) UpdateHardwareState() (hwState provisioner.HardwareState, err error) {
p.debugLog.Info("updating hardware state")
ironicNode, err := p.getNode()
if err != nil {
return
}
switch ironicNode.PowerState {
case powerOn, powerOff:
discoveredVal := ironicNode.PowerState == powerOn
hwState.PoweredOn = &discoveredVal
case powerNone:
p.log.Info("could not determine power state", "value", ironicNode.PowerState)
default:
p.log.Info("unknown power state", "value", ironicNode.PowerState)
}
return
}
func (p *ironicProvisioner) setLiveIsoUpdateOptsForNode(ironicNode *nodes.Node, imageData *metal3v1alpha1.Image, updater *nodeUpdater) {
optValues := optionsData{
"boot_iso": imageData.URL,
// remove any image_source or checksum options
"image_source": nil,
"image_os_hash_value": nil,
"image_os_hash_algo": nil,
"image_checksum": nil,
}
updater.
SetInstanceInfoOpts(optValues, ironicNode).
SetTopLevelOpt("deploy_interface", "ramdisk", ironicNode.DeployInterface)
}
func (p *ironicProvisioner) setDirectDeployUpdateOptsForNode(ironicNode *nodes.Node, imageData *metal3v1alpha1.Image, updater *nodeUpdater) {
checksum, checksumType, ok := imageData.GetChecksum()
if !ok {
p.log.Info("image/checksum not found for host")
return
}
// FIXME: For older versions of ironic that do not have
// https://review.opendev.org/#/c/711816/ failing to include the
// 'image_checksum' causes ironic to refuse to provision the
// image, even if the other hash value parameters are given. We
// only want to do that for MD5, however, because those versions
// of ironic only support MD5 checksums.
var legacyChecksum *string
if checksumType == string(metal3v1alpha1.MD5) {
legacyChecksum = &checksum
}
optValues := optionsData{
// Remove any boot_iso field
"boot_iso": nil,
"image_source": imageData.URL,
"image_os_hash_algo": checksumType,
"image_os_hash_value": checksum,
"image_checksum": legacyChecksum,
"image_disk_format": imageData.DiskFormat,
}
updater.
SetInstanceInfoOpts(optValues, ironicNode).
SetTopLevelOpt("deploy_interface", "direct", ironicNode.DeployInterface)
}
func (p *ironicProvisioner) getImageUpdateOptsForNode(ironicNode *nodes.Node, imageData *metal3v1alpha1.Image, bootMode metal3v1alpha1.BootMode, updater *nodeUpdater) {
// instance_uuid
updater.SetTopLevelOpt("instance_uuid", string(p.objectMeta.UID), ironicNode.InstanceUUID)
// Secure boot is a normal capability that goes into instance_info (we
// also put it into properties for consistency, although it's not
// strictly required in our case).
// Instance info capabilities were invented later and
// use a normal JSON mapping instead of a custom
// string value.
capabilitiesII := map[string]string{}
if bootMode == metal3v1alpha1.UEFISecureBoot {
capabilitiesII["secure_boot"] = "true"
}
updater.SetInstanceInfoOpts(optionsData{"capabilities": capabilitiesII}, ironicNode)
if imageData.DiskFormat != nil && *imageData.DiskFormat == "live-iso" {
// Set live-iso format options
p.setLiveIsoUpdateOptsForNode(ironicNode, imageData, updater)
} else {
// Set deploy_interface direct options when not booting a live-iso
p.setDirectDeployUpdateOptsForNode(ironicNode, imageData, updater)
}
}
func (p *ironicProvisioner) getUpdateOptsForNode(ironicNode *nodes.Node, data provisioner.ProvisionData) *nodeUpdater {
updater := updateOptsBuilder(p.debugLog)
p.getImageUpdateOptsForNode(ironicNode, &data.Image, data.BootMode, updater)
opts := optionsData{
"root_device": devicehints.MakeHintMap(data.RootDeviceHints),
// FIXME(dhellmann): This should come from inspecting the host.
"cpu_arch": data.HardwareProfile.CPUArch,
"local_gb": data.HardwareProfile.LocalGB,
"capabilities": buildCapabilitiesValue(ironicNode, data.BootMode),
}
updater.SetPropertiesOpts(opts, ironicNode)
return updater
}
// We can't just replace the capabilities because we need to keep the
// values provided by inspection. We can't replace only the boot_mode
// because the API isn't fine-grained enough for that. So we have to
// look at the existing value and modify it. This function
// encapsulates the logic for building the value and knowing which
// update operation to use with the results.
func buildCapabilitiesValue(ironicNode *nodes.Node, bootMode metal3v1alpha1.BootMode) string {
capabilities, ok := ironicNode.Properties["capabilities"]
if !ok {
// There is no existing capabilities value
return bootModeCapabilities[bootMode]
}
existingCapabilities := capabilities.(string)
if existingCapabilities == "" {
// The existing value is empty so we can replace the whole
// thing.
return bootModeCapabilities[bootMode]
}
var filteredCapabilities []string
for _, item := range strings.Split(existingCapabilities, ",") {
if !strings.HasPrefix(item, "boot_mode:") && !strings.HasPrefix(item, "secure_boot:") {
filteredCapabilities = append(filteredCapabilities, item)
}
}
filteredCapabilities = append(filteredCapabilities, bootModeCapabilities[bootMode])
return strings.Join(filteredCapabilities, ",")
}
func (p *ironicProvisioner) setUpForProvisioning(ironicNode *nodes.Node, data provisioner.ProvisionData) (result provisioner.Result, err error) {
p.log.Info("starting provisioning", "node properties", ironicNode.Properties)
success, result, err := p.tryUpdateNode(ironicNode,
p.getUpdateOptsForNode(ironicNode, data))
if !success {
return
}
p.log.Info("validating host settings")
errorMessage, err := p.validateNode(ironicNode)
switch err.(type) {
case nil:
case gophercloud.ErrDefault409:
p.log.Info("could not validate host during registration, busy")
return retryAfterDelay(provisionRequeueDelay)
default:
return transientError(errors.Wrap(err, "failed to validate host during registration"))
}
if errorMessage != "" {
return operationFailed(errorMessage)
}
// If validation is successful we can start moving the host
// through the states necessary to make it "available".
p.log.Info("starting provisioning",
"lastError", ironicNode.LastError,
"current", ironicNode.ProvisionState,
"target", ironicNode.TargetProvisionState,
"deploy step", ironicNode.DeployStep,
)
p.publisher("ProvisioningStarted",
fmt.Sprintf("Image provisioning started for %s", data.Image.URL))
return
}
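// deployInterface returns the Ironic deploy interface to use for an
// image: "ramdisk" for live-ISO images, "direct" otherwise.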
func (p *ironicProvisioner) deployInterface(image *metal3v1alpha1.Image) (result string) {
result = "direct"
if image != nil && image.DiskFormat != nil && *image.DiskFormat == "live-iso" {
result = "ramdisk"
}
return result
}
// Adopt notifies the provisioner that the state machine believes the host
// to be currently provisioned, and that it should be managed as such.
func (p *ironicProvisioner) Adopt(data provisioner.AdoptData, force bool) (result provisioner.Result, err error) {
ironicNode, err := p.getNode()
if err != nil {
return transientError(err)
}
switch nodes.ProvisionState(ironicNode.ProvisionState) {
case nodes.Enroll, nodes.Verifying:
return transientError(fmt.Errorf("Invalid state for adopt: %s",
ironicNode.ProvisionState))
case nodes.Manageable:
_, hasImageSource := ironicNode.InstanceInfo["image_source"]
_, hasBootISO := ironicNode.InstanceInfo["boot_iso"]
if data.State == metal3v1alpha1.StateDeprovisioning &&
!(hasImageSource || hasBootISO) {
// If we got here after a fresh registration and image data is
// available, it should have been added to the node during
// registration. If it isn't present then we got here due to a
// failed cleaning on deprovision. The node will be cleaned again
// before the next provisioning, so just allow the controller to
// continue without adopting.
p.log.Info("no image info; not adopting", "state", ironicNode.ProvisionState)
return operationComplete()
}
return p.changeNodeProvisionState(
ironicNode,
nodes.ProvisionStateOpts{
Target: nodes.TargetAdopt,
},
)
case nodes.Adopting:
return operationContinuing(provisionRequeueDelay)
case nodes.AdoptFail:
if force {
return p.changeNodeProvisionState(
ironicNode,
nodes.ProvisionStateOpts{
Target: nodes.TargetAdopt,
},
)
}
return operationFailed(fmt.Sprintf("Host adoption failed: %s",
ironicNode.LastError))
case nodes.Active:
default:
}
return operationComplete()
}
func (p *ironicProvisioner) ironicHasSameImage(ironicNode *nodes.Node, image metal3v1alpha1.Image) (sameImage bool) {
// Check whether ironic is already configured with the same image
// we are trying to provision to the host.
if image.DiskFormat != nil && *image.DiskFormat == "live-iso" {
sameImage = (ironicNode.InstanceInfo["boot_iso"] == image.URL)
p.log.Info("checking image settings",
"boot_iso", ironicNode.InstanceInfo["boot_iso"],
"same", sameImage,
"provisionState", ironicNode.ProvisionState)
} else {
checksum, checksumType, _ := image.GetChecksum()
sameImage = (ironicNode.InstanceInfo["image_source"] == image.URL &&
ironicNode.InstanceInfo["image_os_hash_algo"] == checksumType &&
ironicNode.InstanceInfo["image_os_hash_value"] == checksum)
p.log.Info("checking image settings",
"source", ironicNode.InstanceInfo["image_source"],
"image_os_hash_algo", checksumType,
"image_os_has_value", checksum,
"same", sameImage,
"provisionState", ironicNode.ProvisionState)
}
return sameImage
}
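// buildManualCleaningSteps assembles the manual cleaning steps
// (currently only RAID configuration) for the node.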
func (p *ironicProvisioner) buildManualCleaningSteps(bmcAccess bmc.AccessDetails, data provisioner.PrepareData) (cleanSteps []nodes.CleanStep, err error) {
// Build raid clean steps
if bmcAccess.RAIDInterface() != "no-raid" {
cleanSteps = append(cleanSteps, BuildRAIDCleanSteps(data.RAIDConfig)...)
} else if data.RAIDConfig != nil {
return nil, fmt.Errorf("RAID settings are defined, but the node's driver %s does not support RAID", bmcAccess.Driver())
}
// TODO: Add manual cleaning steps for host configuration
return
}
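// startManualCleaning applies the target RAID configuration and, if
// any clean steps were built, moves the node into manual cleaning.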
func (p *ironicProvisioner) startManualCleaning(bmcAccess bmc.AccessDetails, ironicNode *nodes.Node, data provisioner.PrepareData) (success bool, result provisioner.Result, err error) {
if bmcAccess.RAIDInterface() != "no-raid" {
// Set raid configuration
err = setTargetRAIDCfg(p, ironicNode, data)
if err != nil {
result, err = transientError(err)
return
}
}
// Build manual clean steps
cleanSteps, err := p.buildManualCleaningSteps(bmcAccess, data)
if err != nil {
result, err = operationFailed(err.Error())
return
}
// Start manual clean
if len(cleanSteps) != 0 {
p.log.Info("remove existing configuration and set new configuration", "steps", cleanSteps)
return p.tryChangeNodeProvisionState(
ironicNode,
nodes.ProvisionStateOpts{
Target: nodes.TargetClean,
CleanSteps: cleanSteps,
},
)
}
result, err = operationComplete()
return
}
// Prepare removes the existing configuration and sets the new configuration.
// If `started` is true, it means that we successfully executed `tryChangeNodeProvisionState`.
func (p *ironicProvisioner) Prepare(data provisioner.PrepareData, unprepared bool) (result provisioner.Result, started bool, err error) {
bmcAccess, err := p.bmcAccess()
if err != nil {
result, err = transientError(err)
return
}
ironicNode, err := p.getNode()
if err != nil {
result, err = transientError(err)
return
}
switch nodes.ProvisionState(ironicNode.ProvisionState) {
case nodes.Available:
var cleanSteps []nodes.CleanStep
cleanSteps, err = p.buildManualCleaningSteps(bmcAccess, data)
if err != nil {
result, err = operationFailed(err.Error())
return
}
if unprepared && len(cleanSteps) != 0 {
result, err = p.changeNodeProvisionState(
ironicNode,
nodes.ProvisionStateOpts{Target: nodes.TargetManage},
)
return
}
result, err = operationComplete()
case nodes.Manageable:
if unprepared {
started, result, err = p.startManualCleaning(bmcAccess, ironicNode, data)
return
}
// Manual clean finished
result, err = operationComplete()
case nodes.CleanFail:
// When cleaning has failed, we need to clear the host provisioning settings.
// If unprepared is false, the settings have not been cleared yet,
// so we cannot move the node to the manageable state until they are.
if !unprepared {
result, err = operationFailed(ironicNode.LastError)
return
}
if ironicNode.Maintenance {
p.log.Info("clearing maintenance flag")
result, err = p.setMaintenanceFlag(ironicNode, false)
return
}
result, err = p.changeNodeProvisionState(
ironicNode,
nodes.ProvisionStateOpts{Target: nodes.TargetManage},
)
case nodes.Cleaning, nodes.CleanWait:
p.log.Info("waiting for host to become manageable",
"state", ironicNode.ProvisionState,
"deploy step", ironicNode.DeployStep)
result, err = operationContinuing(provisionRequeueDelay)
default:
result, err = transientError(fmt.Errorf("Have unexpected ironic node state %s", ironicNode.ProvisionState))
}
return
}
// Provision writes the image from the host spec to the host. It may
// be called multiple times, and should return true for its dirty flag
// until the provisioning operation is completed.
func (p *ironicProvisioner) Provision(data provisioner.ProvisionData) (result provisioner.Result, err error) {
ironicNode, err := p.getNode()
if err != nil {
return transientError(err)
}
p.log.Info("provisioning image to host", "state", ironicNode.ProvisionState)
ironicHasSameImage := p.ironicHasSameImage(ironicNode, data.Image)
// Ironic has the settings it needs, see if it finds any issues
// with them.
switch nodes.ProvisionState(ironicNode.ProvisionState) {
case nodes.DeployFail:
// Since we were last here, ironic has recorded an error for this host
// with the image and checksum we have been trying to use, so we
// should stop. (If the image values do not match, we want to try
// again.)
if ironicHasSameImage {
// Save me from "eventually consistent" systems built on
// top of relational databases...
if ironicNode.LastError == "" {
p.log.Info("failed but error message not available")
return retryAfterDelay(0)
}
p.log.Info("found error", "msg", ironicNode.LastError)
return operationFailed(fmt.Sprintf("Image provisioning failed: %s",
ironicNode.LastError))
}
p.log.Info("recovering from previous failure")
if provResult, err := p.setUpForProvisioning(ironicNode, data); err != nil || provResult.Dirty || provResult.ErrorMessage != "" {
return provResult, err
}
return p.changeNodeProvisionState(ironicNode,
nodes.ProvisionStateOpts{Target: nodes.TargetActive})
case nodes.Manageable:
return p.changeNodeProvisionState(ironicNode,
nodes.ProvisionStateOpts{Target: nodes.TargetProvide})
case nodes.CleanFail:
if ironicNode.Maintenance {
p.log.Info("clearing maintenance flag")
return p.setMaintenanceFlag(ironicNode, false)
}
return p.changeNodeProvisionState(
ironicNode,
nodes.ProvisionStateOpts{Target: nodes.TargetManage},
)
case nodes.Available:
if provResult, err := p.setUpForProvisioning(ironicNode, data); err != nil || provResult.Dirty || provResult.ErrorMessage != "" {
return provResult, err
}
// After it is available, we need to start provisioning by
// setting the state to "active".
p.log.Info("making host active")
// Retrieve cloud-init user data
userData, err := data.HostConfig.UserData()
if err != nil {
return transientError(errors.Wrap(err, "could not retrieve user data"))
}
// Retrieve cloud-init network_data.json. Default value is empty
networkDataRaw, err := data.HostConfig.NetworkData()
if err != nil {
return transientError(errors.Wrap(err, "could not retrieve network data"))
}
var networkData map[string]interface{}
if err = yaml.Unmarshal([]byte(networkDataRaw), &networkData); err != nil {
return transientError(errors.Wrap(err, "failed to unmarshal network_data.json from secret"))
}
// Retrieve cloud-init meta_data.json with fallback to default
metaData := map[string]interface{}{
"uuid": string(p.objectMeta.UID),
"metal3-namespace": p.objectMeta.Namespace,
"metal3-name": p.objectMeta.Name,
"local-hostname": p.objectMeta.Name,
"local_hostname": p.objectMeta.Name,
"name": p.objectMeta.Name,
}
metaDataRaw, err := data.HostConfig.MetaData()
if err != nil {
return transientError(errors.Wrap(err, "could not retrieve metadata"))
}
if metaDataRaw != "" {
if err = yaml.Unmarshal([]byte(metaDataRaw), &metaData); err != nil {
return transientError(errors.Wrap(err, "failed to unmarshal metadata from secret"))
}
}
var configDrive nodes.ConfigDrive
if userData != "" {
configDrive = nodes.ConfigDrive{
UserData: userData,
MetaData: metaData,
NetworkData: networkData,
}
if err != nil {
return transientError(errors.Wrap(err, "failed to build config drive"))
}
p.log.Info("triggering provisioning with config drive")
} else {
p.log.Info("triggering provisioning without config drive")
}
return p.changeNodeProvisionState(
ironicNode,
nodes.ProvisionStateOpts{
Target: nodes.TargetActive,
ConfigDrive: configDrive,
},
)
case nodes.Active:
// provisioning is done
p.publisher("ProvisioningComplete",
fmt.Sprintf("Image provisioning completed for %s", data.Image.URL))
p.log.Info("finished provisioning")
return operationComplete()
default:
// wait states like cleaning and clean wait
p.log.Info("waiting for host to become available",
"state", ironicNode.ProvisionState,
"deploy step", ironicNode.DeployStep)
return operationContinuing(provisionRequeueDelay)
}
}
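// setMaintenanceFlag sets or clears the Ironic maintenance flag on
// the node.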
func (p *ironicProvisioner) setMaintenanceFlag(ironicNode *nodes.Node, value bool) (result provisioner.Result, err error) {
success, result, err := p.tryUpdateNode(ironicNode,
updateOptsBuilder(p.log).SetTopLevelOpt("maintenance", value, nil))
if err != nil {
err = fmt.Errorf("failed to set host maintenance flag to %v (%w)", value, err)
}
if !success {
return
}
return operationContinuing(0)
}
// Deprovision removes the image from the host. It may be called
// multiple times, and should return true for its dirty flag until the
// deprovisioning operation is completed.
func (p *ironicProvisioner) Deprovision(force bool) (result provisioner.Result, err error) {
p.log.Info("deprovisioning")
ironicNode, err := p.getNode()
if err != nil {
return transientError(err)
}
p.log.Info("deprovisioning host",
"ID", ironicNode.UUID,
"lastError", ironicNode.LastError,
"current", ironicNode.ProvisionState,
"target", ironicNode.TargetProvisionState,
"deploy step", ironicNode.DeployStep,
"instance_info", ironicNode.InstanceInfo,
)
switch nodes.ProvisionState(ironicNode.ProvisionState) {
case nodes.Error:
if !force {
p.log.Info("deprovisioning failed")
if ironicNode.LastError == "" {
result.ErrorMessage = "Deprovisioning failed"
} else {
result.ErrorMessage = ironicNode.LastError
}
return result, nil
}
p.log.Info("retrying deprovisioning")
p.publisher("DeprovisioningStarted", "Image deprovisioning restarted")
return p.changeNodeProvisionState(
ironicNode,
nodes.ProvisionStateOpts{Target: nodes.TargetDeleted},
)
case nodes.CleanFail:
p.log.Info("cleaning failed")
if ironicNode.Maintenance {
p.log.Info("clearing maintenance flag")
return p.setMaintenanceFlag(ironicNode, false)
}
// This will return us to the manageable state without completing
// cleaning. Because cleaning happens in the process of moving from
// manageable to available, the node will still get cleaned before
// we provision it again.
return p.changeNodeProvisionState(
ironicNode,
nodes.ProvisionStateOpts{Target: nodes.TargetManage},
)
case nodes.Manageable:
// We end up here after CleanFail. Because cleaning happens in the
// process of moving from manageable to available, the node will still
// get cleaned before we provision it again. Therefore, just declare
// deprovisioning complete.
p.log.Info("deprovisioning node is in manageable state")
return operationComplete()
case nodes.Available:
p.publisher("DeprovisioningComplete", "Image deprovisioning completed")
return operationComplete()
case nodes.Deleting:
p.log.Info("deleting")
// Transitions to Cleaning upon completion
return operationContinuing(deprovisionRequeueDelay)
case nodes.Cleaning:
p.log.Info("cleaning")
// Transitions to Available upon completion
return operationContinuing(deprovisionRequeueDelay)
case nodes.CleanWait:
p.log.Info("cleaning")
return operationContinuing(deprovisionRequeueDelay)
case nodes.Active, nodes.DeployFail:
p.log.Info("starting deprovisioning")
p.publisher("DeprovisioningStarted", "Image deprovisioning started")
return p.changeNodeProvisionState(
ironicNode,
nodes.ProvisionStateOpts{Target: nodes.TargetDeleted},
)
default:
// FIXME(zaneb): this error is unlikely to actually be transient
return transientError(fmt.Errorf("Unhandled ironic state %s", ironicNode.ProvisionState))
}
}
// Delete removes the host from the provisioning system. It may be
// called multiple times, and should return true for its dirty flag
// until the deletion operation is completed.
func (p *ironicProvisioner) Delete() (result provisioner.Result, err error) {
ironicNode, err := p.getNode()
if err != nil {
if errors.Is(err, provisioner.ErrNeedsRegistration) {
p.log.Info("no node found, already deleted")
return operationComplete()
}
return transientError(err)
}
p.log.Info("deleting host",
"ID", ironicNode.UUID,
"lastError", ironicNode.LastError,
"current", ironicNode.ProvisionState,
"target", ironicNode.TargetProvisionState,
"deploy step", ironicNode.DeployStep,
)
if nodes.ProvisionState(ironicNode.ProvisionState) == nodes.Available {
// Move back to manageable so we can delete it cleanly.
return p.changeNodeProvisionState(
ironicNode,
nodes.ProvisionStateOpts{Target: nodes.TargetManage},
)
}
if !ironicNode.Maintenance {
// If we see an active node and the controller doesn't think
// we need to deprovision it, that means the node was
// ExternallyProvisioned and we should remove it from Ironic
// without deprovisioning it.
//
// If we see a node with an error, we will have to set the
// maintenance flag before deleting it.
//
// Any other state requires us to use maintenance mode to
// delete while bypassing Ironic's internal checks related to
// Nova.
p.log.Info("setting host maintenance flag to force image delete")
return p.setMaintenanceFlag(ironicNode, true)
}
p.log.Info("host ready to be removed")
err = nodes.Delete(p.client, ironicNode.UUID).ExtractErr()
switch err.(type) {
case nil:
p.log.Info("removed")
case gophercloud.ErrDefault409:
p.log.Info("could not remove host, busy")
return retryAfterDelay(provisionRequeueDelay)
case gophercloud.ErrDefault404:
p.log.Info("did not find host to delete, OK")
default:
return transientError(errors.Wrap(err, "failed to remove host"))
}
return operationContinuing(0)
}
// Detach removes the host from the provisioning system.
// Similar to Delete, but ensures non-interruptive behavior
// for the target system. It may be called multiple times,
// and should return true for its dirty flag until the
// deletion operation is completed.
func (p *ironicProvisioner) Detach() (result provisioner.Result, err error) {
// Currently the same behavior as Delete()
return p.Delete()
}
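// changePower asks Ironic to move the node to the target power state,
// translating busy (409) and unsupported-state (400) responses into
// typed errors.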
func (p *ironicProvisioner) changePower(ironicNode *nodes.Node, target nodes.TargetPowerState) (result provisioner.Result, err error) {
p.log.Info("changing power state")
if ironicNode.TargetProvisionState != "" {
p.log.Info("host in state that does not allow power change, try again after delay",
"state", ironicNode.ProvisionState,
"target state", ironicNode.TargetProvisionState,
)
return operationContinuing(powerRequeueDelay)
}
powerStateOpts := nodes.PowerStateOpts{
Target: target,
}
if target == softPowerOff {
powerStateOpts.Timeout = int(softPowerOffTimeout.Seconds())
}
changeResult := nodes.ChangePowerState(
p.client,
ironicNode.UUID,
powerStateOpts)
switch changeResult.Err.(type) {
case nil:
p.log.Info("power change OK")
return operationContinuing(0)
case gophercloud.ErrDefault409:
p.log.Info("host is locked, trying again after delay", "delay", powerRequeueDelay)
result, _ = retryAfterDelay(powerRequeueDelay)
return result, HostLockedError{}
case gophercloud.ErrDefault400:
// Error 400 Bad Request means the target power state is not supported by the vendor driver
p.log.Info("power change error", "message", changeResult.Err)
return result, SoftPowerOffUnsupportedError{}
default:
p.log.Info("power change error", "message", changeResult.Err)
return transientError(errors.Wrap(changeResult.Err, "failed to change power state"))
}
}
// PowerOn ensures the server is powered on independently of any image
// provisioning operation.
func (p *ironicProvisioner) PowerOn() (result provisioner.Result, err error) {
p.log.Info("ensuring host is powered on")
ironicNode, err := p.getNode()
if err != nil {
return transientError(err)
}
p.log.Info("checking current state",
"target", ironicNode.TargetPowerState)
if ironicNode.PowerState != powerOn {
if ironicNode.TargetPowerState == powerOn {
p.log.Info("waiting for power status to change")
return operationContinuing(powerRequeueDelay)
}
result, err = p.changePower(ironicNode, nodes.PowerOn)
switch err.(type) {
case nil:
case HostLockedError:
default:
return transientError(errors.Wrap(err, "failed to power on host"))
}
p.publisher("PowerOn", "Host powered on")
}
return result, nil
}
// PowerOff ensures the server is powered off independently of any image
// provisioning operation.
func (p *ironicProvisioner) PowerOff(rebootMode metal3v1alpha1.RebootMode) (result provisioner.Result, err error) {
p.log.Info(fmt.Sprintf("ensuring host is powered off (mode: %s)", rebootMode))
if rebootMode == metal3v1alpha1.RebootModeHard {
result, err = p.hardPowerOff()
} else {
result, err = p.softPowerOff()
}
if err != nil {
switch err.(type) {
// If soft power off is unsupported or has failed,
// fall back to hard power off.
case SoftPowerOffUnsupportedError, SoftPowerOffFailed:
return p.hardPowerOff()
case HostLockedError:
return retryAfterDelay(powerRequeueDelay)
default:
return transientError(err)
}
}
return result, nil
}
// hardPowerOff sends 'power off' request to BM node and waits for the result
func (p *ironicProvisioner) hardPowerOff() (result provisioner.Result, err error) {
p.log.Info("ensuring host is powered off by \"hard power off\" command")
ironicNode, err := p.getNode()
if err != nil {
return transientError(err)
}
if ironicNode.PowerState != powerOff {
if ironicNode.TargetPowerState == powerOff {
p.log.Info("waiting for power status to change")
return operationContinuing(powerRequeueDelay)
}
result, err = p.changePower(ironicNode, nodes.PowerOff)
if err != nil {
return transientError(errors.Wrap(err, "failed to power off host"))
}
p.publisher("PowerOff", "Host powered off")
return result, err
}
return operationComplete()
}
// softPowerOff sends 'soft power off' request to BM node.
// If soft power off is not supported, the request ends with an error.
// Otherwise the request ends with no error and the result should be
// checked later via node fields "power_state", "target_power_state"
// and "last_error".
func (p *ironicProvisioner) softPowerOff() (result provisioner.Result, err error) {
p.log.Info("ensuring host is powered off by \"soft power off\" command")
ironicNode, err := p.getNode()
if err != nil {
return transientError(err)
}
if ironicNode.PowerState != powerOff {
targetState := ironicNode.TargetPowerState
// If the target state is either powerOff or softPowerOff, then we should wait
if targetState == powerOff || targetState == softPowerOff {
p.log.Info("waiting for power status to change")
return operationContinuing(powerRequeueDelay)
}
// If the target state is unset while the last error is set,
// then the last execution of soft power off has failed.
if targetState == "" && ironicNode.LastError != "" {
return result, SoftPowerOffFailed{}
}
result, err = p.changePower(ironicNode, nodes.SoftPowerOff)
if err != nil {
return transientError(err)
}
p.publisher("PowerOff", "Host soft powered off")
}
return result, nil
}
func | (objMeta metav1.ObjectMeta) string {
return objMeta.Namespace + nameSeparator + objMeta.Name
}
// IsReady checks if the provisioning backend is available
func (p *ironicProvisioner) IsReady() (result bool, err error) {
p.debugLog.Info("verifying ironic provisioner dependencies")
checker := newIronicDependenciesChecker(p.client, p.inspector, p.log)
return checker.IsReady()
}
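// HasCapacity reports whether the provisioner can take on another
// host, based on how many nodes are already in a busy provisioning
// state.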
func (p *ironicProvisioner) HasCapacity() (result bool, err error) {
hosts, err := p.loadBusyHosts()
if err != nil {
p.log.Error(err, "Unable to get hosts for determining current provisioner capacity")
return false, err
}
// If the current host is already being processed, skip the capacity check
if _, ok := hosts[ironicNodeName(p.objectMeta)]; ok {
return true, nil
}
return len(hosts) < maxBusyHosts, nil
}
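// loadBusyHosts returns the names of all Ironic nodes currently in a
// busy provisioning state (cleaning, inspecting, deploying or
// deleting).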
func (p *ironicProvisioner) loadBusyHosts() (hosts map[string]struct{}, err error) {
hosts = make(map[string]struct{})
pager := nodes.List(p.client, nodes.ListOpts{
Fields: []string{"uuid,name,provision_state,driver_internal_info,target_provision_state"},
})
page, err := pager.AllPages()
if err != nil {
return nil, err
}
allNodes, err := nodes.ExtractNodes(page)
if err != nil {
return nil, err
}
for _, node := range allNodes {
switch nodes.ProvisionState(node.ProvisionState) {
case nodes.Cleaning, nodes.CleanWait,
nodes.Inspecting, nodes.InspectWait,
nodes.Deploying, nodes.DeployWait,
nodes.Deleting:
hosts[node.Name] = struct{}{}
}
}
return hosts, nil
}
| ironicNodeName |
wcs.py | from __future__ import absolute_import
import numpy as np
import sunpy.sun as sun
import astropy.units as u
rsun_meters = sun.constants.radius.si.value
__all__ = ['_convert_angle_units', 'convert_pixel_to_data', 'convert_hpc_hg',
'convert_data_to_pixel', 'convert_hpc_hcc', 'convert_hcc_hpc',
'convert_hcc_hg', 'convert_hg_hcc', 'proj_tan',
'convert_hg_hpc', 'convert_to_coord',
'get_center']
def _convert_angle_units(unit='arcsec'):
"""Determine the conversion factor between the data units and radians."""
if unit == 'degrees':
return np.deg2rad(1)
elif unit == 'arcmin':
return np.deg2rad(1) / 60.0
elif unit == 'arcsec':
return np.deg2rad(1) / (60 * 60.0)
elif unit == 'mas':
return np.deg2rad(1) / (60 * 60 * 1000.0)
else:
raise ValueError("The units specified are either invalid or is not supported at this time.")
def | (size, scale, reference_pixel,
reference_coordinate, x=None, y=None):
"""Calculate the data coordinate for particular pixel indices.
Parameters
----------
size : 2d ndarray
Number of pixels in width and height.
scale : 2d ndarray
The size of a pixel (dx,dy) in data coordinates (equivalent to WCS/CDELT)
reference_pixel : 2d ndarray
The reference pixel (x,y) at which the reference coordinate is given (equivalent to WCS/CRPIX)
reference_coordinate : 2d ndarray
The data coordinate (x, y) as measured at the reference pixel (equivalent to WCS/CRVAL)
x,y : int or ndarray
The pixel values at which data coordinates are requested. If none are given,
returns coordinates for every pixel.
Returns
-------
out : ndarray
The data coordinates at pixel (x,y).
Notes
-----
    This function assumes a gnomonic projection, which is correct for a detector at the focus
of an optic observing the Sun.
Examples
--------
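    A minimal sketch with hypothetical values (the detector geometry below
    is assumed purely for illustration):

    >>> import sunpy.wcs
    >>> x, y = sunpy.wcs.convert_pixel_to_data([1024, 1024], [0.6, 0.6],
    ...                                        [512.5, 512.5], [0.0, 0.0])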
"""
cdelt = np.array(scale)
crpix = np.array(reference_pixel)
crval = np.array(reference_coordinate)
# first assume that coord is just [x,y]
if (x is None) and (y is None):
x, y = np.meshgrid(np.arange(size[0]), np.arange(size[1]))
# note that crpix[] counts pixels starting at 1
coordx = (x - (crpix[0] - 1)) * cdelt[0] + crval[0]
coordy = (y - (crpix[1] - 1)) * cdelt[1] + crval[1]
    # Correct for gnomonic projection
coordx, coordy = proj_tan(coordx, coordy)
return coordx, coordy
def get_center(size, scale, reference_pixel, reference_coordinate):
"""Returns the center of the image in data coordinates.
Parameters
----------
size : 2d ndarray
Number of pixels in width and height.
scale : 2d ndarray
The size of a pixel (dx,dy) in data coordinates (equivalent to WCS/CDELT)
reference_pixel : 2d ndarray
The reference pixel (x,y) at which the reference coordinate is given (equivalent to WCS/CRPIX)
reference_coordinate : 2d ndarray
The data coordinate (x, y) as measured at the reference pixel (equivalent to WCS/CRVAL)
Returns
-------
out : ndarray
The data coordinates
Examples
--------
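    A minimal sketch with hypothetical, illustrative map geometry:

    >>> import astropy.units as u
    >>> import sunpy.wcs
    >>> center = sunpy.wcs.get_center(u.Quantity([1024, 1024], u.pix),
    ...                               u.Quantity([0.6, 0.6], u.arcsec / u.pix),
    ...                               u.Quantity([512.5, 512.5], u.pix),
    ...                               u.Quantity([0.0, 0.0], u.arcsec))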
"""
return scale * (size - 1 * u.pix) / 2. + reference_coordinate - (reference_pixel - 1 * u.pix) * scale
def convert_data_to_pixel(x, y, scale, reference_pixel, reference_coordinate):
"""Calculate the pixel indices for a given data coordinate.
Parameters
----------
x, y : float
Data coordinate in same units as reference coordinate
scale : 2d ndarray
The size of a pixel (dx,dy) in data coordinates (equivalent to WCS/CDELT)
reference_pixel : 2d ndarray
The reference pixel (x,y) at which the reference coordinate is given (equivalent to WCS/CRPIX)
reference_coordinate : 2d ndarray
The data coordinate (x, y) as measured at the reference pixel (equivalent to WCS/CRVAL)
Returns
-------
out : ndarray
The pixel coordinates (x,y) at that data coordinate.
Examples
--------
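    A minimal sketch with hypothetical values, mirroring the geometry used
    in the convert_pixel_to_data example:

    >>> import sunpy.wcs
    >>> px, py = sunpy.wcs.convert_data_to_pixel(100.0, 200.0, [0.6, 0.6],
    ...                                          [512.5, 512.5], [0.0, 0.0])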
"""
# TODO: Needs to check what coordinate system the data is given in
cdelt = np.array(scale)
crpix = np.array(reference_pixel)
crval = np.array(reference_coordinate)
# De-apply any tabular projections.
# coord = inv_proj_tan(coord)
# note that crpix[] counts pixels starting at 1
    pixelx = (x - crval[0]) / cdelt[0] + (crpix[0] - 1)
pixely = (y - crval[1]) / cdelt[1] + (crpix[1] - 1)
return pixelx, pixely
def convert_hpc_hcc(x, y, dsun_meters=None, angle_units='arcsec', z=False):
"""Converts from Helioprojective-Cartesian (HPC) coordinates into
Heliocentric-Cartesian (HCC) coordinates. Returns all three dimensions, x, y, z in
meters.
Parameters
----------
x, y : float
Data coordinate in angle units (default is arcsec)
dsun_meters : float
Distance from the observer to the Sun in meters. Default is 1 AU.
angle_units : str
Units of the data coordinates (e.g. arcsec, arcmin, degrees). Default is arcsec.
z : Bool
If true return the z coordinate as well.
Returns
-------
out : ndarray
The data coordinates (x,y,z) in heliocentric cartesian coordinates in meters.
Notes
-----
Implements Eq. (15) of Thompson (2006), A&A, 449, 791.
Examples
--------
>>> import sunpy.wcs
>>> sunpy.wcs.convert_hpc_hcc(40.0, 32.0, z=True)
(28876152.176423457, 23100922.071266972, 694524220.8157959)
"""
c = np.array([_convert_angle_units(unit=angle_units),
_convert_angle_units(unit=angle_units)])
cosx = np.cos(x * c[0])
sinx = np.sin(x * c[0])
cosy = np.cos(y * c[1])
siny = np.sin(y * c[1])
if dsun_meters is None:
dsun_meters = sun.constants.au.si.value
elif isinstance(dsun_meters, u.Quantity):
dsun_meters = dsun_meters.si.value
q = dsun_meters * cosy * cosx
distance = q ** 2 - dsun_meters ** 2 + rsun_meters ** 2
# distance[np.where(distance < 0)] = np.sqrt(-1)
distance = q - np.sqrt(distance)
rx = distance * cosy * sinx
ry = distance * siny
rz = dsun_meters - distance * cosy * cosx
if np.all(z == True):
return rx, ry, rz
else:
return rx, ry
def convert_hcc_hpc(x, y, dsun_meters=None, angle_units='arcsec'):
"""Convert Heliocentric-Cartesian (HCC) to angular
Helioprojective-Cartesian (HPC) coordinates (in degrees).
Parameters
----------
x, y : float (meters)
Data coordinate in meters.
dsun_meters : float
Distance from the observer to the Sun in meters. Default is 1 AU.
angle_units : str
Units of the data coordinates (e.g. arcsec, arcmin, degrees). Default is arcsec.
Returns
-------
out : ndarray
The data coordinates (x,y) in helioprojective cartesian coordinates in arcsec.
Notes
-----
Implements Eq. (16) of Thompson (2006), A&A, 449, 791.
Examples
--------
>>> import sunpy.wcs
>>> sunpy.wcs.convert_hcc_hpc(28748691, 22998953)
(39.823439773829705, 31.858751644835717)
"""
# Calculate the z coordinate by assuming that it is on the surface of the Sun
z = np.sqrt(rsun_meters ** 2 - x ** 2 - y ** 2)
if dsun_meters is None:
dsun_meters = sun.constants.au.si.value
elif isinstance(dsun_meters, u.Quantity):
dsun_meters = dsun_meters.si.value
zeta = dsun_meters - z
distance = np.sqrt(x**2 + y**2 + zeta**2)
hpcx = np.rad2deg(np.arctan2(x, zeta))
hpcy = np.rad2deg(np.arcsin(y / distance))
if angle_units == 'arcsec':
hpcx = 60 * 60 * hpcx
hpcy = 60 * 60 * hpcy
elif angle_units == 'arcmin':
hpcx = 60 * hpcx
hpcy = 60 * hpcy
return hpcx, hpcy
def convert_hcc_hg(x, y, z=None, b0_deg=0, l0_deg=0, radius=False):
"""Convert from Heliocentric-Cartesian (HCC) (given in meters) to
Stonyhurst Heliographic coordinates (HG) given in degrees, with
radial output in meters.
Parameters
----------
x, y : float (meters)
Data coordinate in meters.
z : float (meters)
Data coordinate in meters. If None, then the z-coordinate is assumed
to be on the Sun.
b0_deg : float (degrees)
Tilt of the solar North rotational axis toward the observer
(heliographic latitude of the observer). Usually given as SOLAR_B0,
HGLT_OBS, or CRLT_OBS. Default is 0.
l0_deg : float (degrees)
Carrington longitude of central meridian as seen from Earth. Default is 0.
radius : Bool
If true, forces the output to return a triple of (lon, lat, r). If
false, return (lon, lat) only.
Returns
-------
out : ndarray (degrees, meters)
if radius is false, return the data coordinates (lon, lat). If
radius=True, return the data coordinates (lon, lat, r). The quantities
(lon, lat) are the heliographic coordinates in degrees. The quantity
'r' is the heliographic radius in meters.
Notes
-----
Implements Eq. (12) of Thompson (2006), A&A, 449, 791.
Examples
--------
>>> import sunpy.wcs
>>> sunpy.wcs.convert_hcc_hg(230000.0,45000000.0,
... z=695508000.0 + 8000000.0, radius=True)
(0.01873188196651189, 3.6599471896203317, 704945784.41465974)
"""
if z is None:
z = np.sqrt(rsun_meters**2 - x**2 - y**2)
cosb = np.cos(np.deg2rad(b0_deg))
sinb = np.sin(np.deg2rad(b0_deg))
hecr = np.sqrt(x**2 + y**2 + z**2)
hgln = np.arctan2(x, z * cosb - y * sinb) + np.deg2rad(l0_deg)
hglt = np.arcsin((y * cosb + z * sinb) / hecr)
if radius:
return np.rad2deg(hgln), np.rad2deg(hglt), hecr
else:
return np.rad2deg(hgln), np.rad2deg(hglt)
def convert_hg_hcc(hglon_deg, hglat_deg, b0_deg=0, l0_deg=0, occultation=False,
z=False, r=rsun_meters):
"""Convert from Stonyhurst Heliographic coordinates (given in degrees) to
Heliocentric-Cartesian coordinates (given in meters).
Parameters
----------
hglon_deg, hglat_deg : float (degrees)
Heliographic longitude and latitude in degrees.
b0_deg : float (degrees)
Tilt of the solar North rotational axis toward the observer
(heliographic latitude of the observer). Usually given as SOLAR_B0,
HGLT_OBS, or CRLT_OBS. Default is 0.
l0_deg : float (degrees)
Carrington longitude of central meridian as seen from Earth. Default is 0.
occultation : Bool
If true set all points behind the Sun (e.g. not visible) to Nan.
z : Bool
If true return the z coordinate as well.
r : float (meters)
Heliographic radius
Returns
-------
out : ndarray (meters)
The data coordinates in Heliocentric-Cartesian coordinates.
Notes
-----
Implements Eq. (11) of Thompson (2006), A&A, 449, 791, with the default
assumption that the value 'r' in Eq. (11) is identical to the radius of the
Sun.
Examples
--------
>>> import sunpy.wcs
>>> sunpy.wcs.convert_hg_hcc(0.01873188196651189, 3.6599471896203317,
... r=704945784.41465974, z=True)
(230000.0, 45000000.0, 703508000.0)
"""
lon = np.deg2rad(hglon_deg)
lat = np.deg2rad(hglat_deg)
cosb = np.cos(np.deg2rad(b0_deg))
sinb = np.sin(np.deg2rad(b0_deg))
lon = lon - np.deg2rad(l0_deg)
cosx = np.cos(lon)
sinx = np.sin(lon)
cosy = np.cos(lat)
siny = np.sin(lat)
# Perform the conversion.
x = r * cosy * sinx
y = r * (siny * cosb - cosy * cosx * sinb)
zz = r * (siny * sinb + cosy * cosx * cosb)
if occultation:
x[zz < 0] = np.nan
y[zz < 0] = np.nan
if np.all(z == True):
return x, y, zz
else:
return x, y
def convert_hg_hpc(hglon_deg, hglat_deg, b0_deg=0, l0_deg=0, dsun_meters=None, angle_units='arcsec',
occultation=False):
"""Convert from Heliographic coordinates (HG) to Helioprojective-Cartesian
(HPC).
Parameters
----------
hglon_deg, hglat_deg : float (degrees)
Heliographic longitude and latitude in degrees.
b0_deg : float (degrees)
Tilt of the solar North rotational axis toward the observer
(heliographic latitude of the observer). Usually given as SOLAR_B0,
HGLT_OBS, or CRLT_OBS. Default is 0.
l0_deg : float (degrees)
Carrington longitude of central meridian as seen from Earth. Default is 0.
occultation : Bool
If true set all points behind the Sun (e.g. not visible) to Nan.
dsun_meters : float (meters)
Distance between the observer and the Sun.
    angle_units : str
        Units of the data coordinates (e.g. arcsec, arcmin, degrees). Default is arcsec.
Returns
-------
out : ndarray (arcsec)
The data coordinates (x,y) in Helioprojective-Cartesian coordinates.
Notes
-----
Uses equations 11 and 16 in Thompson (2006), A&A, 449, 791-803.
Examples
--------
>>> import sunpy.wcs
>>> sunpy.wcs.convert_hg_hpc(34.0, 45.0, b0_deg=-7.064078, l0_deg=0.0)
(380.05656560308898, 743.78281283290016)
"""
tempx, tempy = convert_hg_hcc(hglon_deg, hglat_deg, b0_deg=b0_deg, l0_deg=l0_deg, occultation=occultation)
x, y = convert_hcc_hpc(tempx, tempy, dsun_meters=dsun_meters, angle_units=angle_units)
return x, y
def convert_hpc_hg(x, y, b0_deg=0, l0_deg=0, dsun_meters=None, angle_units='arcsec'):
"""Convert from Helioprojective-Cartesian (HPC) to Heliographic coordinates
(HG) in degrees.
Parameters
----------
    x, y : float
Data coordinate in angle units.
    b0_deg : float (degrees)
Tilt of the solar North rotational axis toward the observer
(heliographic latitude of the observer). Usually given as SOLAR_B0,
HGLT_OBS, or CRLT_OBS. Default is 0.
    l0_deg : float (degrees)
Carrington longitude of central meridian as seen from Earth. Default is 0.
dsun_meters : float (meters)
Distance between the observer and the Sun.
angle_units : str
Units used for input x and y. Default is arcsec.
Returns
-------
out : ndarray (degrees)
The data coordinates (hglongitude, hglatitude) in Heliographic coordinates.
Notes
-----
Uses equations 15 and 12 in Thompson (2006), A&A, 449, 791-803.
Examples
--------
>>> import sunpy.wcs
>>> sunpy.wcs.convert_hpc_hg(382, 748, b0_deg=-7.064078, l0_deg=0.0)
(34.504653439914669, 45.443143275518182)
"""
tempx, tempy = convert_hpc_hcc(x, y, dsun_meters=dsun_meters, angle_units=angle_units)
lon, lat = convert_hcc_hg(tempx, tempy, b0_deg=b0_deg, l0_deg=l0_deg)
return lon, lat
def proj_tan(x, y, force=False):
"""Applies the gnomonic (TAN) projection to intermediate relative
coordinates. This function is not currently implemented!"""
# if pixels are within 3 degrees of the Sun then skip the calculation unless
# force is True. This applies to all sdo images so this function is just
# here as a place holder for the future
# TODO: write proj_tan function
return x, y
def convert_to_coord(x, y, from_coord, to_coord, b0_deg=0, l0_deg=0, dsun_meters=None, angle_units='arcsec'):
"""Apply a coordinate transform to coordinates. Right now can only do hpc
to hcc to hg"""
if (from_coord == 'hcc') and (to_coord == 'hg'):
rx, ry = convert_hcc_hg(x, y, b0_deg=b0_deg, l0_deg=l0_deg)
elif (from_coord == 'hpc') and (to_coord == 'hg'):
rx, ry = convert_hpc_hg(x, y, b0_deg=b0_deg, l0_deg=l0_deg, dsun_meters=dsun_meters, angle_units=angle_units)
elif (from_coord == 'hg') and (to_coord == 'hcc'):
rx, ry = convert_hg_hcc(x, y, b0_deg=b0_deg, l0_deg=l0_deg)
elif (from_coord == 'hcc') and (to_coord == 'hpc'):
rx, ry = convert_hcc_hpc(x, y, dsun_meters=dsun_meters, angle_units=angle_units)
elif (from_coord == 'hg') and (to_coord == 'hpc'):
rx, ry = convert_hg_hpc(x, y, b0_deg=b0_deg, l0_deg=l0_deg, dsun_meters=dsun_meters, angle_units=angle_units)
    elif (from_coord == 'hpc') and (to_coord == 'hcc'):
        rx, ry = convert_hpc_hcc(x, y, dsun_meters=dsun_meters, angle_units=angle_units)
    else:
        raise ValueError("Unsupported conversion: %s -> %s" % (from_coord, to_coord))
    return rx, ry
| convert_pixel_to_data |
startQiskit_Class2296.py | # qubit number=4
# total number=33
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def | (n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=13
prog.cx(input_qubit[0],input_qubit[3]) # number=17
prog.x(input_qubit[3]) # number=18
prog.rx(-3.1101767270538954,input_qubit[1]) # number=27
prog.cx(input_qubit[0],input_qubit[3]) # number=19
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[1]) # number=26
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.x(input_qubit[3]) # number=29
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[0]) # number=30
prog.cz(input_qubit[3],input_qubit[0]) # number=31
prog.h(input_qubit[0]) # number=32
prog.cx(input_qubit[3],input_qubit[0]) # number=23
prog.z(input_qubit[3]) # number=24
prog.cx(input_qubit[3],input_qubit[0]) # number=25
prog.cx(input_qubit[3],input_qubit[0]) # number=22
prog.h(input_qubit[3]) # number=8
prog.z(input_qubit[3]) # number=28
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
# circuit end
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('statevector_simulator')
    sample_shot = 8000
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class2296.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| build_oracle |
ftp_client_server.py | # server
import socket # Import socket module
port = 60000 # Reserve a port for your service.
s = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
s.bind((host, port)) # Bind to the port
s.listen(5) # Now wait for client connection.
print('Server listening....')
while True:
conn, addr = s.accept() # Establish connection with client.
print('Got connection from', addr)
data = conn.recv(1024)
print('Server received', repr(data))
filename = 'mytext.txt'
f = open(filename, 'rb')
in_data = f.read(1024)
    while in_data:
conn.send(in_data)
print('Sent ', repr(in_data))
in_data = f.read(1024)
f.close()
print('Done sending')
    conn.send(b'Thank you for connecting')
conn.close()
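# Note: the server above and the client below are meant to run as two
# separate processes (start the server first); they appear together in
# this one file only for reference.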
# client side
import socket # Import socket module
s = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 60000 # Reserve a port for your service.
s.connect((host, port))
s.send("Hello server!") | print('receiving data...')
data = s.recv(1024)
        print('data = %s' % data)
if not data:
break
# write data to a file
f.write(data)
f.close()
print('Successfully get the file')
s.close()
print('connection closed') |
with open('received_file', 'wb') as f:
print('file opened')
while True: |
mig.go | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
package main
import (
"bufio"
"fmt"
"log"
"os"
"github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml"
)
const (
nvidiaProcDriverPath = "/proc/driver/nvidia"
nvidiaCapabilitiesPath = nvidiaProcDriverPath + "/capabilities"
nvcapsProcDriverPath = "/proc/driver/nvidia-caps"
nvcapsMigMinorsPath = nvcapsProcDriverPath + "/mig-minors"
nvcapsDevicePath = "/dev/nvidia-caps"
)
// MIGCapableDevices stores information about all devices on the node
type MIGCapableDevices struct {
// devicesMap holds a list of devices, separated by whether they have MigEnabled or not
devicesMap map[bool][]*nvml.Device
}
// NewMIGCapableDevices creates a new MIGCapableDevices struct and returns a pointer to it.
func NewMIGCapableDevices() *MIGCapableDevices {
return &MIGCapableDevices{
devicesMap: nil, // Is initialized on first use
}
}
func (devices *MIGCapableDevices) getDevicesMap() (map[bool][]*nvml.Device, error) {
if devices.devicesMap == nil {
n, err := nvml.GetDeviceCount()
if err != nil {
return nil, err
}
migEnabledDevicesMap := make(map[bool][]*nvml.Device)
for i := uint(0); i < n; i++ {
d, err := nvml.NewDeviceLite(i)
if err != nil {
return nil, err
}
isMigEnabled, err := d.IsMigEnabled()
if err != nil {
return nil, err
}
migEnabledDevicesMap[isMigEnabled] = append(migEnabledDevicesMap[isMigEnabled], d)
}
devices.devicesMap = migEnabledDevicesMap
}
return devices.devicesMap, nil
}
// GetDevicesWithMigEnabled returns a list of devices with migEnabled=true
func (devices *MIGCapableDevices) GetDevicesWithMigEnabled() ([]*nvml.Device, error) {
devicesMap, err := devices.getDevicesMap()
if err != nil {
return nil, err
}
return devicesMap[true], nil
}
// GetDevicesWithMigDisabled returns a list of devices with migEnabled=false
func (devices *MIGCapableDevices) GetDevicesWithMigDisabled() ([]*nvml.Device, error) {
devicesMap, err := devices.getDevicesMap()
if err != nil {
return nil, err
}
return devicesMap[false], nil
}
// AssertAllMigEnabledDevicesAreValid ensures that all devices with migEnabled=true are valid. This means:
// * They have at least 1 MIG device associated with them
// Returns nil if all such devices are valid, or an error otherwise
func (devices *MIGCapableDevices) AssertAllMigEnabledDevicesAreValid() error {
devicesMap, err := devices.getDevicesMap()
if err != nil {
return err
}
for _, d := range devicesMap[true] {
migs, err := d.GetMigDevices()
if err != nil {
return err
}
if len(migs) == 0 {
return fmt.Errorf("No MIG devices associated with %v: %v", d.Path, d.UUID)
}
}
return nil
}
// GetAllMigDevices returns a list of all MIG devices.
func (devices *MIGCapableDevices) GetAllMigDevices() ([]*nvml.Device, error) {
devicesMap, err := devices.getDevicesMap()
if err != nil {
return nil, err
}
var migs []*nvml.Device
for _, d := range devicesMap[true] {
devs, err := d.GetMigDevices()
if err != nil {
return nil, err
}
migs = append(migs, devs...)
}
return migs, nil
}
// GetMigCapabilityDevicePaths returns a mapping of MIG capability path to device node path
func | () (map[string]string, error) {
// Open nvcapsMigMinorsPath for walking.
// If the nvcapsMigMinorsPath does not exist, then we are not on a MIG
// capable machine, so there is nothing to do.
// The format of this file is discussed in:
// https://docs.nvidia.com/datacenter/tesla/mig-user-guide/index.html#unique_1576522674
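	// For illustration, the lines in that file look roughly like the
	// following (hypothetical values, inferred from the parsing below):
	//   config 1
	//   monitor 2
	//   gpu0/gi1/access 3
	//   gpu0/gi1/ci0/access 4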
minorsFile, err := os.Open(nvcapsMigMinorsPath)
if os.IsNotExist(err) {
return nil, nil
}
if err != nil {
return nil, fmt.Errorf("error opening MIG minors file: %v", err)
}
defer minorsFile.Close()
	// Define a function to process each line of nvcapsMigMinorsPath
processLine := func(line string) (string, int, error) {
var gpu, gi, ci, migMinor int
// Look for a CI access file
n, _ := fmt.Sscanf(line, "gpu%d/gi%d/ci%d/access %d", &gpu, &gi, &ci, &migMinor)
if n == 4 {
capPath := fmt.Sprintf(nvidiaCapabilitiesPath+"/gpu%d/mig/gi%d/ci%d/access", gpu, gi, ci)
return capPath, migMinor, nil
}
// Look for a GI access file
n, _ = fmt.Sscanf(line, "gpu%d/gi%d/access %d", &gpu, &gi, &migMinor)
if n == 3 {
capPath := fmt.Sprintf(nvidiaCapabilitiesPath+"/gpu%d/mig/gi%d/access", gpu, gi)
return capPath, migMinor, nil
}
// Look for the MIG config file
n, _ = fmt.Sscanf(line, "config %d", &migMinor)
if n == 1 {
			capPath := nvidiaCapabilitiesPath + "/mig/config"
return capPath, migMinor, nil
}
// Look for the MIG monitor file
n, _ = fmt.Sscanf(line, "monitor %d", &migMinor)
if n == 1 {
			capPath := nvidiaCapabilitiesPath + "/mig/monitor"
return capPath, migMinor, nil
}
return "", 0, fmt.Errorf("unparsable line: %v", line)
}
// Walk each line of nvcapsMigMinorsPath and construct a mapping of nvidia
// capabilities path to device minor for that capability
capsDevicePaths := make(map[string]string)
scanner := bufio.NewScanner(minorsFile)
for scanner.Scan() {
capPath, migMinor, err := processLine(scanner.Text())
if err != nil {
log.Printf("Skipping line in MIG minors file: %v", err)
continue
}
capsDevicePaths[capPath] = fmt.Sprintf(nvcapsDevicePath+"/nvidia-cap%d", migMinor)
}
return capsDevicePaths, nil
}
// GetMigDeviceNodePaths returns a list of device node paths associated with a MIG device
func GetMigDeviceNodePaths(parent *nvml.Device, mig *nvml.Device) ([]string, error) {
capDevicePaths, err := GetMigCapabilityDevicePaths()
if err != nil {
return nil, fmt.Errorf("error getting MIG capability device paths: %v", err)
}
var gpu int
_, err = fmt.Sscanf(parent.Path, "/dev/nvidia%d", &gpu)
if err != nil {
return nil, fmt.Errorf("error getting GPU minor: %v", err)
}
gi, err := mig.GetGPUInstanceId()
if err != nil {
return nil, fmt.Errorf("error getting MIG GPU instance ID: %v", err)
}
ci, err := mig.GetComputeInstanceId()
if err != nil {
return nil, fmt.Errorf("error getting MIG compute instance ID: %v", err)
}
giCapPath := fmt.Sprintf(nvidiaCapabilitiesPath+"/gpu%d/mig/gi%d/access", gpu, gi)
if _, exists := capDevicePaths[giCapPath]; !exists {
return nil, fmt.Errorf("missing MIG GPU instance capability path: %v", giCapPath)
}
ciCapPath := fmt.Sprintf(nvidiaCapabilitiesPath+"/gpu%d/mig/gi%d/ci%d/access", gpu, gi, ci)
if _, exists := capDevicePaths[ciCapPath]; !exists {
return nil, fmt.Errorf("missing MIG GPU instance capability path: %v", giCapPath)
}
devicePaths := []string{
parent.Path,
capDevicePaths[giCapPath],
capDevicePaths[ciCapPath],
}
return devicePaths, nil
}
| GetMigCapabilityDevicePaths |
login.administrator.dto.ts | import * as Validator from 'class-validator';
export class LoginAdministratorDto {
@Validator.IsNotEmpty()
@Validator.IsString()
@Validator.Matches(/^[a-z][a-z0-9\.]{3,30}[a-z0-9]$/)
username: string;
@Validator.IsNotEmpty()
@Validator.IsString()
@Validator.Length(6, 128) | password: string;
} |
|
recorder.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::Row::*;
use super::escape;
use super::span_utils::SpanUtils;
use std::io::Write;
use syntax::ast;
use syntax::ast::{NodeId,DefId};
use syntax::codemap::*;
const ZERO_DEF_ID: DefId = DefId { node: 0, krate: 0 };
pub struct Recorder {
// output file
pub out: Box<Write+'static>,
pub dump_spans: bool,
}
impl Recorder {
pub fn record(&mut self, info: &str) {
match write!(self.out, "{}", info) {
Err(_) => error!("Error writing output '{}'", info),
_ => (),
}
}
pub fn dump_span(&mut self,
su: SpanUtils,
kind: &str,
span: Span,
_sub_span: Option<Span>) {
assert!(self.dump_spans);
let result = format!("span,kind,{},{},text,\"{}\"\n",
kind, su.extent_str(span), escape(su.snippet(span)));
self.record(&result[..]);
}
}
pub struct FmtStrs<'a> {
pub recorder: Box<Recorder>,
span: SpanUtils<'a>,
}
macro_rules! s { ($e:expr) => { format!("{}", $e) }}
macro_rules! svec {
($($e:expr),*) => ({
// leading _ to allow empty construction without a warning.
let mut _temp = ::std::vec::Vec::new();
$(_temp.push(s!($e));)*
_temp
})
}
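// For illustration: `svec!(1, "x")` builds a Vec<String> equal to
// vec!["1".to_string(), "x".to_string()], using the s! and svec! macros above.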
#[derive(Copy, Debug, Eq, PartialEq)]
pub enum Row {
Variable,
Enum,
Variant,
VariantStruct,
Function,
MethodDecl,
Struct,
Trait,
Impl,
Module,
UseAlias,
UseGlob,
ExternCrate,
Inheritance,
MethodCall,
Typedef,
ExternalCrate,
Crate,
FnCall,
ModRef,
VarRef,
TypeRef,
StructRef,
FnRef,
}
impl<'a> FmtStrs<'a> {
pub fn new(rec: Box<Recorder>, span: SpanUtils<'a>) -> FmtStrs<'a> {
FmtStrs {
recorder: rec,
span: span,
}
}
// A map from kind of item to a tuple of
// a string representation of the name
// a vector of field names
// whether this kind requires a span
// whether dump_spans should dump for this kind
fn lookup_row(r: Row) -> (&'static str, Vec<&'static str>, bool, bool) {
match r {
Variable => ("variable",
vec!("id","name","qualname","value","type","scopeid"),
true, true),
Enum => ("enum", vec!("id","qualname","scopeid","value"), true, true),
Variant => ("variant",
vec!("id","name","qualname","type","value","scopeid"),
true, true),
VariantStruct => ("variant_struct",
vec!("id","ctor_id","qualname","type","value","scopeid"),
true, true),
Function => ("function",
vec!("id","qualname","declid","declidcrate","scopeid"),
true, true),
MethodDecl => ("method_decl", vec!("id","qualname","scopeid"), true, true),
Struct => ("struct", vec!("id","ctor_id","qualname","scopeid","value"), true, true),
Trait => ("trait", vec!("id","qualname","scopeid","value"), true, true),
Impl => ("impl",
vec!("id","refid","refidcrate","traitid","traitidcrate","scopeid"),
true, true),
Module => ("module", vec!("id","qualname","scopeid","def_file"), true, false),
UseAlias => ("use_alias",
vec!("id","refid","refidcrate","name","scopeid"),
true, true),
UseGlob => ("use_glob", vec!("id","value","scopeid"), true, true),
ExternCrate => ("extern_crate",
vec!("id","name","location","crate","scopeid"),
true, true),
Inheritance => ("inheritance",
vec!("base","basecrate","derived","derivedcrate"),
true, false),
MethodCall => ("method_call",
vec!("refid","refidcrate","declid","declidcrate","scopeid"),
true, true),
Typedef => ("typedef", vec!("id","qualname","value"), true, true),
ExternalCrate => ("external_crate", vec!("name","crate","file_name"), false, false),
Crate => ("crate", vec!("name"), true, false),
FnCall => ("fn_call", vec!("refid","refidcrate","qualname","scopeid"), true, true),
ModRef => ("mod_ref", vec!("refid","refidcrate","qualname","scopeid"), true, true),
VarRef => ("var_ref", vec!("refid","refidcrate","qualname","scopeid"), true, true),
TypeRef => ("type_ref",
vec!("refid","refidcrate","qualname","scopeid"),
true, true),
StructRef => ("struct_ref",
vec!("refid","refidcrate","qualname","scopeid"),
true, true),
FnRef => ("fn_ref", vec!("refid","refidcrate","qualname","scopeid"), true, true)
}
}
pub fn make_values_str(&self,
kind: &'static str,
fields: &Vec<&'static str>,
values: Vec<String>,
span: Span) -> Option<String> {
if values.len() != fields.len() {
self.span.sess.span_bug(span, &format!(
"Mismatch between length of fields for '{}', expected '{}', found '{}'",
kind, fields.len(), values.len()));
}
let values = values.iter().map(|s| {
// Never take more than 1020 chars
if s.len() > 1020 {
&s[..1020]
} else {
&s[..]
}
});
let pairs = fields.iter().zip(values);
let strs = pairs.map(|(f, v)| format!(",{},\"{}\"", f, escape(String::from_str(v))));
Some(strs.fold(String::new(), |mut s, ss| {
s.push_str(&ss[..]);
s
}))
}
pub fn record_without_span(&mut self,
kind: Row,
values: Vec<String>,
span: Span) {
let (label, ref fields, needs_span, dump_spans) = FmtStrs::lookup_row(kind);
if needs_span {
self.span.sess.span_bug(span, &format!(
"Called record_without_span for '{}' which does requires a span",
label));
}
assert!(!dump_spans);
if self.recorder.dump_spans {
return;
}
let values_str = match self.make_values_str(label, fields, values, span) {
Some(vs) => vs,
None => return,
};
let mut result = String::from_str(label);
result.push_str(&values_str[..]);
result.push_str("\n");
self.recorder.record(&result[..]);
}
pub fn record_with_span(&mut self,
kind: Row,
span: Span,
sub_span: Span,
values: Vec<String>) {
let (label, ref fields, needs_span, dump_spans) = FmtStrs::lookup_row(kind);
if self.recorder.dump_spans {
if dump_spans {
self.recorder.dump_span(self.span.clone(),
label,
span,
Some(sub_span));
}
return;
}
if !needs_span {
self.span.sess.span_bug(span,
&format!("Called record_with_span for '{}' \
which does not require a span", label));
}
let values_str = match self.make_values_str(label, fields, values, span) {
Some(vs) => vs,
None => return,
};
let result = format!("{},{}{}\n", label, self.span.extent_str(sub_span), values_str);
self.recorder.record(&result[..]);
}
pub fn check_and_record(&mut self,
kind: Row,
span: Span,
sub_span: Option<Span>,
values: Vec<String>) {
match sub_span {
Some(sub_span) => self.record_with_span(kind, span, sub_span, values),
None => {
let (label, _, _, _) = FmtStrs::lookup_row(kind);
self.span.report_span_err(label, span);
}
}
}
pub fn variable_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
name: &str,
value: &str,
typ: &str) {
// Getting a fully qualified name for a variable is hard because in
// the local case they can be overridden in one block and there is no nice way
        // to refer to such a scope in English, so we just hack it by appending the
// variable def's node id
let mut qualname = String::from_str(name);
qualname.push_str("$");
qualname.push_str(&id.to_string());
self.check_and_record(Variable,
span,
sub_span,
svec!(id, name, qualname, value, typ, 0));
}
// formal parameters
pub fn formal_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
fn_name: &str,
name: &str,
typ: &str) {
let mut qualname = String::from_str(fn_name);
qualname.push_str("::");
qualname.push_str(name);
self.check_and_record(Variable,
span,
sub_span,
svec!(id, name, qualname, "", typ, 0));
}
// value is the initialising expression of the static if it is not mut, otherwise "".
pub fn static_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
name: &str,
qualname: &str,
value: &str,
typ: &str,
scope_id: NodeId) {
self.check_and_record(Variable,
span,
sub_span,
svec!(id, name, qualname, value, typ, scope_id));
}
pub fn field_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
name: &str,
qualname: &str,
typ: &str,
scope_id: NodeId) {
self.check_and_record(Variable,
span,
sub_span,
svec!(id, name, qualname, "", typ, scope_id));
}
pub fn enum_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
name: &str,
scope_id: NodeId,
value: &str) {
self.check_and_record(Enum,
span,
sub_span,
svec!(id, name, scope_id, value));
}
pub fn tuple_variant_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
name: &str,
qualname: &str,
typ: &str,
val: &str,
scope_id: NodeId) {
self.check_and_record(Variant,
span,
sub_span,
svec!(id, name, qualname, typ, val, scope_id));
}
pub fn struct_variant_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
ctor_id: NodeId,
name: &str,
typ: &str,
val: &str,
scope_id: NodeId) {
self.check_and_record(VariantStruct,
span,
sub_span,
svec!(id, ctor_id, name, typ, val, scope_id));
}
pub fn fn_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
name: &str,
scope_id: NodeId) {
self.check_and_record(Function,
span,
sub_span,
svec!(id, name, "", "", scope_id));
}
pub fn method_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
name: &str,
decl_id: Option<DefId>,
scope_id: NodeId) {
let values = match decl_id {
Some(decl_id) => svec!(id, name, decl_id.node, decl_id.krate, scope_id),
None => svec!(id, name, "", "", scope_id)
};
self.check_and_record(Function,
span,
sub_span,
values);
}
pub fn method_decl_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
name: &str,
scope_id: NodeId) {
self.check_and_record(MethodDecl,
span,
sub_span,
svec!(id, name, scope_id));
}
pub fn struct_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
ctor_id: NodeId,
name: &str,
scope_id: NodeId,
value: &str) {
self.check_and_record(Struct,
span,
sub_span,
svec!(id, ctor_id, name, scope_id, value));
}
pub fn trait_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
name: &str,
scope_id: NodeId,
value: &str) {
self.check_and_record(Trait,
span,
sub_span,
svec!(id, name, scope_id, value));
}
pub fn impl_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
ref_id: Option<DefId>,
trait_id: Option<DefId>,
scope_id: NodeId) |
pub fn mod_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
name: &str,
parent: NodeId,
filename: &str) {
self.check_and_record(Module,
span,
sub_span,
svec!(id, name, parent, filename));
}
pub fn use_alias_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
mod_id: Option<DefId>,
name: &str,
parent: NodeId) {
let (mod_node, mod_crate) = match mod_id {
Some(mod_id) => (mod_id.node, mod_id.krate),
None => (0, 0)
};
self.check_and_record(UseAlias,
span,
sub_span,
svec!(id, mod_node, mod_crate, name, parent));
}
pub fn use_glob_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
values: &str,
parent: NodeId) {
self.check_and_record(UseGlob,
span,
sub_span,
svec!(id, values, parent));
}
pub fn extern_crate_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
cnum: ast::CrateNum,
name: &str,
loc: &str,
parent: NodeId) {
self.check_and_record(ExternCrate,
span,
sub_span,
svec!(id, name, loc, cnum, parent));
}
pub fn inherit_str(&mut self,
span: Span,
sub_span: Option<Span>,
base_id: DefId,
deriv_id: NodeId) {
self.check_and_record(Inheritance,
span,
sub_span,
svec!(base_id.node,
base_id.krate,
deriv_id,
0));
}
pub fn fn_call_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: DefId,
scope_id:NodeId) {
self.check_and_record(FnCall,
span,
sub_span,
svec!(id.node, id.krate, "", scope_id));
}
pub fn meth_call_str(&mut self,
span: Span,
sub_span: Option<Span>,
defid: Option<DefId>,
declid: Option<DefId>,
scope_id: NodeId) {
let (dfn, dfk) = match defid {
Some(defid) => (defid.node, defid.krate),
None => (0, 0)
};
let (dcn, dck) = match declid {
Some(declid) => (s!(declid.node), s!(declid.krate)),
None => ("".to_string(), "".to_string())
};
self.check_and_record(MethodCall,
span,
sub_span,
svec!(dfn, dfk, dcn, dck, scope_id));
}
pub fn sub_mod_ref_str(&mut self,
span: Span,
sub_span: Span,
qualname: &str,
parent:NodeId) {
self.record_with_span(ModRef,
span,
sub_span,
svec!(0, 0, qualname, parent));
}
pub fn typedef_str(&mut self,
span: Span,
sub_span: Option<Span>,
id: NodeId,
qualname: &str,
value: &str) {
self.check_and_record(Typedef,
span,
sub_span,
svec!(id, qualname, value));
}
pub fn crate_str(&mut self,
span: Span,
name: &str) {
self.record_with_span(Crate,
span,
span,
svec!(name));
}
pub fn external_crate_str(&mut self,
span: Span,
name: &str,
num: ast::CrateNum) {
let lo_loc = self.span.sess.codemap().lookup_char_pos(span.lo);
self.record_without_span(ExternalCrate,
svec!(name, num, lo_loc.file.name),
span);
}
pub fn sub_type_ref_str(&mut self,
span: Span,
sub_span: Span,
qualname: &str) {
self.record_with_span(TypeRef,
span,
sub_span,
svec!(0, 0, qualname, 0));
}
// A slightly generic function for a reference to an item of any kind.
pub fn ref_str(&mut self,
kind: Row,
span: Span,
sub_span: Option<Span>,
id: DefId,
scope_id: NodeId) {
self.check_and_record(kind,
span,
sub_span,
svec!(id.node, id.krate, "", scope_id));
}
}
| {
let ref_id = ref_id.unwrap_or(ZERO_DEF_ID);
let trait_id = trait_id.unwrap_or(ZERO_DEF_ID);
self.check_and_record(Impl,
span,
sub_span,
svec!(id,
ref_id.node,
ref_id.krate,
trait_id.node,
trait_id.krate,
scope_id));
} |
send_sms.py | # we import the Twilio client from the dependency we just installed
# from twilio.rest import TwilioRestClient
from twilio.rest import Client
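# NOTE: hard-coding the Account SID and Auth Token as below is shown only
# for illustration; in practice load them from environment variables.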
| client = Client("AC3e84e9cae2390af9a661c1ab35955444", "4a8bf26cb30107ec85d98f6bf1182522")
# change the "from_" number to your Twilio number and the "to" number
# to the phone number you signed up for Twilio with, or upgrade your
# account to send SMS to any phone number
client.messages.create(to="+15129146948", from_="+17372105122",
body=message)
if __name__ == '__main__':
send_text('Hello, this is a test.') | def send_text(message):
# the following line needs your Twilio Account SID and Auth Token |
GoalAdjustments_PostEducationByIdGoaladjustments.js | /**
* Auto-generated action file for "Published Plan Service" API.
*
* Generated at: 2019-06-06T13:12:53.753Z
* Mass generator version: 1.1.0
*
* flowground :- Telekom iPaaS / naviplancentral-com-plan-connector
* Copyright © 2019, Deutsche Telekom AG
* contact: [email protected]
*
* All files of this connector are licensed under the Apache 2.0 License. For details
* see the file LICENSE on the toplevel directory.
*
*
* Operation: 'GoalAdjustments_PostEducationByIdGoaladjustments'
* Endpoint Path: '/api/GoalAdjustments/Education/{id}/Calculations'
* Method: 'post'
*
*/
const Swagger = require('swagger-client');
const processWrapper = require('../services/process-wrapper');
const spec = require('../spec.json');
// this wrapper offers a simplified emitData(data) function
module.exports.process = processWrapper(processAction);
// parameter names for this call
const PARAMETERS = [
"id"
];
// mappings from connector field names to API field names
const FIELD_MAP = {
"id": "id",
"duration": "duration",
"expensesCovered": "expensesCovered",
"lumpSumContribution": "lumpSumContribution",
"lumpSumDate": "lumpSumDate",
"monthlySavingsContribution": "monthlySavingsContribution",
"adjustedValues": "adjustedValues",
"originalValues": "originalValues",
"requestBody": "requestBody"
};
function p | msg, cfg) {
var isVerbose = process.env.debug || cfg.verbose;
if (isVerbose) {
console.log(`---MSG: ${JSON.stringify(msg)}`);
console.log(`---CFG: ${JSON.stringify(cfg)}`);
console.log(`---ENV: ${JSON.stringify(process.env)}`);
}
const contentType = cfg.body_content_type;
const body = msg.body;
mapFieldNames(body);
let parameters = {};
for(let param of PARAMETERS) {
parameters[param] = body[param];
}
// credentials for this operation
let securities = {};
let callParams = {
spec: spec,
operationId: 'GoalAdjustments_PostEducationByIdGoaladjustments',
pathName: '/api/GoalAdjustments/Education/{id}/Calculations',
method: 'post',
parameters: parameters,
requestContentType: contentType,
requestBody: body.requestBody,
securities: {authorized: securities},
server: spec.servers[cfg.server] || cfg.otherServer,
};
if (isVerbose) {
let out = Object.assign({}, callParams);
out.spec = '[omitted]';
console.log(`--SWAGGER CALL: ${JSON.stringify(out)}`);
}
// Call operation via Swagger client
return Swagger.execute(callParams).then(data => {
// emit a single message with data
this.emitData(data);
// if the response contains an array of entities, you can emit them one by one:
// data.obj.someItems.forEach((item) => {
// this.emitData(item);
// }
});
}
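// Recursively rename connector field names to the API field names listed in
// FIELD_MAP, walking nested objects and arrays in place.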
function mapFieldNames(obj) {
if(Array.isArray(obj)) {
obj.forEach(mapFieldNames);
}
else if(typeof obj === 'object' && obj) {
Object.keys(obj).forEach(key => {
mapFieldNames(obj[key]);
let goodKey = FIELD_MAP[key];
if(goodKey && goodKey !== key) {
obj[goodKey] = obj[key];
delete obj[key];
}
});
}
} | rocessAction( |
state.py | # state.py - fsmonitor persistent state
#
# Copyright 2013-2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import errno
import os
import socket
import struct
from mercurial.i18n import _
from mercurial import (
pathutil,
util,
)
_version = 4
_versionformat = ">I"
class | (object):
def __init__(self, repo):
self._vfs = repo.vfs
self._ui = repo.ui
self._rootdir = pathutil.normasprefix(repo.root)
self._lastclock = None
self._identity = util.filestat(None)
self.mode = self._ui.config('fsmonitor', 'mode', default='on')
self.walk_on_invalidate = self._ui.configbool(
'fsmonitor', 'walk_on_invalidate', False)
self.timeout = float(self._ui.config(
'fsmonitor', 'timeout', default='2'))
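    # get() returns a (clock, ignorehash, notefiles) triple, or
    # (None, None, None) when no usable state exists on disk.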
def get(self):
try:
file = self._vfs('fsmonitor.state', 'rb')
except IOError as inst:
self._identity = util.filestat(None)
if inst.errno != errno.ENOENT:
raise
return None, None, None
self._identity = util.filestat.fromfp(file)
versionbytes = file.read(4)
if len(versionbytes) < 4:
self._ui.log(
'fsmonitor', 'fsmonitor: state file only has %d bytes, '
'nuking state\n' % len(versionbytes))
self.invalidate()
return None, None, None
try:
diskversion = struct.unpack(_versionformat, versionbytes)[0]
if diskversion != _version:
# different version, nuke state and start over
self._ui.log(
'fsmonitor', 'fsmonitor: version switch from %d to '
'%d, nuking state\n' % (diskversion, _version))
self.invalidate()
return None, None, None
state = file.read().split('\0')
# state = hostname\0clock\0ignorehash\0 + list of files, each
# followed by a \0
if len(state) < 3:
self._ui.log(
'fsmonitor', 'fsmonitor: state file truncated (expected '
'3 chunks, found %d), nuking state\n', len(state))
self.invalidate()
return None, None, None
diskhostname = state[0]
hostname = socket.gethostname()
if diskhostname != hostname:
# file got moved to a different host
self._ui.log('fsmonitor', 'fsmonitor: stored hostname "%s" '
'different from current "%s", nuking state\n' %
(diskhostname, hostname))
self.invalidate()
return None, None, None
clock = state[1]
ignorehash = state[2]
# discard the value after the last \0
notefiles = state[3:-1]
finally:
file.close()
return clock, ignorehash, notefiles
def set(self, clock, ignorehash, notefiles):
if clock is None:
self.invalidate()
return
# Read the identity from the file on disk rather than from the open file
# pointer below, because the latter is actually a brand new file.
identity = util.filestat.frompath(self._vfs.join('fsmonitor.state'))
if identity != self._identity:
self._ui.debug('skip updating fsmonitor.state: identity mismatch\n')
return
try:
file = self._vfs('fsmonitor.state', 'wb', atomictemp=True,
checkambig=True)
except (IOError, OSError):
self._ui.warn(_("warning: unable to write out fsmonitor state\n"))
return
with file:
file.write(struct.pack(_versionformat, _version))
file.write(socket.gethostname() + '\0')
file.write(clock + '\0')
file.write(ignorehash + '\0')
if notefiles:
file.write('\0'.join(notefiles))
file.write('\0')
def invalidate(self):
try:
os.unlink(os.path.join(self._rootdir, '.hg', 'fsmonitor.state'))
except OSError as inst:
if inst.errno != errno.ENOENT:
raise
self._identity = util.filestat(None)
def setlastclock(self, clock):
self._lastclock = clock
def getlastclock(self):
return self._lastclock
| state |
main.rs | #![allow(clippy::too_many_arguments)]
use std::fs::File;
use std::io::{self, prelude::*, BufReader};
extern crate clap;
use clap::{App, Arg};
extern crate rand;
use rand::Rng;
fn gaf_max_id(filename: &str) -> usize {
let mut max_id = usize::min_value();
let file = File::open(filename).unwrap();
let reader = BufReader::new(file);
for line in reader.lines() {
let l = line.unwrap();
let path = l.split('\t').nth(5).unwrap();
for n in path.split(|c| c == '<' || c == '>') {
if !n.is_empty() {
let id = n.parse::<usize>().unwrap();
if id > max_id {
max_id = id;
}
}
}
}
max_id
}
fn for_each_line_in_gfa(gfa_filename: &str, line_type: &str, mut callback: impl FnMut(&str)) {
let file = File::open(gfa_filename).unwrap();
let reader = BufReader::new(file);
for line in reader.lines() {
let l = line.unwrap();
let curr_type = l.split('\t').nth(0).unwrap();
if curr_type == line_type {
callback(&l);
}
}
}
struct GfaGraph {
node_length: Vec<usize>,
max_id: usize,
}
impl GfaGraph {
fn new() -> Self {
GfaGraph {
node_length: vec![],
max_id: usize::min_value(),
}
}
fn from_gfa(gfa_filename: &str) -> Self {
let mut max_id = usize::min_value();
for_each_line_in_gfa(gfa_filename, "S", |l: &str| {
let id = l.split('\t').nth(1).unwrap().parse::<usize>().unwrap();
if id > max_id {
max_id = id;
}
});
let mut node_length = Vec::<usize>::new();
node_length.resize(max_id, 0);
for_each_line_in_gfa(gfa_filename, "S", |l: &str| {
let id = l.split('\t').nth(1).unwrap().parse::<usize>().unwrap();
let seq = l.split('\t').nth(2).unwrap();
node_length[id - 1] = seq.len();
});
GfaGraph {
node_length,
max_id,
}
}
fn get_node_length(self: &GfaGraph, id: usize) -> usize {
self.node_length[id - 1]
}
fn get_max_id(self: &GfaGraph) -> usize {
self.max_id
}
fn loaded(self: &GfaGraph) -> bool {
self.max_id != 0
}
}
fn gaf_nth_longest_read(
filename: &str,
keep_n_longest: usize,
min_length: u64,
max_length: u64,
) -> u64 {
let mut v = Vec::new();
let file = File::open(filename).unwrap();
let reader = BufReader::new(file);
for line in reader.lines() {
let length = line
.unwrap()
.split('\t')
.nth(1)
.unwrap()
.parse::<u64>()
.unwrap();
if length >= min_length && length <= max_length {
v.push(length);
}
}
// sort by decreasing length
v.sort_by(|a, b| b.partial_cmp(a).unwrap());
let cutoff = if keep_n_longest > v.len() {
v.len()
} else {
keep_n_longest
};
v[cutoff - 1]
}
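/// Streams the GAF file and prints one row per selected alignment: either a
/// node-coverage matrix with one column per node id or, with `vectorize`,
/// one record per node traversal of each read path.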
fn do_matrix(
filename: &str,
gfa_filename: &str,
vectorize: bool,
binary_out: bool,
mut max_id: usize,
min_length: u64,
max_length: u64,
trim_read_name: bool,
group_name: &str,
keep_n_longest: usize,
sampling_rate: f64,
sample_up_to: u64,
) {
let mut sampled_read_count = 0;
let graph = if !gfa_filename.is_empty() {
GfaGraph::from_gfa(gfa_filename)
} else | ;
max_id = if graph.loaded() {
graph.get_max_id()
} else {
max_id
};
if !vectorize && max_id == 0 {
max_id = gaf_max_id(filename);
}
let query_length_threshold = if keep_n_longest > 0 {
gaf_nth_longest_read(filename, keep_n_longest, min_length, max_length)
} else {
u64::min_value()
};
let mut rng = rand::thread_rng();
if group_name != "" {
print!("group.name\t");
}
if vectorize {
print!("aln.name\tquery.length\tnode.id\trank");
} else {
print!("aln.name\tquery.length\tnode.count");
for x in 1..=max_id {
print!("\tnode.{}", x);
}
}
println!();
io::stdout().flush().unwrap();
let file = File::open(filename).unwrap();
let reader = BufReader::new(file);
for line in reader.lines() {
// parse the line
let l = line.unwrap();
let mut name = "";
let mut path = "";
let mut query_length: u64 = 0;
for (i, s) in l.split('\t').enumerate() {
match i {
0 => {
name = if trim_read_name {
s.split_ascii_whitespace().nth(0).unwrap()
} else {
s
}
}
1 => query_length = s.parse::<u64>().unwrap(),
5 => path = s,
_ => {}
};
}
if query_length >= min_length
&& query_length <= max_length
&& query_length >= query_length_threshold
&& ((sampling_rate - 1.0f64).abs() == 0.0f64 || rng.gen::<f64>() < sampling_rate)
&& sampled_read_count < sample_up_to
{
sampled_read_count += 1;
if vectorize {
for (j, n) in path.split(|c| c == '<' || c == '>').enumerate() {
if !n.is_empty() {
if group_name != "" {
print!("{}\t", group_name);
}
println!("{}\t{}\t{}\t{}", name, query_length, n, j);
}
}
} else {
let mut v = vec![0; max_id];
for n in path.split(|c| c == '<' || c == '>') {
if !n.is_empty() {
let id = n.parse::<usize>().unwrap();
v[id - 1] = if binary_out {
1
} else if graph.loaded() {
graph.get_node_length(id)
} else {
1
};
}
}
if group_name != "" {
print!("{}\t", group_name);
}
print!("{}", name);
print!("\t{}", query_length);
let sum: usize = v.iter().sum();
print!("\t{}", sum);
for x in v {
print!("\t{}", x);
}
println!();
}
io::stdout().flush().unwrap(); // maybe not necessary
}
}
}
fn main() -> io::Result<()> {
let matches = App::new("gaffy")
.version("0.1.0")
.author("Erik Garrison <[email protected]>")
.about("Manipulate GAF (graph alignment format) files")
.arg(Arg::with_name("INPUT")
.required(true)
.takes_value(true)
.index(1)
.help("input GAF file"))
.arg(Arg::with_name("gfa")
.short("g")
.long("gfa")
.takes_value(true)
.help("Input GFA file to which the GAF was mapped."))
.arg(Arg::with_name("vectorize")
.short("v")
.long("vectorize")
.help("Write a tabular representation of the alignments (one record per alignment node traversal)"))
.arg(Arg::with_name("trim-read-name")
.short("t")
.long("trim-read-name")
.help("Trim the read name at the first whitespace"))
.arg(Arg::with_name("group-name")
.short("n")
.long("group-name")
.takes_value(true)
.help("Add a group name field to each record in the matrix or vector output, to help when merging outputs."))
.arg(Arg::with_name("keep-n-longest")
.short("k")
.long("keep-n-longest")
.takes_value(true)
.help("Keep the longest N reads."))
.arg(Arg::with_name("max-length")
.short("M")
.long("max-length")
.takes_value(true)
.help("Keep reads shorter than this length (before keep-n-longest calculations)."))
.arg(Arg::with_name("min-length")
.short("L")
.long("min-length")
.takes_value(true)
.help("Keep reads longer than this length (before keep-n-longest calculations)."))
.arg(Arg::with_name("weighted-matrix")
.short("w")
.long("weighted-matrix")
.help("Weight matrix values by GFA node_length."))
.arg(Arg::with_name("sampling-rate")
.short("r")
.long("sampling-rate")
.takes_value(true)
.help("Sample selected alignments at this rate [0-1]."))
.arg(Arg::with_name("sample-up-to")
.short("u")
.long("sample-up-to")
.takes_value(true)
.help("Sample up to this many alignments."))
.get_matches();
let filename = matches.value_of("INPUT").unwrap();
let max_id = usize::min_value();
let gfa_filename = if matches.is_present("gfa") {
matches.value_of("gfa").unwrap()
} else {
""
};
let keep_n_longest = if matches.is_present("keep-n-longest") {
matches
.value_of("keep-n-longest")
.unwrap()
.parse::<usize>()
.unwrap()
} else {
0
};
let min_length = if matches.is_present("min-length") {
matches
.value_of("min-length")
.unwrap()
.parse::<u64>()
.unwrap()
} else {
0
};
let max_length = if matches.is_present("max-length") {
matches
.value_of("max-length")
.unwrap()
.parse::<u64>()
.unwrap()
} else {
u64::max_value()
};
let sampling_rate = if matches.is_present("sampling-rate") {
matches
.value_of("sampling-rate")
.unwrap()
.parse::<f64>()
.unwrap()
} else {
1.0
};
let sample_up_to = if matches.is_present("sample-up-to") {
matches
.value_of("sample-up-to")
.unwrap()
.parse::<u64>()
.unwrap()
} else {
u64::max_value()
};
do_matrix(
filename,
gfa_filename,
matches.is_present("vectorize"),
!matches.is_present("weighted-matrix"),
max_id,
min_length,
max_length,
matches.is_present("trim-read-name"),
matches.value_of("group-name").unwrap_or(""),
keep_n_longest,
sampling_rate,
sample_up_to,
);
Ok(())
}
| {
GfaGraph::new()
} |
svg.go | // Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package svg
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"regexp"
"strings"
)
func invokeDot(format string) func(input io.Reader, output io.Writer) error {
return func(input io.Reader, output io.Writer) error {
cmd := exec.Command("dot", "-T"+format)
cmd.Stdin, cmd.Stdout, cmd.Stderr = input, output, os.Stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to execute dot. Is Graphviz installed? Error: %v", err)
}
return nil
}
}
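// MassageDotSVG generates SVG output with dot and then post-processes it
// (via massageSVG below) so that it pans and zooms nicely in a browser.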
func MassageDotSVG() func(input io.Reader, output io.Writer) error {
generateSVG := invokeDot("svg")
return func(input io.Reader, output io.Writer) error {
baseSVG := new(bytes.Buffer)
if err := generateSVG(input, baseSVG); err != nil {
return err
}
_, err := output.Write([]byte(massageSVG(baseSVG.String())))
return err
}
}
var (
viewBox = regexp.MustCompile(`<svg\s*width="[^"]+"\s*height="[^"]+"\s*viewBox="[^"]+"`)
graphID = regexp.MustCompile(`<g id="graph\d"`)
svgClose = regexp.MustCompile(`</svg>`)
)
// massageSVG enhances the SVG output from DOT to provide better
// panning inside a web browser. It uses the svgpan library, which is
// embedded into the svgpan.JSSource variable.
func massageSVG(svg string) string {
	// Work around a dot bug which misses quoting some ampersands,
	// resulting in unparsable SVG.
	svg = strings.Replace(svg, "&;", "&amp;;", -1)
// Dot's SVG output is
//
// <svg width="___" height="___"
// viewBox="___" xmlns=...>
// <g id="graph0" transform="...">
// ...
// </g>
// </svg>
//
// Change it to
//
// <svg width="100%" height="100%"
// xmlns=...>
// <script type="text/ecmascript"><![CDATA[` ..$(svgpan.JSSource)... `]]></script>`
// <g id="viewport" transform="translate(0,0)">
// <g id="graph0" transform="...">
// ...
// </g>
// </g>
// </svg>
if loc := viewBox.FindStringIndex(svg); loc != nil {
svg = svg[:loc[0]] +
`<svg width="100%" height="100%"` +
svg[loc[1]:]
}
if loc := graphID.FindStringIndex(svg); loc != nil {
svg = svg[:loc[0]] +
`<script type="text/ecmascript"><![CDATA[` + string(JSSource) + `]]></script>` +
`<g id="viewport" transform="scale(0.5,0.5) translate(0,0)">` +
svg[loc[0]:]
}
if loc := svgClose.FindStringIndex(svg); loc != nil |
return svg
}
const JSSource = `
/**
* SVGPan library 1.2.2
* ======================
*
* Given an unique existing element with id "viewport" (or when missing, the
* first g-element), including the library into any SVG adds the following
* capabilities:
*
* - Mouse panning
* - Mouse zooming (using the wheel)
* - Object dragging
*
* You can configure the behaviour of the pan/zoom/drag with the variables
* listed in the CONFIGURATION section of this file.
*
* Known issues:
*
* - Zooming (while panning) on Safari has still some issues
*
* Releases:
*
* 1.2.2, Tue Aug 30 17:21:56 CEST 2011, Andrea Leofreddi
* - Fixed viewBox on root tag (#7)
* - Improved zoom speed (#2)
*
* 1.2.1, Mon Jul 4 00:33:18 CEST 2011, Andrea Leofreddi
* - Fixed a regression with mouse wheel (now working on Firefox 5)
* - Working with viewBox attribute (#4)
* - Added "use strict;" and fixed resulting warnings (#5)
* - Added configuration variables, dragging is disabled by default (#3)
*
* 1.2, Sat Mar 20 08:42:50 GMT 2010, Zeng Xiaohui
* Fixed a bug with browser mouse handler interaction
*
* 1.1, Wed Feb 3 17:39:33 GMT 2010, Zeng Xiaohui
* Updated the zoom code to support the mouse wheel on Safari/Chrome
*
* 1.0, Andrea Leofreddi
* First release
*
* This code is licensed under the following BSD license:
*
* Copyright 2009-2017 Andrea Leofreddi <[email protected]>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are
* permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are those of the
* authors and should not be interpreted as representing official policies, either expressed
* or implied, of Andrea Leofreddi.
*/
"use strict";
/// CONFIGURATION
/// ====>
var enablePan = 1; // 1 or 0: enable or disable panning (default enabled)
var enableZoom = 1; // 1 or 0: enable or disable zooming (default enabled)
var enableDrag = 0; // 1 or 0: enable or disable dragging (default disabled)
var zoomScale = 0.2; // Zoom sensitivity
/// <====
/// END OF CONFIGURATION
var root = document.documentElement;
var state = 'none', svgRoot = null, stateTarget, stateOrigin, stateTf;
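// state tracks the current interaction mode ('pan', 'drag' or none);
// stateTf and stateOrigin capture the transform and pointer position at the
// moment the interaction started, and stateTarget is the element being dragged.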
setupHandlers(root);
/**
* Register handlers
*/
function setupHandlers(root){
setAttributes(root, {
"onmouseup" : "handleMouseUp(evt)",
"onmousedown" : "handleMouseDown(evt)",
"onmousemove" : "handleMouseMove(evt)",
//"onmouseout" : "handleMouseUp(evt)", // Decomment this to stop the pan functionality when dragging out of the SVG element
});
if(navigator.userAgent.toLowerCase().indexOf('webkit') >= 0)
window.addEventListener('mousewheel', handleMouseWheel, false); // Chrome/Safari
else
window.addEventListener('DOMMouseScroll', handleMouseWheel, false); // Others
}
/**
* Retrieves the root element for SVG manipulation. The element is then cached into the svgRoot global variable.
*/
function getRoot(root) {
if(svgRoot == null) {
var r = root.getElementById("viewport") ? root.getElementById("viewport") : root.documentElement, t = r;
while(t != root) {
if(t.getAttribute("viewBox")) {
setCTM(r, t.getCTM());
t.removeAttribute("viewBox");
}
t = t.parentNode;
}
svgRoot = r;
}
return svgRoot;
}
/**
 * Instantiate an SVGPoint object with the given event coordinates.
*/
function getEventPoint(evt) {
var p = root.createSVGPoint();
p.x = evt.clientX;
p.y = evt.clientY;
return p;
}
/**
* Sets the current transform matrix of an element.
*/
function setCTM(element, matrix) {
var s = "matrix(" + matrix.a + "," + matrix.b + "," + matrix.c + "," + matrix.d + "," + matrix.e + "," + matrix.f + ")";
element.setAttribute("transform", s);
}
/**
* Dumps a matrix to a string (useful for debug).
*/
function dumpMatrix(matrix) {
var s = "[ " + matrix.a + ", " + matrix.c + ", " + matrix.e + "\n " + matrix.b + ", " + matrix.d + ", " + matrix.f + "\n 0, 0, 1 ]";
return s;
}
/**
* Sets attributes of an element.
*/
function setAttributes(element, attributes){
for (var i in attributes)
element.setAttributeNS(null, i, attributes[i]);
}
/**
* Handle mouse wheel event.
*/
function handleMouseWheel(evt) {
if(!enableZoom)
return;
if(evt.preventDefault)
evt.preventDefault();
evt.returnValue = false;
var svgDoc = evt.target.ownerDocument;
var delta;
if(evt.wheelDelta)
delta = evt.wheelDelta / 360; // Chrome/Safari
else
delta = evt.detail / -9; // Mozilla
var z = Math.pow(1 + zoomScale, delta);
var g = getRoot(svgDoc);
var p = getEventPoint(evt);
p = p.matrixTransform(g.getCTM().inverse());
// Compute new scale matrix in current mouse position
var k = root.createSVGMatrix().translate(p.x, p.y).scale(z).translate(-p.x, -p.y);
setCTM(g, g.getCTM().multiply(k));
if(typeof(stateTf) == "undefined")
stateTf = g.getCTM().inverse();
stateTf = stateTf.multiply(k.inverse());
}
/**
* Handle mouse move event.
*/
function handleMouseMove(evt) {
if(evt.preventDefault)
evt.preventDefault();
evt.returnValue = false;
var svgDoc = evt.target.ownerDocument;
var g = getRoot(svgDoc);
if(state == 'pan' && enablePan) {
// Pan mode
var p = getEventPoint(evt).matrixTransform(stateTf);
setCTM(g, stateTf.inverse().translate(p.x - stateOrigin.x, p.y - stateOrigin.y));
} else if(state == 'drag' && enableDrag) {
// Drag mode
var p = getEventPoint(evt).matrixTransform(g.getCTM().inverse());
setCTM(stateTarget, root.createSVGMatrix().translate(p.x - stateOrigin.x, p.y - stateOrigin.y).multiply(g.getCTM().inverse()).multiply(stateTarget.getCTM()));
stateOrigin = p;
}
}
/**
* Handle click event.
*/
function handleMouseDown(evt) {
if(evt.preventDefault)
evt.preventDefault();
evt.returnValue = false;
var svgDoc = evt.target.ownerDocument;
var g = getRoot(svgDoc);
if(
evt.target.tagName == "svg"
|| !enableDrag // Pan anyway when drag is disabled and the user clicked on an element
) {
// Pan mode
state = 'pan';
stateTf = g.getCTM().inverse();
stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
} else {
// Drag mode
state = 'drag';
stateTarget = evt.target;
stateTf = g.getCTM().inverse();
stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
}
}
/**
* Handle mouse button release event.
*/
function handleMouseUp(evt) {
if(evt.preventDefault)
evt.preventDefault();
evt.returnValue = false;
var svgDoc = evt.target.ownerDocument;
if(state == 'pan' || state == 'drag') {
// Quit pan mode
state = '';
}
}
`
| {
svg = svg[:loc[0]] +
`</g>` +
svg[loc[0]:]
} |
diagnostics.py | """Diagnostics support for HomeKit."""
from __future__ import annotations
from typing import Any
from pyhap.accessory_driver import AccessoryDriver
from pyhap.state import State
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from . import HomeKit
from .const import DOMAIN, HOMEKIT
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
| """Return diagnostics for a config entry."""
homekit: HomeKit = hass.data[DOMAIN][entry.entry_id][HOMEKIT]
data: dict[str, Any] = {
"status": homekit.status,
"config-entry": {
"title": entry.title,
"version": entry.version,
"data": dict(entry.data),
"options": dict(entry.options),
},
}
if not hasattr(homekit, "driver"):
return data
driver: AccessoryDriver = homekit.driver
data.update(driver.get_accessories())
state: State = driver.state
data.update(
{
"client_properties": {
str(client): props for client, props in state.client_properties.items()
},
"config_version": state.config_version,
"pairing_id": state.mac,
}
)
return data |
|
Kraken.go | package kraken
import (
"crypto/hmac"
"crypto/sha256"
"crypto/sha512"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
. "github.com/78182648/goexdiy"
"net/http"
"net/url"
"sort"
"strings"
"time"
)
type BaseResponse struct {
Error []string `json:"error"`
Result interface{} `json:"result"`
}
type NewOrderResponse struct {
Description interface{} `json:"descr"`
TxIds []string `json:"txid"`
}
type Kraken struct {
httpClient *http.Client
accessKey,
secretKey string
}
var (
BASE_URL = "https://api.kraken.com"
API_V0 = "/0/"
API_DOMAIN = BASE_URL + API_V0
PUBLIC = "public/"
PRIVATE = "private/"
)
func New(client *http.Client, accesskey, secretkey string) *Kraken {
return &Kraken{client, accesskey, secretkey}
}
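// placeOrder submits an order through Kraken's private AddOrder endpoint and
// maps the response onto a generic Order; newly placed orders are reported
// with status ORDER_UNFINISH.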
func (k *Kraken) placeOrder(orderType, side, amount, price string, pair CurrencyPair) (*Order, error) {
apiuri := "private/AddOrder"
params := url.Values{}
params.Set("pair", k.convertPair(pair).ToSymbol(""))
params.Set("type", side)
params.Set("ordertype", orderType)
params.Set("price", price)
params.Set("volume", amount)
var resp NewOrderResponse
err := k.doAuthenticatedRequest("POST", apiuri, params, &resp)
//log.Println
if err != nil {
return nil, err
}
var tradeSide TradeSide = SELL
if "buy" == side {
tradeSide = BUY
}
return &Order{
Currency: pair,
OrderID2: resp.TxIds[0],
Amount: ToFloat64(amount),
Price: ToFloat64(price),
Side: tradeSide,
Status: ORDER_UNFINISH}, nil
}
func (k *Kraken) LimitBuy(amount, price string, currency CurrencyPair, opt ...LimitOrderOptionalParameter) (*Order, error) {
return k.placeOrder("limit", "buy", amount, price, currency)
}
func (k *Kraken) LimitSell(amount, price string, currency CurrencyPair, opt ...LimitOrderOptionalParameter) (*Order, error) {
return k.placeOrder("limit", "sell", amount, price, currency)
}
func (k *Kraken) MarketBuy(amount, price string, currency CurrencyPair) (*Order, error) {
return k.placeOrder("market", "buy", amount, price, currency)
}
func (k *Kraken) MarketSell(amount, price string, currency CurrencyPair) (*Order, error) {
return k.placeOrder("market", "sell", amount, price, currency)
}
func (k *Kraken) CancelOrder(orderId string, currency CurrencyPair) (bool, error) {
params := url.Values{}
apiuri := "private/CancelOrder"
params.Set("txid", orderId)
var respmap map[string]interface{}
err := k.doAuthenticatedRequest("POST", apiuri, params, &respmap)
if err != nil {
return false, err
}
//log.Println(respmap)
return true, nil
}
func (k *Kraken) toOrder(orderinfo interface{}) Order {
omap := orderinfo.(map[string]interface{})
descmap := omap["descr"].(map[string]interface{})
return Order{
Amount: ToFloat64(omap["vol"]),
Price: ToFloat64(descmap["price"]),
DealAmount: ToFloat64(omap["vol_exec"]),
AvgPrice: ToFloat64(omap["price"]),
Side: AdaptTradeSide(descmap["type"].(string)),
Status: k.convertOrderStatus(omap["status"].(string)),
Fee: ToFloat64(omap["fee"]),
OrderTime: ToInt(omap["opentm"]),
}
}
func (k *Kraken) GetOrderInfos(txids ...string) ([]Order, error) {
params := url.Values{}
params.Set("txid", strings.Join(txids, ","))
var resultmap map[string]interface{}
err := k.doAuthenticatedRequest("POST", "private/QueryOrders", params, &resultmap)
if err != nil {
return nil, err
}
//log.Println(resultmap)
var ords []Order
for txid, v := range resultmap {
ord := k.toOrder(v)
ord.OrderID2 = txid
ords = append(ords, ord)
}
return ords, nil
}
func (k *Kraken) GetOneOrder(orderId string, currency CurrencyPair) (*Order, error) {
orders, err := k.GetOrderInfos(orderId)
if err != nil {
return nil, err
}
if len(orders) == 0 {
		return nil, errors.New("could not find the order " + orderId)
}
ord := &orders[0]
ord.Currency = currency
return ord, nil
}
func (k *Kraken) GetUnfinishOrders(currency CurrencyPair) ([]Order, error) {
var result struct {
Open map[string]interface{} `json:"open"`
}
| }
var orders []Order
for txid, v := range result.Open {
ord := k.toOrder(v)
ord.OrderID2 = txid
ord.Currency = currency
orders = append(orders, ord)
}
return orders, nil
}
func (k *Kraken) GetOrderHistorys(currency CurrencyPair, optional ...OptionalParameter) ([]Order, error) {
panic("")
}
func (k *Kraken) GetAccount() (*Account, error) {
params := url.Values{}
apiuri := "private/Balance"
var resustmap map[string]interface{}
err := k.doAuthenticatedRequest("POST", apiuri, params, &resustmap)
if err != nil {
return nil, err
}
acc := new(Account)
acc.Exchange = k.GetExchangeName()
acc.SubAccounts = make(map[Currency]SubAccount)
for key, v := range resustmap {
currency := k.convertCurrency(key)
amount := ToFloat64(v)
//log.Println(symbol, amount)
acc.SubAccounts[currency] = SubAccount{Currency: currency, Amount: amount, ForzenAmount: 0, LoanAmount: 0}
if currency.Symbol == "XBT" { // adapt to btc
acc.SubAccounts[BTC] = SubAccount{Currency: BTC, Amount: amount, ForzenAmount: 0, LoanAmount: 0}
}
}
return acc, nil
}
//func (k *Kraken) GetTradeBalance() {
// var resultmap map[string]interface{}
// k.doAuthenticatedRequest("POST", "private/TradeBalance", url.Values{}, &resultmap)
// log.Println(resultmap)
//}
func (k *Kraken) GetTicker(currency CurrencyPair) (*Ticker, error) {
var resultmap map[string]interface{}
err := k.doAuthenticatedRequest("GET", "public/Ticker?pair="+k.convertPair(currency).ToSymbol(""), url.Values{}, &resultmap)
if err != nil {
return nil, err
}
ticker := new(Ticker)
ticker.Pair = currency
for _, t := range resultmap {
tickermap := t.(map[string]interface{})
ticker.Last = ToFloat64(tickermap["c"].([]interface{})[0])
ticker.Buy = ToFloat64(tickermap["b"].([]interface{})[0])
ticker.Sell = ToFloat64(tickermap["a"].([]interface{})[0])
ticker.Low = ToFloat64(tickermap["l"].([]interface{})[0])
ticker.High = ToFloat64(tickermap["h"].([]interface{})[0])
ticker.Vol = ToFloat64(tickermap["v"].([]interface{})[0])
}
return ticker, nil
}
func (k *Kraken) GetDepth(size int, currency CurrencyPair) (*Depth, error) {
apiuri := fmt.Sprintf("public/Depth?pair=%s&count=%d", k.convertPair(currency).ToSymbol(""), size)
var resultmap map[string]interface{}
err := k.doAuthenticatedRequest("GET", apiuri, url.Values{}, &resultmap)
if err != nil {
return nil, err
}
//log.Println(respmap)
dep := Depth{}
dep.Pair = currency
for _, d := range resultmap {
depmap := d.(map[string]interface{})
asksmap := depmap["asks"].([]interface{})
bidsmap := depmap["bids"].([]interface{})
for _, v := range asksmap {
ask := v.([]interface{})
dep.AskList = append(dep.AskList, DepthRecord{ToFloat64(ask[0]), ToFloat64(ask[1])})
}
for _, v := range bidsmap {
bid := v.([]interface{})
dep.BidList = append(dep.BidList, DepthRecord{ToFloat64(bid[0]), ToFloat64(bid[1])})
}
break
}
sort.Sort(sort.Reverse(dep.AskList)) //reverse
return &dep, nil
}
func (k *Kraken) GetKlineRecords(currency CurrencyPair, period KlinePeriod, size int, opt ...OptionalParameter) ([]Kline, error) {
panic("")
}
// Trade records for the whole exchange, not just the authenticated account.
func (k *Kraken) GetTrades(currencyPair CurrencyPair, since int64) ([]Trade, error) {
panic("")
}
func (k *Kraken) GetExchangeName() string {
return KRAKEN
}
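// buildParamsSigned computes the Kraken API-Sign header value:
// base64(HMAC-SHA512(base64-decoded secret, URI path + SHA256(nonce + POST data))).
// The nonce is set on the form before the signature is computed.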
func (k *Kraken) buildParamsSigned(apiuri string, postForm *url.Values) string {
postForm.Set("nonce", fmt.Sprintf("%d", time.Now().UnixNano()))
urlPath := API_V0 + apiuri
secretByte, _ := base64.StdEncoding.DecodeString(k.secretKey)
encode := []byte(postForm.Get("nonce") + postForm.Encode())
sha := sha256.New()
sha.Write(encode)
shaSum := sha.Sum(nil)
pathSha := append([]byte(urlPath), shaSum...)
mac := hmac.New(sha512.New, secretByte)
mac.Write(pathSha)
macSum := mac.Sum(nil)
sign := base64.StdEncoding.EncodeToString(macSum)
return sign
}
func (k *Kraken) doAuthenticatedRequest(method, apiuri string, params url.Values, ret interface{}) error {
headers := map[string]string{}
if "POST" == method {
signature := k.buildParamsSigned(apiuri, ¶ms)
headers = map[string]string{
"API-Key": k.accessKey,
"API-Sign": signature,
}
}
resp, err := NewHttpRequest(k.httpClient, method, API_DOMAIN+apiuri, params.Encode(), headers)
if err != nil {
return err
}
//println(string(resp))
var base BaseResponse
base.Result = ret
err = json.Unmarshal(resp, &base)
if err != nil {
return err
}
//println(string(resp))
if len(base.Error) > 0 {
return errors.New(base.Error[0])
}
return nil
}
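// convertCurrency drops the first "X" and "Z" from symbols of four or more
// characters, stripping Kraken's legacy asset-class prefixes
// (e.g. XXBT -> XBT, ZUSD -> USD).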
func (k *Kraken) convertCurrency(currencySymbol string) Currency {
if len(currencySymbol) >= 4 {
currencySymbol = strings.Replace(currencySymbol, "X", "", 1)
currencySymbol = strings.Replace(currencySymbol, "Z", "", 1)
}
return NewCurrency(currencySymbol, "")
}
func (k *Kraken) convertPair(pair CurrencyPair) CurrencyPair {
if "BTC" == pair.CurrencyA.Symbol {
return NewCurrencyPair(XBT, pair.CurrencyB)
}
if "BTC" == pair.CurrencyB.Symbol {
return NewCurrencyPair(pair.CurrencyA, XBT)
}
return pair
}
func (k *Kraken) convertOrderStatus(status string) TradeStatus {
switch status {
case "open", "pending":
return ORDER_UNFINISH
case "canceled", "expired":
return ORDER_CANCEL
case "filled", "closed":
return ORDER_FINISH
case "partialfilled":
return ORDER_PART_FINISH
}
return ORDER_UNFINISH
} | err := k.doAuthenticatedRequest("POST", "private/OpenOrders", url.Values{}, &result)
if err != nil {
return nil, err |
test_moments.py | import pytest
from dpm.distributions import *
import dpm.utils as utils
import torch
def test_arcsine():
model = Arcsine()
assert model.expectation == 0.5
assert model.median == 0.5
assert model.variance == 0.125
assert model.skewness == 0.
assert model.kurtosis == -1.5
model = Arcsine(-1, 1)
assert model.expectation == 0.
assert model.median == 0.
assert model.variance == 0.5
assert model.skewness == 0.
assert model.kurtosis == -1.5
def test_bernoulli():
model = Bernoulli(probs=[0.3])
assert model.logits.item() + 0.8473 < 1e-2
assert model.expectation.item() - 0.3 < 1e-2
assert model.variance.item() - 0.21 < 1e-2
assert model.skewness.item() - 1.9047619048 < 1e-2
    assert model.kurtosis.item() + 1.2380952381 < 1e-2
def test_beta():
model = Beta()
assert model.expectation == 0.5
assert model.variance == 0.125
m = Beta(0.5, 0.5).mode.item()
    assert m == 0. or m == 1.
assert Beta(4.5, 3.5).mode.item() - 0.5833333333 < 1e-2
assert Beta(1.5, 0.5).mode.item() == 1.
assert Beta(0.5, 1.5).mode.item() == 0.
# assert Beta(1.00000, 1.00000).mode.item() > 0. and Beta(1.00000, 1.00000).mode.item() < 1.
def test_cauchy():
model = Cauchy(loc=1.)
assert model.median == 1.
assert model.mode == 1.
def test_exponential():
model = Exponential()
assert model.expectation - 1. < 1e-2
assert model.mode - 0. < 1e-2
assert model.variance - 1. < 1e-2
assert model.median - 0.6931471806 < 1e-2
assert model.skewness - 2. < 1e-2
assert model.kurtosis - 6. < 1e-2
model = Exponential(0.5)
assert model.expectation - 2. < 1e-2
assert model.mode - 0. < 1e-2
assert model.variance - 4. < 1e-2
assert model.median - 1.3862943611 < 1e-2
assert model.skewness - 2. < 1e-2
assert model.kurtosis - 6. < 1e-2
def test_gamma():
|
def test_gumbel():
model = Gumbel(loc=1., scale=2.)
assert model.expectation - (1 + 2 * utils.euler_mascheroni) < 1e-2
assert model.mode == 1.
assert model.median - 1.7330258412 < 1e-2
assert model.variance - 6.5797362674 < 1e-2
assert model.skewness - 1.14 < 1e-2
assert model.kurtosis - 2.4 < 1e-2
def test_hyperbolicsecant():
model = HyperbolicSecant()
assert model.expectation == 0.
assert model.variance == 1.
assert model.median == 0.
def test_laplace():
model = Laplace(loc=1., scale=2.)
assert model.expectation - 1. < 1e-2
assert model.variance - 8. < 1e-2
assert model.stddev - 2.8284271247 < 1e-2
assert model.median - 1. < 1e-2
assert model.mode - 1. < 1e-2
assert model.skewness < 1e-2
assert model.kurtosis - 3. < 1e-2
assert model.entropy() - 2.3862943611 < 1e-2
def test_log_cauchy():
model = LogCauchy(loc=2.)
assert model.median - 7.3890560989 < 1e-2
def test_log_normal():
model = LogNormal()
assert model.expectation - 1.6487212707 < 1e-2
assert model.variance - 4.6707742705 < 1e-2
assert model.mode - utils.e < 1e-2
assert model.median - utils.e < 1e-2
def test_logistic():
model = Logistic(loc=1., scale=2.)
assert model.expectation == 1.
assert model.mode == 1.
assert model.variance - 13.1594725348 < 1e-2
assert model.median == 1.
assert model.skewness == 0.
assert model.kurtosis == 1.2
def test_normal():
model = Normal(0., 3.)
assert model.variance.item() == 3.
assert model.expectation.item() == 0.
model = Normal([0., 0.], [3., 1., 1., 3.])
assert (model.variance - torch.tensor([[3., 1.], [1., 3.]]) < 1e-2).all()
assert (model.expectation == torch.tensor([0., 0.])).all()
def test_rayleigh():
model = Rayleigh(3.)
assert model.expectation - 3.7599424119 < 1e-2
assert model.mode - 3. < 1e-2
assert model.median - 3.5322300675 < 1e-2
assert model.variance - 3.8628330588 < 1e-2
assert model.skewness - 1.1186145158 < 1e-2
assert model.kurtosis - 0.2450893007 < 1e-2
def test_studentt():
model = StudentT()
model.expectation
model.variance
model.mode
def test_uniform():
model = Uniform()
assert model.expectation - 0.5 < 1e-2
assert model.variance - 1/12. < 1e-2
assert model.median - 0.5 < 1e-2
assert model.skewness == 0.
assert model.kurtosis + 1.2 < 1e-2
def test_logitnormal():
model = LogitNormal()
assert model.median - torch.sigmoid(torch.tensor(0.)) < 1e-2
model = LogitNormal(1.)
assert model.median - torch.sigmoid(torch.tensor(1.)) < 1e-2
# EOF
| model = Gamma()
assert model.expectation - 1. < 1e-2
assert model.variance - 1. < 1e-2
model = Gamma(0.5, 0.75)
assert model.expectation - 0.6666666667 < 1e-2
assert model.variance - 0.8888888889 < 1e-2 |
interpreter_truncate_table_test.rs | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_base::tokio;
use common_exception::Result; |
use crate::interpreters::*;
use crate::tests::parse_query;
#[tokio::test]
async fn test_truncate_table_interpreter() -> Result<()> {
let ctx = crate::tests::create_query_context()?;
// Create table.
{
static TEST_CREATE_QUERY: &str = "\
CREATE TABLE default.a(\
a String, b String\
) Engine = Memory\
";
if let PlanNode::CreateTable(plan) = parse_query(TEST_CREATE_QUERY, &ctx)? {
let interpreter = CreateTableInterpreter::try_create(ctx.clone(), plan.clone())?;
let _ = interpreter.execute(None).await?;
}
}
// Insert into.
{
static TEST_INSERT_QUERY: &str = "INSERT INTO default.a VALUES('1,1', '2,2')";
if let PlanNode::InsertInto(plan) = parse_query(TEST_INSERT_QUERY, &ctx)? {
let executor = InsertIntoInterpreter::try_create(ctx.clone(), plan.clone())?;
let _ = executor.execute(None).await?;
}
}
// select.
{
static TEST_SELECT_QUERY: &str = "SELECT * FROM default.a";
if let PlanNode::Select(plan) = parse_query(TEST_SELECT_QUERY, &ctx)? {
let interpreter = SelectInterpreter::try_create(ctx.clone(), plan.clone())?;
let stream = interpreter.execute(None).await?;
let result = stream.try_collect::<Vec<_>>().await?;
let expected = vec![
"+-----+-----+",
"| a | b |",
"+-----+-----+",
"| 1,1 | 2,2 |",
"+-----+-----+",
];
common_datablocks::assert_blocks_sorted_eq(expected, result.as_slice());
} else {
panic!()
}
}
// truncate table.
{
static TEST_TRUNCATE_QUERY: &str = "TRUNCATE TABLE default.a";
if let PlanNode::TruncateTable(plan) = parse_query(TEST_TRUNCATE_QUERY, &ctx)? {
let interpreter = TruncateTableInterpreter::try_create(ctx.clone(), plan.clone())?;
assert_eq!(interpreter.name(), "TruncateTableInterpreter");
let stream = interpreter.execute(None).await?;
let result = stream.try_collect::<Vec<_>>().await?;
let expected = vec!["++", "++"];
common_datablocks::assert_blocks_sorted_eq(expected, result.as_slice());
} else {
panic!()
}
}
// select.
{
static TEST_SELECT_QUERY: &str = "SELECT * FROM default.a";
if let PlanNode::Select(plan) = parse_query(TEST_SELECT_QUERY, &ctx)? {
let executor = SelectInterpreter::try_create(ctx.clone(), plan.clone())?;
let stream = executor.execute(None).await?;
let result = stream.try_collect::<Vec<_>>().await?;
let expected = vec!["++", "++"];
common_datablocks::assert_blocks_sorted_eq(expected, result.as_slice());
} else {
panic!()
}
}
Ok(())
} | use common_planners::*;
use futures::TryStreamExt;
use pretty_assertions::assert_eq; |
client.rs | use actix_files::{Files, NamedFile};
use actix_web::{web, Responder};
pub fn init(cfg: &mut web::ServiceConfig) {
cfg.service(Files::new("/", "./client/build").index_file("index.html"));
}
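// Serves the SPA entry point so client-side routes that don't match a static
// file still load the application shell.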
pub async fn | () -> impl Responder {
NamedFile::open("./client/build/index.html")
}
| fallback_fn |
config.py | # -*- coding: utf-8 -*-
#
# Modified by Peize Sun, Rufeng Zhang
# Contact: {sunpeize, cxrfzhang}@foxmail.com
# |
def add_sparsercnn_config(cfg):
"""
Add config for SparseRCNN.
"""
cfg.MODEL.SparseRCNN = CN()
cfg.MODEL.SparseRCNN.NUM_CLASSES = 80
cfg.MODEL.SparseRCNN.NUM_PROPOSALS = 300
# RCNN Head.
cfg.MODEL.SparseRCNN.NHEADS = 8
cfg.MODEL.SparseRCNN.DROPOUT = 0.0
cfg.MODEL.SparseRCNN.DIM_FEEDFORWARD = 2048
cfg.MODEL.SparseRCNN.ACTIVATION = 'relu'
cfg.MODEL.SparseRCNN.HIDDEN_DIM = 256
cfg.MODEL.SparseRCNN.NUM_CLS = 1
cfg.MODEL.SparseRCNN.NUM_REG = 3
cfg.MODEL.SparseRCNN.NUM_HEADS = 6
# Dynamic Conv.
cfg.MODEL.SparseRCNN.NUM_DYNAMIC = 2
cfg.MODEL.SparseRCNN.DIM_DYNAMIC = 64
# Loss.
cfg.MODEL.SparseRCNN.CLASS_WEIGHT = 2.0
cfg.MODEL.SparseRCNN.GIOU_WEIGHT = 2.0
cfg.MODEL.SparseRCNN.L1_WEIGHT = 5.0 #5.0
cfg.MODEL.SparseRCNN.DEEP_SUPERVISION = True
cfg.MODEL.SparseRCNN.NO_OBJECT_WEIGHT = 0.1
# Focal Loss.
cfg.MODEL.SparseRCNN.USE_FOCAL = True
cfg.MODEL.SparseRCNN.ALPHA = 0.25
cfg.MODEL.SparseRCNN.GAMMA = 2.0
cfg.MODEL.SparseRCNN.PRIOR_PROB = 0.01
# Optimizer.
cfg.SOLVER.OPTIMIZER = "ADAMW"
cfg.SOLVER.BACKBONE_MULTIPLIER = 1.0 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.config import CfgNode as CN
|
config.py | """distutils.command.config
Implements the Distutils 'config' command, a (mostly) empty command class
that exists mainly to be sub-classed by specific module distributions and
applications. The idea is that while every "config" command is different,
at least they're all named the same, and users always see "config" in the
list of standard commands. Also, this is a good place to put common
configure-like tasks: "try to compile this C code", or "figure out where
this header file lives".
"""
import os, re
from distutils.core import Command
from distutils.errors import DistutilsExecError
from distutils.sysconfig import customize_compiler
from distutils import log
LANG_EXT = {"c": ".c", "c++": ".cxx"}
class config(Command):
description = "prepare to build"
user_options = [
("compiler=", None, "specify the compiler type"),
("cc=", None, "specify the compiler executable"),
("include-dirs=", "I", "list of directories to search for header files"),
("define=", "D", "C preprocessor macros to define"),
("undef=", "U", "C preprocessor macros to undefine"),
("libraries=", "l", "external C libraries to link with"),
("library-dirs=", "L", "directories to search for external C libraries"),
("noisy", None, "show every action (compile, link, run, ...) taken"),
(
"dump-source",
None,
"dump generated source files before attempting to compile them",
),
]
# The three standard command methods: since the "config" command
# does nothing by default, these are empty.
def initialize_options(self):
self.compiler = None
self.cc = None
self.include_dirs = None
self.libraries = None
self.library_dirs = None
# maximal output for now
self.noisy = 1
self.dump_source = 1
# list of temporary files generated along-the-way that we have
# to clean at some point
self.temp_files = []
def finalize_options(self):
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
elif isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
if self.libraries is None:
self.libraries = []
elif isinstance(self.libraries, str):
self.libraries = [self.libraries]
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, str):
self.library_dirs = self.library_dirs.split(os.pathsep)
def run(self):
pass
# Utility methods for actual "config" commands. The interfaces are
# loosely based on Autoconf macros of similar names. Sub-classes
# may use these freely.
def _check_compiler(self):
"""Check that 'self.compiler' really is a CCompiler object;
if not, make it one.
"""
# We do this late, and only on-demand, because this is an expensive
# import.
from distutils.ccompiler import CCompiler, new_compiler
if not isinstance(self.compiler, CCompiler):
self.compiler = new_compiler(
compiler=self.compiler, dry_run=self.dry_run, force=1
)
customize_compiler(self.compiler)
if self.include_dirs:
self.compiler.set_include_dirs(self.include_dirs)
if self.libraries:
self.compiler.set_libraries(self.libraries)
if self.library_dirs:
self.compiler.set_library_dirs(self.library_dirs)
def _gen_temp_sourcefile(self, body, headers, lang):
filename = "_configtest" + LANG_EXT[lang]
with open(filename, "w") as file:
if headers:
for header in headers:
file.write("#include <%s>\n" % header)
file.write("\n")
file.write(body)
if body[-1] != "\n":
file.write("\n")
return filename
def _preprocess(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
out = "_configtest.i"
self.temp_files.extend([src, out])
self.compiler.preprocess(src, out, include_dirs=include_dirs)
return (src, out)
def _compile(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
if self.dump_source:
dump_file(src, "compiling '%s':" % src)
(obj,) = self.compiler.object_filenames([src])
self.temp_files.extend([src, obj])
self.compiler.compile([src], include_dirs=include_dirs)
return (src, obj)
def _link(self, body, headers, include_dirs, libraries, library_dirs, lang):
(src, obj) = self._compile(body, headers, include_dirs, lang)
prog = os.path.splitext(os.path.basename(src))[0]
self.compiler.link_executable(
[obj],
prog,
libraries=libraries,
library_dirs=library_dirs,
target_lang=lang,
)
if self.compiler.exe_extension is not None:
prog = prog + self.compiler.exe_extension
self.temp_files.append(prog)
return (src, obj, prog)
def _clean(self, *filenames):
if not filenames:
filenames = self.temp_files
self.temp_files = []
log.info("removing: %s", " ".join(filenames))
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
# XXX these ignore the dry-run flag: what to do, what to do? even if
# you want a dry-run build, you still need some sort of configuration
# info. My inclination is to make it up to the real config command to
# consult 'dry_run', and assume a default (minimal) configuration if
# true. The problem with trying to do it here is that you'd have to
# return either true or false from all the 'try' methods, neither of
# which is correct.
# XXX need access to the header search path and maybe default macros.
def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"):
"""Construct a source file from 'body' (a string containing lines | preprocessor succeeded, false if there were any errors.
('body' probably isn't of much use, but what the heck.)
"""
from distutils.ccompiler import CompileError
self._check_compiler()
ok = True
try:
self._preprocess(body, headers, include_dirs, lang)
except CompileError:
ok = False
self._clean()
return ok
def search_cpp(self, pattern, body=None, headers=None, include_dirs=None, lang="c"):
"""Construct a source file (just like 'try_cpp()'), run it through
the preprocessor, and return true if any line of the output matches
'pattern'. 'pattern' should either be a compiled regex object or a
string containing a regex. If both 'body' and 'headers' are None,
preprocesses an empty file -- which can be useful to determine the
symbols the preprocessor and compiler set by default.
"""
self._check_compiler()
src, out = self._preprocess(body, headers, include_dirs, lang)
if isinstance(pattern, str):
pattern = re.compile(pattern)
with open(out) as file:
match = False
while True:
line = file.readline()
if line == "":
break
if pattern.search(line):
match = True
break
self._clean()
return match
def try_compile(self, body, headers=None, include_dirs=None, lang="c"):
"""Try to compile a source file built from 'body' and 'headers'.
Return true on success, false otherwise.
"""
from distutils.ccompiler import CompileError
self._check_compiler()
try:
self._compile(body, headers, include_dirs, lang)
ok = True
except CompileError:
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_link(
self,
body,
headers=None,
include_dirs=None,
libraries=None,
library_dirs=None,
lang="c",
):
"""Try to compile and link a source file, built from 'body' and
'headers', to executable form. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
self._link(body, headers, include_dirs, libraries, library_dirs, lang)
ok = True
except (CompileError, LinkError):
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_run(
self,
body,
headers=None,
include_dirs=None,
libraries=None,
library_dirs=None,
lang="c",
):
"""Try to compile, link to an executable, and run a program
built from 'body' and 'headers'. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
src, obj, exe = self._link(
body, headers, include_dirs, libraries, library_dirs, lang
)
self.spawn([exe])
ok = True
except (CompileError, LinkError, DistutilsExecError):
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
# -- High-level methods --------------------------------------------
# (these are the ones that are actually likely to be useful
# when implementing a real-world config command!)
def check_func(
self,
func,
headers=None,
include_dirs=None,
libraries=None,
library_dirs=None,
decl=0,
call=0,
):
"""Determine if function 'func' is available by constructing a
source file that refers to 'func', and compiles and links it.
If everything succeeds, returns true; otherwise returns false.
The constructed source file starts out by including the header
files listed in 'headers'. If 'decl' is true, it then declares
'func' (as "int func()"); you probably shouldn't supply 'headers'
and set 'decl' true in the same call, or you might get errors about
        conflicting declarations for 'func'.  Finally, the constructed
'main()' function either references 'func' or (if 'call' is true)
calls it. 'libraries' and 'library_dirs' are used when
linking.
"""
self._check_compiler()
body = []
if decl:
body.append("int %s ();" % func)
body.append("int main () {")
if call:
body.append(" %s();" % func)
else:
body.append(" %s;" % func)
body.append("}")
body = "\n".join(body) + "\n"
return self.try_link(body, headers, include_dirs, libraries, library_dirs)
def check_lib(
self,
library,
library_dirs=None,
headers=None,
include_dirs=None,
other_libraries=[],
):
"""Determine if 'library' is available to be linked against,
without actually checking that any particular symbols are provided
by it. 'headers' will be used in constructing the source file to
be compiled, but the only effect of this is to check if all the
header files listed are available. Any libraries listed in
'other_libraries' will be included in the link, in case 'library'
has symbols that depend on other libraries.
"""
self._check_compiler()
return self.try_link(
"int main (void) { }",
headers,
include_dirs,
[library] + other_libraries,
library_dirs,
)
def check_header(self, header, include_dirs=None, library_dirs=None, lang="c"):
"""Determine if the system header file named by 'header_file'
exists and can be found by the preprocessor; return true if so,
false otherwise.
"""
return self.try_cpp(
body="/* No body */", headers=[header], include_dirs=include_dirs
)
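# A minimal (hypothetical) sketch of how a distribution might subclass this
# command to probe the build environment; the names below are illustrative only:
#
#     class my_config(config):
#         def run(self):
#             if self.check_header("zlib.h"):
#                 log.info("zlib development headers are available")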
def dump_file(filename, head=None):
"""Dumps a file content into log.info.
If head is not None, will be dumped before the file content.
"""
if head is None:
log.info("%s", filename)
else:
log.info(head)
file = open(filename)
try:
log.info(file.read())
finally:
file.close() | of C/C++ code) and 'headers' (a list of header files to include)
and run it through the preprocessor. Return true if the |
query.10.test.py | input = """
a.
x | d :- a.
c :- b.
c?
""" | output = """
a.
x | d :- a.
c :- b.
c?
""" | |
form-error-message.component.ts | /**
* @license
* Copyright 2016-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
import { ChangeDetectorRef, Component, Input } from '@angular/core';
import { FormControl, FormGroup } from '@angular/forms';
import { Subscription } from 'rxjs';
import { CounterMessageService } from './../../services/counter-message.service';
import { Param } from './../../shared/param-state';
import { ValidationUtils } from './validators/ValidationUtils';
/**
* \@author Sandeep.Mantha
* \@whatItDoes
*
* \@howToUse
*
*/
@Component({
selector: 'nm-counter-message',
template: `
<div>
Required: {{ totalMandtoryCount - mandatoryLeft }} of
{{ totalMandtoryCount }}
</div>
`
})
export class | {
@Input() element: Param;
@Input() form: FormGroup;
mandatoryLeft: number = 0;
totalCount: number = 0;
totalMandtoryCount: number = 0;
subscription: Subscription;
constructor(
private counterMsgSvc: CounterMessageService,
private cd: ChangeDetectorRef
) {}
ngOnInit() {}
ngAfterViewInit() {
this.displayMessage();
this.cd.detectChanges();
this.subscription = this.counterMsgSvc.counterMessageSubject$.subscribe(
event => {
if (event) {
this.displayMessage();
this.cd.detectChanges();
}
}
);
}
ngOnDestroy() {
if (this.subscription) {
this.subscription.unsubscribe();
}
}
displayMessage() {
this.mandatoryLeft = 0;
this.totalCount = 0;
this.totalMandtoryCount = 0;
this.calculateFieldCount(this.element);
}
calculateFieldCount(param: Param) {
if (param.type.model) {
param.type.model.params.forEach(element => {
if (element.visible) {
if (element.type.model && element.type.model.params) {
this.calculateFieldCount(element);
} else {
this.totalCount++;
          // The condition below evaluates both static and dynamic validation (groups)
if (
ValidationUtils.applyelementStyle(element) ||
ValidationUtils.createRequired(
element,
element.activeValidationGroups
)
) {
this.totalMandtoryCount++;
this.checkControlValidity(this.form, element.config.code);
}
}
}
});
}
}
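  // Recursively walks the form group and counts the control matching `code`
  // as still outstanding when it has validation errors.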
checkControlValidity(formGroup: FormGroup, code: string) {
Object.keys(formGroup.controls).forEach(field => {
let ctrl = formGroup.controls[field];
if (ctrl instanceof FormControl) {
if (field === code && ctrl.errors) {
this.mandatoryLeft++;
}
} else if (ctrl instanceof FormGroup) {
this.checkControlValidity(ctrl, code);
}
});
}
}
| FormErrorMessage |
search.rs | use anyhow::Result;
use crackmes::list::ListCrackme;
use fuzzy_matcher::{skim::SkimMatcherV2, FuzzyMatcher};
use tui::widgets::{Block, Borders, List, ListItem, ListState};
#[derive(Default, Debug)]
pub struct SearchText(String);
impl SearchText {
pub fn as_str(&self) -> &str {
&self.0
}
pub fn push(&mut self, c: char) {
self.0.push(c);
}
pub fn pop(&mut self) {
self.0.pop();
}
pub fn get(&self, length: usize) -> &str {
// accounting for pipe characters at beginning and end, and cursor
let length = length.checked_sub(3).unwrap_or(length);
let start = if self.0.len() > length {
self.0.len() - length
} else {
0
};
&self.0[start..]
}
}
#[derive(Default)]
pub struct Searcher<'crackme> {
store: &'crackme mut [ListCrackme<'crackme>],
found: Vec<usize>,
state: ListState,
matcher: SkimMatcherV2,
}
use reqwest::Client;
impl<'a> Searcher<'a> {
pub fn new(store: &'a mut [ListCrackme<'a>]) -> Searcher<'a> {
let mut searcher = Searcher {
found: (0..store.len()).collect(),
store,
..Default::default()
};
searcher.last();
searcher
}
pub async fn fetch_descriptions(&mut self, client: &mut Client) -> Result<()> {
// we start at the end of the vector, so we only have to download the current and next
// crackme's description (technically only current but we fetch the next one to lighten the
// load later), unless we skip to the top (through the gg bind), then we also need to
// download below us
if let Some(nearby) = self.found.len().checked_sub(1).and_then(|last| {
self.state.selected().map(|i| {
let start = i.saturating_sub(1);
// "saturating" add on the len
let end = if i + 1 > last { last } else { i + 1 };
start..=end
})
}) {
for i in nearby {
let crackme = &mut self.store[i];
if crackme.description().is_none() {
crackme
.try_set_description(
crate::get::get_description(client, crackme.id()).await?,
)
.unwrap();
}
}
}
Ok(())
}
pub fn state(&mut self) -> &mut ListState |
pub fn search(&mut self, query: &str) {
let items = self
.store
.iter()
.enumerate()
.filter(|(_, crackme)| {
self.matcher
.fuzzy_match(&crackme.to_search_string(), query.trim())
.is_some()
})
.map(|(index, _)| index)
.collect();
self.found = items;
self.last();
}
pub fn list(&self) -> List<'static> {
let items: Vec<ListItem> = self
.found
.iter()
.flat_map(|&i| self.store.get(i))
.map(|l| ListItem::new(format!("{} by {}", l.name(), l.author())))
.collect();
List::new(items)
.block(Block::default().borders(Borders::ALL))
.highlight_symbol(">> ")
}
pub fn next(&mut self) {
let i = match self.state.selected() {
Some(i) => {
if i >= self.found.len() - 1 {
self.found.len() - 1
} else {
i + 1
}
}
None => 0,
};
self.state.select(Some(i));
}
pub fn previous(&mut self) {
let i = match self.state.selected() {
Some(i) => {
if i == 0 {
0
} else {
i - 1
}
}
None => 0,
};
self.state.select(Some(i));
}
pub fn selected(&self) -> Option<&ListCrackme<'_>> {
self.state
.selected()
.and_then(|i| self.found.get(i).and_then(|&i| self.store.get(i)))
}
pub fn last(&mut self) {
if !self.found.is_empty() {
self.state.select(None);
self.state.select(Some(self.found.len() - 1));
}
}
pub fn into_selected(self) -> Option<&'a ListCrackme<'a>> {
self.state
.selected()
.and_then(move |i| self.store.get(self.found[i]))
}
}
| {
&mut self.state
} |
package.js | Package.describe({
name: "telescope:kadira",
summary: "Telescope Kadira package",
version: "0.25.2",
git: "https://github.com/TelescopeJS/telescope-kadira.git"
});
Package.onUse(function (api) {
api.versionsFrom(['[email protected]']);
api.use([
'telescope:[email protected]',
'meteorhacks:[email protected]',
'kadira:[email protected]'
], ['client', 'server']);
api.addFiles([
'package-tap.i18n',
'lib/kadira-settings.js'
], ['client', 'server']);
api.addFiles([
'lib/server/kadira.js'
], ['server']);
var languages = ["ar", "bg", "cs", "da", "de", "el", "en", "es", "et", "fr", "hu", "id", "it", "ja", "kk", "ko", "nl", "pl", "pt-BR", "ro", "ru", "sl", "sv", "th", "tr", "vi", "zh-CN"]; | api.addFiles(languagesPaths, ["client", "server"]);
}); | var languagesPaths = languages.map(function (language) {
return "i18n/"+language+".i18n.json";
}); |
urgence.page.spec.ts | import { CUSTOM_ELEMENTS_SCHEMA } from '@angular/core';
import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { UrgencePage } from './urgence.page';
describe('UrgencePage', () => {
let component: UrgencePage;
let fixture: ComponentFixture<UrgencePage>;
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [ UrgencePage ],
schemas: [CUSTOM_ELEMENTS_SCHEMA], | .compileComponents();
}));
beforeEach(() => {
fixture = TestBed.createComponent(UrgencePage);
component = fixture.componentInstance;
fixture.detectChanges();
});
it('should create', () => {
expect(component).toBeTruthy();
});
}); | }) |
controller_status_dto.py | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.10.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ControllerStatusDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'active_thread_count': 'int',
'terminated_thread_count': 'int',
'queued': 'str',
'flow_files_queued': 'int',
'bytes_queued': 'int',
'running_count': 'int',
'stopped_count': 'int',
'invalid_count': 'int',
'disabled_count': 'int',
'active_remote_port_count': 'int',
'inactive_remote_port_count': 'int',
'up_to_date_count': 'int',
'locally_modified_count': 'int',
'stale_count': 'int',
'locally_modified_and_stale_count': 'int',
'sync_failure_count': 'int'
}
attribute_map = {
'active_thread_count': 'activeThreadCount',
'terminated_thread_count': 'terminatedThreadCount',
'queued': 'queued',
'flow_files_queued': 'flowFilesQueued',
'bytes_queued': 'bytesQueued',
'running_count': 'runningCount',
'stopped_count': 'stoppedCount',
'invalid_count': 'invalidCount', | 'up_to_date_count': 'upToDateCount',
'locally_modified_count': 'locallyModifiedCount',
'stale_count': 'staleCount',
'locally_modified_and_stale_count': 'locallyModifiedAndStaleCount',
'sync_failure_count': 'syncFailureCount'
}
def __init__(self, active_thread_count=None, terminated_thread_count=None, queued=None, flow_files_queued=None, bytes_queued=None, running_count=None, stopped_count=None, invalid_count=None, disabled_count=None, active_remote_port_count=None, inactive_remote_port_count=None, up_to_date_count=None, locally_modified_count=None, stale_count=None, locally_modified_and_stale_count=None, sync_failure_count=None):
"""
ControllerStatusDTO - a model defined in Swagger
"""
self._active_thread_count = None
self._terminated_thread_count = None
self._queued = None
self._flow_files_queued = None
self._bytes_queued = None
self._running_count = None
self._stopped_count = None
self._invalid_count = None
self._disabled_count = None
self._active_remote_port_count = None
self._inactive_remote_port_count = None
self._up_to_date_count = None
self._locally_modified_count = None
self._stale_count = None
self._locally_modified_and_stale_count = None
self._sync_failure_count = None
if active_thread_count is not None:
self.active_thread_count = active_thread_count
if terminated_thread_count is not None:
self.terminated_thread_count = terminated_thread_count
if queued is not None:
self.queued = queued
if flow_files_queued is not None:
self.flow_files_queued = flow_files_queued
if bytes_queued is not None:
self.bytes_queued = bytes_queued
if running_count is not None:
self.running_count = running_count
if stopped_count is not None:
self.stopped_count = stopped_count
if invalid_count is not None:
self.invalid_count = invalid_count
if disabled_count is not None:
self.disabled_count = disabled_count
if active_remote_port_count is not None:
self.active_remote_port_count = active_remote_port_count
if inactive_remote_port_count is not None:
self.inactive_remote_port_count = inactive_remote_port_count
if up_to_date_count is not None:
self.up_to_date_count = up_to_date_count
if locally_modified_count is not None:
self.locally_modified_count = locally_modified_count
if stale_count is not None:
self.stale_count = stale_count
if locally_modified_and_stale_count is not None:
self.locally_modified_and_stale_count = locally_modified_and_stale_count
if sync_failure_count is not None:
self.sync_failure_count = sync_failure_count
@property
def active_thread_count(self):
"""
Gets the active_thread_count of this ControllerStatusDTO.
The number of active threads in the NiFi.
:return: The active_thread_count of this ControllerStatusDTO.
:rtype: int
"""
return self._active_thread_count
@active_thread_count.setter
def active_thread_count(self, active_thread_count):
"""
Sets the active_thread_count of this ControllerStatusDTO.
The number of active threads in the NiFi.
:param active_thread_count: The active_thread_count of this ControllerStatusDTO.
:type: int
"""
self._active_thread_count = active_thread_count
@property
def terminated_thread_count(self):
"""
Gets the terminated_thread_count of this ControllerStatusDTO.
The number of terminated threads in the NiFi.
:return: The terminated_thread_count of this ControllerStatusDTO.
:rtype: int
"""
return self._terminated_thread_count
@terminated_thread_count.setter
def terminated_thread_count(self, terminated_thread_count):
"""
Sets the terminated_thread_count of this ControllerStatusDTO.
The number of terminated threads in the NiFi.
:param terminated_thread_count: The terminated_thread_count of this ControllerStatusDTO.
:type: int
"""
self._terminated_thread_count = terminated_thread_count
@property
def queued(self):
"""
Gets the queued of this ControllerStatusDTO.
The number of flowfiles queued in the NiFi.
:return: The queued of this ControllerStatusDTO.
:rtype: str
"""
return self._queued
@queued.setter
def queued(self, queued):
"""
Sets the queued of this ControllerStatusDTO.
The number of flowfiles queued in the NiFi.
:param queued: The queued of this ControllerStatusDTO.
:type: str
"""
self._queued = queued
@property
def flow_files_queued(self):
"""
Gets the flow_files_queued of this ControllerStatusDTO.
The number of FlowFiles queued across the entire flow
:return: The flow_files_queued of this ControllerStatusDTO.
:rtype: int
"""
return self._flow_files_queued
@flow_files_queued.setter
def flow_files_queued(self, flow_files_queued):
"""
Sets the flow_files_queued of this ControllerStatusDTO.
The number of FlowFiles queued across the entire flow
:param flow_files_queued: The flow_files_queued of this ControllerStatusDTO.
:type: int
"""
self._flow_files_queued = flow_files_queued
@property
def bytes_queued(self):
"""
Gets the bytes_queued of this ControllerStatusDTO.
The size of the FlowFiles queued across the entire flow
:return: The bytes_queued of this ControllerStatusDTO.
:rtype: int
"""
return self._bytes_queued
@bytes_queued.setter
def bytes_queued(self, bytes_queued):
"""
Sets the bytes_queued of this ControllerStatusDTO.
The size of the FlowFiles queued across the entire flow
:param bytes_queued: The bytes_queued of this ControllerStatusDTO.
:type: int
"""
self._bytes_queued = bytes_queued
@property
def running_count(self):
"""
Gets the running_count of this ControllerStatusDTO.
The number of running components in the NiFi.
:return: The running_count of this ControllerStatusDTO.
:rtype: int
"""
return self._running_count
@running_count.setter
def running_count(self, running_count):
"""
Sets the running_count of this ControllerStatusDTO.
The number of running components in the NiFi.
:param running_count: The running_count of this ControllerStatusDTO.
:type: int
"""
self._running_count = running_count
@property
def stopped_count(self):
"""
Gets the stopped_count of this ControllerStatusDTO.
The number of stopped components in the NiFi.
:return: The stopped_count of this ControllerStatusDTO.
:rtype: int
"""
return self._stopped_count
@stopped_count.setter
def stopped_count(self, stopped_count):
"""
Sets the stopped_count of this ControllerStatusDTO.
The number of stopped components in the NiFi.
:param stopped_count: The stopped_count of this ControllerStatusDTO.
:type: int
"""
self._stopped_count = stopped_count
@property
def invalid_count(self):
"""
Gets the invalid_count of this ControllerStatusDTO.
The number of invalid components in the NiFi.
:return: The invalid_count of this ControllerStatusDTO.
:rtype: int
"""
return self._invalid_count
@invalid_count.setter
def invalid_count(self, invalid_count):
"""
Sets the invalid_count of this ControllerStatusDTO.
The number of invalid components in the NiFi.
:param invalid_count: The invalid_count of this ControllerStatusDTO.
:type: int
"""
self._invalid_count = invalid_count
@property
def disabled_count(self):
"""
Gets the disabled_count of this ControllerStatusDTO.
The number of disabled components in the NiFi.
:return: The disabled_count of this ControllerStatusDTO.
:rtype: int
"""
return self._disabled_count
@disabled_count.setter
def disabled_count(self, disabled_count):
"""
Sets the disabled_count of this ControllerStatusDTO.
The number of disabled components in the NiFi.
:param disabled_count: The disabled_count of this ControllerStatusDTO.
:type: int
"""
self._disabled_count = disabled_count
@property
def active_remote_port_count(self):
"""
Gets the active_remote_port_count of this ControllerStatusDTO.
The number of active remote ports in the NiFi.
:return: The active_remote_port_count of this ControllerStatusDTO.
:rtype: int
"""
return self._active_remote_port_count
@active_remote_port_count.setter
def active_remote_port_count(self, active_remote_port_count):
"""
Sets the active_remote_port_count of this ControllerStatusDTO.
The number of active remote ports in the NiFi.
:param active_remote_port_count: The active_remote_port_count of this ControllerStatusDTO.
:type: int
"""
self._active_remote_port_count = active_remote_port_count
@property
def inactive_remote_port_count(self):
"""
Gets the inactive_remote_port_count of this ControllerStatusDTO.
The number of inactive remote ports in the NiFi.
:return: The inactive_remote_port_count of this ControllerStatusDTO.
:rtype: int
"""
return self._inactive_remote_port_count
@inactive_remote_port_count.setter
def inactive_remote_port_count(self, inactive_remote_port_count):
"""
Sets the inactive_remote_port_count of this ControllerStatusDTO.
The number of inactive remote ports in the NiFi.
:param inactive_remote_port_count: The inactive_remote_port_count of this ControllerStatusDTO.
:type: int
"""
self._inactive_remote_port_count = inactive_remote_port_count
@property
def up_to_date_count(self):
"""
Gets the up_to_date_count of this ControllerStatusDTO.
The number of up to date versioned process groups in the NiFi.
:return: The up_to_date_count of this ControllerStatusDTO.
:rtype: int
"""
return self._up_to_date_count
@up_to_date_count.setter
def up_to_date_count(self, up_to_date_count):
"""
Sets the up_to_date_count of this ControllerStatusDTO.
The number of up to date versioned process groups in the NiFi.
:param up_to_date_count: The up_to_date_count of this ControllerStatusDTO.
:type: int
"""
self._up_to_date_count = up_to_date_count
@property
def locally_modified_count(self):
"""
Gets the locally_modified_count of this ControllerStatusDTO.
The number of locally modified versioned process groups in the NiFi.
:return: The locally_modified_count of this ControllerStatusDTO.
:rtype: int
"""
return self._locally_modified_count
@locally_modified_count.setter
def locally_modified_count(self, locally_modified_count):
"""
Sets the locally_modified_count of this ControllerStatusDTO.
The number of locally modified versioned process groups in the NiFi.
:param locally_modified_count: The locally_modified_count of this ControllerStatusDTO.
:type: int
"""
self._locally_modified_count = locally_modified_count
@property
def stale_count(self):
"""
Gets the stale_count of this ControllerStatusDTO.
The number of stale versioned process groups in the NiFi.
:return: The stale_count of this ControllerStatusDTO.
:rtype: int
"""
return self._stale_count
@stale_count.setter
def stale_count(self, stale_count):
"""
Sets the stale_count of this ControllerStatusDTO.
The number of stale versioned process groups in the NiFi.
:param stale_count: The stale_count of this ControllerStatusDTO.
:type: int
"""
self._stale_count = stale_count
@property
def locally_modified_and_stale_count(self):
"""
Gets the locally_modified_and_stale_count of this ControllerStatusDTO.
The number of locally modified and stale versioned process groups in the NiFi.
:return: The locally_modified_and_stale_count of this ControllerStatusDTO.
:rtype: int
"""
return self._locally_modified_and_stale_count
@locally_modified_and_stale_count.setter
def locally_modified_and_stale_count(self, locally_modified_and_stale_count):
"""
Sets the locally_modified_and_stale_count of this ControllerStatusDTO.
The number of locally modified and stale versioned process groups in the NiFi.
:param locally_modified_and_stale_count: The locally_modified_and_stale_count of this ControllerStatusDTO.
:type: int
"""
self._locally_modified_and_stale_count = locally_modified_and_stale_count
@property
def sync_failure_count(self):
"""
Gets the sync_failure_count of this ControllerStatusDTO.
The number of versioned process groups in the NiFi that are unable to sync to a registry.
:return: The sync_failure_count of this ControllerStatusDTO.
:rtype: int
"""
return self._sync_failure_count
@sync_failure_count.setter
def sync_failure_count(self, sync_failure_count):
"""
Sets the sync_failure_count of this ControllerStatusDTO.
The number of versioned process groups in the NiFi that are unable to sync to a registry.
:param sync_failure_count: The sync_failure_count of this ControllerStatusDTO.
:type: int
"""
self._sync_failure_count = sync_failure_count
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ControllerStatusDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other | 'disabled_count': 'disabledCount',
'active_remote_port_count': 'activeRemotePortCount',
'inactive_remote_port_count': 'inactiveRemotePortCount', |
gen_schema.go | //go:build ignore
// +build ignore
package main
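
// gen_schema.go fetches Skia's builder_name_schema.json from Gitiles and
// regenerates task_name_schema_gen.go; run it with "go run gen_schema.go".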
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"runtime"
"sort"
"strings"
"go.skia.org/infra/go/exec"
"go.skia.org/infra/go/git"
"go.skia.org/infra/go/gitiles"
"go.skia.org/infra/go/sklog"
"go.skia.org/infra/go/taskname"
)
const (
TARGET_FILE = "task_name_schema_gen.go"
TMPL = `// Code generated by "go run gen_schema.go"; DO NOT EDIT
package taskname
var SCHEMA_FROM_GIT = map[string]*Schema{
%s}
var SEPARATOR_FROM_GIT = "%s"
`
)
type taskNameSchema struct {
// Schema maps a role (e.g. Build) to a taskname.Schema instance.
	// Note: the JSON names are a carryover from Buildbot days, when builder == task.
Schema map[string]*taskname.Schema `json:"builder_name_schema"`
	// TaskNameSep specifies how the various keys will be separated, e.g. "-"
TaskNameSep string `json:"builder_name_sep"`
}
func main() | {
_, filename, _, _ := runtime.Caller(0)
pkgDir := path.Dir(filename)
// Load the schema from JSON
r := gitiles.NewRepo("https://skia.googlesource.com/skia", nil)
b, err := r.ReadFileAtRef(context.Background(), "infra/bots/recipe_modules/builder_name_schema/builder_name_schema.json", git.MainBranch)
if err != nil {
sklog.Fatalf("Could not read schema file: %s\n", err)
}
schema := new(taskNameSchema)
if err := json.NewDecoder(bytes.NewReader(b)).Decode(schema); err != nil {
sklog.Fatalf("Could not decode schema file: %s\n", err)
}
schemaLines := []string{}
for key, value := range schema.Schema {
line := fmt.Sprintf("\t\"%s\": %#v,\n", key, value)
// "%#v" includes the package name, ie "taskname.Schema",
// but since the generated file is part of the taskname
// package, that results in a circular import. So we remove it
// here.
line = strings.Replace(line, "taskname.", "", -1)
schemaLines = append(schemaLines, line)
}
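	// Sort so the generated file is deterministic from run to run.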
sort.Strings(schemaLines)
assetsStr := strings.Join(schemaLines, "")
fileContents := []byte(fmt.Sprintf(TMPL, assetsStr, schema.TaskNameSep))
targetFile := path.Join(pkgDir, TARGET_FILE)
if err := ioutil.WriteFile(targetFile, fileContents, os.ModePerm); err != nil {
sklog.Fatal(err)
}
if _, err := exec.RunCwd(context.Background(), ".", "gofmt", "-s", "-w", targetFile); err != nil {
sklog.Fatal(err)
}
} |
|
OpenStack.py | # Getting started with OpenStack using libcloud
# http://developer.openstack.org/firstapp-libcloud/getting_started.html
from libcloud.compute.ssh import *
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from Cloud import Cloud
from settings import *
# noinspection PyPep8Naming
class OpenStack(Cloud):
def __init__(self):
super().__init__()
openstack = get_driver(Provider.OPENSTACK)
self.driver = openstack(user,
password,
ex_tenant_name = tenant_name,
ex_force_auth_url = auth_url,
ex_force_auth_version = '2.0_password',
ex_force_service_region = service_region)
self.activeIps = []
def | (self):
print('Retrieving infrastructure information from SwitchEngines ...')
images = self.driver.list_images()
sizes = self.driver.list_sizes()
security_groups = self.driver.ex_list_security_groups()
networks = self.driver.ex_list_networks()
print('Done.')
security_group = [s for s in security_groups if s.name == 'anywhere'][0]
network = [s for s in networks if s.name == 'My network'][0]
size = [s for s in sizes if s.name == 'c1.micro'][0]
# noinspection PyPep8Naming
mongoDbIp = self.__run_instance('MongoDB', size, images, security_group, network)
restServerIP = self.__run_instance('RESTServer', size, images, security_group, network)
restClientIP = self.__run_instance('RESTClient', size, images, security_group, network)
self.__additionalOperations(restServerIP, restClientIP, mongoDbIp)
@staticmethod
def __additionalOperations(restServerIP, restClientIP, mongoDbIp):
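        """SSH into the REST server and client VMs and launch the demo
        scripts, pointing both at the MongoDB instance's private IP."""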
clientSSH = ShellOutSSHClient(restServerIP, username = 'ubuntu')
clientSSH.connect()
try:
clientSSH.run('python /home/ubuntu/Downloads/pyserver.py %s &' % mongoDbIp)
finally:
clientSSH.close()
clientSSH = ShellOutSSHClient(restClientIP, username = 'ubuntu')
clientSSH.connect()
try:
clientSSH.run('python /home/ubuntu/Downloads/pyclient.py %s &' % mongoDbIp)
finally:
clientSSH.close()
def __run_instance(self, instancename, size, images, security_group, network):
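        """Boot one instance from the image matching `instancename`, wait for
        it to run, attach a floating IP, and return its private IP."""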
print('Creating a new node ...')
image = [s for s in images if s.name == instancename][0]
node = self.driver.create_node(name = instancename,
size = size,
image = image,
ex_security_groups = [security_group],
ex_keyname = 'switch-engine',
networks = [network])
print('Done.')
print("Waiting for %s ..." % instancename)
self.driver.wait_until_running([node])
self.activeNodes.append(node)
nodes = self.driver.list_nodes()
instanceNode = [s for s in nodes if s.name == instancename][0]
privateIp = instanceNode.private_ips[0]
print('Instance ready.')
print('Attaching a Public IP ...')
ip = self.driver.ex_create_floating_ip()
self.activeIps.append(ip)
self.driver.ex_attach_floating_ip_to_node(node, ip)
print('Done.')
return privateIp
def destroy(self):
print('Destroying the instance on SwitchEngines ...')
for node in self.activeNodes:
node.destroy()
for ip in self.activeIps:
self.driver.ex_delete_floating_ip(ip)
print('Done.')
| create |
sign-in.component.ts | import { Component, OnInit } from '@angular/core';
import { NgForm } from "@angular/forms";
import { Router } from "@angular/router"; |
@Component({
selector: 'app-sign-in',
templateUrl: './sign-in.component.html',
styleUrls: ['./sign-in.component.css']
})
export class SignInComponent implements OnInit {
  constructor(private userService: UserService, private router: Router) { }
  model = {
    email: '',
    password: ''
  };
emailRegex = /^(([^<>()\[\]\\.,;:\s@"]+(\.[^<>()\[\]\\.,;:\s@"]+)*)|(".+"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$/;
serverErrorMessages: string;
ngOnInit() {
}
  onSubmit(form: NgForm) {
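    // POST the credentials; on success store the JWT and navigate to the
    // profile page, otherwise surface the server's error message.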
this.userService.login(form.value).subscribe(
res => {
//console.log(res);
this.userService.setToken(res['token']);
        const token = this.userService.getToken();
        console.log(token);
this.router.navigateByUrl('/profile');
},
err => {
this.serverErrorMessages = err.error.message;
console.error(err.error.message);
}
);
}
} |
import { UserService } from '../../shared/user.service'; |
structs2.rs | // structs2.rs
// Address all the TODOs to make the tests pass!
#[derive(Debug)]
struct Order {
name: String,
year: u32,
made_by_phone: bool,
made_by_mobile: bool,
made_by_email: bool,
item_number: u32,
count: u32,
}
fn create_order_template() -> Order {
Order {
name: String::from("Bob"),
year: 2019,
made_by_phone: false,
made_by_mobile: false,
made_by_email: true,
item_number: 123,
count: 0,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn your_order() {
let order_template = create_order_template();
// TODO: Create your own order using the update syntax and template above!
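        // `..order_template` below is struct update syntax: every field not
        // listed explicitly is filled in from the template instance.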
        let your_order = Order {
name: "Hacker in Rust".to_string(),
count: 1,
..order_template
};
assert_eq!(your_order.name, "Hacker in Rust");
assert_eq!(your_order.year, order_template.year);
assert_eq!(your_order.made_by_phone, order_template.made_by_phone);
assert_eq!(your_order.made_by_mobile, order_template.made_by_mobile);
assert_eq!(your_order.made_by_email, order_template.made_by_email);
assert_eq!(your_order.item_number, order_template.item_number); | assert_eq!(your_order.count, 1);
}
} |
|
basic.stories.tsx | import React from 'react';
import ReactDOM from 'react-dom';
import { Mentions } from 'antd';
const { Option } = Mentions;
function | (value) {
console.log('Change:', value);
}
function onSelect(option) {
console.log('select', option);
}
/**
 * Basic usage.
 */
export const basic = () => {
return ReactDOM.render(
<Mentions
style={{ width: '100%' }}
onChange={onChange}
onSelect={onSelect}
defaultValue="@afc163"
>
<Option value="afc163">afc163</Option>
<Option value="zombieJ">zombieJ</Option>
<Option value="yesmeck">yesmeck</Option>
</Mentions>,
document.getElementById('root'))
}
export default {
title: 'Ant Design/Data-Entry/Mentions',
component: Mentions,
} | onChange |
output.rs | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// <p>Contains the response to a successful <a>GetSessionToken</a> request,
/// including temporary Amazon Web Services credentials that can be used to make Amazon Web Services requests. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetSessionTokenOutput {
/// <p>The temporary security credentials, which include an access key ID, a secret access
/// key, and a security (or session) token.</p>
/// <note>
/// <p>The size of the security token that STS API operations return is not fixed. We
/// strongly recommend that you make no assumptions about the maximum size.</p>
/// </note>
pub credentials: std::option::Option<crate::model::Credentials>,
}
impl std::fmt::Debug for GetSessionTokenOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetSessionTokenOutput");
formatter.field("credentials", &self.credentials);
formatter.finish()
}
}
/// See [`GetSessionTokenOutput`](crate::output::GetSessionTokenOutput)
pub mod get_session_token_output {
/// A builder for [`GetSessionTokenOutput`](crate::output::GetSessionTokenOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) credentials: std::option::Option<crate::model::Credentials>,
}
impl Builder {
/// <p>The temporary security credentials, which include an access key ID, a secret access
/// key, and a security (or session) token.</p>
/// <note>
/// <p>The size of the security token that STS API operations return is not fixed. We
/// strongly recommend that you make no assumptions about the maximum size.</p>
/// </note>
pub fn credentials(mut self, input: crate::model::Credentials) -> Self {
self.credentials = Some(input);
self
}
pub fn set_credentials(
mut self,
input: std::option::Option<crate::model::Credentials>,
) -> Self {
self.credentials = input;
self
}
/// Consumes the builder and constructs a [`GetSessionTokenOutput`](crate::output::GetSessionTokenOutput)
pub fn build(self) -> crate::output::GetSessionTokenOutput {
crate::output::GetSessionTokenOutput {
credentials: self.credentials,
}
}
}
}
impl GetSessionTokenOutput {
/// Creates a new builder-style object to manufacture [`GetSessionTokenOutput`](crate::output::GetSessionTokenOutput)
pub fn builder() -> crate::output::get_session_token_output::Builder {
crate::output::get_session_token_output::Builder::default()
}
}
/// <p>Contains the response to a successful <a>GetFederationToken</a> request,
/// including temporary Amazon Web Services credentials that can be used to make Amazon Web Services requests. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetFederationTokenOutput {
/// <p>The temporary security credentials, which include an access key ID, a secret access key,
/// and a security (or session) token.</p>
/// <note>
/// <p>The size of the security token that STS API operations return is not fixed. We
/// strongly recommend that you make no assumptions about the maximum size.</p>
/// </note>
pub credentials: std::option::Option<crate::model::Credentials>,
/// <p>Identifiers for the federated user associated with the credentials (such as
/// <code>arn:aws:sts::123456789012:federated-user/Bob</code> or
/// <code>123456789012:Bob</code>). You can use the federated user's ARN in your
/// resource-based policies, such as an Amazon S3 bucket policy. </p>
pub federated_user: std::option::Option<crate::model::FederatedUser>,
/// <p>A percentage value that indicates the packed size of the session policies and session
/// tags combined passed in the request. The request fails if the packed size is greater than 100 percent,
/// which means the policies and tags exceeded the allowed space.</p>
pub packed_policy_size: std::option::Option<i32>,
}
impl std::fmt::Debug for GetFederationTokenOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetFederationTokenOutput");
formatter.field("credentials", &self.credentials);
formatter.field("federated_user", &self.federated_user);
formatter.field("packed_policy_size", &self.packed_policy_size);
formatter.finish()
}
}
/// See [`GetFederationTokenOutput`](crate::output::GetFederationTokenOutput)
pub mod get_federation_token_output {
/// A builder for [`GetFederationTokenOutput`](crate::output::GetFederationTokenOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) credentials: std::option::Option<crate::model::Credentials>,
pub(crate) federated_user: std::option::Option<crate::model::FederatedUser>,
pub(crate) packed_policy_size: std::option::Option<i32>,
}
impl Builder {
/// <p>The temporary security credentials, which include an access key ID, a secret access key,
/// and a security (or session) token.</p>
/// <note>
/// <p>The size of the security token that STS API operations return is not fixed. We
/// strongly recommend that you make no assumptions about the maximum size.</p>
/// </note>
pub fn credentials(mut self, input: crate::model::Credentials) -> Self {
self.credentials = Some(input);
self
}
pub fn set_credentials(
mut self,
input: std::option::Option<crate::model::Credentials>,
) -> Self {
self.credentials = input;
self
}
/// <p>Identifiers for the federated user associated with the credentials (such as
/// <code>arn:aws:sts::123456789012:federated-user/Bob</code> or
/// <code>123456789012:Bob</code>). You can use the federated user's ARN in your
/// resource-based policies, such as an Amazon S3 bucket policy. </p>
pub fn federated_user(mut self, input: crate::model::FederatedUser) -> Self {
self.federated_user = Some(input);
self
}
pub fn set_federated_user(
mut self,
input: std::option::Option<crate::model::FederatedUser>,
) -> Self {
self.federated_user = input;
self
}
/// <p>A percentage value that indicates the packed size of the session policies and session
/// tags combined passed in the request. The request fails if the packed size is greater than 100 percent,
/// which means the policies and tags exceeded the allowed space.</p>
pub fn packed_policy_size(mut self, input: i32) -> Self {
self.packed_policy_size = Some(input);
self
}
pub fn set_packed_policy_size(mut self, input: std::option::Option<i32>) -> Self {
self.packed_policy_size = input;
self
}
/// Consumes the builder and constructs a [`GetFederationTokenOutput`](crate::output::GetFederationTokenOutput)
pub fn build(self) -> crate::output::GetFederationTokenOutput {
crate::output::GetFederationTokenOutput {
credentials: self.credentials,
federated_user: self.federated_user,
packed_policy_size: self.packed_policy_size,
}
}
}
}
impl GetFederationTokenOutput {
/// Creates a new builder-style object to manufacture [`GetFederationTokenOutput`](crate::output::GetFederationTokenOutput)
pub fn builder() -> crate::output::get_federation_token_output::Builder {
crate::output::get_federation_token_output::Builder::default()
}
}
/// <p>Contains the response to a successful <a>GetCallerIdentity</a> request,
/// including information about the entity making the request.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetCallerIdentityOutput {
/// <p>The unique identifier of the calling entity. The exact value depends on the type of
/// entity that is making the call. The values returned are those listed in the <b>aws:userid</b> column in the <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable">Principal
/// table</a> found on the <b>Policy Variables</b> reference
/// page in the <i>IAM User Guide</i>.</p>
pub user_id: std::option::Option<std::string::String>,
/// <p>The Amazon Web Services account ID number of the account that owns or contains the calling
/// entity.</p>
pub account: std::option::Option<std::string::String>,
/// <p>The Amazon Web Services ARN associated with the calling entity.</p>
pub arn: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for GetCallerIdentityOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetCallerIdentityOutput");
formatter.field("user_id", &self.user_id);
formatter.field("account", &self.account);
formatter.field("arn", &self.arn);
formatter.finish()
}
}
/// See [`GetCallerIdentityOutput`](crate::output::GetCallerIdentityOutput)
pub mod get_caller_identity_output {
/// A builder for [`GetCallerIdentityOutput`](crate::output::GetCallerIdentityOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) user_id: std::option::Option<std::string::String>,
pub(crate) account: std::option::Option<std::string::String>,
pub(crate) arn: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The unique identifier of the calling entity. The exact value depends on the type of
/// entity that is making the call. The values returned are those listed in the <b>aws:userid</b> column in the <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable">Principal
/// table</a> found on the <b>Policy Variables</b> reference
/// page in the <i>IAM User Guide</i>.</p>
pub fn user_id(mut self, input: impl Into<std::string::String>) -> Self {
self.user_id = Some(input.into());
self
}
pub fn set_user_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.user_id = input;
self
}
/// <p>The Amazon Web Services account ID number of the account that owns or contains the calling
/// entity.</p>
pub fn account(mut self, input: impl Into<std::string::String>) -> Self {
self.account = Some(input.into());
self
}
pub fn set_account(mut self, input: std::option::Option<std::string::String>) -> Self {
self.account = input;
self
}
/// <p>The Amazon Web Services ARN associated with the calling entity.</p>
pub fn arn(mut self, input: impl Into<std::string::String>) -> Self {
self.arn = Some(input.into());
self
}
pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.arn = input;
self
}
/// Consumes the builder and constructs a [`GetCallerIdentityOutput`](crate::output::GetCallerIdentityOutput)
pub fn build(self) -> crate::output::GetCallerIdentityOutput {
crate::output::GetCallerIdentityOutput {
user_id: self.user_id,
account: self.account,
arn: self.arn,
}
}
}
}
impl GetCallerIdentityOutput {
/// Creates a new builder-style object to manufacture [`GetCallerIdentityOutput`](crate::output::GetCallerIdentityOutput)
pub fn builder() -> crate::output::get_caller_identity_output::Builder {
crate::output::get_caller_identity_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetAccessKeyInfoOutput {
/// <p>The number used to identify the Amazon Web Services account.</p>
pub account: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for GetAccessKeyInfoOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetAccessKeyInfoOutput");
formatter.field("account", &self.account);
formatter.finish()
}
}
/// See [`GetAccessKeyInfoOutput`](crate::output::GetAccessKeyInfoOutput)
pub mod get_access_key_info_output {
/// A builder for [`GetAccessKeyInfoOutput`](crate::output::GetAccessKeyInfoOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) account: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The number used to identify the Amazon Web Services account.</p>
pub fn account(mut self, input: impl Into<std::string::String>) -> Self {
self.account = Some(input.into());
self
}
pub fn set_account(mut self, input: std::option::Option<std::string::String>) -> Self {
self.account = input;
self
}
/// Consumes the builder and constructs a [`GetAccessKeyInfoOutput`](crate::output::GetAccessKeyInfoOutput)
pub fn build(self) -> crate::output::GetAccessKeyInfoOutput {
crate::output::GetAccessKeyInfoOutput {
account: self.account,
}
}
}
}
impl GetAccessKeyInfoOutput {
/// Creates a new builder-style object to manufacture [`GetAccessKeyInfoOutput`](crate::output::GetAccessKeyInfoOutput)
pub fn builder() -> crate::output::get_access_key_info_output::Builder {
crate::output::get_access_key_info_output::Builder::default()
}
}
/// <p>A document that contains additional information about the authorization status of a
/// request from an encoded message that is returned in response to an Amazon Web Services request.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DecodeAuthorizationMessageOutput {
/// <p>An XML document that contains the decoded message.</p>
pub decoded_message: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DecodeAuthorizationMessageOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DecodeAuthorizationMessageOutput");
formatter.field("decoded_message", &self.decoded_message);
formatter.finish()
}
}
/// See [`DecodeAuthorizationMessageOutput`](crate::output::DecodeAuthorizationMessageOutput)
pub mod decode_authorization_message_output {
/// A builder for [`DecodeAuthorizationMessageOutput`](crate::output::DecodeAuthorizationMessageOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) decoded_message: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>An XML document that contains the decoded message.</p>
pub fn decoded_message(mut self, input: impl Into<std::string::String>) -> Self {
self.decoded_message = Some(input.into());
self
}
pub fn set_decoded_message(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.decoded_message = input;
self
}
/// Consumes the builder and constructs a [`DecodeAuthorizationMessageOutput`](crate::output::DecodeAuthorizationMessageOutput)
pub fn build(self) -> crate::output::DecodeAuthorizationMessageOutput {
crate::output::DecodeAuthorizationMessageOutput {
decoded_message: self.decoded_message,
}
}
}
}
impl DecodeAuthorizationMessageOutput {
/// Creates a new builder-style object to manufacture [`DecodeAuthorizationMessageOutput`](crate::output::DecodeAuthorizationMessageOutput)
pub fn builder() -> crate::output::decode_authorization_message_output::Builder {
crate::output::decode_authorization_message_output::Builder::default()
}
}
/// <p>Contains the response to a successful <a>AssumeRoleWithWebIdentity</a>
/// request, including temporary Amazon Web Services credentials that can be used to make Amazon Web Services requests. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AssumeRoleWithWebIdentityOutput {
/// <p>The temporary security credentials, which include an access key ID, a secret access key,
/// and a security token.</p>
/// <note>
/// <p>The size of the security token that STS API operations return is not fixed. We
/// strongly recommend that you make no assumptions about the maximum size.</p>
/// </note>
pub credentials: std::option::Option<crate::model::Credentials>,
/// <p>The unique user identifier that is returned by the identity provider. This identifier is
/// associated with the <code>WebIdentityToken</code> that was submitted with the
/// <code>AssumeRoleWithWebIdentity</code> call. The identifier is typically unique to the
/// user and the application that acquired the <code>WebIdentityToken</code> (pairwise
/// identifier). For OpenID Connect ID tokens, this field contains the value returned by the
/// identity provider as the token's <code>sub</code> (Subject) claim. </p>
pub subject_from_web_identity_token: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers that you
/// can use to refer to the resulting temporary security credentials. For example, you can
/// reference these credentials as a principal in a resource-based policy by using the ARN or
/// assumed role ID. The ARN and ID include the <code>RoleSessionName</code> that you specified
/// when you called <code>AssumeRole</code>. </p>
pub assumed_role_user: std::option::Option<crate::model::AssumedRoleUser>,
/// <p>A percentage value that indicates the packed size of the session policies and session
/// tags combined passed in the request. The request fails if the packed size is greater than 100 percent,
/// which means the policies and tags exceeded the allowed space.</p>
pub packed_policy_size: std::option::Option<i32>,
/// <p> The issuing authority of the web identity token presented. For OpenID Connect ID
/// tokens, this contains the value of the <code>iss</code> field. For OAuth 2.0 access tokens,
/// this contains the value of the <code>ProviderId</code> parameter that was passed in the
/// <code>AssumeRoleWithWebIdentity</code> request.</p>
pub provider: std::option::Option<std::string::String>,
/// <p>The intended audience (also known as client ID) of the web identity token. This is
/// traditionally the client identifier issued to the application that requested the web
/// identity token.</p>
pub audience: std::option::Option<std::string::String>,
/// <p>The value of the source identity that is returned in the JSON web token (JWT) from the
/// identity provider.</p>
/// <p>You can require users to set a source identity value when they assume a role. You do
/// this by using the <code>sts:SourceIdentity</code> condition key in a role trust policy.
/// That way, actions that are taken with the role are associated with that user. After the
/// source identity is set, the value cannot be changed. It is present in the request for all
/// actions that are taken by the role and persists across <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining">chained
/// role</a> sessions. You can configure your identity provider to use an attribute
/// associated with your users, like user name or email, as the source identity when calling
/// <code>AssumeRoleWithWebIdentity</code>. You do this by adding a claim to the JSON web
/// token. To learn more about OIDC tokens and claims, see <a href="https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html">Using Tokens with User Pools</a> in the <i>Amazon Cognito Developer Guide</i>.
/// For more information about using source identity, see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html">Monitor and control
/// actions taken with assumed roles</a> in the
/// <i>IAM User Guide</i>.</p>
/// <p>The regex used to validate this parameter is a string of characters
/// consisting of upper- and lower-case alphanumeric characters with no spaces. You can
/// also include underscores or any of the following characters: =,.@-</p>
pub source_identity: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for AssumeRoleWithWebIdentityOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("AssumeRoleWithWebIdentityOutput");
formatter.field("credentials", &self.credentials);
formatter.field(
"subject_from_web_identity_token",
&self.subject_from_web_identity_token,
);
formatter.field("assumed_role_user", &self.assumed_role_user);
formatter.field("packed_policy_size", &self.packed_policy_size);
formatter.field("provider", &self.provider);
formatter.field("audience", &self.audience);
formatter.field("source_identity", &self.source_identity);
formatter.finish()
}
}
/// See [`AssumeRoleWithWebIdentityOutput`](crate::output::AssumeRoleWithWebIdentityOutput)
pub mod assume_role_with_web_identity_output {
/// A builder for [`AssumeRoleWithWebIdentityOutput`](crate::output::AssumeRoleWithWebIdentityOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) credentials: std::option::Option<crate::model::Credentials>,
pub(crate) subject_from_web_identity_token: std::option::Option<std::string::String>,
pub(crate) assumed_role_user: std::option::Option<crate::model::AssumedRoleUser>,
pub(crate) packed_policy_size: std::option::Option<i32>,
pub(crate) provider: std::option::Option<std::string::String>,
pub(crate) audience: std::option::Option<std::string::String>,
pub(crate) source_identity: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The temporary security credentials, which include an access key ID, a secret access key,
/// and a security token.</p>
/// <note>
/// <p>The size of the security token that STS API operations return is not fixed. We
/// strongly recommend that you make no assumptions about the maximum size.</p>
/// </note>
pub fn credentials(mut self, input: crate::model::Credentials) -> Self {
self.credentials = Some(input);
self
}
pub fn set_credentials(
mut self,
input: std::option::Option<crate::model::Credentials>,
) -> Self {
self.credentials = input;
self
}
/// <p>The unique user identifier that is returned by the identity provider. This identifier is
/// associated with the <code>WebIdentityToken</code> that was submitted with the
/// <code>AssumeRoleWithWebIdentity</code> call. The identifier is typically unique to the
/// user and the application that acquired the <code>WebIdentityToken</code> (pairwise
/// identifier). For OpenID Connect ID tokens, this field contains the value returned by the
/// identity provider as the token's <code>sub</code> (Subject) claim. </p>
pub fn subject_from_web_identity_token(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.subject_from_web_identity_token = Some(input.into());
self
}
pub fn set_subject_from_web_identity_token(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.subject_from_web_identity_token = input;
self
}
/// <p>The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers that you
/// can use to refer to the resulting temporary security credentials. For example, you can
/// reference these credentials as a principal in a resource-based policy by using the ARN or
/// assumed role ID. The ARN and ID include the <code>RoleSessionName</code> that you specified
/// when you called <code>AssumeRole</code>. </p>
pub fn assumed_role_user(mut self, input: crate::model::AssumedRoleUser) -> Self {
self.assumed_role_user = Some(input);
self
}
pub fn set_assumed_role_user(
mut self,
input: std::option::Option<crate::model::AssumedRoleUser>,
) -> Self {
self.assumed_role_user = input;
self
}
/// <p>A percentage value that indicates the packed size of the session policies and session
/// tags combined passed in the request. The request fails if the packed size is greater than 100 percent,
/// which means the policies and tags exceeded the allowed space.</p>
pub fn packed_policy_size(mut self, input: i32) -> Self {
self.packed_policy_size = Some(input);
self
}
pub fn set_packed_policy_size(mut self, input: std::option::Option<i32>) -> Self {
self.packed_policy_size = input;
self
}
/// <p> The issuing authority of the web identity token presented. For OpenID Connect ID
/// tokens, this contains the value of the <code>iss</code> field. For OAuth 2.0 access tokens,
/// this contains the value of the <code>ProviderId</code> parameter that was passed in the
/// <code>AssumeRoleWithWebIdentity</code> request.</p>
pub fn provider(mut self, input: impl Into<std::string::String>) -> Self {
self.provider = Some(input.into());
self
}
pub fn set_provider(mut self, input: std::option::Option<std::string::String>) -> Self {
self.provider = input;
self
}
/// <p>The intended audience (also known as client ID) of the web identity token. This is
/// traditionally the client identifier issued to the application that requested the web
/// identity token.</p>
pub fn audience(mut self, input: impl Into<std::string::String>) -> Self {
self.audience = Some(input.into());
self
}
pub fn set_audience(mut self, input: std::option::Option<std::string::String>) -> Self {
self.audience = input;
self
}
/// <p>The value of the source identity that is returned in the JSON web token (JWT) from the
/// identity provider.</p>
/// <p>You can require users to set a source identity value when they assume a role. You do
/// this by using the <code>sts:SourceIdentity</code> condition key in a role trust policy.
/// That way, actions that are taken with the role are associated with that user. After the
/// source identity is set, the value cannot be changed. It is present in the request for all
/// actions that are taken by the role and persists across <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining">chained
/// role</a> sessions. You can configure your identity provider to use an attribute
/// associated with your users, like user name or email, as the source identity when calling
/// <code>AssumeRoleWithWebIdentity</code>. You do this by adding a claim to the JSON web
/// token. To learn more about OIDC tokens and claims, see <a href="https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html">Using Tokens with User Pools</a> in the <i>Amazon Cognito Developer Guide</i>.
/// For more information about using source identity, see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html">Monitor and control
/// actions taken with assumed roles</a> in the
/// <i>IAM User Guide</i>.</p>
/// <p>The regex used to validate this parameter is a string of characters
/// consisting of upper- and lower-case alphanumeric characters with no spaces. You can
/// also include underscores or any of the following characters: =,.@-</p>
pub fn source_identity(mut self, input: impl Into<std::string::String>) -> Self {
self.source_identity = Some(input.into());
self
}
pub fn set_source_identity(
mut self,
input: std::option::Option<std::string::String>,
) -> Self |
/// Consumes the builder and constructs a [`AssumeRoleWithWebIdentityOutput`](crate::output::AssumeRoleWithWebIdentityOutput)
pub fn build(self) -> crate::output::AssumeRoleWithWebIdentityOutput {
crate::output::AssumeRoleWithWebIdentityOutput {
credentials: self.credentials,
subject_from_web_identity_token: self.subject_from_web_identity_token,
assumed_role_user: self.assumed_role_user,
packed_policy_size: self.packed_policy_size,
provider: self.provider,
audience: self.audience,
source_identity: self.source_identity,
}
}
}
}
impl AssumeRoleWithWebIdentityOutput {
/// Creates a new builder-style object to manufacture [`AssumeRoleWithWebIdentityOutput`](crate::output::AssumeRoleWithWebIdentityOutput)
pub fn builder() -> crate::output::assume_role_with_web_identity_output::Builder {
crate::output::assume_role_with_web_identity_output::Builder::default()
}
}
/// <p>Contains the response to a successful <a>AssumeRoleWithSAML</a> request,
/// including temporary Amazon Web Services credentials that can be used to make Amazon Web Services requests. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AssumeRoleWithSamlOutput {
/// <p>The temporary security credentials, which include an access key ID, a secret access key,
/// and a security (or session) token.</p>
/// <note>
/// <p>The size of the security token that STS API operations return is not fixed. We
/// strongly recommend that you make no assumptions about the maximum size.</p>
/// </note>
pub credentials: std::option::Option<crate::model::Credentials>,
/// <p>The identifiers for the temporary security credentials that the operation
/// returns.</p>
pub assumed_role_user: std::option::Option<crate::model::AssumedRoleUser>,
/// <p>A percentage value that indicates the packed size of the session policies and session
/// tags combined passed in the request. The request fails if the packed size is greater than 100 percent,
/// which means the policies and tags exceeded the allowed space.</p>
pub packed_policy_size: std::option::Option<i32>,
/// <p>The value of the <code>NameID</code> element in the <code>Subject</code> element of the
/// SAML assertion.</p>
pub subject: std::option::Option<std::string::String>,
/// <p> The format of the name ID, as defined by the <code>Format</code> attribute in the
/// <code>NameID</code> element of the SAML assertion. Typical examples of the format are
/// <code>transient</code> or <code>persistent</code>. </p>
/// <p> If the format includes the prefix
/// <code>urn:oasis:names:tc:SAML:2.0:nameid-format</code>, that prefix is removed. For
/// example, <code>urn:oasis:names:tc:SAML:2.0:nameid-format:transient</code> is returned as
/// <code>transient</code>. If the format includes any other prefix, the format is returned
/// with no modifications.</p>
pub subject_type: std::option::Option<std::string::String>,
/// <p>The value of the <code>Issuer</code> element of the SAML assertion.</p>
pub issuer: std::option::Option<std::string::String>,
/// <p> The value of the <code>Recipient</code> attribute of the
/// <code>SubjectConfirmationData</code> element of the SAML assertion. </p>
pub audience: std::option::Option<std::string::String>,
/// <p>A hash value based on the concatenation of the following:</p>
/// <ul>
/// <li>
/// <p>The <code>Issuer</code> response value.</p>
/// </li>
/// <li>
/// <p>The Amazon Web Services account ID.</p>
/// </li>
/// <li>
/// <p>The friendly name (the last part of the ARN) of the SAML provider in IAM.</p>
/// </li>
/// </ul>
/// <p>The combination of <code>NameQualifier</code> and <code>Subject</code> can be used to
/// uniquely identify a federated user.</p>
/// <p>The following pseudocode shows how the hash value is calculated:</p>
/// <p>
/// <code>BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) )</code>
/// </p>
pub name_qualifier: std::option::Option<std::string::String>,
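    // A minimal sketch (an assumption, not part of this generated client) of
    // computing that hash with the `sha1` and `base64` crates:
    //
    //     use sha1::{Digest, Sha1};
    //     let digest = Sha1::digest(b"https://example.com/saml123456789012/MySAMLIdP");
    //     let name_qualifier = base64::encode(digest);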
/// <p>The value in the <code>SourceIdentity</code> attribute in the SAML assertion. </p>
/// <p>You can require users to set a source identity value when they assume a role. You do
/// this by using the <code>sts:SourceIdentity</code> condition key in a role trust policy.
/// That way, actions that are taken with the role are associated with that user. After the
/// source identity is set, the value cannot be changed. It is present in the request for all
/// actions that are taken by the role and persists across <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining">chained
/// role</a> sessions. You can configure your SAML identity provider to use an attribute
/// associated with your users, like user name or email, as the source identity when calling
/// <code>AssumeRoleWithSAML</code>. You do this by adding an attribute to the SAML
/// assertion. For more information about using source identity, see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html">Monitor and control
/// actions taken with assumed roles</a> in the
/// <i>IAM User Guide</i>.</p>
/// <p>The regex used to validate this parameter is a string of characters
/// consisting of upper- and lower-case alphanumeric characters with no spaces. You can
/// also include underscores or any of the following characters: =,.@-</p>
pub source_identity: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for AssumeRoleWithSamlOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("AssumeRoleWithSamlOutput");
formatter.field("credentials", &self.credentials);
formatter.field("assumed_role_user", &self.assumed_role_user);
formatter.field("packed_policy_size", &self.packed_policy_size);
formatter.field("subject", &self.subject);
formatter.field("subject_type", &self.subject_type);
formatter.field("issuer", &self.issuer);
formatter.field("audience", &self.audience);
formatter.field("name_qualifier", &self.name_qualifier);
formatter.field("source_identity", &self.source_identity);
formatter.finish()
}
}
/// See [`AssumeRoleWithSamlOutput`](crate::output::AssumeRoleWithSamlOutput)
pub mod assume_role_with_saml_output {
/// A builder for [`AssumeRoleWithSamlOutput`](crate::output::AssumeRoleWithSamlOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) credentials: std::option::Option<crate::model::Credentials>,
pub(crate) assumed_role_user: std::option::Option<crate::model::AssumedRoleUser>,
pub(crate) packed_policy_size: std::option::Option<i32>,
pub(crate) subject: std::option::Option<std::string::String>,
pub(crate) subject_type: std::option::Option<std::string::String>,
pub(crate) issuer: std::option::Option<std::string::String>,
pub(crate) audience: std::option::Option<std::string::String>,
pub(crate) name_qualifier: std::option::Option<std::string::String>,
pub(crate) source_identity: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The temporary security credentials, which include an access key ID, a secret access key,
/// and a security (or session) token.</p>
/// <note>
/// <p>The size of the security token that STS API operations return is not fixed. We
/// strongly recommend that you make no assumptions about the maximum size.</p>
/// </note>
pub fn credentials(mut self, input: crate::model::Credentials) -> Self {
self.credentials = Some(input);
self
}
pub fn set_credentials(
mut self,
input: std::option::Option<crate::model::Credentials>,
) -> Self {
self.credentials = input;
self
}
/// <p>The identifiers for the temporary security credentials that the operation
/// returns.</p>
pub fn assumed_role_user(mut self, input: crate::model::AssumedRoleUser) -> Self {
self.assumed_role_user = Some(input);
self
}
pub fn set_assumed_role_user(
mut self,
input: std::option::Option<crate::model::AssumedRoleUser>,
) -> Self {
self.assumed_role_user = input;
self
}
/// <p>A percentage value that indicates the packed size of the session policies and session
/// tags combined passed in the request. The request fails if the packed size is greater than 100 percent,
/// which means the policies and tags exceeded the allowed space.</p>
pub fn packed_policy_size(mut self, input: i32) -> Self {
self.packed_policy_size = Some(input);
self
}
pub fn set_packed_policy_size(mut self, input: std::option::Option<i32>) -> Self {
self.packed_policy_size = input;
self
}
/// <p>The value of the <code>NameID</code> element in the <code>Subject</code> element of the
/// SAML assertion.</p>
pub fn subject(mut self, input: impl Into<std::string::String>) -> Self {
self.subject = Some(input.into());
self
}
pub fn set_subject(mut self, input: std::option::Option<std::string::String>) -> Self {
self.subject = input;
self
}
/// <p> The format of the name ID, as defined by the <code>Format</code> attribute in the
/// <code>NameID</code> element of the SAML assertion. Typical examples of the format are
/// <code>transient</code> or <code>persistent</code>. </p>
/// <p> If the format includes the prefix
/// <code>urn:oasis:names:tc:SAML:2.0:nameid-format</code>, that prefix is removed. For
/// example, <code>urn:oasis:names:tc:SAML:2.0:nameid-format:transient</code> is returned as
/// <code>transient</code>. If the format includes any other prefix, the format is returned
/// with no modifications.</p>
pub fn subject_type(mut self, input: impl Into<std::string::String>) -> Self {
self.subject_type = Some(input.into());
self
}
pub fn set_subject_type(mut self, input: std::option::Option<std::string::String>) -> Self {
self.subject_type = input;
self
}
/// <p>The value of the <code>Issuer</code> element of the SAML assertion.</p>
pub fn issuer(mut self, input: impl Into<std::string::String>) -> Self {
self.issuer = Some(input.into());
self
}
pub fn set_issuer(mut self, input: std::option::Option<std::string::String>) -> Self {
self.issuer = input;
self
}
/// <p> The value of the <code>Recipient</code> attribute of the
/// <code>SubjectConfirmationData</code> element of the SAML assertion. </p>
pub fn audience(mut self, input: impl Into<std::string::String>) -> Self {
self.audience = Some(input.into());
self
}
pub fn set_audience(mut self, input: std::option::Option<std::string::String>) -> Self {
self.audience = input;
self
}
/// <p>A hash value based on the concatenation of the following:</p>
/// <ul>
/// <li>
/// <p>The <code>Issuer</code> response value.</p>
/// </li>
/// <li>
/// <p>The Amazon Web Services account ID.</p>
/// </li>
/// <li>
/// <p>The friendly name (the last part of the ARN) of the SAML provider in IAM.</p>
/// </li>
/// </ul>
/// <p>The combination of <code>NameQualifier</code> and <code>Subject</code> can be used to
/// uniquely identify a federated user.</p>
/// <p>The following pseudocode shows how the hash value is calculated:</p>
/// <p>
/// <code>BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) )</code>
/// </p>
pub fn name_qualifier(mut self, input: impl Into<std::string::String>) -> Self {
self.name_qualifier = Some(input.into());
self
}
pub fn set_name_qualifier(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.name_qualifier = input;
self
}
/// <p>The value in the <code>SourceIdentity</code> attribute in the SAML assertion. </p>
/// <p>You can require users to set a source identity value when they assume a role. You do
/// this by using the <code>sts:SourceIdentity</code> condition key in a role trust policy.
/// That way, actions that are taken with the role are associated with that user. After the
/// source identity is set, the value cannot be changed. It is present in the request for all
/// actions that are taken by the role and persists across <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining">chained
/// role</a> sessions. You can configure your SAML identity provider to use an attribute
/// associated with your users, like user name or email, as the source identity when calling
/// <code>AssumeRoleWithSAML</code>. You do this by adding an attribute to the SAML
/// assertion. For more information about using source identity, see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html">Monitor and control
/// actions taken with assumed roles</a> in the
/// <i>IAM User Guide</i>.</p>
/// <p>The regex used to validate this parameter is a string of characters
/// consisting of upper- and lower-case alphanumeric characters with no spaces. You can
/// also include underscores or any of the following characters: =,.@-</p>
pub fn source_identity(mut self, input: impl Into<std::string::String>) -> Self {
self.source_identity = Some(input.into());
self
}
pub fn set_source_identity(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.source_identity = input;
self
}
/// Consumes the builder and constructs a [`AssumeRoleWithSamlOutput`](crate::output::AssumeRoleWithSamlOutput)
pub fn build(self) -> crate::output::AssumeRoleWithSamlOutput {
crate::output::AssumeRoleWithSamlOutput {
credentials: self.credentials,
assumed_role_user: self.assumed_role_user,
packed_policy_size: self.packed_policy_size,
subject: self.subject,
subject_type: self.subject_type,
issuer: self.issuer,
audience: self.audience,
name_qualifier: self.name_qualifier,
source_identity: self.source_identity,
}
}
}
}
impl AssumeRoleWithSamlOutput {
/// Creates a new builder-style object to manufacture [`AssumeRoleWithSamlOutput`](crate::output::AssumeRoleWithSamlOutput)
pub fn builder() -> crate::output::assume_role_with_saml_output::Builder {
crate::output::assume_role_with_saml_output::Builder::default()
}
}
/// <p>Contains the response to a successful <a>AssumeRole</a> request, including
/// temporary Amazon Web Services credentials that can be used to make Amazon Web Services requests. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AssumeRoleOutput {
/// <p>The temporary security credentials, which include an access key ID, a secret access key,
/// and a security (or session) token.</p>
/// <note>
/// <p>The size of the security token that STS API operations return is not fixed. We
/// strongly recommend that you make no assumptions about the maximum size.</p>
/// </note>
pub credentials: std::option::Option<crate::model::Credentials>,
/// <p>The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers that you
/// can use to refer to the resulting temporary security credentials. For example, you can
/// reference these credentials as a principal in a resource-based policy by using the ARN or
/// assumed role ID. The ARN and ID include the <code>RoleSessionName</code> that you specified
/// when you called <code>AssumeRole</code>. </p>
pub assumed_role_user: std::option::Option<crate::model::AssumedRoleUser>,
/// <p>A percentage value that indicates the packed size of the session policies and session
/// tags combined passed in the request. The request fails if the packed size is greater than 100 percent,
/// which means the policies and tags exceeded the allowed space.</p>
pub packed_policy_size: std::option::Option<i32>,
/// <p>The source identity specified by the principal that is calling the
/// <code>AssumeRole</code> operation.</p>
/// <p>You can require users to specify a source identity when they assume a role. You do this
/// by using the <code>sts:SourceIdentity</code> condition key in a role trust policy. You can
/// use source identity information in CloudTrail logs to determine who took actions with a role.
/// You can use the <code>aws:SourceIdentity</code> condition key to further control access to
/// Amazon Web Services resources based on the value of source identity. For more information about using
/// source identity, see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html">Monitor and control
/// actions taken with assumed roles</a> in the
/// <i>IAM User Guide</i>.</p>
/// <p>The regex used to validate this parameter is a string of characters consisting of upper-
/// and lower-case alphanumeric characters with no spaces. You can also include underscores or
/// any of the following characters: =,.@-</p>
pub source_identity: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for AssumeRoleOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("AssumeRoleOutput");
formatter.field("credentials", &self.credentials);
formatter.field("assumed_role_user", &self.assumed_role_user);
formatter.field("packed_policy_size", &self.packed_policy_size);
formatter.field("source_identity", &self.source_identity);
formatter.finish()
}
}
/// See [`AssumeRoleOutput`](crate::output::AssumeRoleOutput)
pub mod assume_role_output {
/// A builder for [`AssumeRoleOutput`](crate::output::AssumeRoleOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) credentials: std::option::Option<crate::model::Credentials>,
pub(crate) assumed_role_user: std::option::Option<crate::model::AssumedRoleUser>,
pub(crate) packed_policy_size: std::option::Option<i32>,
pub(crate) source_identity: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The temporary security credentials, which include an access key ID, a secret access key,
/// and a security (or session) token.</p>
/// <note>
/// <p>The size of the security token that STS API operations return is not fixed. We
/// strongly recommend that you make no assumptions about the maximum size.</p>
/// </note>
pub fn credentials(mut self, input: crate::model::Credentials) -> Self {
self.credentials = Some(input);
self
}
pub fn set_credentials(
mut self,
input: std::option::Option<crate::model::Credentials>,
) -> Self {
self.credentials = input;
self
}
/// <p>The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers that you
/// can use to refer to the resulting temporary security credentials. For example, you can
/// reference these credentials as a principal in a resource-based policy by using the ARN or
/// assumed role ID. The ARN and ID include the <code>RoleSessionName</code> that you specified
/// when you called <code>AssumeRole</code>. </p>
pub fn assumed_role_user(mut self, input: crate::model::AssumedRoleUser) -> Self {
self.assumed_role_user = Some(input);
self
}
pub fn set_assumed_role_user(
mut self,
input: std::option::Option<crate::model::AssumedRoleUser>,
) -> Self {
self.assumed_role_user = input;
self
}
/// <p>A percentage value that indicates the packed size of the session policies and session
/// tags combined passed in the request. The request fails if the packed size is greater than 100 percent,
/// which means the policies and tags exceeded the allowed space.</p>
pub fn packed_policy_size(mut self, input: i32) -> Self {
self.packed_policy_size = Some(input);
self
}
pub fn set_packed_policy_size(mut self, input: std::option::Option<i32>) -> Self {
self.packed_policy_size = input;
self
}
/// <p>The source identity specified by the principal that is calling the
/// <code>AssumeRole</code> operation.</p>
/// <p>You can require users to specify a source identity when they assume a role. You do this
/// by using the <code>sts:SourceIdentity</code> condition key in a role trust policy. You can
/// use source identity information in CloudTrail logs to determine who took actions with a role.
/// You can use the <code>aws:SourceIdentity</code> condition key to further control access to
/// Amazon Web Services resources based on the value of source identity. For more information about using
/// source identity, see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html">Monitor and control
/// actions taken with assumed roles</a> in the
/// <i>IAM User Guide</i>.</p>
/// <p>The regex used to validate this parameter is a string of characters consisting of upper-
/// and lower-case alphanumeric characters with no spaces. You can also include underscores or
/// any of the following characters: =,.@-</p>
pub fn source_identity(mut self, input: impl Into<std::string::String>) -> Self {
self.source_identity = Some(input.into());
self
}
pub fn set_source_identity(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.source_identity = input;
self
}
/// Consumes the builder and constructs a [`AssumeRoleOutput`](crate::output::AssumeRoleOutput)
pub fn build(self) -> crate::output::AssumeRoleOutput {
crate::output::AssumeRoleOutput {
credentials: self.credentials,
assumed_role_user: self.assumed_role_user,
packed_policy_size: self.packed_policy_size,
source_identity: self.source_identity,
}
}
}
}
impl AssumeRoleOutput {
/// Creates a new builder-style object to manufacture [`AssumeRoleOutput`](crate::output::AssumeRoleOutput)
pub fn builder() -> crate::output::assume_role_output::Builder {
crate::output::assume_role_output::Builder::default()
}
}
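// Example (sketch, not from the generated docs): assembling an output value by
// hand, e.g. in tests. The `Credentials` builder is assumed to follow the same
// generated pattern as this one.
//
// let out = crate::output::AssumeRoleOutput::builder()
//     .packed_policy_size(12)
//     .source_identity("my-user")
//     .build();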
app.js | import React, { PureComponent } from 'react';
import PropTypes from 'prop-types';
import autobind from 'autobind-decorator';
import fs from 'fs';
import { clipboard, ipcRenderer, remote } from 'electron';
import { parse as urlParse } from 'url';
import HTTPSnippet from 'httpsnippet';
import ReactDOM from 'react-dom';
import { connect } from 'react-redux';
import { bindActionCreators } from 'redux';
import Wrapper from '../components/wrapper';
import WorkspaceEnvironmentsEditModal from '../components/modals/workspace-environments-edit-modal';
import Toast from '../components/toast';
import CookiesModal from '../components/modals/cookies-modal';
import RequestSwitcherModal from '../components/modals/request-switcher-modal';
import SettingsModal, { TAB_INDEX_SHORTCUTS } from '../components/modals/settings-modal';
import {
COLLAPSE_SIDEBAR_REMS,
DEFAULT_PANE_HEIGHT,
DEFAULT_PANE_WIDTH,
DEFAULT_SIDEBAR_WIDTH,
MAX_PANE_HEIGHT,
MAX_PANE_WIDTH,
MAX_SIDEBAR_REMS,
MIN_PANE_HEIGHT,
MIN_PANE_WIDTH,
MIN_SIDEBAR_REMS,
PREVIEW_MODE_SOURCE,
} from '../../common/constants';
import * as globalActions from '../redux/modules/global';
import * as db from '../../common/database';
import * as models from '../../models';
import {
selectActiveCookieJar,
selectActiveOAuth2Token,
selectActiveRequest,
selectActiveRequestMeta,
selectActiveRequestResponses,
selectActiveResponse,
selectActiveWorkspace,
selectActiveWorkspaceClientCertificates,
selectActiveWorkspaceMeta,
selectEntitiesLists,
selectSidebarChildren,
selectSyncItems,
selectUnseenWorkspaces,
selectWorkspaceRequestsAndRequestGroups,
} from '../redux/selectors';
import RequestCreateModal from '../components/modals/request-create-modal';
import GenerateCodeModal from '../components/modals/generate-code-modal';
import WorkspaceSettingsModal from '../components/modals/workspace-settings-modal';
import RequestSettingsModal from '../components/modals/request-settings-modal';
import RequestRenderErrorModal from '../components/modals/request-render-error-modal';
import * as network from '../../network/network';
import { debounce, getContentDispositionHeader, getDataDirectory } from '../../common/misc';
import * as mime from 'mime-types';
import * as path from 'path';
import * as render from '../../common/render';
import { getKeys } from '../../templating/utils';
import { showAlert, showModal, showPrompt } from '../components/modals/index';
import { exportHarRequest } from '../../common/har';
import { hotKeyRefs } from '../../common/hotkeys';
import { executeHotKey } from '../../common/hotkeys-listener';
import KeydownBinder from '../components/keydown-binder';
import ErrorBoundary from '../components/error-boundary';
import * as plugins from '../../plugins';
import * as templating from '../../templating/index';
import AskModal from '../components/modals/ask-modal';
import { updateMimeType } from '../../models/request';
import MoveRequestGroupModal from '../components/modals/move-request-group-modal';
import * as themes from '../../plugins/misc';
import ExportRequestsModal from '../components/modals/export-requests-modal';
import FileSystemDriver from '../../sync/store/drivers/file-system-driver';
import VCS from '../../sync/vcs';
import SyncMergeModal from '../components/modals/sync-merge-modal';
@autobind
class App extends PureComponent {
constructor(props) {
super(props);
this.state = {
showDragOverlay: false,
draggingSidebar: false,
draggingPaneHorizontal: false,
draggingPaneVertical: false,
sidebarWidth: props.sidebarWidth || DEFAULT_SIDEBAR_WIDTH,
paneWidth: props.paneWidth || DEFAULT_PANE_WIDTH,
paneHeight: props.paneHeight || DEFAULT_PANE_HEIGHT,
isVariableUncovered: props.isVariableUncovered || false,
vcs: null,
forceRefreshCounter: 0,
forceRefreshHeaderCounter: 0,
isMigratingChildren: false,
};
this._getRenderContextPromiseCache = {};
this._savePaneWidth = debounce(paneWidth => this._updateActiveWorkspaceMeta({ paneWidth }));
this._savePaneHeight = debounce(paneHeight => this._updateActiveWorkspaceMeta({ paneHeight }));
this._saveSidebarWidth = debounce(sidebarWidth =>
this._updateActiveWorkspaceMeta({ sidebarWidth }),
);
this._globalKeyMap = null;
}
_setGlobalKeyMap() {
this._globalKeyMap = [
[
hotKeyRefs.PREFERENCES_SHOW_GENERAL,
() => {
showModal(SettingsModal);
},
],
[
hotKeyRefs.PREFERENCES_SHOW_KEYBOARD_SHORTCUTS,
() => {
showModal(SettingsModal, TAB_INDEX_SHORTCUTS);
},
],
[
hotKeyRefs.SHOW_RECENT_REQUESTS,
() => {
showModal(RequestSwitcherModal, {
disableInput: true,
maxRequests: 10,
maxWorkspaces: 0,
selectOnKeyup: true,
title: 'Recent Requests',
hideNeverActiveRequests: true,
// Add an open delay so the dialog won't show for quick presses
openDelay: 150,
});
},
],
[
hotKeyRefs.WORKSPACE_SHOW_SETTINGS,
() => {
const { activeWorkspace } = this.props;
showModal(WorkspaceSettingsModal, activeWorkspace);
},
],
[
hotKeyRefs.REQUEST_SHOW_SETTINGS,
() => {
if (this.props.activeRequest) {
showModal(RequestSettingsModal, {
request: this.props.activeRequest,
});
}
},
],
[
hotKeyRefs.REQUEST_QUICK_SWITCH,
() => {
showModal(RequestSwitcherModal);
},
],
[hotKeyRefs.REQUEST_SEND, this._handleSendShortcut],
[
hotKeyRefs.ENVIRONMENT_SHOW_EDITOR,
() => {
const { activeWorkspace } = this.props;
showModal(WorkspaceEnvironmentsEditModal, activeWorkspace);
},
],
[
hotKeyRefs.SHOW_COOKIES_EDITOR,
() => {
const { activeWorkspace } = this.props;
showModal(CookiesModal, activeWorkspace);
},
],
[
hotKeyRefs.REQUEST_QUICK_CREATE,
async () => {
const { activeRequest, activeWorkspace } = this.props;
const parentId = activeRequest ? activeRequest.parentId : activeWorkspace._id;
const request = await models.request.create({ parentId, name: 'New Request' });
await this._handleSetActiveRequest(request._id);
},
],
[
hotKeyRefs.REQUEST_SHOW_CREATE,
() => {
const { activeRequest, activeWorkspace } = this.props;
const parentId = activeRequest ? activeRequest.parentId : activeWorkspace._id;
this._requestCreate(parentId);
},
],
[
hotKeyRefs.REQUEST_SHOW_DELETE,
() => {
const { activeRequest } = this.props;
if (!activeRequest) {
return;
}
showModal(AskModal, {
title: 'Delete Request?',
message: `Really delete ${activeRequest.name}?`,
onDone: confirmed => {
if (!confirmed) {
return;
}
models.request.remove(activeRequest);
},
});
},
],
[
hotKeyRefs.REQUEST_SHOW_CREATE_FOLDER,
() => {
const { activeRequest, activeWorkspace } = this.props;
const parentId = activeRequest ? activeRequest.parentId : activeWorkspace._id;
this._requestGroupCreate(parentId);
},
],
[
hotKeyRefs.REQUEST_SHOW_GENERATE_CODE_EDITOR,
async () => {
showModal(GenerateCodeModal, this.props.activeRequest);
},
],
[
hotKeyRefs.REQUEST_SHOW_DUPLICATE,
async () => {
await this._requestDuplicate(this.props.activeRequest);
},
],
[
hotKeyRefs.REQUEST_TOGGLE_PIN,
async () => {
if (!this.props.activeRequest) {
return;
}
const metas = Object.values(this.props.entities.requestMetas).find(
m => m.parentId === this.props.activeRequest._id,
);
await this._handleSetRequestPinned(this.props.activeRequest, !(metas && metas.pinned));
},
],
[hotKeyRefs.PLUGIN_RELOAD, this._handleReloadPlugins],
[
hotKeyRefs.ENVIRONMENT_UNCOVER_VARIABLES,
async () => {
await this._updateIsVariableUncovered();
},
],
[
hotKeyRefs.SIDEBAR_TOGGLE,
() => {
this._handleToggleSidebar();
},
],
];
}
async _handleSendShortcut() {
const { activeRequest, activeEnvironment } = this.props;
await this._handleSendRequestWithEnvironment(
activeRequest ? activeRequest._id : 'n/a',
activeEnvironment ? activeEnvironment._id : 'n/a',
);
}
_setRequestPaneRef(n) {
this._requestPane = n;
}
_setResponsePaneRef(n) {
this._responsePane = n;
}
_setSidebarRef(n) {
this._sidebar = n;
}
_requestGroupCreate(parentId) {
showPrompt({
title: 'New Folder',
defaultValue: 'My Folder',
submitName: 'Create',
label: 'Name',
selectText: true,
onComplete: async name => {
const requestGroup = await models.requestGroup.create({
parentId,
name,
});
await models.requestGroupMeta.create({
parentId: requestGroup._id,
collapsed: false,
});
},
});
}
_requestCreate(parentId) {
showModal(RequestCreateModal, {
parentId,
onComplete: request => {
this._handleSetActiveRequest(request._id);
},
});
}
static async _requestGroupDuplicate(requestGroup) {
showPrompt({
title: 'Duplicate Folder',
defaultValue: requestGroup.name,
submitName: 'Create',
label: 'New Name',
selectText: true,
onComplete: async name => {
await models.requestGroup.duplicate(requestGroup, { name });
},
});
}
static async _requestGroupMove(requestGroup) {
showModal(MoveRequestGroupModal, { requestGroup });
}
_requestDuplicate(request) {
if (!request) {
return;
}
showPrompt({
title: 'Duplicate Request',
defaultValue: request.name,
submitName: 'Create',
label: 'New Name',
selectText: true,
onComplete: async name => {
const newRequest = await models.request.duplicate(request, { name });
await this._handleSetActiveRequest(newRequest._id);
},
});
}
_workspaceDuplicate(callback) {
const workspace = this.props.activeWorkspace;
showPrompt({
title: 'Duplicate Workspace',
defaultValue: workspace.name,
submitName: 'Create',
selectText: true,
label: 'New Name',
onComplete: async name => {
const newWorkspace = await db.duplicate(workspace, { name });
await this.props.handleSetActiveWorkspace(newWorkspace._id);
callback();
},
});
}
async _fetchRenderContext() {
const { activeEnvironment, activeRequest, activeWorkspace } = this.props;
const environmentId = activeEnvironment ? activeEnvironment._id : null;
const ancestors = await db.withAncestors(activeRequest || activeWorkspace, [
models.request.type,
models.requestGroup.type,
models.workspace.type,
]);
return render.getRenderContext(activeRequest, environmentId, ancestors);
}
async _handleGetRenderContext() {
const context = await this._fetchRenderContext();
const keys = getKeys(context);
return { context, keys };
}
/**
* Heavily optimized render function
*
* @param text - template to render
* @param contextCacheKey - if rendering multiple times in parallel, set this
* @returns {Promise}
* @private
*/
async _handleRenderText(text, contextCacheKey = null) {
if (!contextCacheKey || !this._getRenderContextPromiseCache[contextCacheKey]) {
// NOTE: We're caching promises here to avoid race conditions
this._getRenderContextPromiseCache[contextCacheKey] = this._fetchRenderContext();
}
// Set timeout to delete the key eventually
setTimeout(() => delete this._getRenderContextPromiseCache[contextCacheKey], 5000);
const context = await this._getRenderContextPromiseCache[contextCacheKey];
return render.render(text, context);
}
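  // Example (sketch, hypothetical call site): parallel renders of one request
  // can share a single context fetch by passing the same cache key:
  //   const key = `render-${request._id}`;
  //   await Promise.all(parts.map(p => this._handleRenderText(p, key)));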
_handleGenerateCodeForActiveRequest() {
App._handleGenerateCode(this.props.activeRequest);
}
static _handleGenerateCode(request) {
showModal(GenerateCodeModal, request);
}
async _handleCopyAsCurl(request) {
const { activeEnvironment } = this.props;
const environmentId = activeEnvironment ? activeEnvironment._id : 'n/a';
const har = await exportHarRequest(request._id, environmentId);
const snippet = new HTTPSnippet(har);
const cmd = snippet.convert('shell', 'curl');
clipboard.writeText(cmd);
}
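  // e.g. a simple GET typically converts to something like:
  //   curl --request GET --url https://example.com/api
  // (exact flags depend on the httpsnippet version in use).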
static async _updateRequestGroupMetaByParentId(requestGroupId, patch) {
const requestGroupMeta = await models.requestGroupMeta.getByParentId(requestGroupId);
if (requestGroupMeta) {
await models.requestGroupMeta.update(requestGroupMeta, patch);
} else {
const newPatch = Object.assign({ parentId: requestGroupId }, patch);
await models.requestGroupMeta.create(newPatch);
}
}
async _updateActiveWorkspaceMeta(patch) {
const workspaceId = this.props.activeWorkspace._id;
const workspaceMeta = await models.workspaceMeta.getOrCreateByParentId(workspaceId);
if (workspaceMeta) {
return models.workspaceMeta.update(workspaceMeta, patch);
} else {
const newPatch = Object.assign({ parentId: workspaceId }, patch);
return models.workspaceMeta.create(newPatch);
}
}
static async _updateRequestMetaByParentId(requestId, patch) {
const requestMeta = await models.requestMeta.getByParentId(requestId);
if (requestMeta) {
return models.requestMeta.update(requestMeta, patch);
} else {
const newPatch = Object.assign({ parentId: requestId }, patch);
return models.requestMeta.create(newPatch);
}
}
_updateIsVariableUncovered() {
this.setState({ isVariableUncovered: !this.state.isVariableUncovered });
}
_handleSetPaneWidth(paneWidth) {
this.setState({ paneWidth });
this._savePaneWidth(paneWidth);
}
_handleSetPaneHeight(paneHeight) {
this.setState({ paneHeight });
this._savePaneHeight(paneHeight);
}
async _handleSetActiveRequest(activeRequestId) {
await this._updateActiveWorkspaceMeta({ activeRequestId });
await App._updateRequestMetaByParentId(activeRequestId, { lastActive: Date.now() });
}
async _handleSetActiveEnvironment(activeEnvironmentId) {
await this._updateActiveWorkspaceMeta({ activeEnvironmentId });
// Give it time to update and re-render
setTimeout(() => {
this._wrapper && this._wrapper._forceRequestPaneRefresh();
}, 300);
}
_handleSetSidebarWidth(sidebarWidth) {
this.setState({ sidebarWidth });
this._saveSidebarWidth(sidebarWidth);
}
async _handleSetSidebarHidden(sidebarHidden) {
await this._updateActiveWorkspaceMeta({ sidebarHidden });
}
async _handleSetSidebarFilter(sidebarFilter) {
await this._updateActiveWorkspaceMeta({ sidebarFilter });
}
_handleSetRequestGroupCollapsed(requestGroupId, collapsed) {
App._updateRequestGroupMetaByParentId(requestGroupId, { collapsed });
}
async _handleSetRequestPinned(request, pinned) {
App._updateRequestMetaByParentId(request._id, { pinned });
}
_handleSetResponsePreviewMode(requestId, previewMode) {
App._updateRequestMetaByParentId(requestId, { previewMode });
}
_handleUpdateDownloadPath(requestId, downloadPath) {
App._updateRequestMetaByParentId(requestId, { downloadPath });
}
async _handleSetResponseFilter(requestId, responseFilter) {
await App._updateRequestMetaByParentId(requestId, { responseFilter });
clearTimeout(this._responseFilterHistorySaveTimeout);
this._responseFilterHistorySaveTimeout = setTimeout(async () => {
const meta = await models.requestMeta.getByParentId(requestId);
const responseFilterHistory = meta.responseFilterHistory.slice(0, 10);
// Already in history?
if (responseFilterHistory.includes(responseFilter)) {
return;
}
// Blank?
if (!responseFilter) {
return;
}
responseFilterHistory.unshift(responseFilter);
await App._updateRequestMetaByParentId(requestId, {
responseFilterHistory,
});
}, 2000);
}
async _handleUpdateRequestMimeType(mimeType) {
if (!this.props.activeRequest) {
console.warn('Tried to update request mime-type when no active request');
return null;
}
const requestMeta = await models.requestMeta.getOrCreateByParentId(
this.props.activeRequest._id,
);
const savedBody = requestMeta.savedRequestBody;
const saveValue =
typeof mimeType !== 'string' // Switched to No body
? this.props.activeRequest.body
: {}; // Clear saved value in requestMeta
await models.requestMeta.update(requestMeta, {
savedRequestBody: saveValue,
});
const newRequest = await updateMimeType(this.props.activeRequest, mimeType, false, savedBody);
    // Force it to update, because other editor components (header editor)
    // need to change. Wait a bit so the next render can finish first.
setTimeout(() => {
this.setState({ forceRefreshHeaderCounter: this.state.forceRefreshHeaderCounter + 1 });
}, 500);
return newRequest;
}
async _getDownloadLocation() {
return new Promise(resolve => {
const options = {
title: 'Select Download Location',
buttonLabel: 'Send and Save',
defaultPath: window.localStorage.getItem('insomnia.sendAndDownloadLocation'),
};
remote.dialog.showSaveDialog(options, filename => {
window.localStorage.setItem('insomnia.sendAndDownloadLocation', filename);
resolve(filename);
});
});
}
async _handleSendAndDownloadRequestWithEnvironment(requestId, environmentId, dir) {
const { settings, handleStartLoading, handleStopLoading } = this.props;
const request = await models.request.getById(requestId);
if (!request) {
return;
}
// NOTE: Since request is by far the most popular event, we will throttle
// it so that we only track it if the request has changed since the last one
const key = request._id;
if (this._sendRequestTrackingKey !== key) {
this._sendRequestTrackingKey = key;
}
// Start loading
handleStartLoading(requestId);
try {
const responsePatch = await network.send(requestId, environmentId);
const headers = responsePatch.headers || [];
const header = getContentDispositionHeader(headers);
const nameFromHeader = header ? header.value : null;
if (
responsePatch.bodyPath &&
responsePatch.statusCode >= 200 &&
responsePatch.statusCode < 300
) {
const extension = mime.extension(responsePatch.contentType) || 'unknown';
const name =
nameFromHeader || `${request.name.replace(/\s/g, '-').toLowerCase()}.${extension}`;
let filename;
if (dir) {
filename = path.join(dir, name);
} else {
filename = await this._getDownloadLocation();
}
const to = fs.createWriteStream(filename);
const readStream = models.response.getBodyStream(responsePatch);
if (!readStream) {
return;
}
readStream.pipe(to);
readStream.on('end', async () => {
responsePatch.error = `Saved to ${filename}`;
await models.response.create(responsePatch, settings.maxHistoryResponses);
});
readStream.on('error', async err => {
console.warn('Failed to download request after sending', responsePatch.bodyPath, err);
await models.response.create(responsePatch, settings.maxHistoryResponses);
});
} else {
      // Save the bad responses too, so failures are still shown
await models.response.create(responsePatch, settings.maxHistoryResponses);
}
} catch (err) {
showAlert({
title: 'Unexpected Request Failure',
message: (
<div>
<p>The request failed due to an unhandled error:</p>
<code className="wide selectable">
<pre>{err.message}</pre>
</code>
</div>
),
});
}
// Unset active response because we just made a new one
await App._updateRequestMetaByParentId(requestId, {
activeResponseId: null,
});
// Stop loading
handleStopLoading(requestId);
}
async _handleSendRequestWithEnvironment(requestId, environmentId) {
const { handleStartLoading, handleStopLoading, settings } = this.props;
const request = await models.request.getById(requestId);
if (!request) {
return;
}
// NOTE: Since request is by far the most popular event, we will throttle
    // it so that we only track it if the request has changed since the last one
const key = `${request._id}::${request.modified}`;
if (this._sendRequestTrackingKey !== key) {
this._sendRequestTrackingKey = key;
}
handleStartLoading(requestId);
try {
const responsePatch = await network.send(requestId, environmentId);
await models.response.create(responsePatch, settings.maxHistoryResponses);
} catch (err) {
if (err.type === 'render') {
showModal(RequestRenderErrorModal, { request, error: err });
} else {
showAlert({
title: 'Unexpected Request Failure',
message: (
<div>
<p>The request failed due to an unhandled error:</p>
<code className="wide selectable">
<pre>{err.message}</pre>
</code>
</div>
),
});
}
}
// Unset active response because we just made a new one
await App._updateRequestMetaByParentId(requestId, {
activeResponseId: null,
});
// Stop loading
handleStopLoading(requestId);
}
async _handleSetActiveResponse(requestId, activeResponse = null) {
const { activeEnvironment } = this.props;
const activeResponseId = activeResponse ? activeResponse._id : null;
await App._updateRequestMetaByParentId(requestId, { activeResponseId });
let response;
if (activeResponseId) {
response = await models.response.getById(activeResponseId);
} else {
const environmentId = activeEnvironment ? activeEnvironment._id : null;
response = await models.response.getLatestForRequest(requestId, environmentId);
}
const requestVersionId = response ? response.requestVersionId : 'n/a';
const request = await models.requestVersion.restore(requestVersionId);
if (request) {
// Refresh app to reflect changes. Using timeout because we need to
// wait for the request update to propagate.
setTimeout(() => this._wrapper._forceRequestPaneRefresh(), 500);
} else {
// Couldn't restore request. That's okay
}
}
_requestCreateForWorkspace() {
this._requestCreate(this.props.activeWorkspace._id);
}
_startDragSidebar() {
this.setState({ draggingSidebar: true });
}
_resetDragSidebar() {
    // TODO: Remove the setTimeout; it's needed so a double-click doesn't also trigger a drag
setTimeout(() => this._handleSetSidebarWidth(DEFAULT_SIDEBAR_WIDTH), 50);
}
_startDragPaneHorizontal() {
this.setState({ draggingPaneHorizontal: true });
}
_startDragPaneVertical() {
this.setState({ draggingPaneVertical: true });
}
_resetDragPaneHorizontal() {
    // TODO: Remove the setTimeout; it's needed so a double-click doesn't also trigger a drag
setTimeout(() => this._handleSetPaneWidth(DEFAULT_PANE_WIDTH), 50);
}
_resetDragPaneVertical() {
    // TODO: Remove the setTimeout; it's needed so a double-click doesn't also trigger a drag
setTimeout(() => this._handleSetPaneHeight(DEFAULT_PANE_HEIGHT), 50);
}
_handleMouseMove(e) {
if (this.state.draggingPaneHorizontal) {
// Only pop the overlay after we've moved it a bit (so we don't block doubleclick);
const distance = this.props.paneWidth - this.state.paneWidth;
if (!this.state.showDragOverlay && Math.abs(distance) > 0.02 /* % */) {
this.setState({ showDragOverlay: true });
}
const requestPane = ReactDOM.findDOMNode(this._requestPane);
const responsePane = ReactDOM.findDOMNode(this._responsePane);
const requestPaneWidth = requestPane.offsetWidth;
const responsePaneWidth = responsePane.offsetWidth;
const pixelOffset = e.clientX - requestPane.offsetLeft;
let paneWidth = pixelOffset / (requestPaneWidth + responsePaneWidth);
paneWidth = Math.min(Math.max(paneWidth, MIN_PANE_WIDTH), MAX_PANE_WIDTH);
this._handleSetPaneWidth(paneWidth);
} else if (this.state.draggingPaneVertical) {
// Only pop the overlay after we've moved it a bit (so we don't block doubleclick);
const distance = this.props.paneHeight - this.state.paneHeight;
if (!this.state.showDragOverlay && Math.abs(distance) > 0.02 /* % */) {
this.setState({ showDragOverlay: true });
}
const requestPane = ReactDOM.findDOMNode(this._requestPane);
const responsePane = ReactDOM.findDOMNode(this._responsePane);
const requestPaneHeight = requestPane.offsetHeight;
const responsePaneHeight = responsePane.offsetHeight;
const pixelOffset = e.clientY - requestPane.offsetTop;
let paneHeight = pixelOffset / (requestPaneHeight + responsePaneHeight);
paneHeight = Math.min(Math.max(paneHeight, MIN_PANE_HEIGHT), MAX_PANE_HEIGHT);
this._handleSetPaneHeight(paneHeight);
} else if (this.state.draggingSidebar) {
// Only pop the overlay after we've moved it a bit (so we don't block doubleclick);
const distance = this.props.sidebarWidth - this.state.sidebarWidth;
if (!this.state.showDragOverlay && Math.abs(distance) > 2 /* ems */) {
this.setState({ showDragOverlay: true });
}
const sidebar = ReactDOM.findDOMNode(this._sidebar);
const currentPixelWidth = sidebar.offsetWidth;
const ratio = (e.clientX - sidebar.offsetLeft) / currentPixelWidth;
const width = this.state.sidebarWidth * ratio;
let sidebarWidth = Math.min(width, MAX_SIDEBAR_REMS);
if (sidebarWidth < COLLAPSE_SIDEBAR_REMS) {
sidebarWidth = MIN_SIDEBAR_REMS;
}
this._handleSetSidebarWidth(sidebarWidth);
}
}
_handleMouseUp() {
if (this.state.draggingSidebar) {
this.setState({ draggingSidebar: false, showDragOverlay: false });
}
if (this.state.draggingPaneHorizontal) {
this.setState({ draggingPaneHorizontal: false, showDragOverlay: false });
}
if (this.state.draggingPaneVertical) {
this.setState({ draggingPaneVertical: false, showDragOverlay: false });
}
}
_handleKeyDown(e) {
for (const [definition, callback] of this._globalKeyMap) {
executeHotKey(e, definition, callback);
}
}
_handleToggleMenuBar(hide) {
for (const win of remote.BrowserWindow.getAllWindows()) {
if (win.isMenuBarAutoHide() !== hide) {
win.setAutoHideMenuBar(hide);
win.setMenuBarVisibility(!hide);
}
}
}
async _handleToggleSidebar() {
const sidebarHidden = !this.props.sidebarHidden;
await this._handleSetSidebarHidden(sidebarHidden);
}
_handleShowExportRequestsModal() {
showModal(ExportRequestsModal);
}
_setWrapperRef(n) {
this._wrapper = n;
}
async _handleReloadPlugins() {
const { settings } = this.props;
await plugins.getPlugins(true);
templating.reload();
themes.setTheme(settings.theme);
console.log('[plugins] reloaded');
}
/**
* Update document.title to be "Workspace (Environment) – Request"
* @private
*/
_updateDocumentTitle() {
const { activeWorkspace, activeEnvironment, activeRequest } = this.props;
let title = activeWorkspace.name;
if (activeEnvironment) {
title += ` (${activeEnvironment.name})`;
}
if (activeRequest) {
title += ` – ${activeRequest.name}`;
}
document.title = title;
}
componentDidUpdate(prevProps) {
this._updateDocumentTitle();
// Force app refresh if login state changes
if (prevProps.isLoggedIn !== this.props.isLoggedIn) {
this.setState(state => ({
forceRefreshCounter: state.forceRefreshCounter + 1,
}));
}
}
async _updateVCS(activeWorkspace) {
// Get the vcs and set it to null in the state while we update it
let vcs = this.state.vcs;
this.setState({ vcs: null });
if (!vcs) {
const directory = path.join(getDataDirectory(), 'version-control');
const driver = new FileSystemDriver({ directory });
vcs = new VCS(driver, async conflicts => {
return new Promise(resolve => {
showModal(SyncMergeModal, {
conflicts,
handleDone: conflicts => resolve(conflicts),
});
});
});
}
await vcs.switchProject(activeWorkspace._id);
this.setState({ vcs });
}
async componentDidMount() {
// Bind mouse and key handlers
document.addEventListener('mouseup', this._handleMouseUp);
document.addEventListener('mousemove', this._handleMouseMove);
this._setGlobalKeyMap();
// Update title
this._updateDocumentTitle();
// Update VCS
await this._updateVCS(this.props.activeWorkspace);
db.onChange(async changes => {
let needsRefresh = false;
for (const change of changes) {
const [type, doc, fromSync] = change;
const { vcs } = this.state;
const { activeRequest } = this.props;
// Force refresh if environment changes
// TODO: Only do this for environments in this workspace (not easy because they're nested)
if (doc.type === models.environment.type) {
console.log('[App] Forcing update from environment change', change);
needsRefresh = true;
}
// Force refresh if sync changes the active request
if (fromSync && activeRequest && doc._id === activeRequest._id) {
needsRefresh = true;
console.log('[App] Forcing update from request change', change);
}
// Delete VCS project if workspace deleted
if (vcs && doc.type === models.workspace.type && type === db.CHANGE_REMOVE) {
await vcs.removeProjectsForRoot(doc._id);
}
}
if (needsRefresh) {
setTimeout(() => {
this._wrapper && this._wrapper._forceRequestPaneRefresh();
}, 300);
}
});
ipcRenderer.on('toggle-preferences', () => {
showModal(SettingsModal);
});
ipcRenderer.on('reload-plugins', this._handleReloadPlugins);
ipcRenderer.on('toggle-preferences-shortcuts', () => {
showModal(SettingsModal, TAB_INDEX_SHORTCUTS);
});
ipcRenderer.on('run-command', (e, commandUri) => {
const parsed = urlParse(commandUri, true);
const command = `${parsed.hostname}${parsed.pathname}`;
const args = JSON.parse(JSON.stringify(parsed.query));
args.workspaceId = args.workspaceId || this.props.activeWorkspace._id;
this.props.handleCommand(command, args);
});
// NOTE: This is required for "drop" event to trigger.
document.addEventListener(
'dragover',
e => {
e.preventDefault();
},
false,
);
document.addEventListener(
'drop',
async e => {
e.preventDefault();
const { activeWorkspace, handleImportUriToWorkspace } = this.props;
if (!activeWorkspace) {
return;
}
if (e.dataTransfer.files.length === 0) {
console.log('[drag] Ignored drop event because no files present');
return;
}
const file = e.dataTransfer.files[0];
const { path } = file;
const uri = `file://${path}`;
await showAlert({
title: 'Confirm Data Import',
message: (
<span>
Import <code>{path}</code>?
</span>
),
addCancel: true,
});
handleImportUriToWorkspace(activeWorkspace._id, uri);
},
false,
);
ipcRenderer.on('toggle-sidebar', this._handleToggleSidebar);
    // Apply the auto-hide menu bar preference on startup
this._handleToggleMenuBar(this.props.settings.autoHideMenuBar);
// Give it a bit before letting the backend know it's ready
setTimeout(() => ipcRenderer.send('window-ready'), 500);
}
componentWillUnmount() {
// Remove mouse and key handlers
document.removeEventListener('mouseup', this._handleMouseUp);
document.removeEventListener('mousemove', this._handleMouseMove);
}
_ensureWorkspaceChildren(props) {
const { activeWorkspace, activeCookieJar, environments } = props;
const baseEnvironments = environments.filter(e => e.parentId === activeWorkspace._id);
// Nothing to do
if (baseEnvironments.length && activeCookieJar) {
return;
}
// We already started migrating. Let it finish.
if (this.state.isMigratingChildren) {
return;
}
// Prevent rendering of everything
this.setState({ isMigratingChildren: true }, async () => {
const flushId = await db.bufferChanges();
await models.environment.getOrCreateForWorkspace(activeWorkspace);
await models.cookieJar.getOrCreateForParentId(activeWorkspace._id);
await db.flushChanges(flushId);
this.setState({ isMigratingChildren: false });
});
}
// eslint-disable-next-line camelcase
UNSAFE_componentWillReceiveProps(nextProps) {
this._ensureWorkspaceChildren(nextProps);
// Update VCS if needed
const { activeWorkspace } = this.props;
if (nextProps.activeWorkspace._id !== activeWorkspace._id) {
this._updateVCS(nextProps.activeWorkspace);
}
}
// eslint-disable-next-line camelcase
UNSAFE_componentWillMount() {
this._ensureWorkspaceChildren(this.props);
}
render() {
if (this.state.isMigratingChildren) {
console.log('[app] Waiting for migration to complete');
return null;
}
const { activeWorkspace } = this.props;
const {
paneWidth,
paneHeight,
sidebarWidth,
isVariableUncovered,
vcs,
forceRefreshCounter,
forceRefreshHeaderCounter,
} = this.state;
const uniquenessKey = `${forceRefreshCounter}::${activeWorkspace._id}`;
return (
<KeydownBinder onKeydown={this._handleKeyDown}>
<div className="app" key={uniquenessKey}>
<ErrorBoundary showAlert>
<Wrapper
{...this.props}
ref={this._setWrapperRef}
paneWidth={paneWidth}
paneHeight={paneHeight}
sidebarWidth={sidebarWidth}
handleCreateRequestForWorkspace={this._requestCreateForWorkspace}
handleSetRequestPinned={this._handleSetRequestPinned}
handleSetRequestGroupCollapsed={this._handleSetRequestGroupCollapsed}
handleActivateRequest={this._handleSetActiveRequest}
handleSetRequestPaneRef={this._setRequestPaneRef}
handleSetResponsePaneRef={this._setResponsePaneRef}
handleSetSidebarRef={this._setSidebarRef}
handleStartDragSidebar={this._startDragSidebar}
handleResetDragSidebar={this._resetDragSidebar}
handleStartDragPaneHorizontal={this._startDragPaneHorizontal}
handleStartDragPaneVertical={this._startDragPaneVertical}
handleResetDragPaneHorizontal={this._resetDragPaneHorizontal}
handleResetDragPaneVertical={this._resetDragPaneVertical}
handleCreateRequest={this._requestCreate}
handleRender={this._handleRenderText}
handleGetRenderContext={this._handleGetRenderContext}
handleDuplicateRequest={this._requestDuplicate}
handleDuplicateRequestGroup={App._requestGroupDuplicate}
handleMoveRequestGroup={App._requestGroupMove}
handleDuplicateWorkspace={this._workspaceDuplicate}
handleCreateRequestGroup={this._requestGroupCreate}
handleGenerateCode={App._handleGenerateCode}
handleGenerateCodeForActiveRequest={this._handleGenerateCodeForActiveRequest}
handleCopyAsCurl={this._handleCopyAsCurl}
handleSetResponsePreviewMode={this._handleSetResponsePreviewMode}
handleSetResponseFilter={this._handleSetResponseFilter}
handleSendRequestWithEnvironment={this._handleSendRequestWithEnvironment}
handleSendAndDownloadRequestWithEnvironment={
this._handleSendAndDownloadRequestWithEnvironment
}
handleSetActiveResponse={this._handleSetActiveResponse}
handleSetActiveRequest={this._handleSetActiveRequest}
handleSetActiveEnvironment={this._handleSetActiveEnvironment}
handleSetSidebarFilter={this._handleSetSidebarFilter}
handleToggleMenuBar={this._handleToggleMenuBar}
handleUpdateRequestMimeType={this._handleUpdateRequestMimeType}
handleShowExportRequestsModal={this._handleShowExportRequestsModal}
handleUpdateDownloadPath={this._handleUpdateDownloadPath}
isVariableUncovered={isVariableUncovered}
headerEditorKey={forceRefreshHeaderCounter + ''}
vcs={vcs}
/>
</ErrorBoundary>
<ErrorBoundary showAlert>
<Toast />
</ErrorBoundary>
{/* Block all mouse activity by showing an overlay while dragging */}
{this.state.showDragOverlay ? <div className="blocker-overlay" /> : null}
</div>
</KeydownBinder>
);
}
}
App.propTypes = {
// Required
sidebarWidth: PropTypes.number.isRequired,
paneWidth: PropTypes.number.isRequired,
paneHeight: PropTypes.number.isRequired,
handleCommand: PropTypes.func.isRequired,
settings: PropTypes.object.isRequired,
isLoggedIn: PropTypes.bool.isRequired,
activeWorkspace: PropTypes.shape({
_id: PropTypes.string.isRequired,
}).isRequired,
handleSetActiveWorkspace: PropTypes.func.isRequired,
// Optional
activeRequest: PropTypes.object,
activeEnvironment: PropTypes.shape({
_id: PropTypes.string.isRequired,
}),
};
function mapStateToProps(state, props) {
const { entities, global } = state;
const { isLoading, loadingRequestIds, isLoggedIn } = global;
// Entities
const entitiesLists = selectEntitiesLists(state, props);
const {
workspaces,
environments,
requests,
requestGroups,
requestMetas,
requestVersions,
} = entitiesLists;
const settings = entitiesLists.settings[0];
// Workspace stuff
const workspaceMeta = selectActiveWorkspaceMeta(state, props) || {};
const activeWorkspace = selectActiveWorkspace(state, props);
const activeWorkspaceClientCertificates = selectActiveWorkspaceClientCertificates(state, props);
const sidebarHidden = workspaceMeta.sidebarHidden || false;
const sidebarFilter = workspaceMeta.sidebarFilter || '';
const sidebarWidth = workspaceMeta.sidebarWidth || DEFAULT_SIDEBAR_WIDTH;
const paneWidth = workspaceMeta.paneWidth || DEFAULT_PANE_WIDTH;
const paneHeight = workspaceMeta.paneHeight || DEFAULT_PANE_HEIGHT;
// Request stuff
const requestMeta = selectActiveRequestMeta(state, props) || {};
const activeRequest = selectActiveRequest(state, props);
const responsePreviewMode = requestMeta.previewMode || PREVIEW_MODE_SOURCE;
const responseFilter = requestMeta.responseFilter || '';
const responseFilterHistory = requestMeta.responseFilterHistory || [];
const responseDownloadPath = requestMeta.downloadPath || null;
// Cookie Jar
const activeCookieJar = selectActiveCookieJar(state, props);
// Response stuff
const activeRequestResponses = selectActiveRequestResponses(state, props) || [];
const activeResponse = selectActiveResponse(state, props) || null;
// Environment stuff
const activeEnvironmentId = workspaceMeta.activeEnvironmentId;
const activeEnvironment = entities.environments[activeEnvironmentId];
// OAuth2Token stuff
const oAuth2Token = selectActiveOAuth2Token(state, props);
// Find other meta things
const loadStartTime = loadingRequestIds[activeRequest ? activeRequest._id : 'n/a'] || -1;
const sidebarChildren = selectSidebarChildren(state, props);
const workspaceChildren = selectWorkspaceRequestsAndRequestGroups(state, props);
const unseenWorkspaces = selectUnseenWorkspaces(state, props);
// Sync stuff
const syncItems = selectSyncItems(state, props);
return Object.assign({}, state, {
activeCookieJar,
activeEnvironment,
activeRequest,
activeRequestResponses,
activeResponse,
activeWorkspace,
activeWorkspaceClientCertificates,
environments,
isLoading,
isLoggedIn,
loadStartTime,
oAuth2Token,
paneHeight,
paneWidth,
requestGroups,
requestMetas,
requestVersions,
requests,
responseDownloadPath,
responseFilter,
responseFilterHistory,
responsePreviewMode,
settings,
sidebarChildren,
sidebarFilter,
sidebarHidden,
sidebarWidth,
syncItems,
unseenWorkspaces,
workspaceChildren,
workspaces,
});
}
function mapDispatchToProps(dispatch) {
const global = bindActionCreators(globalActions, dispatch);
return {
handleStartLoading: global.loadRequestStart,
handleStopLoading: global.loadRequestStop,
handleSetActiveWorkspace: global.setActiveWorkspace,
handleImportFileToWorkspace: global.importFile,
handleImportClipBoardToWorkspace: global.importClipBoard,
handleImportUriToWorkspace: global.importUri,
handleCommand: global.newCommand,
handleExportFile: global.exportWorkspacesToFile,
handleExportRequestsToFile: global.exportRequestsToFile,
handleMoveDoc: _moveDoc,
};
}
async function _moveDoc(docToMove, parentId, targetId, targetOffset) {
// Nothing to do. We are in the same spot as we started
if (docToMove._id === targetId) {
return;
}
// Don't allow dragging things into itself or children. This will disconnect
// the node from the tree and cause the item to no longer show in the UI.
const descendents = await db.withDescendants(docToMove);
for (const doc of descendents) {
if (doc._id === parentId) {
return;
}
}
  function __updateDoc(doc, patch) {
    // Use the doc's own type (not docToMove's) so mixed request/requestGroup
    // lists update correctly, and return the promise so callers can await it.
    return models.getModel(doc.type).update(doc, patch);
  }
if (targetId === null) {
// We are moving to an empty area. No sorting required
await __updateDoc(docToMove, { parentId });
return;
}
  // NOTE: using the drop target's parentId so we can switch parents!
let docs = [
...(await models.request.findByParentId(parentId)),
...(await models.requestGroup.findByParentId(parentId)),
].sort((a, b) => (a.metaSortKey < b.metaSortKey ? -1 : 1));
// Find the index of doc B so we can re-order and save everything
for (let i = 0; i < docs.length; i++) {
const doc = docs[i];
if (doc._id === targetId) {
let before, after;
if (targetOffset < 0) {
// We're moving to below
before = docs[i];
after = docs[i + 1];
} else {
// We're moving to above
before = docs[i - 1];
after = docs[i];
}
const beforeKey = before ? before.metaSortKey : docs[0].metaSortKey - 100;
const afterKey = after ? after.metaSortKey : docs[docs.length - 1].metaSortKey + 100;
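      // e.g. between keys 100 and 200 the new key is 200 - (200 - 100) / 2 = 150;
      // the redistribution branch below only fires once neighbours are within 1e-6.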
if (Math.abs(afterKey - beforeKey) < 0.000001) {
// If sort keys get too close together, we need to redistribute the list. This is
// not performant at all (need to update all siblings in DB), but it is extremely rare
// anyway
console.log(`[app] Recreating Sort Keys ${beforeKey} ${afterKey}`);
await db.bufferChanges(300);
docs.map((r, i) => __updateDoc(r, { metaSortKey: i * 100, parentId }));
} else {
const metaSortKey = afterKey - (afterKey - beforeKey) / 2;
__updateDoc(docToMove, { metaSortKey, parentId });
}
break;
}
}
}
export default connect(
mapStateToProps,
mapDispatchToProps,
)(App);
comm.rs | // This file is part of the uutils coreutils package.
//
// (c) Michael Gehring <[email protected]>
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (ToDO) delim mkdelim
use std::cmp::Ordering;
use std::fs::File;
use std::io::{self, stdin, BufRead, BufReader, Stdin};
use std::path::Path;
use uucore::error::FromIo;
use uucore::error::UResult;
use uucore::InvalidEncodingHandling;
use clap::{crate_version, App, Arg, ArgMatches};
static ABOUT: &str = "compare two sorted files line by line";
static LONG_HELP: &str = "";
mod options {
pub const COLUMN_1: &str = "1";
pub const COLUMN_2: &str = "2";
pub const COLUMN_3: &str = "3";
pub const DELIMITER: &str = "output-delimiter";
pub const DELIMITER_DEFAULT: &str = "\t";
pub const FILE_1: &str = "FILE1";
pub const FILE_2: &str = "FILE2";
}
fn usage() -> String {
format!("{} [OPTION]... FILE1 FILE2", uucore::execution_phrase())
}
fn mkdelim(col: usize, opts: &ArgMatches) -> String {
let mut s = String::new();
let delim = opts.value_of(options::DELIMITER).unwrap();
if col > 1 && !opts.is_present(options::COLUMN_1) {
s.push_str(delim.as_ref());
}
if col > 2 && !opts.is_present(options::COLUMN_2) {
s.push_str(delim.as_ref());
}
s
}
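// e.g. with the default "\t" delimiter and no suppressed columns, `mkdelim`
// returns "" for column 1, "\t" for column 2 and "\t\t" for column 3.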
fn ensure_nl(line: &mut String) {
if !line.ends_with('\n') {
line.push('\n');
}
}
enum LineReader {
Stdin(Stdin),
FileIn(BufReader<File>),
}
impl LineReader {
fn read_line(&mut self, buf: &mut String) -> io::Result<usize> {
match *self {
LineReader::Stdin(ref mut r) => r.read_line(buf),
LineReader::FileIn(ref mut r) => r.read_line(buf),
}
}
}
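// Sketch: `LineReader` lets `comm` treat stdin ("-") and regular files the
// same way, e.g.:
//   let mut r = open_file("-")?;
//   let mut line = String::new();
//   r.read_line(&mut line)?;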
fn comm(a: &mut LineReader, b: &mut LineReader, opts: &ArgMatches) {
let delim: Vec<String> = (0..4).map(|col| mkdelim(col, opts)).collect();
let ra = &mut String::new();
let mut na = a.read_line(ra);
let rb = &mut String::new();
let mut nb = b.read_line(rb);
while na.is_ok() || nb.is_ok() {
let ord = match (na.is_ok(), nb.is_ok()) {
(false, true) => Ordering::Greater,
(true, false) => Ordering::Less,
(true, true) => match (&na, &nb) {
(&Ok(0), &Ok(0)) => break,
(&Ok(0), _) => Ordering::Greater,
(_, &Ok(0)) => Ordering::Less,
_ => ra.cmp(&rb),
},
_ => unreachable!(),
};
match ord {
Ordering::Less => {
if !opts.is_present(options::COLUMN_1) {
ensure_nl(ra);
print!("{}{}", delim[1], ra);
}
ra.clear();
na = a.read_line(ra);
}
Ordering::Greater => {
if !opts.is_present(options::COLUMN_2) {
ensure_nl(rb);
print!("{}{}", delim[2], rb);
}
rb.clear();
nb = b.read_line(rb);
}
Ordering::Equal => {
if !opts.is_present(options::COLUMN_3) {
ensure_nl(ra);
print!("{}{}", delim[3], ra);
}
ra.clear();
rb.clear();
na = a.read_line(ra);
nb = b.read_line(rb);
}
}
}
}
fn open_file(name: &str) -> io::Result<LineReader> {
    match name {
        "-" => Ok(LineReader::Stdin(stdin())),
        _ => {
            let f = File::open(&Path::new(name))?;
            Ok(LineReader::FileIn(BufReader::new(f)))
        }
    }
}
#[uucore_procs::gen_uumain]
pub fn uumain(args: impl uucore::Args) -> UResult<()> {
let usage = usage();
let args = args
.collect_str(InvalidEncodingHandling::ConvertLossy)
.accept_any();
let matches = uu_app().override_usage(&usage[..]).get_matches_from(args);
let filename1 = matches.value_of(options::FILE_1).unwrap();
let filename2 = matches.value_of(options::FILE_2).unwrap();
let mut f1 = open_file(filename1).map_err_context(|| filename1.to_string())?;
let mut f2 = open_file(filename2).map_err_context(|| filename2.to_string())?;
comm(&mut f1, &mut f2, &matches);
Ok(())
}
pub fn uu_app<'a>() -> App<'a> {
App::new(uucore::util_name())
.version(crate_version!())
.about(ABOUT)
.after_help(LONG_HELP)
.arg(
Arg::new(options::COLUMN_1)
.short('1')
.help("suppress column 1 (lines unique to FILE1)"),
)
.arg(
Arg::new(options::COLUMN_2)
.short('2')
.help("suppress column 2 (lines unique to FILE2)"),
)
.arg(
Arg::new(options::COLUMN_3)
.short('3')
.help("suppress column 3 (lines that appear in both files)"),
)
.arg(
Arg::new(options::DELIMITER)
.long(options::DELIMITER)
.help("separate columns with STR")
.value_name("STR")
.default_value(options::DELIMITER_DEFAULT)
.hide_default_value(true),
)
.arg(Arg::new(options::FILE_1).required(true))
.arg(Arg::new(options::FILE_2).required(true))
}
newPageHandler.go | // newPageHandler.go creates and returns a new page
package site
import (
"encoding/json"
"net/http"
"zanaduu3/src/core"
"zanaduu3/src/pages"
)
var newPageHandler = siteHandler{
URI: "/json/newPage/",
HandlerFunc: newPageHandlerFunc,
Options: pages.PageOptions{
RequireLogin: true,
},
}
// newPageData contains parameters passed in via the request.
type newPageData struct {
Type string
ParentIDs []string
IsEditorComment bool
Alias string
SubmitToDomainID string
// If creating a new comment, this is the id of the primary page
CommentPrimaryPageID string
}
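// Example request body (sketch; field names rely on Go's default
// case-insensitive JSON matching, and the ids below are made up):
//   {"type": "wiki", "parentIds": ["12345"], "alias": "myPage"}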
// newPageHandlerFunc handles the request.
func newPageHandlerFunc(params *pages.HandlerParams) *pages.Result {
// Decode data
var data newPageData
err := json.NewDecoder(params.R.Body).Decode(&data)
if err != nil {
return pages.Fail("Couldn't decode request", err).Status(http.StatusBadRequest)
}
data.Type, err = core.CorrectPageType(data.Type)
if err != nil {
data.Type = core.WikiPageType
}
return newPageInternalHandler(params, &data)
}
func newPageInternalHandler(params *pages.HandlerParams, data *newPageData) *pages.Result {
u := params.U
pageID, err := core.CreateNewPage(params.DB, params.U, &core.CreateNewPageOptions{
Alias: data.Alias,
Type: data.Type,
SeeDomainID: params.PrivateDomain.ID,
EditDomainID: u.MyDomainID(),
SubmitToDomainID: data.SubmitToDomainID,
IsEditorComment: data.IsEditorComment,
ParentIDs: data.ParentIDs,
CommentPrimaryPageID: data.CommentPrimaryPageID,
})
if err != nil {
return pages.Fail("Couldn't create new page", err)
}
editData := &editJSONData{
PageAlias: pageID,
}
return editJSONInternalHandler(params, editData)
}

difference_of_squares.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Submission file for the python difference_of_squares exercise.
#
# v2: Using "x*x" instead of the slower "x**2" and remove left over
# "pass" statement in difference()
# v1: Using sum(), abs(), range() and "**2"
def difference(length):
"""
Return the (absolute) difference between the Square of Sums and the
Sum of Squares in the interval [1, length].
"""
return abs(square_of_sum(length) - sum_of_squares(length))
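# Example: difference(10) == 55*55 - 385 == 2640.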
def square_of_sum(length):
"""
Return the Square of Sums in the interval [1, length].
"""
s = sum(range(1, length+1))
return s*s
def sum_of_squares(length):
    """
    Return the Sum of Squares in the interval [1, length].
    """
    return sum([x*x for x in range(1, length+1)])
|
RedirectManager.tsx | import React from "react";
import { useContext } from "react";
import { Redirect } from "react-router-dom";
import { AppContext } from "../App";
import { emptyProfile } from "../hooks";
import { profilesMatch } from "../utils";
interface Props {}
function RedirectManager(props: Props) {
const { isAuthenticated, profile } = useContext(AppContext);
// Logged out
if (!isAuthenticated) return <Redirect to="/" />;
// Authenticated but no profile
if (isAuthenticated && !profile) return <Redirect to="/create" />;
if (isAuthenticated && profilesMatch(profile, emptyProfile))
return <Redirect to="/create" />;
// Logged in with profile
return <Redirect to="/manage" />;
}
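// Usage (sketch, assuming a react-router setup like the imports above):
//   <Route exact path="/" render={() => <RedirectManager />} />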
export default RedirectManager;
distance.rs | use linfa::Float;
use ndarray::{ArrayView, Dimension, Zip};
use ndarray_stats::DeviationExt;
/// A distance function that can be used in spatial algorithms such as nearest neighbour.
pub trait Distance<F: Float>: Clone + Send + Sync {
/// Computes the distance between two points. For most spatial algorithms to work correctly,
/// **this metric must satisfy the Triangle Inequality.**
///
/// Panics if the points have different dimensions.
fn distance<D: Dimension>(&self, a: ArrayView<F, D>, b: ArrayView<F, D>) -> F;
/// A faster version of the distance metric that keeps the order of the distance function. That
/// is, `dist(a, b) > dist(c, d)` implies `rdist(a, b) > rdist(c, d)`. For most algorithms this
/// is the same as `distance`. Unlike `distance`, this function does **not** need to satisfy
/// the Triangle Inequality.
#[inline]
fn rdistance<D: Dimension>(&self, a: ArrayView<F, D>, b: ArrayView<F, D>) -> F {
self.distance(a, b)
}
/// Converts the result of `rdistance` to `distance`
#[inline]
fn rdist_to_dist(&self, rdist: F) -> F {
rdist
}
/// Converts the result of `distance` to `rdistance`
#[inline]
fn dist_to_rdist(&self, dist: F) -> F {
dist
}
}
/// L1 or [Manhattan](https://en.wikipedia.org/wiki/Taxicab_geometry) distance
#[derive(Debug, Clone, PartialEq)]
pub struct L1Dist;
impl<F: Float> Distance<F> for L1Dist {
#[inline]
fn distance<D: Dimension>(&self, a: ArrayView<F, D>, b: ArrayView<F, D>) -> F {
a.l1_dist(&b).unwrap()
}
}
/// L2 or [Euclidean](https://en.wikipedia.org/wiki/Euclidean_distance) distance
#[derive(Debug, Clone, PartialEq)]
pub struct L2Dist;
impl<F: Float> Distance<F> for L2Dist {
#[inline]
fn distance<D: Dimension>(&self, a: ArrayView<F, D>, b: ArrayView<F, D>) -> F {
F::from(a.l2_dist(&b).unwrap()).unwrap()
}
#[inline]
fn rdistance<D: Dimension>(&self, a: ArrayView<F, D>, b: ArrayView<F, D>) -> F {
F::from(a.sq_l2_dist(&b).unwrap()).unwrap()
}
#[inline]
fn rdist_to_dist(&self, rdist: F) -> F {
rdist.sqrt()
}
#[inline]
fn dist_to_rdist(&self, dist: F) -> F {
dist.powi(2)
}
}
/// L-infinite or [Chebyshev](https://en.wikipedia.org/wiki/Chebyshev_distance) distance
#[derive(Debug, Clone, PartialEq)]
pub struct LInfDist;
impl<F: Float> Distance<F> for LInfDist {
#[inline]
fn distance<D: Dimension>(&self, a: ArrayView<F, D>, b: ArrayView<F, D>) -> F {
a.linf_dist(&b).unwrap()
}
}
/// L-p or [Minkowski](https://en.wikipedia.org/wiki/Minkowski_distance) distance
#[derive(Debug, Clone, PartialEq)]
pub struct LpDist<F: Float>(F);
impl<F: Float> Distance<F> for LpDist<F> {
#[inline]
fn distance<D: Dimension>(&self, a: ArrayView<F, D>, b: ArrayView<F, D>) -> F {
Zip::from(&a)
.and(&b)
.fold(F::zero(), |acc, &a, &b| acc + (a - b).abs().powf(self.0))
.powf(F::one() / self.0)
}
}
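// Sketch: `LpDist(1.0)` and `LpDist(2.0)` agree with `L1Dist` and `L2Dist`
// (up to floating-point error); the dedicated types just use faster kernels.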
#[cfg(test)]
mod test {
use approx::assert_abs_diff_eq;
use ndarray::arr1;
use super::*;
fn dist_test<D: Distance<f64>>(dist: D, result: f64) {
let a = arr1(&[0.5, 6.6]);
let b = arr1(&[4.4, 3.0]);
let ab = dist.distance(a.view(), b.view());
assert_abs_diff_eq!(ab, result, epsilon = 1e-3);
assert_abs_diff_eq!(dist.rdist_to_dist(dist.dist_to_rdist(ab)), ab);
let a = arr1(&[f64::INFINITY, 6.6]);
let b = arr1(&[4.4, f64::NEG_INFINITY]);
        assert!(dist.distance(a.view(), b.view()).is_infinite());

        // Triangle inequality
        let a = arr1(&[0.5, 6.6]);
        let b = arr1(&[4.4, 3.0]);
        let c = arr1(&[-4.5, 3.3]);
let ab = dist.distance(a.view(), b.view());
let bc = dist.distance(b.view(), c.view());
let ac = dist.distance(a.view(), c.view());
assert!(ab + bc > ac)
}
#[test]
fn l1_dist() {
dist_test(L1Dist, 7.5);
}
#[test]
fn l2_dist() {
dist_test(L2Dist, 5.3075);
// Check squared distance
let a = arr1(&[0.5, 6.6]);
let b = arr1(&[4.4, 3.0]);
assert_abs_diff_eq!(L2Dist.rdistance(a.view(), b.view()), 28.17, epsilon = 1e-3);
}
#[test]
fn linf_dist() {
dist_test(LInfDist, 3.9);
}
#[test]
fn lp_dist() {
dist_test(LpDist(3.3), 4.635);
}
}
is.js | // This file was automatically generated from common.soy.
// Please don't edit this file by hand.
if (typeof apps == 'undefined') { var apps = {}; }
apps.messages = function(opt_data, opt_ignored, opt_ijData) {
return '<div style="display: none"><span id="subtitle">sjónrænt forritunarumhverfi</span><span id="blocklyMessage">Blockly</span><span id="codeTooltip">Sjá forritið sem JavaScript kóða.</span><span id="linkTooltip">Vista og tengja við kubba.</span><span id="runTooltip">Keyra forritið sem kubbarnir á vinnusvæðinu mynda.</span><span id="runProgram">Keyra forritið</span><span id="resetProgram">Byrja aftur</span><span id="dialogOk">Í lagi</span><span id="dialogCancel">Hætta við</span><span id="catLogic">Rökvísi</span><span id="catLoops">Lykkjur</span><span id="catMath">Reikningur</span><span id="catText">Texti</span><span id="catLists">Listar</span><span id="catColour">Litir</span><span id="catVariables">Breytur</span><span id="catProcedures">Stefjur</span><span id="httpRequestError">Það kom upp vandamál með beiðnina.</span><span id="linkAlert">Deildu kubbunum þínum með þessari krækju:</span><span id="hashError">Því miður, \'%1\' passar ekki við neitt vistað forrit.</span><span id="xmlError">Gat ekki hlaðið vistuðu skrána þína. Var hún kannske búin til í annarri útgáfu af Blockly?</span><span id="listVariable">listi</span><span id="textVariable">texti</span></div>';
};
apps.dialog = function(opt_data, opt_ignored, opt_ijData) {
return '<div id="dialogShadow" class="dialogAnimate"></div><div id="dialogBorder"></div><div id="dialog"></div>';
};
apps.codeDialog = function(opt_data, opt_ignored, opt_ijData) {
return '<div id="dialogCode" class="dialogHiddenContent"><pre id="containerCode"></pre>' + apps.ok(null, null, opt_ijData) + '</div>';
};
apps.storageDialog = function(opt_data, opt_ignored, opt_ijData) {
return '<div id="dialogStorage" class="dialogHiddenContent"><div id="containerStorage"></div>' + apps.ok(null, null, opt_ijData) + '</div>';
};
apps.ok = function(opt_data, opt_ignored, opt_ijData) {
return '<div class="farSide" style="padding: 1ex 3ex 0"><button class="secondary" onclick="BlocklyApps.hideDialog(true)">Í lagi</button></div>';
};
;
// This file was automatically generated from template.soy.
// Please don't edit this file by hand.
if (typeof graphpage == 'undefined') { var graphpage = {}; }
graphpage.messages = function(opt_data, opt_ignored, opt_ijData) {
  return apps.messages(null, null, opt_ijData);
};
graphpage.start = function(opt_data, opt_ignored, opt_ijData) {
return graphpage.messages(null, null, opt_ijData) + '<table width="100%"><tr><td><h1><span id="title"><a href="../index.html?lang=' + soy.$$escapeHtml(opt_ijData.lang) + '">Blockly</a> : Reiknir með línuriti</span></h1></td><td class="farSide"><select id="languageMenu"></select> <button id="linkButton" class="notext" title="Vista og tengja við kubba."><img src=\'link.png\' height=21 width=21></button></div></td></tr></table><div id="visualization"></div><div id="funcText"><img id="y1" src="../../media/1x1.gif">...</div><script type="text/javascript" src="../../blockly_compressed.js"><\/script><script type="text/javascript" src="../../blocks_compressed.js"><\/script><script type="text/javascript" src="../../javascript_compressed.js"><\/script><script type="text/javascript" src="../../' + soy.$$escapeHtml(opt_ijData.langSrc) + '"><\/script><script type="text/javascript" src="blocks.js"><\/script>' + graphpage.toolbox(null, null, opt_ijData) + '<div id="blockly"></div>' + apps.dialog(null, null, opt_ijData) + apps.storageDialog(null, null, opt_ijData);
};
graphpage.toolbox = function(opt_data, opt_ignored, opt_ijData) {
return '<xml id="toolbox" style="display: none"><category name="Reikningur"><block type="math_number"></block><block type="math_arithmetic"></block><block type="math_single"></block><block type="math_trig"></block><block type="math_constant"></block><block type="math_number_property"></block><block type="math_round"></block><block type="math_modulo"></block><block type="math_constrain"><value name="LOW"><block type="math_number"><title name="NUM">1</title></block></value><value name="HIGH"><block type="math_number"><title name="NUM">100</title></block></value></block><block type="math_random_int"><value name="FROM"><block type="math_number"><title name="NUM">1</title></block></value><value name="TO"><block type="math_number"><title name="NUM">100</title></block></value></block><block type="math_random_float"></block></category><category name="Breytur"><block type="graph_get_x"></block></category><category name="Rökvísi"><block type="logic_compare"></block><block type="logic_operation"></block><block type="logic_negate"></block><block type="logic_boolean"></block><block type="logic_ternary"></block></category></xml>';
};
MongoHelper.py | import pymongo
from config import DB_CONFIG, DEFAULT_SCORE
from db.ISqlHelper import ISqlHelper
class MongoHelper(ISqlHelper):
    def __init__(self):
self.client = pymongo.MongoClient(DB_CONFIG['DB_CONNECT_STRING'], connect=False)
def init_db(self):
self.db = self.client.proxy
self.proxys = self.db.proxys
def drop_db(self):
self.client.drop_database(self.db)
def insert(self, value=None):
if value:
proxy = dict(ip=value['ip'], port=value['port'], types=value['types'], protocol=value['protocol'],
country=value['country'],
area=value['area'], speed=value['speed'], score=DEFAULT_SCORE)
self.proxys.insert(proxy)
def delete(self, conditions=None):
if conditions:
self.proxys.remove(conditions)
return ('deleteNum', 'ok')
else:
return ('deleteNum', 'None')
def update(self, conditions=None, value=None):
# update({"UserName":"libing"},{"$set":{"Email":"[email protected]","Password":"123"}})
if conditions and value:
self.proxys.update(conditions, {"$set": value})
return {'updateNum': 'ok'}
else:
return {'updateNum': 'fail'}
def select(self, count=None, conditions=None):
if count:
count = int(count)
else:
count = 0
if conditions:
conditions = dict(conditions)
if 'count' in conditions:
del conditions['count']
conditions_name = ['types', 'protocol']
for condition_name in conditions_name:
value = conditions.get(condition_name, None)
if value:
conditions[condition_name] = int(value)
else:
conditions = {}
items = self.proxys.find(conditions, limit=count).sort(
[("speed", pymongo.ASCENDING), ("score", pymongo.DESCENDING)])
results = []
for item in items:
result = (item['ip'], item['port'], item['score'])
results.append(result)
return results
if __name__ == '__main__':
# from db.MongoHelper import MongoHelper as SqlHelper
# sqlhelper = SqlHelper()
# sqlhelper.init_db()
# # print sqlhelper.select(None,{'types':u'1'})
# items= sqlhelper.proxys.find({'types':0})
# for item in items:
# print item
# # # print sqlhelper.select(None,{'types':u'0'})
pass
| __init__ |
arrow.py | """
Arrows
"""
#*****************************************************************************
# Copyright (C) 2006 Alex Clemesha <[email protected]>,
# William Stein <[email protected]>,
# 2008 Mike Hansen <[email protected]>,
# 2009 Emily Kirkman
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.plot.primitive import GraphicPrimitive
from sage.misc.decorators import options, rename_keyword
from sage.plot.colors import to_mpl_color
class CurveArrow(GraphicPrimitive):
def __init__(self, path, options):
"""
Returns an arrow graphics primitive along the provided path (bezier curve).
EXAMPLES::
sage: from sage.plot.arrow import CurveArrow
sage: b = CurveArrow(path=[[(0,0),(.5,.5),(1,0)],[(.5,1),(0,0)]],
....: options={})
sage: b
CurveArrow from (0, 0) to (0, 0)
"""
import numpy as np
self.path = path
codes = [1] + (len(self.path[0])-1)*[len(self.path[0])]
vertices = self.path[0]
for curve in self.path[1:]:
vertices += curve
codes += (len(curve))*[len(curve)+1]
self.codes = codes
self.vertices = np.array(vertices, float)
GraphicPrimitive.__init__(self, options)
def get_minmax_data(self):
"""
Returns a dictionary with the bounding box data.
EXAMPLES::
sage: from sage.plot.arrow import CurveArrow
sage: b = CurveArrow(path=[[(0,0),(.5,.5),(1,0)],[(.5,1),(0,0)]],
....: options={})
sage: d = b.get_minmax_data()
sage: d['xmin']
0.0
sage: d['xmax']
1.0
"""
return {'xmin': self.vertices[:,0].min(),
'xmax': self.vertices[:,0].max(),
'ymin': self.vertices[:,1].min(),
'ymax': self.vertices[:,1].max()}
def _allowed_options(self):
"""
Return the dictionary of allowed options for the curve arrow graphics
primitive.
EXAMPLES::
sage: from sage.plot.arrow import CurveArrow
sage: list(sorted(CurveArrow(path=[[(0,0),(2,3)]],options={})._allowed_options().items()))
[('arrowsize', 'The size of the arrowhead'),
('arrowstyle', 'todo'),
('head', '2-d only: Which end of the path to draw the head (one of 0 (start), 1 (end) or 2 (both))'),
('hue', 'The color given as a hue.'),
('legend_color', 'The color of the legend text.'),
('legend_label', 'The label for this item in the legend.'),
('linestyle', "2d only: The style of the line, which is one of
'dashed', 'dotted', 'solid', 'dashdot', or '--', ':', '-', '-.',
respectively."),
('rgbcolor', 'The color as an RGB tuple.'),
('thickness', 'The thickness of the arrow.'),
('width', 'The width of the shaft of the arrow, in points.'),
('zorder', '2-d only: The layer level in which to draw')]
"""
return {'width': 'The width of the shaft of the arrow, in points.',
'rgbcolor': 'The color as an RGB tuple.',
'hue': 'The color given as a hue.',
'legend_label': 'The label for this item in the legend.',
'legend_color': 'The color of the legend text.',
'arrowstyle': 'todo',
'arrowsize': 'The size of the arrowhead',
'thickness': 'The thickness of the arrow.',
'zorder': '2-d only: The layer level in which to draw',
'head': '2-d only: Which end of the path to draw the head (one of 0 (start), 1 (end) or 2 (both))',
'linestyle': "2d only: The style of the line, which is one of "
"'dashed', 'dotted', 'solid', 'dashdot', or '--', ':', '-', '-.', "
"respectively."}
def _repr_(self):
"""
Text representation of an arrow graphics primitive.
EXAMPLES::
sage: from sage.plot.arrow import CurveArrow
sage: CurveArrow(path=[[(0,0),(1,4),(2,3)]],options={})._repr_()
'CurveArrow from (0, 0) to (2, 3)'
"""
return "CurveArrow from %s to %s" % (self.path[0][0], self.path[-1][-1])
def _render_on_subplot(self, subplot):
"""
Render this arrow in a subplot. This is the key function that
defines how this arrow graphics primitive is rendered in
matplotlib's library.
EXAMPLES:
This function implicitly ends up rendering this arrow on a matplotlib
subplot::
sage: arrow(path=[[(0,1), (2,-1), (4,5)]])
Graphics object consisting of 1 graphics primitive
"""
from sage.plot.misc import get_matplotlib_linestyle
options = self.options()
width = float(options['width'])
head = options.pop('head')
if head == 0: style = '<|-'
elif head == 1: style = '-|>'
elif head == 2: style = '<|-|>'
else: raise KeyError('head parameter must be one of 0 (start), 1 (end) or 2 (both).')
arrowsize = float(options.get('arrowsize', 5))
head_width = arrowsize
head_length = arrowsize * 2.0
color = to_mpl_color(options['rgbcolor'])
from matplotlib.patches import FancyArrowPatch
from matplotlib.path import Path
bpath = Path(self.vertices, self.codes)
p = FancyArrowPatch(path=bpath,
lw=width, arrowstyle='%s,head_width=%s,head_length=%s' % (style, head_width, head_length),
fc=color, ec=color,
linestyle=get_matplotlib_linestyle(options['linestyle'], return_type='long'))
p.set_zorder(options['zorder'])
p.set_label(options['legend_label'])
subplot.add_patch(p)
return p
class Arrow(GraphicPrimitive):
"""
Primitive class that initializes the (line) arrow graphics type
EXAMPLES:
We create an arrow graphics object, then take the 0th entry
in it to get the actual Arrow graphics primitive::
sage: P = arrow((0,1), (2,3))[0]
sage: type(P)
<class 'sage.plot.arrow.Arrow'>
sage: P
Arrow from (0.0,1.0) to (2.0,3.0)
"""
def __init__(self, xtail, ytail, xhead, yhead, options):
"""
Create an arrow graphics primitive.
EXAMPLES::
sage: from sage.plot.arrow import Arrow
sage: Arrow(0,0,2,3,{})
Arrow from (0.0,0.0) to (2.0,3.0)
"""
self.xtail = float(xtail)
self.xhead = float(xhead)
self.ytail = float(ytail)
self.yhead = float(yhead)
GraphicPrimitive.__init__(self, options)
def get_minmax_data(self):
"""
Returns a bounding box for this arrow.
EXAMPLES::
sage: d = arrow((1,1), (5,5)).get_minmax_data()
sage: d['xmin']
1.0
sage: d['xmax']
5.0
"""
return {'xmin': min(self.xtail, self.xhead),
'xmax': max(self.xtail, self.xhead),
'ymin': min(self.ytail, self.yhead),
'ymax': max(self.ytail, self.yhead)}
def _allowed_options(self):
"""
Return the dictionary of allowed options for the line arrow graphics
primitive.
EXAMPLES::
sage: from sage.plot.arrow import Arrow
sage: list(sorted(Arrow(0,0,2,3,{})._allowed_options().items()))
[('arrowshorten', 'The length in points to shorten the arrow.'),
('arrowsize', 'The size of the arrowhead'),
('head',
'2-d only: Which end of the path to draw the head (one of 0 (start), 1 (end) or 2 (both))'),
('hue', 'The color given as a hue.'),
('legend_color', 'The color of the legend text.'),
('legend_label', 'The label for this item in the legend.'),
('linestyle',
"2d only: The style of the line, which is one of 'dashed',
'dotted', 'solid', 'dashdot', or '--', ':', '-', '-.',
respectively."),
('rgbcolor', 'The color as an RGB tuple.'),
('thickness', 'The thickness of the arrow.'),
('width', 'The width of the shaft of the arrow, in points.'),
('zorder', '2-d only: The layer level in which to draw')]
"""
return {'width': 'The width of the shaft of the arrow, in points.',
'rgbcolor': 'The color as an RGB tuple.',
'hue': 'The color given as a hue.',
'arrowshorten': 'The length in points to shorten the arrow.',
'arrowsize': 'The size of the arrowhead',
'thickness': 'The thickness of the arrow.',
'legend_label': 'The label for this item in the legend.',
'legend_color': 'The color of the legend text.',
'zorder': '2-d only: The layer level in which to draw',
'head': '2-d only: Which end of the path to draw the head (one of 0 (start), 1 (end) or 2 (both))',
'linestyle': "2d only: The style of the line, which is one of "
"'dashed', 'dotted', 'solid', 'dashdot', or '--', ':', '-', '-.', "
"respectively."}
def _plot3d_options(self, options=None):
"""
Translate 2D plot options into 3D plot options.
EXAMPLES::
sage: P = arrow((0,1), (2,3), width=5)
sage: p=P[0]; p
Arrow from (0.0,1.0) to (2.0,3.0)
sage: q=p.plot3d()
sage: q.thickness
5
"""
if options is None:
options = self.options()
options = dict(options)
options_3d = {}
if 'width' in options:
options_3d['thickness'] = options['width']
del options['width']
# ignore zorder and head in 3d plotting
if 'zorder' in options:
del options['zorder']
if 'head' in options:
del options['head']
if 'linestyle' in options:
del options['linestyle']
options_3d.update(GraphicPrimitive._plot3d_options(self, options))
return options_3d
def plot3d(self, ztail=0, zhead=0, **kwds):
|
def _repr_(self):
"""
Text representation of an arrow graphics primitive.
EXAMPLES::
sage: from sage.plot.arrow import Arrow
sage: Arrow(0,0,2,3,{})._repr_()
'Arrow from (0.0,0.0) to (2.0,3.0)'
"""
return "Arrow from (%s,%s) to (%s,%s)" % (self.xtail, self.ytail, self.xhead, self.yhead)
def _render_on_subplot(self, subplot):
r"""
Render this arrow in a subplot. This is the key function that
defines how this arrow graphics primitive is rendered in
matplotlib's library.
EXAMPLES:
This function implicitly ends up rendering this arrow on
a matplotlib subplot::
sage: arrow((0,1), (2,-1))
Graphics object consisting of 1 graphics primitive
TESTS:
The length of the ends (shrinkA and shrinkB) should not depend
on the width of the arrow, because Matplotlib already takes
this into account. See :trac:`12836`::
sage: fig = Graphics().matplotlib()
sage: sp = fig.add_subplot(1,1,1, label='axis1')
sage: a = arrow((0,0), (1,1))
sage: b = arrow((0,0), (1,1), width=20)
sage: p1 = a[0]._render_on_subplot(sp)
sage: p2 = b[0]._render_on_subplot(sp)
sage: p1.shrinkA == p2.shrinkA
True
sage: p1.shrinkB == p2.shrinkB
True
Dashed arrows should have solid arrowheads,
:trac:`12852`. This test saves the plot of a dashed arrow to
an EPS file. Within the EPS file, ``stroke`` will be called
twice: once to draw the line, and again to draw the
arrowhead. We check that both calls do not occur while the
dashed line style is enabled::
sage: a = arrow((0,0), (1,1), linestyle='dashed')
sage: filename = tmp_filename(ext='.eps')
sage: a.save(filename=filename)
sage: with open(filename, 'r') as f:
....: contents = f.read().replace('\n', ' ')
sage: two_stroke_pattern = r'setdash.*stroke.*stroke.*setdash.*setdash'
sage: import re
sage: two_stroke_re = re.compile(two_stroke_pattern)
sage: two_stroke_re.search(contents) is None
True
"""
from sage.plot.misc import get_matplotlib_linestyle
options = self.options()
head = options.pop('head')
if head == 0: style = '<|-'
elif head == 1: style = '-|>'
elif head == 2: style = '<|-|>'
else: raise KeyError('head parameter must be one of 0 (start), 1 (end) or 2 (both).')
width = float(options['width'])
arrowshorten_end = float(options.get('arrowshorten', 0)) / 2.0
arrowsize = float(options.get('arrowsize', 5))
head_width = arrowsize
head_length = arrowsize * 2.0
color = to_mpl_color(options['rgbcolor'])
from matplotlib.patches import FancyArrowPatch
p = FancyArrowPatch((self.xtail, self.ytail), (self.xhead, self.yhead),
lw=width,
arrowstyle='%s,head_width=%s,head_length=%s' % (style, head_width, head_length),
shrinkA=arrowshorten_end, shrinkB=arrowshorten_end,
fc=color, ec=color,
linestyle=get_matplotlib_linestyle(options['linestyle'], return_type='long'))
p.set_zorder(options['zorder'])
p.set_label(options['legend_label'])
if options['linestyle'] != 'solid':
# The next few lines work around a design issue in matplotlib.
# Currently, the specified linestyle is used to draw both the path
# and the arrowhead. If linestyle is 'dashed', this looks really
# odd. This code is from Jae-Joon Lee in response to a post to the
# matplotlib mailing list.
# See http://sourceforge.net/mailarchive/forum.php?thread_name=CAG%3DuJ%2Bnw2dE05P9TOXTz_zp-mGP3cY801vMH7yt6vgP9_WzU8w%40mail.gmail.com&forum_name=matplotlib-users
import matplotlib.patheffects as pe
class CheckNthSubPath(object):
def __init__(self, patch, n):
"""
creates an callable object that returns True if the
provided path is the n-th path from the patch.
"""
self._patch = patch
self._n = n
def get_paths(self, renderer):
self._patch.set_dpi_cor(renderer.points_to_pixels(1.))
paths, fillables = self._patch.get_path_in_displaycoord()
return paths
def __call__(self, renderer, gc, tpath, affine, rgbFace):
path = self.get_paths(renderer)[self._n]
vert1, code1 = path.vertices, path.codes
import numpy as np
return np.array_equal(vert1, tpath.vertices) and np.array_equal(code1, tpath.codes)
class ConditionalStroke(pe.RendererBase):
def __init__(self, condition_func, pe_list):
"""
path effect that is only applied when the condition_func
returns True.
"""
super(ConditionalStroke, self).__init__()
self._pe_list = pe_list
self._condition_func = condition_func
def draw_path(self, renderer, gc, tpath, affine, rgbFace):
if self._condition_func(renderer, gc, tpath, affine, rgbFace):
for pe1 in self._pe_list:
pe1.draw_path(renderer, gc, tpath, affine, rgbFace)
pe1 = ConditionalStroke(CheckNthSubPath(p, 0), [pe.Stroke()])
pe2 = ConditionalStroke(CheckNthSubPath(p, 1), [pe.Stroke(dashes={'dash_offset': 0, 'dash_list': None})])
p.set_path_effects([pe1, pe2])
subplot.add_patch(p)
return p
def arrow(tailpoint=None, headpoint=None, **kwds):
"""
Returns either a 2-dimensional or 3-dimensional arrow depending
on value of points.
For information regarding additional arguments, see either arrow2d?
or arrow3d?.
EXAMPLES::
sage: arrow((0,0), (1,1))
Graphics object consisting of 1 graphics primitive
.. PLOT::
sphinx_plot(arrow((0,0), (1,1)))
::
sage: arrow((0,0,1), (1,1,1))
Graphics3d Object
.. PLOT::
sphinx_plot(arrow((0,0,1), (1,1,1)))
"""
try:
return arrow2d(tailpoint, headpoint, **kwds)
except ValueError:
from sage.plot.plot3d.shapes import arrow3d
return arrow3d(tailpoint, headpoint, **kwds)
@rename_keyword(color='rgbcolor')
@options(width=2, rgbcolor=(0,0,1), zorder=2, head=1, linestyle='solid', legend_label=None)
def arrow2d(tailpoint=None, headpoint=None, path=None, **options):
"""
If ``tailpoint`` and ``headpoint`` are provided, returns an arrow from
(xtail, ytail) to (xhead, yhead). If ``tailpoint`` or ``headpoint`` is None and
``path`` is not None, returns an arrow along the path. (See further info on
paths in :class:`bezier_path`).
INPUT:
- ``tailpoint`` - the starting point of the arrow
- ``headpoint`` - where the arrow is pointing to
- ``path`` - the list of points and control points (see bezier_path for
detail) that the arrow will follow from source to destination
- ``head`` - 0, 1 or 2, whether to draw the head at the start (0), end (1)
or both (2) of the path (using 0 will swap headpoint and tailpoint).
This is ignored in 3D plotting.
- ``linestyle`` - (default: ``'solid'``) The style of the line, which is
one of ``'dashed'``, ``'dotted'``, ``'solid'``, ``'dashdot'``,
or ``'--'``, ``':'``, ``'-'``, ``'-.'``, respectively.
- ``width`` - (default: 2) the width of the arrow shaft, in points
- ``color`` - (default: (0,0,1)) the color of the arrow (as an RGB tuple or
a string)
- ``hue`` - the color of the arrow (as a number)
- ``arrowsize`` - the size of the arrowhead
- ``arrowshorten`` - the length in points to shorten the arrow (ignored if
using path parameter)
- ``legend_label`` - the label for this item in the legend
- ``legend_color`` - the color for the legend label
- ``zorder`` - the layer level to draw the arrow-- note that this is
ignored in 3D plotting.
EXAMPLES:
A straight, blue arrow::
sage: arrow2d((1,1), (3,3))
Graphics object consisting of 1 graphics primitive
.. PLOT::
sphinx_plot(arrow2d((1,1), (3,3)))
Make a red arrow::
sage: arrow2d((-1,-1), (2,3), color=(1,0,0))
Graphics object consisting of 1 graphics primitive
.. PLOT::
sphinx_plot(arrow2d((-1,-1), (2,3), color=(1,0,0)))
::
sage: arrow2d((-1,-1), (2,3), color='red')
Graphics object consisting of 1 graphics primitive
.. PLOT::
sphinx_plot(arrow2d((-1,-1), (2,3), color='red'))
You can change the width of an arrow::
sage: arrow2d((1,1), (3,3), width=5, arrowsize=15)
Graphics object consisting of 1 graphics primitive
.. PLOT::
P = arrow2d((1,1), (3,3), width=5, arrowsize=15)
sphinx_plot(P)
Use a dashed line instead of a solid one for the arrow::
sage: arrow2d((1,1), (3,3), linestyle='dashed')
Graphics object consisting of 1 graphics primitive
sage: arrow2d((1,1), (3,3), linestyle='--')
Graphics object consisting of 1 graphics primitive
.. PLOT::
P = arrow2d((1,1), (3,3), linestyle='--')
sphinx_plot(P)
A pretty circle of arrows::
sage: sum([arrow2d((0,0), (cos(x),sin(x)), hue=x/(2*pi)) for x in [0..2*pi,step=0.1]])
Graphics object consisting of 63 graphics primitives
.. PLOT::
P = sum([arrow2d((0,0), (cos(x*0.1),sin(x*0.1)), hue=x/(20*pi)) for x in range(floor(20*pi)+1)])
sphinx_plot(P)
If we want to draw the arrow between objects, for example, the
boundaries of two lines, we can use the ``arrowshorten`` option
to make the arrow shorter by a certain number of points::
sage: L1 = line([(0,0), (1,0)], thickness=10)
sage: L2 = line([(0,1), (1,1)], thickness=10)
sage: A = arrow2d((0.5,0), (0.5,1), arrowshorten=10, rgbcolor=(1,0,0))
sage: L1 + L2 + A
Graphics object consisting of 3 graphics primitives
.. PLOT::
L1 = line([(0,0), (1,0)],thickness=10)
L2 = line([(0,1), (1,1)], thickness=10)
A = arrow2d((0.5,0), (0.5,1), arrowshorten=10, rgbcolor=(1,0,0))
sphinx_plot(L1 + L2 + A)
If BOTH ``headpoint`` and ``tailpoint`` are None, then an empty plot is
returned::
sage: arrow2d(headpoint=None, tailpoint=None)
Graphics object consisting of 0 graphics primitives
We can also draw an arrow with a legend::
sage: arrow((0,0), (0,2), legend_label='up', legend_color='purple')
Graphics object consisting of 1 graphics primitive
.. PLOT::
P = arrow((0,0), (0,2), legend_label='up', legend_color='purple')
sphinx_plot(P)
Extra options will get passed on to :meth:`Graphics.show()`, as long as they are valid::
sage: arrow2d((-2,2), (7,1), frame=True)
Graphics object consisting of 1 graphics primitive
.. PLOT::
sphinx_plot(arrow2d((-2,2), (7,1), frame=True))
::
sage: arrow2d((-2,2), (7,1)).show(frame=True)
"""
from sage.plot.all import Graphics
g = Graphics()
g._set_extra_kwds(Graphics._extract_kwds_for_show(options))
if headpoint is not None and tailpoint is not None:
xtail, ytail = tailpoint
xhead, yhead = headpoint
g.add_primitive(Arrow(xtail, ytail, xhead, yhead, options=options))
elif path is not None:
g.add_primitive(CurveArrow(path, options=options))
elif tailpoint is None and headpoint is None:
return g
else:
raise TypeError('Arrow requires either both headpoint and tailpoint or a path parameter.')
if options['legend_label']:
g.legend(True)
g._legend_colors = [options['legend_color']]
return g
| """
Takes 2D plot and places it in 3D.
EXAMPLES::
sage: A = arrow((0,0),(1,1))[0].plot3d()
sage: A.jmol_repr(A.testing_render_params())[0]
'draw line_1 diameter 2 arrow {0.0 0.0 0.0} {1.0 1.0 0.0} '
Note that we had to index the arrow to get the Arrow graphics
primitive. We can also change the height via the :meth:`Graphics.plot3d`
method, but only as a whole::
sage: A = arrow((0,0),(1,1)).plot3d(3)
sage: A.jmol_repr(A.testing_render_params())[0][0]
'draw line_1 diameter 2 arrow {0.0 0.0 3.0} {1.0 1.0 3.0} '
Optional arguments place both the head and tail outside the
`xy`-plane, but at different heights. This must be done on
the graphics primitive obtained by indexing::
sage: A=arrow((0,0),(1,1))[0].plot3d(3,4)
sage: A.jmol_repr(A.testing_render_params())[0]
'draw line_1 diameter 2 arrow {0.0 0.0 3.0} {1.0 1.0 4.0} '
"""
from sage.plot.plot3d.shapes2 import line3d
options = self._plot3d_options()
options.update(kwds)
return line3d([(self.xtail, self.ytail, ztail), (self.xhead, self.yhead, zhead)], arrow_head=True, **options) |
mixins.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""This module contains additional behavior that can be attached to any given
package.
"""
import collections
import os
import llnl.util.filesystem
__all__ = [
'filter_compiler_wrappers'
]
class PackageMixinsMeta(type):
"""This metaclass serves the purpose of implementing a declarative syntax
for package mixins.
Mixins are implemented below in the form of a function. Each one of them
needs to register a callable that takes a single argument to be run
before or after a certain phase. This callable is basically a method that
gets implicitly attached to the package class by calling the mixin.
"""
_methods_to_be_added = {}
_add_method_before = collections.defaultdict(list)
_add_method_after = collections.defaultdict(list)
@staticmethod
def register_method_before(fn, phase):
"""Registers a method to be run before a certain phase.
Args:
fn: function taking a single argument (self)
phase (str): phase before which fn must run
"""
PackageMixinsMeta._methods_to_be_added[fn.__name__] = fn
PackageMixinsMeta._add_method_before[phase].append(fn)
@staticmethod
def register_method_after(fn, phase):
"""Registers a method to be run after a certain phase.
Args:
fn: function taking a single argument (self)
phase (str): phase after which fn must run
"""
PackageMixinsMeta._methods_to_be_added[fn.__name__] = fn
PackageMixinsMeta._add_method_after[phase].append(fn)
def | (cls, name, bases, attr_dict):
# Add the methods to the class being created
if PackageMixinsMeta._methods_to_be_added:
attr_dict.update(PackageMixinsMeta._methods_to_be_added)
PackageMixinsMeta._methods_to_be_added.clear()
attr_fmt = '_InstallPhase_{0}'
# Copy the phases that needs it to the most derived classes
# in order not to interfere with other packages in the hierarchy
phases_to_be_copied = list(
PackageMixinsMeta._add_method_before.keys()
)
phases_to_be_copied += list(
PackageMixinsMeta._add_method_after.keys()
)
for phase in phases_to_be_copied:
attr_name = attr_fmt.format(phase)
# Here we want to get the attribute directly from the class (not
# from the instance), so that we can modify it and add the mixin
# method to the pipeline.
phase = getattr(cls, attr_name)
# Due to MRO, we may have taken a method from a parent class
# and modifying it may influence other packages in unwanted
# manners. Solve the problem by copying the phase into the most
# derived class.
setattr(cls, attr_name, phase.copy())
# Insert the methods in the appropriate position
# in the installation pipeline.
for phase in PackageMixinsMeta._add_method_before:
attr_name = attr_fmt.format(phase)
phase_obj = getattr(cls, attr_name)
fn_list = PackageMixinsMeta._add_method_before[phase]
for f in fn_list:
phase_obj.run_before.append(f)
# Flush the dictionary for the next class
PackageMixinsMeta._add_method_before.clear()
for phase in PackageMixinsMeta._add_method_after:
attr_name = attr_fmt.format(phase)
phase_obj = getattr(cls, attr_name)
fn_list = PackageMixinsMeta._add_method_after[phase]
for f in fn_list:
phase_obj.run_after.append(f)
# Flush the dictionary for the next class
PackageMixinsMeta._add_method_after.clear()
super(PackageMixinsMeta, cls).__init__(name, bases, attr_dict)
def filter_compiler_wrappers(*files, **kwargs):
"""Substitutes any path referring to a Spack compiler wrapper with the
path of the underlying compiler that has been used.
If this isn't done, the files will have CC, CXX, F77, and FC set to
Spack's generic cc, c++, f77, and f90. We want them to be bound to
whatever compiler they were built with.
Args:
*files: files to be filtered relative to the search root (which is,
by default, the installation prefix)
**kwargs: allowed keyword arguments
after
specifies after which phase the files should be
filtered (defaults to 'install')
relative_root
path relative to prefix where to start searching for
the files to be filtered. If not set the install prefix
will be used as the search root. **It is highly recommended
to set this, as searching from the installation prefix may
affect performance severely in some cases**.
ignore_absent, backup
these two keyword arguments, if present, will be forwarded
to ``filter_file`` (see its documentation for more information
on their behavior)
recursive
this keyword argument, if present, will be forwarded to
``find`` (see its documentation for more information on the
behavior)
"""
after = kwargs.get('after', 'install')
relative_root = kwargs.get('relative_root', None)
filter_kwargs = {
'ignore_absent': kwargs.get('ignore_absent', True),
'backup': kwargs.get('backup', False),
'string': True
}
find_kwargs = {
'recursive': kwargs.get('recursive', False)
}
def _filter_compiler_wrappers_impl(self):
# Compute the absolute path of the search root
root = os.path.join(
self.prefix, relative_root
) if relative_root else self.prefix
# Compute the absolute path of the files to be filtered and
# remove links from the list.
abs_files = llnl.util.filesystem.find(root, files, **find_kwargs)
abs_files = [x for x in abs_files if not os.path.islink(x)]
x = llnl.util.filesystem.FileFilter(*abs_files)
replacements = [
('CC', self.compiler.cc),
('CXX', self.compiler.cxx),
('F77', self.compiler.f77),
('FC', self.compiler.fc)
]
for env_var, compiler_path in replacements:
if env_var in os.environ:
x.filter(os.environ[env_var], compiler_path, **filter_kwargs)
# Remove this linking flag if present (it turns RPATH into RUNPATH)
x.filter('-Wl,--enable-new-dtags', '', **filter_kwargs)
PackageMixinsMeta.register_method_after(
_filter_compiler_wrappers_impl, after
)
| __init__ |
index.ts | import { GridSize } from '@material-ui/core';
import { IMapping, IOption, Triggers } from '../../../types';
export interface ILayouts {
[id: string]: ILayout;
}
export interface ILayoutEntry {
id: string;
type: GroupTypes | ComponentTypes;
triggers?: Triggers[];
}
export interface ILayoutGroup extends ILayoutEntry {
children: string[];
dataModelBindings?: IDataModelBindings;
maxCount: number;
textResourceBindings?: ITextResourceBindings;
tableHeaders?: string[];
edit?: IGroupEditProperties;
}
export interface ILayoutComponent extends ILayoutEntry {
dataModelBindings: IDataModelBindings;
isValid?: boolean;
readOnly?: boolean;
optionsId?: string;
options?: IOption[];
disabled?: boolean;
required?: boolean;
textResourceBindings: ITextResourceBindings;
formData?: any;
grid?: IGrid;
}
|
export type ComponentTypes =
| 'AddressComponent'
| 'AttachmentList'
| 'Button'
| 'Checkboxes'
| 'Datepicker'
| 'Dropdown'
| 'FileUpload'
| 'FileUploadWithTag'
| 'Header'
| 'Input'
| 'NavigationButtons'
| 'InstantiationButton'
| 'Paragraph'
| 'Image'
| 'RadioButtons'
| 'Summary'
| 'TextArea'
| 'NavigationBar'
| 'Likert'
| 'Panel';
export interface IDataModelBindings {
[id: string]: string;
}
export interface ITextResourceBindings {
[id: string]: string;
}
export type ILayout = Array<ILayoutComponent | ILayoutGroup>;
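// Illustrative example (not part of the original source; ids and binding
// paths are made up): a minimal ILayout pairing one group with one input.
//
//   const layout: ILayout = [
//     { id: 'group-1', type: 'Group', children: ['input-1'], maxCount: 1 },
//     {
//       id: 'input-1',
//       type: 'Input',
//       dataModelBindings: { simpleBinding: 'Person.Name' },
//       textResourceBindings: { title: 'person.name.title' },
//     },
//   ];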
export interface ISelectionComponentProps extends ILayoutComponent {
options?: IOption[];
optionsId?: string;
mapping?: IMapping;
secure?: boolean;
}
export interface IGrid extends IGridStyling {
labelGrid?: IGridStyling;
innerGrid?: IGridStyling;
}
export interface IGridStyling {
xs?: GridSize;
sm?: GridSize;
md?: GridSize;
lg?: GridSize;
xl?: GridSize;
}
export interface IGroupEditProperties {
mode?: 'hideTable' | 'showTable' | 'showAll' | 'likert';
filter?: IGroupFilter[];
addButton?: boolean;
saveButton?: boolean;
deleteButton?: boolean;
multiPage?: boolean;
openByDefault?: boolean;
}
export interface IGroupFilter {
key: string;
value: string;
} | export type GroupTypes = 'Group' | 'group'; |
series_ids_pool.go | package series
import "sync"
| var Uint32Pool = _uint32Pool{
Pool: sync.Pool{
New: func() interface{} {
item := make([]uint32, ScanBufSize)
return &item
}}}
type _uint32Pool struct {
sync.Pool
}
func (p *_uint32Pool) Get() *[]uint32 {
return p.Pool.Get().(*[]uint32)
}
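// Usage sketch (illustrative only): borrow a buffer, reslice it, and return
// it when done so later scans can reuse the allocation.
//
//	buf := Uint32Pool.Get()
//	ids := (*buf)[:0]
//	ids = append(ids, 1, 2, 3)
//	Uint32Pool.Put(buf)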
func (p *_uint32Pool) Put(item *[]uint32) {
p.Pool.Put(item)
} | const ScanBufSize = 4096
// Uint32Pool is a singleton pool for reusing []uint32 buffers of length ScanBufSize (4096) |
views.py | import datetime
from django.shortcuts import render
from django.views import View
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.db.models.deletion import ProtectedError
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from main.models import Etiqueta, Cuenta, Movimiento, FiltroMovimientos, FiltroCuentas
import main.functions as functions
class IndexView(View):
"""Página principal"""
def get(self, request, *args, **kwargs):
context = { 'tab': 'principal' }
return render(request, 'main/index.html', context)
class CuentasView(LoginRequiredMixin, View):
"""Listado de cuentas. Permite añadir una cuenta nueva."""
def get(self, request, pag=1, *args, **kwargs):
lista_cuentas = Cuenta.objects.all()
lista_etiquetas = Etiqueta.objects.all().order_by('id')
# If the filter does not exist, create it with the default values
filtro = FiltroCuentas.objects.all()
if len(filtro) == 0:
filtro = FiltroCuentas()
filtro.save()
else:
filtro = filtro[0]
# apply the filter
if filtro.num:
lista_cuentas = lista_cuentas.filter(pk=filtro.num)
if filtro.nombre:
lista_cuentas = lista_cuentas.filter(nombre__contains=filtro.nombre)
if filtro.etiqueta:
lista_cuentas = lista_cuentas.filter(etiqueta=filtro.etiqueta)
# apply the sort order
orden = '-' if not filtro.ascendiente else ''
lista_cuentas = lista_cuentas.order_by(orden+filtro.campo)
# pagination: 10 results per page
paginacion, num_cuentas, pag, lista_cuentas = functions.get_pagination(pag, lista_cuentas)
context = {
'tab': 'cuentas',
'lista_cuentas': lista_cuentas,
'lista_etiquetas': lista_etiquetas,
'filtro': filtro,
'paginacion': paginacion,
'pagina_actual': pag,
'num_cuentas': num_cuentas,
}
return render(request, 'main/cuentas.html', context)
def post(self, request, *args, **kwargs):
nueva_cuenta = Cuenta(
num = request.POST['num'].strip(),
nombre = request.POST['nombre']
)
nueva_cuenta.save()
e = request.POST['etiqueta']
if len(e):
nombres_etiquetas = e.split(', ')
nueva_cuenta.etiqueta.set(nombres_etiquetas)
nueva_cuenta.save()
return HttpResponseRedirect(reverse('main:cuentas'))
class AsientosView(LoginRequiredMixin, View):
"""Listado de asientos (o movimientos). Permite añadir un asiento
simple nuevo.
"""
def get(self, request, pag=1, *args, **kwargs):
lista_movimientos = Movimiento.objects.all().order_by('num')
lista_cuentas = Cuenta.objects.all().order_by('num')
# If the filter does not exist, create it with the default values
filtro = FiltroMovimientos.objects.all()
if len(filtro) == 0:
filtro = FiltroMovimientos()
filtro.save()
else:
filtro = filtro[0]
# apply the filter
if filtro.fecha_inicial:
fecha = datetime.date.fromisoformat(filtro.fecha_inicial)
lista_movimientos = lista_movimientos.filter(fecha__gte=fecha)
if filtro.fecha_final:
fecha = datetime.date.fromisoformat(filtro.fecha_final)
lista_movimientos = lista_movimientos.filter(fecha__lte=fecha)
if filtro.cuenta:
lista_movimientos = lista_movimientos.filter(cuenta=filtro.cuenta)
if filtro.descripcion:
lista_movimientos = lista_movimientos.filter(descripcion__contains=filtro.descripcion)
if filtro.asiento:
lista_movimientos = lista_movimientos.filter(num=int(filtro.asiento))
total_haber = total_debe = 0
for m in lista_movimientos:
total_debe += m.debe
total_haber += m.haber
total = total_haber - total_debe
# apply the sort order
orden = '-' if not filtro.ascendiente else ''
lista_movimientos = lista_movimientos.order_by(orden+filtro.campo)
# pagination: 25 results per page
paginacion, num_movimientos, pag, lista_movimientos = functions.get_pagination(pag, lista_movimientos)
context = {
'tab': 'asientos',
'lista_movimientos': lista_movimientos,
'lista_cuentas': lista_cuentas,
'filtro': filtro,
'total_debe': total_debe,
'total_haber': total_haber,
'total': total,
'paginacion': paginacion,
'pagina_actual': pag,
'num_movimientos': num_movimientos,
}
return render(request, 'main/asientos.html', context)
def post(self, request, *args, **kwargs):
num = functions.max_num_asiento()
pk_debe = request.POST['debe'].split(':')[0]
pk_haber = request.POST['haber'].split(':')[0]
simple = {
'num': num+1,
'fecha': request.POST['fecha'],
'descripcion': request.POST['descripcion'],
'valor': request.POST['valor'],
'debe': Cuenta.objects.get(pk=pk_debe),
'haber': Cuenta.objects.get(pk=pk_haber)
}
functions.crea_asiento_simple(simple)
return HttpResponseRedirect(reverse('main:asientos'))
class ModificarAsientoView(LoginRequiredMixin, View):
def get(self, request, num):
lista_movimientos = [ a for a in Movimiento.objects.all() if a.num == num ]
lista_cuentas = Cuenta.objects.all()
for movimiento in lista_movimientos:
fecha_movimiento = f'{movimiento.fecha.year}-{movimiento.fecha.month:02d}-{movimiento.fecha.day:02d}'
movimiento.fecha = fecha_movimiento
context = {
'tab': 'asientos',
'num_asiento': num,
'lista_movimientos': lista_movimientos,
'lista_cuentas': lista_cuentas
}
return render(request, 'main/modificar_asiento.html', context)
def post(self, request, *args, **kwargs):
num_items = int((len(request.POST) - 1) / 7)
for i in range(num_items):
movimiento = Movimiento.objects.get(id=request.POST[f'id{i}'])
movimiento.num = int(request.POST[f'num{i}'])
movimiento.fecha = request.POST[f'fecha{i}']
movimiento.descripcion = request.POST[f'descripcion{i}']
movimiento.debe = float(request.POST[f'debe{i}'])
movimiento.haber = float(request.POST[f'haber{i}'])
num_cuenta = int(request.POST[f'cuenta{i}'].split(':')[0])
cuenta = Cuenta.objects.get(num=num_cuenta)
movimiento.cuenta = cuenta
movimiento.save()
return HttpResponseRedirect(reverse('main:asientos'))
class ModificarCuentaView(LoginRequiredMixin, View):
def get(self, request, num):
context = {
'tab': 'cuentas',
'cuenta': Cuenta.objects.get(pk=num),
}
return render(request, 'main/modificar_cuenta.html', context)
def post(self, request, *args, **kwargs):
cuenta = Cuenta.objects.get(pk=request.POST['num'])
cuenta.nombre = request.POST['nombre']
etiquetas = request.POST['etiqueta'].split(', ')
# validate the labels
lista_etiquetas = Etiqueta.objects.all()
etiquetas_sin_error = list()
for e in etiquetas:
if lista_etiquetas.filter(id=e):
etiquetas_sin_error.append(e)
cuenta.etiqueta.set(etiquetas_sin_error)
cuenta.save()
return HttpResponseRedirect(reverse('main:cuentas'))
@login_required
def borrar_movimiento(request, pk, pagina, num_asiento=None):
movimiento = Movimiento.objects.get(pk=pk)
movimiento.delete()
if num_asiento:
return HttpResponseRedirect(reverse(f'main:{pagina}', args=[num_asiento]))
else:
return HttpResponseRedirect(reverse(f'main:{pagina}'))
@login_required
def anadir_movimiento(request, num, fecha):
movimiento = Movimiento(
num = num,
fecha = fecha,
descripcion = '',
debe = 0,
haber = 0,
cuenta = Cuenta.objects.all()[0]
)
movimiento.save()
return HttpResponseRedirect(reverse(f'main:modificar_asiento', args=[num]))
@login_required
def borrar_cuenta(request, pk):
cuenta = Cuenta.objects.get(pk=pk)
try:
cuenta.delete()
except ProtectedError as e:
aviso = {
'mensaje': "Esta cuenta no se puede borrar, porque tiene movimientos asociados.",
'nuevo_url': reverse('main:cuentas'),
}
context = {
'tab': 'cuentas',
'aviso': aviso,
}
return render(request, 'main/cuentas.html', context)
return HttpResponseRedirect(reverse('main:cuentas'))
class CargarCuentas(LoginRequiredMixin, View):
def get(sel | rgarAsientos(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
return HttpResponseRedirect(reverse('main:asientos'))
def post(self, request, *args, **kwargs):
simple, compleja = functions.extraer_asientos(request.FILES['file'])
movimientos_anadidos, errores_simple, errores_compleja = functions.crear_asientos(simple, compleja)
context = {
'tab': 'asientos',
'movimientos_anadidos': movimientos_anadidos,
'errores_simple': errores_simple,
'errores_compleja': errores_compleja,
'num_errores': len(errores_simple) + len(errores_compleja)
}
return render(request, 'main/cargar_asientos.html', context)
@login_required
def filtro_cuentas(request):
if request.method == 'POST':
filtro = FiltroCuentas.objects.all()[0]
if request.POST['accion_filtro'] == 'aplicar':
filtro.num = request.POST['f_num']
filtro.nombre = request.POST['f_nombre']
filtro.etiqueta = request.POST['f_etiqueta']
filtro.save()
elif request.POST['accion_filtro'] == 'borrar':
filtro.num = ''
filtro.nombre = ''
filtro.etiqueta = ''
filtro.save()
else:
pass
return HttpResponseRedirect(reverse('main:cuentas'))
@login_required
def filtro_asientos(request):
if request.method == 'POST':
if request.POST['accion_filtro'] == 'aplicar':
filtro = FiltroMovimientos.objects.all()[0]
filtro.fecha_inicial = request.POST['f_fecha_inicial']
filtro.fecha_final = request.POST['f_fecha_final']
filtro.descripcion = request.POST['f_descripcion']
filtro.cuenta = request.POST['f_cuenta'].split(':')[0]
filtro.asiento = request.POST['f_asiento']
filtro.save()
elif request.POST['accion_filtro'] == 'borrar':
filtro = FiltroMovimientos.objects.all()[0]
filtro.fecha_inicial = ''
filtro.fecha_final = ''
filtro.descripcion = ''
filtro.cuenta = ''
filtro.asiento = ''
filtro.save()
else:
pass
return HttpResponseRedirect(reverse('main:asientos'))
@login_required
def cambiar_orden(request, tipo, campo):
if tipo == 'asientos':
filtro = FiltroMovimientos.objects.all()[0]
elif tipo == 'cuentas':
filtro = FiltroCuentas.objects.all()[0]
else:
return HttpResponseRedirect(reverse('main:index'))
if filtro.campo == campo.lower():
filtro.ascendiente = not filtro.ascendiente
else:
filtro.campo = campo.lower()
filtro.ascendiente = True
filtro.save()
return HttpResponseRedirect(reverse('main:'+tipo))
@login_required
def gestionar_etiqueta(request):
"""Gestiona el formulario para añadir o borrar etiquetas, dentro de la
vista de cuentas. Solo gestiona peticiones de tipo post.
"""
if request.method == 'POST':
accion = request.POST['accion_etiqueta']
id = request.POST['e_id']
nombre = request.POST['e_nombre']
if accion == 'anadir':
Etiqueta.objects.create(
id = id,
nombre = nombre,
)
elif accion == 'borrar':
e = Etiqueta.objects.filter(id=id)
if len(e):
e[0].delete()
else:
pass
return HttpResponseRedirect(reverse('main:cuentas'))
class InformesView(LoginRequiredMixin, View):
"""Página principal"""
def get(self, request, *args, **kwargs):
lista_cuentas = Cuenta.objects.all().order_by('num')
lista_etiquetas = Etiqueta.objects.all().order_by('id')
context = {
'tab': 'informes',
'lista_cuentas': lista_cuentas,
'lista_etiquetas': lista_etiquetas,
'df': {'empty': True },
}
return render(request, 'main/informes.html', context)
def post(self, request):
lista_cuentas = Cuenta.objects.all().order_by('num')
lista_etiquetas = Etiqueta.objects.all().order_by('id')
movimientos = Movimiento.objects.all()
movimientos = functions.filtra_movimientos(request.POST, movimientos)
df = functions.genera_informe(request.POST['f_tipo'], movimientos)
titulo, subtitulo = functions.titulo_informe(request.POST)
graph = functions.grafico_informe(df)
context = {
'tab': 'informes',
'lista_cuentas': lista_cuentas,
'lista_etiquetas': lista_etiquetas,
'titulo': titulo,
'subtitulo': subtitulo,
'df': df,
'filtro': request.POST,
'graph': graph,
}
return render(request, 'main/informes.html', context)
@login_required
def borrar_multiples_cuentas(request):
if request.method == 'POST':
errors = list()
for checked in request.POST.keys():
if not checked.startswith('check'):
continue
cuenta = Cuenta.objects.get(pk=request.POST[checked])
try:
cuenta.delete()
except ProtectedError as e:
errors.append(cuenta)
context = { 'tab': 'cuentas' }
if errors:
nombres = [ c.nombre for c in errors ]
nombres = ", ".join(nombres)
aviso = {
'mensaje': f"La(s) siguiente(s) cuentas no se pueden borrar, porque tienen movimientos asociados: {nombres}.",
'nuevo_url': reverse('main:cuentas'),
}
context['aviso'] = aviso
return render(request, 'main/cuentas.html', context)
return HttpResponseRedirect(reverse('main:cuentas'))
@login_required
def borrar_multiples_movimientos(request):
if request.method == 'POST':
errors = list()
for checked in request.POST.keys():
if not checked.startswith('check'):
continue
movimiento = Movimiento.objects.get(pk=request.POST[checked])
movimiento.delete()
return HttpResponseRedirect(reverse('main:asientos'))
| f, request, *args, **kwargs):
return HttpResponseRedirect(reverse('main:cuentas'))
def post(self, request, *args, **kwargs):
datos_excel = functions.extraer_cuentas(request.FILES['file'])
sobreescribir = request.POST.get('sobreescribir', False)
cuentas_anadidas, cuentas_error = functions.crear_cuentas(datos_excel, sobreescribir)
context = {
'tab': 'cuentas',
'cuentas_anadidas': cuentas_anadidas,
'cuentas_error': cuentas_error,
}
return render(request, 'main/cargar_cuentas.html', context)
class Ca |
models.py | """
This model supports user labeling of resources in various ways.
For a User u, this instantiates a subobject u.ulabels (like u.uaccess)
that contains all the labeling functions.
Functions include:
* u.ulabels.label_resource(r, label)
instantiates a label for a resource. Resources can have multiple labels.
* u.ulabels.unlabel_resource(r, label)
removes a label; there can be many labels.
* u.ulabels.clear_resource_labels(r)
removes all labels for a resource
* u.ulabels.favorite_resource(r)
favorites a resource
* u.ulabels.unfavorite_resource(r)
removes a favorite
and the reporting functions
* u.ulabels.labeled_resources
A queryset of resources that are labeled.
* u.ulabels.favorited_resources
A queryset of resources that have been favorited
* u.ulabels.get_resources_with_label(label)
Get a queryset of resources possessing a specific label.
For a BaseResource r, this also adds a subobject rlabels that reports on labels for resources
* r.rlabels.get_labels(u)
* r.rlabels.is_favorite(u)
* r.rlabels.is_mine(u)
"""
# TODO: combine label filtering with access control
import re
from django.contrib.auth.models import User
from django.db import models
from django.db import transaction
from django.db.models import Q
from hs_core.models import BaseResource
class FlagCodes(object):
"""
Flag codes describe the meanings of per-user flags for a resource.
* 1 or FlagCodes.FAVORITE:
marked as a favorite on "My Resources" page
* 2 or FlagCodes.MINE:
marked as being part of "My Resources" on "Discover" page.
* 3 or FlagCodes.OPEN_WITH_APP:
marked as a web app resource to be shown in "Open With" lists.
"""
FAVORITE = 1
MINE = 2
OPEN_WITH_APP = 3
FLAG_CHOICES = (
(FAVORITE, 'Favorite'), # marked as favorite in my resources page.
(MINE, 'Mine'), # marked as mine in discovery page.
(OPEN_WITH_APP, 'Open With App'), # marked as an open-with app
)
class UserResourceLabels(models.Model):
"""
Labels of a user for a resource
This model stores labels of an individual user, like an access control list. T
"""
start = models.DateTimeField(editable=False, auto_now=True)
user = models.ForeignKey(User, null=False, editable=False,
related_name='u2url', # unused but must be defined and unique
help_text='user assigning a label',
on_delete=models.CASCADE)
resource = models.ForeignKey(BaseResource, null=False, editable=False,
related_name='r2url', # unused but must be defined and unique
help_text='resource to which a label applies',
on_delete=models.CASCADE)
label = models.TextField(null=False)
class Meta:
unique_together = ('user', 'resource', 'label')
class UserResourceFlags(models.Model):
"""
Per-user flagging of resources.
This model stores flags of an individual user, like an access
control list; there are several kinds of flags, documented in FlagCodes.
These are similar in implementation but differ in semantics.
"""
kind = models.IntegerField(choices=FlagCodes.FLAG_CHOICES,
editable=False,
default=FlagCodes.FAVORITE)
start = models.DateTimeField(editable=False, auto_now=True)
user = models.ForeignKey(User, null=False, editable=False,
related_name='u2urf', # unused but must be defined and unique
help_text='user assigning a flag',
on_delete=models.CASCADE)
resource = models.ForeignKey(BaseResource, null=False, editable=False,
related_name="r2urf", # unused but must be defined and unique
help_text='resource to which a flag applies',
on_delete=models.CASCADE)
class Meta:
unique_together = ('user', 'resource', 'kind')
class UserStoredLabels(models.Model):
"""
Storage class for persistent labels that are reusable across different kinds of objects
"""
user = models.ForeignKey(User, null=False,
help_text='user who stored the label',
related_name='ul2usl',
on_delete=models.CASCADE)
label = models.TextField(help_text='label to be stored by user')
class Meta:
unique_together = ('user', 'label')
class UserLabels(models.Model):
"""
Projection class puts methods and content inside basic User object
so that one can access things easily from that context.
This model is injected into the User class as the related name "ulabels".
Thus for a User u, u.ulabels is this model.
"""
user = models.OneToOneField(User,
editable=False,
null=True,
related_name='ulabels', # induced field in User class.
related_query_name='ulabels',
on_delete=models.CASCADE)
##########################################
# PUBLIC FUNCTIONS: resources
##########################################
@property
def labeled_resources(self):
"""
Get a QuerySet of resources labeled by a user.
This eliminates duplicates.
"""
return BaseResource.objects.filter(r2url__user=self.user).distinct()
def get_flagged_resources(self, this_flagcode):
"""
Get resources with a specific flag.
"""
if __debug__: # during testing only, check argument types and preconditions
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
return BaseResource.objects.filter(r2urf__user=self.user,
r2urf__kind=this_flagcode)
@property
def favorited_resources(self):
"""
Get a QuerySet of resources favorited by a user.
This eliminates duplicates.
"""
return self.get_flagged_resources(FlagCodes.FAVORITE)
@property
def my_resources(self):
"""
Get a QuerySet of resources marked as mine (add to my resources) by a user.
This eliminates duplicates.
"""
return self.get_flagged_resources(FlagCodes.MINE)
@property
def resources_of_interest(self):
"""
Get a QuerySet of resources the user has tagged in any way.
"""
return BaseResource.objects.filter(Q(r2url__user=self.user) | Q(r2urf__user=self.user)).distinct()
def get_resources_with_label(self, this_label):
"""
Get a QuerySet of resources with a specific label.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_label, str)
label_string = UserLabels.clean_label(this_label) # remove leading and trailing spaces
return BaseResource.objects.filter(r2url__user=self.user,
r2url__label__exact=label_string)\
.distinct()\
.order_by('r2url__label')
@property
def user_labels(self):
"""
Get a QuerySet of labels in use now.
"""
return UserResourceLabels.objects.values_list('label', flat=True)\
.filter(user=self.user)\
.distinct().order_by('label')
######################################
# Label a resource
######################################
@staticmethod
def clean_label(name):
label_string = re.sub('/', r'', name) # no /'s
label_string = label_string.strip() # no leading or trailing whitespace
label_string = re.sub(r'\s+', r' ', label_string) # collapse multiple whitespace, including tabs
return label_string
def label_resource(self, this_resource, this_label):
"""
Assign a label to a resource
Users are allowed to label any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert isinstance(this_label, str)
# remove leading and trailing spaces
label_string = UserLabels.clean_label(this_label)
with transaction.atomic(): # empirically, get_or_create is not atomic.
UserResourceLabels.objects.get_or_create(resource=this_resource,
label=label_string,
user=self.user)
def unlabel_resource(self, this_resource, this_label):
"""
Remove one label from a resource
Users are allowed to label any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert isinstance(this_label, str)
# remove leading and trailing spaces
label_string = UserLabels.clean_label(this_label)
UserResourceLabels.objects.filter(resource=this_resource,
label__exact=label_string,
user=self.user).delete()
def clear_resource_labels(self, this_resource):
"""
Clear all labels for a resource
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
UserResourceLabels.objects.filter(resource=this_resource,
user=self.user).delete()
def remove_resource_label(self, this_label):
"""
clear a label from the labeling system.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_label, str)
UserResourceLabels.objects.filter(label=this_label, user=self.user)\
.delete()
##########################################
# general flagging of resources
##########################################
def flag_resource(self, this_resource, this_flagcode):
"""
flag a resource with a specific flag code from FlagCodes
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because flagging information is private.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
with transaction.atomic(): # empirically, get_or_create is not atomic.
UserResourceFlags.objects.get_or_create(resource=this_resource,
kind=this_flagcode,
user=self.user)
def unflag_resource(self, this_resource, this_flagcode):
"""
unflag a resource with a specific flag.
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because flagging information is private.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
UserResourceFlags.objects.filter(user=self.user,
resource=this_resource,
kind=this_flagcode).delete()
def | (self, this_flagcode):
"""
remove all flags of a specific kind for a user
"""
UserResourceFlags.objects.filter(user=self.user,
kind=this_flagcode)\
.delete()
##########################################
# favorite resources
##########################################
def favorite_resource(self, this_resource):
"""
Mark a resource as favorite.
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
self.flag_resource(this_resource, FlagCodes.FAVORITE)
def unfavorite_resource(self, this_resource):
"""
Clear favorite label for a resource
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
self.unflag_resource(this_resource, FlagCodes.FAVORITE)
##########################################
# my resources
##########################################
def claim_resource(self, this_resource):
"""
Label a resource as 'MINE' (adds to my resources).
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
self.flag_resource(this_resource, FlagCodes.MINE)
def unclaim_resource(self, this_resource):
"""
Clear 'MINE' label for a resource (removes from my resources)
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
self.unflag_resource(this_resource, FlagCodes.MINE)
##########################################
# open with app
##########################################
def add_open_with_app(self, this_resource):
"""
Mark a webapp resource as open-with-app
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
The calling function should make sure resource is a webapp resource
"""
self.flag_resource(this_resource, FlagCodes.OPEN_WITH_APP)
def remove_open_with_app(self, this_resource):
"""
Unmark a webapp resource as open-with-app
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
The calling function should make sure resource is a webapp resource
"""
self.unflag_resource(this_resource, FlagCodes.OPEN_WITH_APP)
##########################################
# routines that apply to all kinds of annotations
##########################################
def clear_resource_all(self, this_resource):
"""
Clear all annotations for a resource
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
UserResourceLabels.objects\
.filter(resource=this_resource,
user=self.user)\
.delete()
UserResourceFlags.objects\
.filter(resource=this_resource,
user=self.user)\
.delete()
##########################################
# save unused labels
##########################################
def save_label(self, this_label):
"""
Save a label for use later.
Users are allowed to label any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
label_string = UserLabels.clean_label(this_label) # remove leading and trailing spaces
with transaction.atomic(): # empirically, get_or_create is not atomic.
UserStoredLabels.objects.get_or_create(label=label_string, user=self.user)
def unsave_label(self, this_label):
"""
Remove the specified saved label.
"""
# remove leading and trailing spaces
label_string = UserLabels.clean_label(this_label)
UserStoredLabels.objects.filter(label__exact=label_string, user=self.user).delete()
# remove all uses of that label from resources.
self.remove_resource_label(label_string)
def clear_saved_labels(self):
"""
Clear all saved labels for a user
"""
UserStoredLabels.objects.filter(user=self.user).delete()
@property
def saved_labels(self):
"""
Return a QuerySet of saved labels.
"""
return UserStoredLabels.objects.filter(user=self.user).values_list('label', flat=True).distinct()
class ResourceLabels(models.Model):
"""
For a BaseResource r, r.rlabels is this model. It contains functions relevant to resources.
"""
resource = models.OneToOneField(BaseResource,
editable=False,
null=True,
related_name='rlabels',
related_query_name='rlabels',
on_delete=models.CASCADE)
def get_users(self):
"""
Return a QuerySet of all users who have labeled this resource.
"""
return User.objects.filter(Q(u2url__resource=self.resource) | Q(u2urf__resource=self.resource))
def get_labels(self, this_user):
"""
Return a QuerySet of all user assigned labels for a resource
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_user, User)
labels = UserResourceLabels.objects\
.values_list('label', flat=True)\
.filter(user=this_user,
resource=self.resource)\
.order_by("label").all()
return labels
def is_flagged(self, this_user, this_flagcode):
"""
Return True if this resource has been flagged by a given user
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_user, User)
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
return UserResourceFlags.objects.filter(user=this_user,
resource=self.resource,
kind=this_flagcode).exists()
def is_favorite(self, this_user):
"""
Return True if this resource has been favorited by a given user
"""
return self.is_flagged(this_user, FlagCodes.FAVORITE)
def is_mine(self, this_user):
"""
Return True if this resource has been labeled as mine by a given user
"""
return self.is_flagged(this_user, FlagCodes.MINE)
def is_open_with_app(self, this_user):
"""
Return True if this resource has been set as open-with-app by a given user
"""
return self.is_flagged(this_user, FlagCodes.OPEN_WITH_APP)
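# Usage sketch (hypothetical): reading flag state through a resource's reverse
# relation. Assumes `res` is a BaseResource and `u` is a User.
#
#     if res.rlabels.is_favorite(u):
#         ...
#     mine = [r for r in resources if r.rlabels.is_mine(u)]
#     labels = res.rlabels.get_labels(u)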
| clear_all_flags |
graphviz.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This module provides linkage between RegionInferenceContext and
//! libgraphviz traits, specialized to attaching borrowck analysis
//! data to rendered labels.
use dot::{self, IntoCow};
use rustc_data_structures::indexed_vec::Idx;
use std::borrow::Cow;
use std::io::{self, Write};
use super::*;
impl<'tcx> RegionInferenceContext<'tcx> {
/// Write out the region constraint graph.
pub(crate) fn dump_graphviz(&self, mut w: &mut Write) -> io::Result<()> {
dot::render(self, &mut w)
}
}
impl<'this, 'tcx> dot::Labeller<'this> for RegionInferenceContext<'tcx> {
type Node = RegionVid;
type Edge = Constraint;
fn graph_id(&'this self) -> dot::Id<'this> {
dot::Id::new("RegionInferenceContext").unwrap()
}
fn node_id(&'this self, n: &RegionVid) -> dot::Id<'this> {
dot::Id::new(format!("r{}", n.index())).unwrap()
}
fn node_shape(&'this self, _node: &RegionVid) -> Option<dot::LabelText<'this>> {
Some(dot::LabelText::LabelStr(Cow::Borrowed("box")))
}
fn node_label(&'this self, n: &RegionVid) -> dot::LabelText<'this> {
dot::LabelText::LabelStr(format!("{:?}", n).into_cow())
}
fn edge_label(&'this self, e: &Constraint) -> dot::LabelText<'this> {
dot::LabelText::LabelStr(format!("{:?}", e.point).into_cow())
}
}
impl<'this, 'tcx> dot::GraphWalk<'this> for RegionInferenceContext<'tcx> {
type Node = RegionVid;
type Edge = Constraint;
fn nodes(&'this self) -> dot::Nodes<'this, RegionVid> {
let vids: Vec<RegionVid> = self.definitions.indices().collect();
vids.into_cow()
}
fn edges(&'this self) -> dot::Edges<'this, Constraint> |
// Render `a: b` as `a <- b`, indicating the flow
// of data during inference.
fn source(&'this self, edge: &Constraint) -> RegionVid {
edge.sub
}
fn target(&'this self, edge: &Constraint) -> RegionVid {
edge.sup
}
}
| {
(&self.constraints[..]).into_cow()
} |
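// Usage sketch (not part of this module): writing the graph to disk and
// rendering it with Graphviz. Assumes `regioncx` is a RegionInferenceContext.
//
//     let mut file = std::fs::File::create("regions.dot")?;
//     regioncx.dump_graphviz(&mut file)?;
//     // then: `dot -Tpdf regions.dot -o regions.pdf`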
storage.py | import alembic.command
import alembic.config
import alembic.migration
import alembic.script
from collections import defaultdict
import copy
from datetime import datetime
import json
import logging
import os
import six
from sqlalchemy.engine import create_engine
from sqlalchemy.engine import Engine # NOQA
from sqlalchemy.exc import IntegrityError
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy import orm
import sys
import threading
import uuid
import optuna
from optuna import distributions
from optuna.storages.base import BaseStorage
from optuna.storages.base import DEFAULT_STUDY_NAME_PREFIX
from optuna.storages.rdb import models
from optuna import structs
from optuna import type_checking
from optuna import version
if type_checking.TYPE_CHECKING:
from typing import Any # NOQA
from typing import Dict # NOQA
from typing import List # NOQA
from typing import Optional # NOQA
class RDBStorage(BaseStorage):
"""Storage class for RDB backend.
This class is not supposed to be directly accessed by library users.
Args:
url: URL of the storage.
engine_kwargs:
A dictionary of keyword arguments that is passed to
:func:`sqlalchemy.engine.create_engine`.
enable_cache:
Flag to control whether to enable storage layer caching.
If this flag is set to :obj:`True` (the default), the finished trials are
cached in memory and never re-fetched from the storage.
Otherwise, the trials are fetched from the storage whenever they are needed.
"""
def __init__(self, url, engine_kwargs=None, enable_cache=True, skip_compatibility_check=False):
# type: (str, Optional[Dict[str, Any]], bool, bool) -> None
engine_kwargs = engine_kwargs or {}
url = self._fill_storage_url_template(url)
try:
self.engine = create_engine(url, **engine_kwargs)
except ImportError as e:
raise ImportError('Failed to import DB access module for the specified storage URL. '
'Please install appropriate one. (The actual import error is: ' +
str(e) + '.)')
self.scoped_session = orm.scoped_session(orm.sessionmaker(bind=self.engine))
models.BaseModel.metadata.create_all(self.engine)
self.logger = optuna.logging.get_logger(__name__)
self._version_manager = _VersionManager(url, self.engine, self.scoped_session)
if not skip_compatibility_check:
self._version_manager.check_table_schema_compatibility()
self._finished_trials_cache = _FinishedTrialsCache(enable_cache)
def create_new_study_id(self, study_name=None):
# type: (Optional[str]) -> int
session = self.scoped_session()
if study_name is None:
study_name = self._create_unique_study_name(session)
study = models.StudyModel(study_name=study_name, direction=structs.StudyDirection.NOT_SET)
session.add(study)
if not self._commit_with_integrity_check(session):
raise structs.DuplicatedStudyError(
"Another study with name '{}' already exists. "
"Please specify a different name, or reuse the existing one "
"by setting `load_if_exists` (for Python API) or "
"`--skip-if-exists` flag (for CLI).".format(study_name))
self.logger.info('A new study created with name: {}'.format(study.study_name))
return study.study_id
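# Usage sketch (hypothetical): creating a study directly through the storage
# layer. Most callers go through `optuna.create_study()` instead.
#
#     storage = RDBStorage('sqlite:///example.db')
#     study_id = storage.create_new_study_id('my-study')
#     storage.set_study_direction(study_id, structs.StudyDirection.MINIMIZE)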
@staticmethod
def _create_unique_study_name(session):
# type: (orm.Session) -> str
while True:
study_uuid = str(uuid.uuid4())
study_name = DEFAULT_STUDY_NAME_PREFIX + study_uuid
study = models.StudyModel.find_by_name(study_name, session)
if study is None:
break
return study_name
# TODO(sano): Prevent simultaneously setting different direction in distributed environments.
def set_study_direction(self, study_id, direction):
# type: (int, structs.StudyDirection) -> None
session = self.scoped_session()
study = models.StudyModel.find_or_raise_by_id(study_id, session)
if study.direction != structs.StudyDirection.NOT_SET and study.direction != direction:
raise ValueError('Cannot overwrite study direction from {} to {}.'.format(
study.direction, direction))
study.direction = direction
self._commit(session)
def set_study_user_attr(self, study_id, key, value):
# type: (int, str, Any) -> None
session = self.scoped_session()
study = models.StudyModel.find_or_raise_by_id(study_id, session)
attribute = models.StudyUserAttributeModel.find_by_study_and_key(study, key, session)
if attribute is None:
attribute = models.StudyUserAttributeModel(
study_id=study_id, key=key, value_json=json.dumps(value))
session.add(attribute)
else:
attribute.value_json = json.dumps(value)
self._commit_with_integrity_check(session)
def set_study_system_attr(self, study_id, key, value):
# type: (int, str, Any) -> None
session = self.scoped_session()
study = models.StudyModel.find_or_raise_by_id(study_id, session)
attribute = models.StudySystemAttributeModel.find_by_study_and_key(study, key, session)
if attribute is None:
attribute = models.StudySystemAttributeModel(
study_id=study_id, key=key, value_json=json.dumps(value))
session.add(attribute)
else:
attribute.value_json = json.dumps(value)
self._commit_with_integrity_check(session)
def get_study_id_from_name(self, study_name):
# type: (str) -> int
session = self.scoped_session()
study = models.StudyModel.find_or_raise_by_name(study_name, session)
return study.study_id
def get_study_id_from_trial_id(self, trial_id):
# type: (int) -> int
session = self.scoped_session()
trial = models.TrialModel.find_or_raise_by_id(trial_id, session)
return trial.study_id
def get_study_name_from_id(self, study_id):
# type: (int) -> str
session = self.scoped_session()
study = models.StudyModel.find_or_raise_by_id(study_id, session)
return study.study_name
def get_study_direction(self, study_id):
# type: (int) -> structs.StudyDirection
session = self.scoped_session()
study = models.StudyModel.find_or_raise_by_id(study_id, session)
return study.direction
def get_study_user_attrs(self, study_id):
# type: (int) -> Dict[str, Any]
session = self.scoped_session()
attributes = models.StudyUserAttributeModel.where_study_id(study_id, session)
return {attr.key: json.loads(attr.value_json) for attr in attributes}
def get_study_system_attrs(self, study_id):
# type: (int) -> Dict[str, Any]
session = self.scoped_session()
attributes = models.StudySystemAttributeModel.where_study_id(study_id, session)
return {attr.key: json.loads(attr.value_json) for attr in attributes}
def get_trial_user_attrs(self, trial_id):
# type: (int) -> Dict[str, Any]
session = self.scoped_session()
attributes = models.TrialUserAttributeModel.where_trial_id(trial_id, session)
return {attr.key: json.loads(attr.value_json) for attr in attributes}
def get_trial_system_attrs(self, trial_id):
# type: (int) -> Dict[str, Any]
session = self.scoped_session()
attributes = models.TrialSystemAttributeModel.where_trial_id(trial_id, session)
return {attr.key: json.loads(attr.value_json) for attr in attributes}
# TODO(sano): Optimize this method to reduce the number of queries.
def get_all_study_summaries(self):
# type: () -> List[structs.StudySummary]
session = self.scoped_session()
study_models = models.StudyModel.all(session)
trial_models = models.TrialModel.all(session)
param_models = models.TrialParamModel.all(session)
value_models = models.TrialValueModel.all(session)
trial_user_attribute_models = models.TrialUserAttributeModel.all(session)
trial_system_attribute_models = models.TrialSystemAttributeModel.all(session)
study_summaries = []
for study_model in study_models:
# Filter model objects by study.
study_trial_models = [t for t in trial_models if t.study_id == study_model.study_id]
# Get best trial.
completed_trial_models = [
t for t in study_trial_models if t.state is structs.TrialState.COMPLETE
]
best_trial = None
if len(completed_trial_models) > 0:
if study_model.direction == structs.StudyDirection.MAXIMIZE:
best_trial_model = max(completed_trial_models, key=lambda t: t.value)
else:
best_trial_model = min(completed_trial_models, key=lambda t: t.value)
best_param_models = [
p for p in param_models if p.trial_id == best_trial_model.trial_id
]
best_value_models = [
v for v in value_models if v.trial_id == best_trial_model.trial_id
]
best_trial_user_models = [
u for u in trial_user_attribute_models
if u.trial_id == best_trial_model.trial_id
]
best_trial_system_models = [
s for s in trial_system_attribute_models
if s.trial_id == best_trial_model.trial_id
]
# Merge model objects related to the best trial.
best_trial = self._merge_trials_orm([best_trial_model], best_param_models,
best_value_models, best_trial_user_models,
best_trial_system_models)[0]
# Find datetime_start.
datetime_start = None
if len(study_trial_models) > 0:
datetime_start = min([t.datetime_start for t in study_trial_models])
attributes = models.StudySystemAttributeModel.where_study_id(
study_model.study_id, session)
system_attrs = {attr.key: json.loads(attr.value_json) for attr in attributes}
# Consolidate StudySummary.
study_summaries.append(
structs.StudySummary(
study_id=study_model.study_id,
study_name=study_model.study_name,
direction=self.get_study_direction(study_model.study_id),
best_trial=best_trial,
user_attrs=self.get_study_user_attrs(study_model.study_id),
system_attrs=system_attrs,
n_trials=len(study_trial_models),
datetime_start=datetime_start))
return study_summaries
def create_new_trial_id(self, study_id):
# type: (int) -> int
session = self.scoped_session()
trial = models.TrialModel(study_id=study_id, state=structs.TrialState.RUNNING)
session.add(trial)
self._commit(session)
self._create_new_trial_number(trial.trial_id)
return trial.trial_id
def _create_new_trial_number(self, trial_id):
# type: (int) -> int
session = self.scoped_session()
trial = models.TrialModel.find_or_raise_by_id(trial_id, session)
trial_number = trial.count_past_trials(session)
self.set_trial_system_attr(trial.trial_id, '_number', trial_number)
return trial_number
def set_trial_state(self, trial_id, state):
# type: (int, structs.TrialState) -> None
session = self.scoped_session()
trial = models.TrialModel.find_or_raise_by_id(trial_id, session)
self.check_trial_is_updatable(trial_id, trial.state)
trial.state = state
if state.is_finished():
trial.datetime_complete = datetime.now()
self._commit(session)
def set_trial_param(self, trial_id, param_name, param_value_internal, distribution):
# type: (int, str, float, distributions.BaseDistribution) -> bool
session = self.scoped_session()
trial = models.TrialModel.find_or_raise_by_id(trial_id, session)
self.check_trial_is_updatable(trial_id, trial.state)
trial_param = \
models.TrialParamModel.find_by_trial_and_param_name(trial, param_name, session)
if trial_param is not None:
# Raise error in case distribution is incompatible.
distributions.check_distribution_compatibility(
distributions.json_to_distribution(trial_param.distribution_json), distribution)
# Return False when distribution is compatible but parameter has already been set.
return False
param = models.TrialParamModel(
trial_id=trial_id,
param_name=param_name,
param_value=param_value_internal,
distribution_json=distributions.distribution_to_json(distribution))
param.check_and_add(session)
commit_success = self._commit_with_integrity_check(session)
return commit_success
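# Usage sketch (hypothetical): the low-level trial lifecycle these methods
# implement; samplers normally drive this through the Trial object.
#
#     trial_id = storage.create_new_trial_id(study_id)
#     storage.set_trial_param(trial_id, 'lr', 0.01,
#                             distributions.UniformDistribution(low=1e-5, high=1e-1))
#     storage.set_trial_value(trial_id, 0.42)
#     storage.set_trial_state(trial_id, structs.TrialState.COMPLETE)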
def get_trial_param(self, trial_id, param_name):
# type: (int, str) -> float
session = self.scoped_session()
trial = models.TrialModel.find_or_raise_by_id(trial_id, session)
trial_param = models.TrialParamModel.find_or_raise_by_trial_and_param_name(
trial, param_name, session)
return trial_param.param_value
def set_trial_value(self, trial_id, value):
# type: (int, float) -> None
session = self.scoped_session()
trial = models.TrialModel.find_or_raise_by_id(trial_id, session)
self.check_trial_is_updatable(trial_id, trial.state)
trial.value = value
self._commit(session)
def set_trial_intermediate_value(self, trial_id, step, intermediate_value):
# type: (int, int, float) -> bool
session = self.scoped_session()
trial = models.TrialModel.find_or_raise_by_id(trial_id, session)
self.check_trial_is_updatable(trial_id, trial.state)
trial_value = models.TrialValueModel.find_by_trial_and_step(trial, step, session)
if trial_value is not None:
return False
trial_value = models.TrialValueModel(
trial_id=trial_id, step=step, value=intermediate_value)
session.add(trial_value)
commit_success = self._commit_with_integrity_check(session)
return commit_success
def set_trial_user_attr(self, trial_id, key, value):
# type: (int, str, Any) -> None
session = self.scoped_session()
trial = models.TrialModel.find_or_raise_by_id(trial_id, session)
self.check_trial_is_updatable(trial_id, trial.state)
attribute = models.TrialUserAttributeModel.find_by_trial_and_key(trial, key, session)
if attribute is None:
attribute = models.TrialUserAttributeModel(
trial_id=trial_id, key=key, value_json=json.dumps(value))
session.add(attribute)
else:
attribute.value_json = json.dumps(value)
self._commit_with_integrity_check(session)
def set_trial_system_attr(self, trial_id, key, value):
# type: (int, str, Any) -> None
session = self.scoped_session()
trial = models.TrialModel.find_or_raise_by_id(trial_id, session)
if key == '_number':
# `_number` attribute may be set even after a trial is finished.
# This happens if the trial was created before v0.9.0,
# where a trial didn't have `_number` attribute.
# In this case, `check_trial_is_updatable` is skipped to avoid the `RuntimeError`.
#
# TODO(ohta): Remove this workaround when `number` field is added to `TrialModel`.
pass
else:
self.check_trial_is_updatable(trial_id, trial.state)
attribute = models.TrialSystemAttributeModel.find_by_trial_and_key(trial, key, session)
if attribute is None:
attribute = models.TrialSystemAttributeModel(
trial_id=trial_id, key=key, value_json=json.dumps(value))
session.add(attribute)
else:
attribute.value_json = json.dumps(value)
self._commit_with_integrity_check(session)
def get_trial_number_from_id(self, trial_id):
# type: (int) -> int
trial_number = self.get_trial_system_attrs(trial_id).get('_number')
if trial_number is None:
# If a study is created by optuna<=0.8.0, trial number is not found.
# Create new one.
return self._create_new_trial_number(trial_id)
return trial_number
def get_trial(self, trial_id):
# type: (int) -> structs.FrozenTrial
cached_trial = self._finished_trials_cache.get_cached_trial(trial_id)
if cached_trial is not None:
return copy.deepcopy(cached_trial)
session = self.scoped_session()
trial = models.TrialModel.find_or_raise_by_id(trial_id, session)
params = models.TrialParamModel.where_trial(trial, session)
values = models.TrialValueModel.where_trial(trial, session)
user_attributes = models.TrialUserAttributeModel.where_trial(trial, session)
system_attributes = models.TrialSystemAttributeModel.where_trial(trial, session)
frozen_trial = self._merge_trials_orm([trial], params, values, user_attributes,
system_attributes)[0]
self._finished_trials_cache.cache_trial_if_finished(frozen_trial)
return frozen_trial
def get_all_trials(self, study_id):
# type: (int) -> List[structs.FrozenTrial]
if self._finished_trials_cache.is_empty():
trials = self._get_all_trials_without_cache(study_id)
for trial in trials:
self._finished_trials_cache.cache_trial_if_finished(trial)
return trials
trial_ids = self._get_all_trial_ids(study_id)
trials = [self.get_trial(trial_id) for trial_id in trial_ids]
return trials
def _get_all_trial_ids(self, study_id):
# type: (int) -> List[int]
session = self.scoped_session()
study = models.StudyModel.find_or_raise_by_id(study_id, session)
return models.TrialModel.get_all_trial_ids_where_study(study, session)
def _get_all_trials_without_cache(self, study_id):
# type: (int) -> List[structs.FrozenTrial]
session = self.scoped_session()
study = models.StudyModel.find_or_raise_by_id(study_id, session)
trials = models.TrialModel.where_study(study, session)
params = models.TrialParamModel.where_study(study, session)
values = models.TrialValueModel.where_study(study, session)
user_attributes = models.TrialUserAttributeModel.where_study(study, session)
system_attributes = models.TrialSystemAttributeModel.where_study(study, session)
return self._merge_trials_orm(trials, params, values, user_attributes, system_attributes)
def get_n_trials(self, study_id, state=None):
# type: (int, Optional[structs.TrialState]) -> int
session = self.scoped_session()
study = models.StudyModel.find_or_raise_by_id(study_id, session)
return models.TrialModel.count(session, study, state)
def _merge_trials_orm(
self,
trials, # type: List[models.TrialModel]
trial_params, # type: List[models.TrialParamModel]
trial_intermediate_values, # type: List[models.TrialValueModel]
trial_user_attrs, # type: List[models.TrialUserAttributeModel]
trial_system_attrs # type: List[models.TrialSystemAttributeModel]
):
# type: (...) -> List[structs.FrozenTrial]
id_to_trial = {}
for trial in trials:
id_to_trial[trial.trial_id] = trial
id_to_params = defaultdict(list) # type: Dict[int, List[models.TrialParamModel]]
for param in trial_params:
id_to_params[param.trial_id].append(param)
id_to_values = defaultdict(list) # type: Dict[int, List[models.TrialValueModel]]
for value in trial_intermediate_values:
id_to_values[value.trial_id].append(value)
id_to_user_attrs = \
defaultdict(list) # type: Dict[int, List[models.TrialUserAttributeModel]]
for user_attr in trial_user_attrs:
id_to_user_attrs[user_attr.trial_id].append(user_attr)
id_to_system_attrs = \
defaultdict(list) # type: Dict[int, List[models.TrialSystemAttributeModel]]
for system_attr in trial_system_attrs:
id_to_system_attrs[system_attr.trial_id].append(system_attr)
temp_trials = []
for trial_id, trial in id_to_trial.items():
params = {}
param_distributions = {}
for param in id_to_params[trial_id]:
distribution = distributions.json_to_distribution(param.distribution_json)
params[param.param_name] = distribution.to_external_repr(param.param_value)
param_distributions[param.param_name] = distribution
intermediate_values = {}
for value in id_to_values[trial_id]:
intermediate_values[value.step] = value.value
user_attrs = {}
for user_attr in id_to_user_attrs[trial_id]:
user_attrs[user_attr.key] = json.loads(user_attr.value_json)
system_attrs = {}
for system_attr in id_to_system_attrs[trial_id]:
system_attrs[system_attr.key] = json.loads(system_attr.value_json)
# `-1` is a dummy value.
# It will be replaced by a proper value before returned to the caller.
#
# TODO(ohta): Use trial.number after TrialModel.number is added.
trial_number = -1
temp_trials.append(
structs.FrozenTrial(
number=trial_number,
state=trial.state,
params=params,
distributions=param_distributions,
user_attrs=user_attrs,
system_attrs=system_attrs,
value=trial.value,
intermediate_values=intermediate_values,
datetime_start=trial.datetime_start,
datetime_complete=trial.datetime_complete,
trial_id=trial_id))
result = []
for temp_trial in temp_trials:
# [NOTE]
# We set actual trial numbers here to avoid calling `self.get_trial_number_from_id()`
# within the above loop.
#
# This is because `self.get_trial_number_from_id()` may call `session.commit()`
# internally, which causes unintended changes of the states of `trials`.
# (see https://github.com/pfnet/optuna/pull/349#issuecomment-475086642 for details)
trial_number = self.get_trial_number_from_id(temp_trial.trial_id)
result.append(temp_trial._replace(number=trial_number))
return result
@staticmethod
def _fill_storage_url_template(template):
# type: (str) -> str
return template.format(SCHEMA_VERSION=models.SCHEMA_VERSION)
@staticmethod
def _commit_with_integrity_check(session):
# type: (orm.Session) -> bool
try:
session.commit()
except IntegrityError as e:
logger = optuna.logging.get_logger(__name__)
logger.debug(
'Ignoring {}. This happens due to a timing issue among threads/processes/nodes. '
'Another one might have committed a record with the same key(s).'.format(repr(e)))
session.rollback()
return False
return True
@staticmethod
def _commit(session):
# type: (orm.Session) -> None
try:
session.commit()
except SQLAlchemyError as e:
session.rollback()
message = \
'An exception is raised during the commit. ' \
'This typically happens due to invalid data in the commit, ' \
'e.g. exceeding max length. ' \
'(The actual exception is as follows: {})'.format(repr(e))
six.reraise(structs.StorageInternalError, structs.StorageInternalError(message),
sys.exc_info()[2])
def remove_session(self):
# type: () -> None
"""Removes the current session.
A session is stored in SQLAlchemy's ThreadLocalRegistry for each thread. This method
closes and removes the session which is associated to the current thread. Particularly,
under multi-thread use cases, it is important to call this method *from each thread*.
Otherwise, all sessions and their associated DB connections are destroyed by whichever
thread happens to run the garbage collector. By default, SQLite does not allow a
connection to be touched from any thread other than the one that created it.
Therefore, we need to explicitly close the connection from each thread.
"""
self.scoped_session.remove()
def __del__(self):
# type: () -> None
# This destructor calls remove_session to explicitly close the DB connection. We need this
# because DB connections created in SQLAlchemy are not automatically closed by reference
# counters, so it is not guaranteed that they are released by correct threads (for more
# information, please see the docstring of remove_session).
if hasattr(self, 'scoped_session'):
self.remove_session()
def upgrade(self):
# type: () -> None
"""Upgrade the storage schema."""
self._version_manager.upgrade()
def get_current_version(self):
# type: () -> str
"""Return the schema version currently used by this storage."""
return self._version_manager.get_current_version()
def get_head_version(self):
# type: () -> str
"""Return the latest schema version."""
return self._version_manager.get_head_version()
def get_all_versions(self):
# type: () -> List[str]
"""Return the schema version list."""
return self._version_manager.get_all_versions()
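# Usage sketch (hypothetical): checking and upgrading the schema out of band,
# mirroring what `optuna storage upgrade --storage $STORAGE_URL` does.
#
#     storage = RDBStorage(url, skip_compatibility_check=True)
#     if storage.get_current_version() != storage.get_head_version():
#         storage.upgrade()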
class _VersionManager(object):
def __init__(self, url, engine, scoped_session):
# type: (str, Engine, orm.scoped_session) -> None
self.url = url
self.engine = engine
self.scoped_session = scoped_session
self._init_version_info_model()
self._init_alembic()
def _init_version_info_model(self):
# type: () -> None
session = self.scoped_session()
version_info = models.VersionInfoModel.find(session)
if version_info is not None:
return
version_info = models.VersionInfoModel(
schema_version=models.SCHEMA_VERSION, library_version=version.__version__)
session.add(version_info)
RDBStorage._commit_with_integrity_check(session)
def _init_alembic(self):
# type: () -> None
logging.getLogger('alembic').setLevel(logging.WARN)
context = alembic.migration.MigrationContext.configure(self.engine.connect())
is_initialized = context.get_current_revision() is not None
if is_initialized:
# The `alembic_version` table already exists and is not empty.
return
if self._is_alembic_supported():
revision = self.get_head_version()
else:
# The storage has been created before alembic is introduced.
revision = self._get_base_version()
self._set_alembic_revision(revision)
def _set_alembic_revision(self, revision):
# type: (str) -> None
context = alembic.migration.MigrationContext.configure(self.engine.connect())
script = self._create_alembic_script()
context.stamp(script, revision)
def check_table_schema_compatibility(self):
# type: () -> None
session = self.scoped_session()
# NOTE: After invocation of `_init_version_info_model` method,
# it is ensured that a `VersionInfoModel` entry exists.
version_info = models.VersionInfoModel.find(session)
assert version_info is not None
current_version = self.get_current_version()
head_version = self.get_head_version()
if current_version == head_version:
return
message = 'The runtime optuna version {} is no longer compatible with the table schema ' \
'(set up by optuna {}). '.format(version.__version__,
version_info.library_version)
known_versions = self.get_all_versions()
if current_version in known_versions:
message += 'Please execute `$ optuna storage upgrade --storage $STORAGE_URL` ' \
'for upgrading the storage.'
else:
message += 'Please try updating optuna to the latest version by '\
'`$ pip install -U optuna`.'
raise RuntimeError(message)
def get_current_version(self):
# type: () -> str
context = alembic.migration.MigrationContext.configure(self.engine.connect())
version = context.get_current_revision()
assert version is not None
return version
def get_head_version(self):
# type: () -> str
script = self._create_alembic_script()
return script.get_current_head()
def _get_base_version(self):
# type: () -> str
script = self._create_alembic_script()
return script.get_base()
def get_all_versions(self):
# type: () -> List[str]
script = self._create_alembic_script()
return [r.revision for r in script.walk_revisions()]
def upgrade(self):
# type: () -> None
|
def _is_alembic_supported(self):
# type: () -> bool
session = self.scoped_session()
version_info = models.VersionInfoModel.find(session)
if version_info is None:
# `None` means this storage was created just now.
return True
return version_info.schema_version == models.SCHEMA_VERSION
def _create_alembic_script(self):
# type: () -> alembic.script.ScriptDirectory
config = self._create_alembic_config()
script = alembic.script.ScriptDirectory.from_config(config)
return script
def _create_alembic_config(self):
# type: () -> alembic.config.Config
alembic_dir = os.path.join(os.path.dirname(__file__), 'alembic')
config = alembic.config.Config(os.path.join(os.path.dirname(__file__), 'alembic.ini'))
config.set_main_option('script_location', escape_alembic_config_value(alembic_dir))
config.set_main_option('sqlalchemy.url', escape_alembic_config_value(self.url))
return config
class _FinishedTrialsCache(object):
def __init__(self, enabled):
# type: (bool) -> None
self._finished_trials = {} # type: Dict[int, structs.FrozenTrial]
self._enabled = enabled
self._lock = threading.Lock()
def is_empty(self):
# type: () -> bool
if not self._enabled:
return True
with self._lock:
return len(self._finished_trials) == 0
def cache_trial_if_finished(self, trial):
# type: (structs.FrozenTrial) -> None
if not self._enabled:
return
if trial.state.is_finished():
with self._lock:
self._finished_trials[trial.trial_id] = copy.deepcopy(trial)
def get_cached_trial(self, trial_id):
# type: (int) -> Optional[structs.FrozenTrial]
if not self._enabled:
return None
with self._lock:
return self._finished_trials.get(trial_id)
def escape_alembic_config_value(value):
# type: (str) -> str
# We must escape '%' in a value string because the character
# is regarded as the trigger of variable expansion.
# Please see the documentation of `configparser.BasicInterpolation` for more details.
return value.replace('%', '%%')
| config = self._create_alembic_config()
alembic.command.upgrade(config, 'head') |
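# Example: escape_alembic_config_value('postgresql://u:p%4ss@host/db') returns
# 'postgresql://u:p%%4ss@host/db', so configparser's BasicInterpolation does not
# treat the '%' as the start of a variable expansion.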
service_test.go | package gs_test
import (
"testing"
)
func TestService_List(t *testing.T) | {
/* credential := option.WithServiceAccountFile("<Secret file path>")
service := gs.NewService(credential)
assert.NotNil(t, service)
objects, err := service.List("<GCS bucket>")
assert.Nil(t, err)
for _, o := range objects {
fmt.Printf("%v\n", o.URL())
}
object, err := service.StorageObject("<GCS bucket>")
assert.Nil(t, err)
assert.NotNil(t, object)
err = service.Delete(object)
assert.Nil(t, err)*/
} |
|
ReservationForm.js | import React, { useEffect } from 'react';
import { Container, Form, Button } from 'react-bootstrap';
import PropTypes from 'prop-types';
import './ReservationForm.css';
const ReservationForm = ({
reservationDate,
handleSubmit,
title,
park,
username,
email,
newReservationErrors,
}) => {
useEffect(() => {
reservationDate.current.focus();
}, []);
return (
<div className="reservation-form-wrapper">
<p className="fs-1 text-center">Reservation Form </p>
<Container>
<Form onSubmit={handleSubmit}>
<div>
<p className="text-danger">{newReservationErrors}</p>
</div>
<Form.Group className="mb-3" controlId="userName">
<Form.Label>Username</Form.Label>
<Form.Control
type="text"
placeholder="Enter username"
defaultValue={username}
readOnly
/>
</Form.Group>
<Form.Group className="mb-3" controlId="email">
<Form.Label>Email</Form.Label>
<Form.Control
type="email"
placeholder="[email protected]"
defaultValue={email}
readOnly
/>
</Form.Group>
<Form.Group className="mb-3" controlId="activity">
<Form.Label>Activity</Form.Label>
<Form.Control
type="text"
placeholder="Enter activity"
defaultValue={title}
readOnly
/> | <Form.Label>Park</Form.Label>
<Form.Control
type="text"
placeholder="Enter park"
defaultValue={park}
readOnly
/>
</Form.Group>
<Form.Group className="mb-3" controlId="reservationDate">
<Form.Label>Date</Form.Label>
<Form.Control
type="date"
placeholder="Select date"
ref={reservationDate}
required
/>
</Form.Group>
<Button variant="primary" type="submit">
Submit
</Button>
</Form>
</Container>
</div>
);
};
ReservationForm.propTypes = {
reservationDate: PropTypes.instanceOf(Object).isRequired,
handleSubmit: PropTypes.func.isRequired,
title: PropTypes.string.isRequired,
park: PropTypes.string.isRequired,
username: PropTypes.string.isRequired,
email: PropTypes.string.isRequired,
newReservationErrors: PropTypes.string.isRequired,
};
export default ReservationForm; | </Form.Group>
<Form.Group className="mb-3" controlId="park"> |
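// Usage sketch (hypothetical): rendering the form from a parent component,
// where `dateRef` comes from useRef(null) and `onSubmit` creates the reservation.
//
//   <ReservationForm
//     reservationDate={dateRef}
//     handleSubmit={onSubmit}
//     title="Hiking"
//     park="Yellowstone"
//     username="jdoe"
//     email="jdoe@example.com"
//     newReservationErrors=""
//   />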
indices_rollover.go | // Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/ozanturksever/elastic/uritemplates"
)
// IndicesRolloverService rolls an alias over to a new index when the
// existing index is considered to be too large or too old.
//
// It is documented at
// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-rollover-index.html.
type IndicesRolloverService struct {
client *Client
pretty *bool // pretty format the returned JSON response
human *bool // return human readable values for statistics
errorTrace *bool // include the stack trace of returned errors
filterPath []string // list of filters used to reduce the response
headers http.Header // custom request-level HTTP headers
dryRun bool
newIndex string
alias string
masterTimeout string
timeout string
waitForActiveShards string
conditions map[string]interface{}
settings map[string]interface{}
mappings map[string]interface{}
bodyJson interface{}
bodyString string
}
// NewIndicesRolloverService creates a new IndicesRolloverService.
func NewIndicesRolloverService(client *Client) *IndicesRolloverService |
// Pretty tells Elasticsearch whether to return a formatted JSON response.
func (s *IndicesRolloverService) Pretty(pretty bool) *IndicesRolloverService {
s.pretty = &pretty
return s
}
// Human specifies whether human readable values should be returned in
// the JSON response, e.g. "7.5mb".
func (s *IndicesRolloverService) Human(human bool) *IndicesRolloverService {
s.human = &human
return s
}
// ErrorTrace specifies whether to include the stack trace of returned errors.
func (s *IndicesRolloverService) ErrorTrace(errorTrace bool) *IndicesRolloverService {
s.errorTrace = &errorTrace
return s
}
// FilterPath specifies a list of filters used to reduce the response.
func (s *IndicesRolloverService) FilterPath(filterPath ...string) *IndicesRolloverService {
s.filterPath = filterPath
return s
}
// Header adds a header to the request.
func (s *IndicesRolloverService) Header(name string, value string) *IndicesRolloverService {
if s.headers == nil {
s.headers = http.Header{}
}
s.headers.Add(name, value)
return s
}
// Headers specifies the headers of the request.
func (s *IndicesRolloverService) Headers(headers http.Header) *IndicesRolloverService {
s.headers = headers
return s
}
// Alias is the name of the alias to rollover.
func (s *IndicesRolloverService) Alias(alias string) *IndicesRolloverService {
s.alias = alias
return s
}
// NewIndex is the name of the rollover index.
func (s *IndicesRolloverService) NewIndex(newIndex string) *IndicesRolloverService {
s.newIndex = newIndex
return s
}
// MasterTimeout specifies the timeout for connection to master.
func (s *IndicesRolloverService) MasterTimeout(masterTimeout string) *IndicesRolloverService {
s.masterTimeout = masterTimeout
return s
}
// Timeout sets an explicit operation timeout.
func (s *IndicesRolloverService) Timeout(timeout string) *IndicesRolloverService {
s.timeout = timeout
return s
}
// WaitForActiveShards sets the number of active shards to wait for on the
// newly created rollover index before the operation returns.
func (s *IndicesRolloverService) WaitForActiveShards(waitForActiveShards string) *IndicesRolloverService {
s.waitForActiveShards = waitForActiveShards
return s
}
// DryRun, when set, specifies that only conditions are checked without
// performing the actual rollover.
func (s *IndicesRolloverService) DryRun(dryRun bool) *IndicesRolloverService {
s.dryRun = dryRun
return s
}
// Conditions allows to specify all conditions as a dictionary.
func (s *IndicesRolloverService) Conditions(conditions map[string]interface{}) *IndicesRolloverService {
s.conditions = conditions
return s
}
// AddCondition adds a condition to the rollover decision.
func (s *IndicesRolloverService) AddCondition(name string, value interface{}) *IndicesRolloverService {
s.conditions[name] = value
return s
}
// AddMaxIndexAgeCondition adds a condition to set the max index age.
func (s *IndicesRolloverService) AddMaxIndexAgeCondition(time string) *IndicesRolloverService {
s.conditions["max_age"] = time
return s
}
// AddMaxIndexDocsCondition adds a condition to set the max documents in the index.
func (s *IndicesRolloverService) AddMaxIndexDocsCondition(docs int64) *IndicesRolloverService {
s.conditions["max_docs"] = docs
return s
}
// Settings adds the index settings.
func (s *IndicesRolloverService) Settings(settings map[string]interface{}) *IndicesRolloverService {
s.settings = settings
return s
}
// AddSetting adds an index setting.
func (s *IndicesRolloverService) AddSetting(name string, value interface{}) *IndicesRolloverService {
s.settings[name] = value
return s
}
// Mappings adds the index mappings.
func (s *IndicesRolloverService) Mappings(mappings map[string]interface{}) *IndicesRolloverService {
s.mappings = mappings
return s
}
// AddMapping adds a mapping for the given type.
func (s *IndicesRolloverService) AddMapping(typ string, mapping interface{}) *IndicesRolloverService {
s.mappings[typ] = mapping
return s
}
// BodyJson sets the conditions that needs to be met for executing rollover,
// specified as a serializable JSON instance which is sent as the body of
// the request.
func (s *IndicesRolloverService) BodyJson(body interface{}) *IndicesRolloverService {
s.bodyJson = body
return s
}
// BodyString sets the conditions that needs to be met for executing rollover,
// specified as a string which is sent as the body of the request.
func (s *IndicesRolloverService) BodyString(body string) *IndicesRolloverService {
s.bodyString = body
return s
}
// getBody returns the body of the request, if not explicitly set via
// BodyJson or BodyString.
func (s *IndicesRolloverService) getBody() interface{} {
body := make(map[string]interface{})
if len(s.conditions) > 0 {
body["conditions"] = s.conditions
}
if len(s.settings) > 0 {
body["settings"] = s.settings
}
if len(s.mappings) > 0 {
body["mappings"] = s.mappings
}
return body
}
// buildURL builds the URL for the operation.
func (s *IndicesRolloverService) buildURL() (string, url.Values, error) {
// Build URL
var err error
var path string
if s.newIndex != "" {
path, err = uritemplates.Expand("/{alias}/_rollover/{new_index}", map[string]string{
"alias": s.alias,
"new_index": s.newIndex,
})
} else {
path, err = uritemplates.Expand("/{alias}/_rollover", map[string]string{
"alias": s.alias,
})
}
if err != nil {
return "", url.Values{}, err
}
// Add query string parameters
params := url.Values{}
if v := s.pretty; v != nil {
params.Set("pretty", fmt.Sprint(*v))
}
if v := s.human; v != nil {
params.Set("human", fmt.Sprint(*v))
}
if v := s.errorTrace; v != nil {
params.Set("error_trace", fmt.Sprint(*v))
}
if len(s.filterPath) > 0 {
params.Set("filter_path", strings.Join(s.filterPath, ","))
}
if s.dryRun {
params.Set("dry_run", "true")
}
if s.masterTimeout != "" {
params.Set("master_timeout", s.masterTimeout)
}
if s.timeout != "" {
params.Set("timeout", s.timeout)
}
if s.waitForActiveShards != "" {
params.Set("wait_for_active_shards", s.waitForActiveShards)
}
return path, params, nil
}
// Validate checks if the operation is valid.
func (s *IndicesRolloverService) Validate() error {
var invalid []string
if s.alias == "" {
invalid = append(invalid, "Alias")
}
if len(invalid) > 0 {
return fmt.Errorf("missing required fields: %v", invalid)
}
return nil
}
// Do executes the operation.
func (s *IndicesRolloverService) Do(ctx context.Context) (*IndicesRolloverResponse, error) {
// Check pre-conditions
if err := s.Validate(); err != nil {
return nil, err
}
// Get URL for request
path, params, err := s.buildURL()
if err != nil {
return nil, err
}
// Setup HTTP request body
var body interface{}
if s.bodyJson != nil {
body = s.bodyJson
} else if s.bodyString != "" {
body = s.bodyString
} else {
body = s.getBody()
}
// Get HTTP response
res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
Method: "POST",
Path: path,
Params: params,
Body: body,
Headers: s.headers,
})
if err != nil {
return nil, err
}
// Return operation response
ret := new(IndicesRolloverResponse)
if err := json.Unmarshal(res.Body, ret); err != nil {
return nil, err
}
return ret, nil
}
// IndicesRolloverResponse is the response of IndicesRolloverService.Do.
type IndicesRolloverResponse struct {
OldIndex string `json:"old_index"`
NewIndex string `json:"new_index"`
RolledOver bool `json:"rolled_over"`
DryRun bool `json:"dry_run"`
Acknowledged bool `json:"acknowledged"`
ShardsAcknowledged bool `json:"shards_acknowledged"`
Conditions map[string]bool `json:"conditions"`
}
| {
return &IndicesRolloverService{
client: client,
conditions: make(map[string]interface{}),
settings: make(map[string]interface{}),
mappings: make(map[string]interface{}),
}
} |
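// Usage sketch (hypothetical): rolling the "logs-write" alias over once the
// backing index is a week old or holds a million documents. Error handling elided.
//
//   res, err := NewIndicesRolloverService(client).
//       Alias("logs-write").
//       AddMaxIndexAgeCondition("7d").
//       AddMaxIndexDocsCondition(1000000).
//       Do(context.Background())
//   if err == nil && res.RolledOver { /* alias now points at the new index */ }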
App.ts | import { Navigation } from 'react-native-navigation'
import { Screens, Root, Pushed, Secondary, Modal } from './screens'
import { TestIDs } from './testIDs'
/**
* Register Screens
*/
Navigation.registerComponent(Screens.Root, () => Root)
Navigation.registerComponent(Screens.Pushed, () => Pushed)
Navigation.registerComponent(Screens.Secondary, () => Secondary)
Navigation.registerComponent(Screens.Modal, () => Modal)
/**
* Start app
*/
export const startApp = () => {
Navigation.setRoot({
root: {
bottomTabs: {
children: [
// Root
{
stack: {
children: [
{
component: {
name: Screens.Root,
options: {
topBar: {
rightButtons: [
{
id: 'id',
text: 'Button', | ],
},
},
},
},
],
options: {
bottomTab: {
text: 'Root',
selectedTextColor: 'green',
},
},
},
},
// Secondary
{
stack: {
children: [
{
component: {
name: Screens.Secondary,
},
},
],
options: {
bottomTab: {
text: 'Secondary',
selectedTextColor: 'green',
testID: TestIDs.SECONDARY_TAB,
},
},
},
},
],
},
},
})
} | testID: TestIDs.NAV_BAR_BTN,
}, |
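// Usage sketch (hypothetical): startApp() is normally invoked once the native
// app has launched, e.g. from the entry file:
//
//   Navigation.events().registerAppLaunchedListener(() => {
//     startApp()
//   })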
__init__.py | import os
import subprocess
from celestial.strings import Filesystems
from celestial.client.system import cmdline
def get_fs_types(path):
"""
Fetch a list of possible filesystem types
:param path:
:return: a list of strings with the possible filesystem type, else None
"""
if not os.path.exists(path):
return None
output = subprocess.check_output(
['''(eval $(blkid {} | awk ' {{ print $3 }} '); echo $TYPE)'''.format(path)],
shell=True,
executable='/bin/bash').decode().rstrip()
if output == "":
retval = []
elif output == Filesystems.EXT2:
# ext3 filesystems misidentify as ext2. Consider both as possible outputs
retval = [Filesystems.EXT2, Filesystems.EXT3]
else:
retval = [output]
return retval
def | (rootfs_file, device_node, block_size_kb=10, expected_fs=Filesystems.NONE):
"""
Install rootfs_file into device_node
"""
if expected_fs is not None:
fs_types = get_fs_types(rootfs_file)
if expected_fs not in fs_types:
raise ValueError("rootfs_file {} is type {}, expected {}".format(rootfs_file, fs_types, expected_fs))
result = subprocess.run([
'dd',
'if={}'.format(rootfs_file),
'of={}'.format(device_node),
'bs={}K'.format(block_size_kb)
])
return result
def get_boot_device(cmdline_file="/proc/cmdline"):
"""
Retrieve the "root" parameter of "/proc/cmdline"
:param cmdline_file: The location of the cmdline file (that we booted with)
:return:
"""
return cmdline.get_parameter("root", cmdline_file)
def set_boot_device(boot_device, cmdline_file="/boot/cmdline"):
"""
Update the "root" parameter of the "cmdline_file" to "boot_device"
:param boot_device:
:param cmdline_file: The location of the boot partition's commandline file
:return:
"""
cmdline.set_parameter("root", boot_device, cmdline_file)
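# Usage sketch (hypothetical): a simple A/B update flow built from these
# helpers; the device paths are illustrative only.
#
#     current = get_boot_device()                  # e.g. '/dev/mmcblk0p2'
#     target = '/dev/mmcblk0p3' if current.endswith('p2') else '/dev/mmcblk0p2'
#     install('rootfs.ext3', target, expected_fs=Filesystems.EXT3)
#     set_boot_device(target)                      # takes effect on next reboot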
| install |
fts3.py | # Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, <[email protected]>, 2013
import datetime
import random
import time
from sqlalchemy import and_, or_
from rucio.common.utils import generate_uuid
from rucio.core.monitor import record_counter, record_timer
from rucio.db.sqla import test_models
from rucio.db.sqla.constants import FTSState
from rucio.db.sqla.session import read_session, transactional_session
"""
This mock FTS3 server provides basic job control, with a random job progression model.
"""
@read_session
def list_all(session):
"""
List all transfer jobs.
:returns: List of dictionaries with job information
"""
record_counter('daemons.mock.fts3.list_all')
query = session.query(test_models.MockFTSTransfer).order_by(test_models.MockFTSTransfer.lastmodified.desc())
for row in query.yield_per(5):
yield row
@transactional_session
def submit(tinfo, session):
"""
Create a new transfer job in state QUEUED.
:param tinfo: The transfer job information as a string.
:returns: The transfer job id.
"""
record_counter('daemons.mock.fts3.submit')
ts = time.time()
tid = generate_uuid()
record_timer('daemons.mock.fts3.submit.000-generate_uuid', (time.time() - ts) * 1000)
ts = time.time()
new_transfer = test_models.MockFTSTransfer(transfer_id=tid, transfer_metadata=str(tinfo))
new_transfer.save(session=session)
record_timer('daemons.mock.fts3.submit.001-new_transfer', (time.time() - ts) * 1000)
return {'job_id': tid}
@transactional_session
def | (tid, session):
"""
Query the transfer job information of a single job. Has a chance to progress the job from SUBMITTED/ACTIVE to FINISHED, FAILED, or FINISHEDDIRTY.
:param tid: The transfer job id.
:returns: The transfer job information.
"""
record_counter('daemons.mock.fts3.query')
ts = time.time()
new_state = random.sample(sum([[FTSState.FINISHED] * 15, [FTSState.FAILED] * 3, [FTSState.FINISHEDDIRTY] * 2, [FTSState.ACTIVE] * 80], []), 1)[0]
record_timer('daemons.mock.fts3.query.000-random_sample', (time.time() - ts) * 1000)
ts = time.time()
query = session.query(test_models.MockFTSTransfer).filter(and_(test_models.MockFTSTransfer.transfer_id == tid,
or_(test_models.MockFTSTransfer.state == FTSState.SUBMITTED,
test_models.MockFTSTransfer.state == FTSState.ACTIVE)))
if query.update({'state': new_state,
'last_modified': datetime.datetime.utcnow()}) == 0:
return None
r = {'job_state': str(new_state)}
if new_state == FTSState.FAILED or new_state == FTSState.FINISHEDDIRTY:
r['reason'] = 'Mock FTS decided to kill your transfer.'
r['files'] = [{'source_surl': 'mock_src', 'dest_surl': 'mock_dest', 'reason': 'mock failure'}]
return r
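# Usage sketch (hypothetical): driving the mock server the way a submitter and
# poller pair would.
#
#     job = submit(str({'src': 'mock_src', 'dst': 'mock_dest'}))
#     state = query(job['job_id'])     # may randomly progress the transfer
#     if state and state['job_state'] == str(FTSState.FAILED):
#         cancel(job['job_id'])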
@transactional_session
def cancel(tid, session):
"""
Kills a transfer by setting its state to CANCELLED.
:param tid: The transfer job id.
"""
record_counter('daemons.mock.fts3.cancel')
ts = time.time()
query = session.query(test_models.MockFTSTransfer).filter_by(transfer_id=tid)
query.update({'state': FTSState.CANCELED,
'last_modified': datetime.datetime.utcnow()})
record_timer('daemons.mock.fts3.cancel.update_state', (time.time() - ts) * 1000)
| query |
collector.go | package collector
import (
"context"
"sync"
"github.com/puppetlabs/lumogon/logging"
"github.com/puppetlabs/lumogon/storage"
"github.com/puppetlabs/lumogon/types"
)
var mu sync.Mutex
var results map[string]types.ContainerReport
// RunCollector starts the collector which will block on reading all
// expected ContainerReports from the results channel, before sending
// them to the ReportStorage backend.
func RunCollector(ctx context.Context, wg *sync.WaitGroup, expectedResults int, resultsCh chan types.ContainerReport, backend storage.ReportStorage, reportID string) error {
defer logging.Debug("[Collector] Exiting")
defer wg.Done()
doneChannel := make(chan int)
results = make(map[string]types.ContainerReport)
go func() {
logging.Debug("[Collector] Waiting for %d results", expectedResults)
for i := 1; i <= expectedResults; i++ { | cacheResult(result)
logging.Debug("[Collector] Result received from name: %s, ID: %s", result.ContainerName, result.ContainerID)
}
doneChannel <- 0
}()
var resultsWg sync.WaitGroup
resultsWg.Add(1)
var err error
select {
case <-doneChannel:
logging.Debug("[Collector] All expected results received")
resultsWg.Done()
case <-ctx.Done():
logging.Debug("[Collector] Context timed out waiting for results, continuing...")
resultsWg.Done()
}
resultsWg.Wait()
logging.Debug("[Collector] Generating report: %s", reportID)
err = backend.Store(results, reportID)
return err
}
// cacheResult caches the supplied types.ContainerReport.
// The cache is a map from container ID to ContainerReport; the function either
// adds a new entry or merges the incoming capabilities into an existing report.
func cacheResult(result types.ContainerReport) {
logging.Debug("[Collector] Caching result")
defer logging.Debug("[Collector] Caching result complete")
mu.Lock()
defer mu.Unlock()
if _, ok := results[result.ContainerID]; ok {
for capabilityID, capabilityData := range result.Capabilities {
results[result.ContainerID].Capabilities[capabilityID] = capabilityData
}
return
}
results[result.ContainerID] = result
} | result := <-resultsCh
logging.Debug("[Collector] Received result [%d]", i) |
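// Usage sketch (hypothetical): wiring the collector into a scheduler with a
// timeout so it stops waiting even if some harvesters never report back.
// The goroutine's error is dropped here for brevity.
//
//   ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//   defer cancel()
//   var wg sync.WaitGroup
//   wg.Add(1)
//   go RunCollector(ctx, &wg, len(targets), resultsCh, backend, reportID)
//   wg.Wait()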
optionalCollegeId.dto.d.ts | export declare class OptionalCollegeIdDto { | collegeId?: string;
} |
|
typescript.rs | use super::{Emitter, Result};
use crate::{list::ListFormat, text_writer::WriteJs};
use swc_common::Spanned;
use swc_ecma_ast::*;
use swc_ecma_codegen_macros::emitter;
impl<'a> Emitter<'a> {
#[emitter]
fn emit_pat_or_ts_param_prop(&mut self, n: &ParamOrTsParamProp) -> Result {
match *n {
ParamOrTsParamProp::Param(ref n) => emit!(n),
ParamOrTsParamProp::TsParamProp(ref n) => emit!(n),
}
}
#[emitter]
fn emit_ts_array_type(&mut self, n: &TsArrayType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.elem_type);
punct!("[");
punct!("]");
}
#[emitter]
fn emit_ts_as_expr(&mut self, n: &TsAsExpr) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.expr);
space!();
keyword!("as");
space!();
emit!(n.type_ann);
}
#[emitter]
fn | (&mut self, n: &TsCallSignatureDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.type_params);
punct!("(");
self.emit_list(n.span, Some(&n.params), ListFormat::Parameters)?;
punct!(")");
if let Some(type_ann) = &n.type_ann {
space!();
punct!("=>");
space!();
emit!(type_ann);
}
}
#[emitter]
fn emit_ts_cond_type(&mut self, n: &TsConditionalType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.check_type);
space!();
keyword!("extends");
space!();
emit!(n.extends_type);
space!();
punct!("?");
space!();
emit!(n.true_type);
space!();
punct!(":");
space!();
emit!(n.false_type);
}
#[emitter]
fn emit_ts_constructor_signature_decl(&mut self, n: &TsConstructSignatureDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_constructor_signature_decl")
}
#[emitter]
fn emit_ts_constructor_type(&mut self, n: &TsConstructorType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
keyword!("new");
space!();
if let Some(type_params) = &n.type_params {
emit!(type_params);
}
punct!("(");
self.emit_list(n.span, Some(&n.params), ListFormat::Parameters)?;
punct!(")");
formatting_space!();
punct!("=>");
formatting_space!();
emit!(n.type_ann)
}
#[emitter]
fn emit_ts_entity_name(&mut self, n: &TsEntityName) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
match n {
TsEntityName::TsQualifiedName(n) => {
emit!(n);
punct!(".");
}
TsEntityName::Ident(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_enum_decl(&mut self, n: &TsEnumDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
if n.declare {
keyword!("declare");
space!();
}
if n.is_const {
keyword!("const");
space!();
}
keyword!("enum");
space!();
emit!(n.id);
formatting_space!();
punct!("{");
self.emit_list(n.span, Some(&n.members), ListFormat::EnumMembers)?;
punct!("}");
}
#[emitter]
fn emit_ts_enum_member(&mut self, n: &TsEnumMember) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.id);
if let Some(init) = &n.init {
formatting_space!();
punct!("=");
formatting_space!();
emit!(init);
}
}
#[emitter]
fn emit_ts_enum_member_id(&mut self, n: &TsEnumMemberId) -> Result {
match n {
TsEnumMemberId::Ident(n) => emit!(n),
TsEnumMemberId::Str(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_export_assignment(&mut self, n: &TsExportAssignment) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_export_assignment")
}
#[emitter]
fn emit_ts_expr_with_type_args(&mut self, n: &TsExprWithTypeArgs) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.expr);
emit!(n.type_args);
}
#[emitter]
fn emit_ts_external_module_ref(&mut self, n: &TsExternalModuleRef) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_external_module_ref")
}
#[emitter]
fn emit_ts_fn_or_constructor_type(&mut self, n: &TsFnOrConstructorType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
match n {
TsFnOrConstructorType::TsFnType(n) => emit!(n),
TsFnOrConstructorType::TsConstructorType(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_fn_param(&mut self, n: &TsFnParam) -> Result {
match n {
TsFnParam::Ident(n) => emit!(n),
TsFnParam::Array(n) => emit!(n),
TsFnParam::Rest(n) => emit!(n),
TsFnParam::Object(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_fn_type(&mut self, n: &TsFnType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.type_params);
punct!("(");
self.emit_list(n.span, Some(&n.params), ListFormat::Parameters)?;
punct!(")");
formatting_space!();
punct!("=>");
formatting_space!();
emit!(n.type_ann);
}
#[emitter]
fn emit_ts_import_equals_decl(&mut self, n: &TsImportEqualsDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
if n.is_export {
keyword!("export");
space!();
}
keyword!("import");
formatting_space!();
punct!("=");
formatting_space!();
emit!(n.module_ref);
}
#[emitter]
fn emit_ts_index_signature(&mut self, n: &TsIndexSignature) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("[");
self.emit_list(n.span, Some(&n.params), ListFormat::Parameters)?;
punct!("]");
punct!(":");
formatting_space!();
emit!(n.type_ann);
semi!();
}
#[emitter]
fn emit_ts_index_accessed_type(&mut self, n: &TsIndexedAccessType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.obj_type);
punct!("[");
emit!(n.index_type);
punct!("]");
}
#[emitter]
fn emit_ts_infer_type(&mut self, n: &TsInferType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
keyword!("infer");
space!();
emit!(n.type_param);
}
#[emitter]
fn emit_ts_interface_body(&mut self, n: &TsInterfaceBody) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("{");
self.emit_list(n.span, Some(&n.body), ListFormat::InterfaceMembers)?;
punct!("}");
}
#[emitter]
fn emit_ts_interface_decl(&mut self, n: &TsInterfaceDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
if n.declare {
keyword!("declare");
space!();
}
keyword!("interface");
space!();
emit!(n.id);
if !n.extends.is_empty() {
space!();
keyword!("extends");
space!();
self.emit_list(n.span, Some(&n.extends), ListFormat::HeritageClauseTypes)?;
}
formatting_space!();
emit!(n.body);
}
#[emitter]
fn emit_ts_intersection_type(&mut self, n: &TsIntersectionType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
self.emit_list(
n.span,
Some(&n.types),
ListFormat::IntersectionTypeConstituents,
)?;
}
#[emitter]
fn emit_ts_keyword_type(&mut self, n: &TsKeywordType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
match n.kind {
TsKeywordTypeKind::TsAnyKeyword => keyword!(n.span, "any"),
TsKeywordTypeKind::TsUnknownKeyword => keyword!(n.span, "unknown"),
TsKeywordTypeKind::TsNumberKeyword => keyword!(n.span, "number"),
TsKeywordTypeKind::TsObjectKeyword => keyword!(n.span, "object"),
TsKeywordTypeKind::TsBooleanKeyword => keyword!(n.span, "boolean"),
TsKeywordTypeKind::TsBigIntKeyword => keyword!(n.span, "bigint"),
TsKeywordTypeKind::TsStringKeyword => keyword!(n.span, "string"),
TsKeywordTypeKind::TsSymbolKeyword => keyword!(n.span, "symbol"),
TsKeywordTypeKind::TsVoidKeyword => keyword!(n.span, "void"),
TsKeywordTypeKind::TsUndefinedKeyword => keyword!(n.span, "undefined"),
TsKeywordTypeKind::TsNullKeyword => keyword!(n.span, "null"),
TsKeywordTypeKind::TsNeverKeyword => keyword!(n.span, "never"),
TsKeywordTypeKind::TsIntrinsicKeyword => keyword!(n.span, "intrinsic"),
}
}
#[emitter]
fn emit_ts_lit(&mut self, n: &TsLit) -> Result {
match n {
TsLit::BigInt(n) => emit!(n),
TsLit::Number(n) => emit!(n),
TsLit::Str(n) => emit!(n),
TsLit::Bool(n) => emit!(n),
TsLit::Tpl(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_tpl_lit(&mut self, node: &TsTplLitType) -> Result {
debug_assert!(node.quasis.len() == node.types.len() + 1);
self.emit_leading_comments_of_pos(node.span().lo())?;
punct!("`");
// Alternate between quasis (even indices) and type expressions (odd indices).
for i in 0..(node.quasis.len() + node.types.len()) {
if i % 2 == 0 {
emit!(node.quasis[i / 2]);
} else {
punct!("${");
emit!(node.types[i / 2]);
punct!("}");
}
}
punct!("`");
}
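// Worked example: for the type `prefix${A}mid${B}`, quasis is ["prefix", "mid", ""]
// and types is [A, B]; the alternating loop above re-emits the original source.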
#[emitter]
fn emit_ts_lit_type(&mut self, n: &TsLitType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.lit);
}
#[emitter]
fn emit_ts_mapped_type(&mut self, n: &TsMappedType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("{");
self.wr.write_line()?;
self.wr.increase_indent()?;
match n.readonly {
None => {}
Some(tpm) => match tpm {
TruePlusMinus::True => {
keyword!("readonly");
space!();
}
TruePlusMinus::Plus => {
punct!("+");
keyword!("readonly");
space!();
}
TruePlusMinus::Minus => {
punct!("-");
keyword!("readonly");
space!();
}
},
}
punct!("[");
emit!(n.type_param.name);
        if let Some(constraint) = &n.type_param.constraint {
            space!();
            keyword!("in");
            space!();
            // The constraint belongs immediately after `in`: `[K in C]`.
            emit!(constraint);
        }
        if let Some(default) = &n.type_param.default {
            formatting_space!();
            punct!("=");
            formatting_space!();
            emit!(default);
        }
punct!("]");
match n.optional {
None => {}
Some(tpm) => match tpm {
TruePlusMinus::True => {
punct!("?");
}
TruePlusMinus::Plus => {
punct!("+");
punct!("/");
}
TruePlusMinus::Minus => {
punct!("-");
punct!("?");
}
},
}
punct!(":");
space!();
emit!(n.type_ann);
semi!();
self.wr.write_line()?;
self.wr.decrease_indent()?;
punct!("}");
}
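    // Illustrative output shape produced by the branches above:
    //   {
    //       +readonly [K in keyof T]+?: T[K];
    //   }
    // with the `+`/`-` prefixes supplied by `TruePlusMinus`.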
#[emitter]
fn emit_ts_method_signature(&mut self, n: &TsMethodSignature) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
if n.readonly {
keyword!("readonly");
}
if n.computed {
punct!("[");
emit!(n.key);
punct!("]");
} else {
emit!(n.key)
}
if n.optional {
punct!("?");
}
if let Some(type_params) = &n.type_params {
emit!(type_params);
}
punct!("(");
self.emit_list(n.span, Some(&n.params), ListFormat::Parameters)?;
punct!(")");
if let Some(ref type_ann) = n.type_ann {
punct!(":");
formatting_space!();
emit!(type_ann);
}
}
#[emitter]
fn emit_ts_module_block(&mut self, n: &TsModuleBlock) -> Result {
self.emit_list(n.span, Some(&n.body), ListFormat::SourceFileStatements)?;
self.emit_leading_comments_of_pos(n.span().lo())?;
}
#[emitter]
fn emit_ts_module_decl(&mut self, n: &TsModuleDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
if n.declare {
keyword!("declare");
space!();
}
keyword!("module");
space!();
emit!(n.id);
formatting_space!();
if let Some(body) = &n.body {
emit!(body);
}
}
#[emitter]
fn emit_ts_module_name(&mut self, n: &TsModuleName) -> Result {
match n {
TsModuleName::Ident(n) => emit!(n),
TsModuleName::Str(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_module_ref(&mut self, n: &TsModuleRef) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_module_ref")
}
#[emitter]
fn emit_ts_ns_body(&mut self, n: &TsNamespaceBody) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("{");
self.wr.increase_indent()?;
match n {
TsNamespaceBody::TsModuleBlock(n) => emit!(n),
TsNamespaceBody::TsNamespaceDecl(n) => emit!(n),
}
self.wr.decrease_indent()?;
punct!("}");
}
#[emitter]
fn emit_ts_ns_decl(&mut self, n: &TsNamespaceDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_ns_decl")
}
#[emitter]
fn emit_ts_ns_export_decl(&mut self, n: &TsNamespaceExportDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_ns_export_decl")
}
#[emitter]
fn emit_ts_non_null_expr(&mut self, n: &TsNonNullExpr) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_non_null_expr")
}
#[emitter]
fn emit_ts_optional_type(&mut self, n: &TsOptionalType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.type_ann);
punct!("?");
}
#[emitter]
fn emit_ts_param_prop(&mut self, n: &TsParamProp) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
self.emit_accesibility(n.accessibility)?;
if n.readonly {
keyword!("readonly");
space!();
}
emit!(n.param);
}
#[emitter]
fn emit_ts_param_prop_param(&mut self, n: &TsParamPropParam) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
match n {
TsParamPropParam::Ident(n) => emit!(n),
TsParamPropParam::Assign(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_paren_type(&mut self, n: &TsParenthesizedType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("(");
emit!(n.type_ann);
punct!(")");
}
#[emitter]
fn emit_ts_property_signature(&mut self, n: &TsPropertySignature) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
if n.readonly {
keyword!("readonly");
space!();
}
if n.computed {
punct!("[");
emit!(n.key);
punct!("]");
} else {
emit!(n.key);
}
if n.optional {
punct!("?");
}
emit!(n.type_params);
// punct!("(");
// self.emit_list(n.span, Some(&n.params), ListFormat::Parameters)?;
// punct!(")");
if let Some(type_ann) = &n.type_ann {
punct!(":");
formatting_space!();
emit!(type_ann);
}
if let Some(init) = &n.init {
formatting_space!();
punct!("=");
formatting_space!();
emit!(init);
}
}
#[emitter]
fn emit_ts_qualified_name(&mut self, n: &TsQualifiedName) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.left);
punct!(".");
emit!(n.right);
}
#[emitter]
fn emit_ts_rest_type(&mut self, n: &TsRestType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("...");
emit!(n.type_ann);
}
#[emitter]
fn emit_ts_signature_decl(&mut self, n: &TsSignatureDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_signature_decl")
}
#[emitter]
fn emit_ts_this_type(&mut self, n: &TsThisType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
keyword!(n.span, "this");
}
#[emitter]
fn emit_ts_this_type_or_ident(&mut self, n: &TsThisTypeOrIdent) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
match n {
TsThisTypeOrIdent::TsThisType(n) => emit!(n),
TsThisTypeOrIdent::Ident(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_tuple_type(&mut self, n: &TsTupleType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("[");
self.emit_list(n.span, Some(&n.elem_types), ListFormat::TupleTypeElements)?;
punct!("]");
}
#[emitter]
fn emit_ts_tuple_element(&mut self, n: &TsTupleElement) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
if let Some(label) = &n.label {
emit!(label);
punct!(":");
formatting_space!();
}
emit!(n.ty)
}
#[emitter]
fn emit_ts_type(&mut self, n: &TsType) -> Result {
match n {
TsType::TsKeywordType(n) => emit!(n),
TsType::TsThisType(n) => emit!(n),
TsType::TsFnOrConstructorType(n) => emit!(n),
TsType::TsTypeRef(n) => emit!(n),
TsType::TsTypeQuery(n) => emit!(n),
TsType::TsTypeLit(n) => emit!(n),
TsType::TsArrayType(n) => emit!(n),
TsType::TsTupleType(n) => emit!(n),
TsType::TsOptionalType(n) => emit!(n),
TsType::TsRestType(n) => emit!(n),
TsType::TsUnionOrIntersectionType(n) => emit!(n),
TsType::TsConditionalType(n) => emit!(n),
TsType::TsInferType(n) => emit!(n),
TsType::TsParenthesizedType(n) => emit!(n),
TsType::TsTypeOperator(n) => emit!(n),
TsType::TsIndexedAccessType(n) => emit!(n),
TsType::TsMappedType(n) => emit!(n),
TsType::TsLitType(n) => emit!(n),
TsType::TsTypePredicate(n) => emit!(n),
TsType::TsImportType(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_import_type(&mut self, n: &TsImportType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
keyword!("import");
punct!("(");
emit!(n.arg);
punct!(")");
if let Some(n) = &n.qualifier {
punct!(".");
emit!(n);
}
if let Some(type_args) = &n.type_args {
punct!("<");
emit!(type_args);
punct!(">");
}
}
#[emitter]
fn emit_ts_type_alias_decl(&mut self, n: &TsTypeAliasDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
if n.declare {
keyword!("declare");
space!();
}
keyword!("type");
space!();
emit!(n.id);
if let Some(type_params) = &n.type_params {
emit!(type_params);
}
formatting_space!();
punct!("=");
formatting_space!();
emit!(n.type_ann);
semi!();
}
#[emitter]
fn emit_ts_type_ann(&mut self, n: &TsTypeAnn) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.type_ann)
}
#[emitter]
fn emit_ts_type_assertion(&mut self, n: &TsTypeAssertion) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_type_assertion")
}
#[emitter]
fn emit_ts_const_assertion(&mut self, n: &TsConstAssertion) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.expr);
space!();
keyword!("as");
space!();
keyword!("const");
}
#[emitter]
fn emit_ts_type_cast_expr(&mut self, n: &TsTypeCastExpr) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
unimplemented!("emit_ts_type_cast_expr")
}
#[emitter]
fn emit_ts_type_element(&mut self, n: &TsTypeElement) -> Result {
match n {
TsTypeElement::TsCallSignatureDecl(n) => emit!(n),
TsTypeElement::TsConstructSignatureDecl(n) => emit!(n),
TsTypeElement::TsPropertySignature(n) => emit!(n),
TsTypeElement::TsMethodSignature(n) => emit!(n),
TsTypeElement::TsIndexSignature(n) => emit!(n),
}
semi!();
}
#[emitter]
fn emit_ts_type_lit(&mut self, n: &TsTypeLit) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("{");
self.emit_list(
n.span,
Some(&n.members),
ListFormat::MultiLineTypeLiteralMembers,
)?;
punct!("}");
}
#[emitter]
fn emit_ts_type_operator(&mut self, n: &TsTypeOperator) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
match n.op {
TsTypeOperatorOp::KeyOf => keyword!("keyof"),
TsTypeOperatorOp::Unique => keyword!("unique"),
TsTypeOperatorOp::ReadOnly => keyword!("readonly"),
}
space!();
emit!(n.type_ann);
}
#[emitter]
fn emit_ts_type_param(&mut self, n: &TsTypeParam) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.name);
if let Some(constraints) = &n.constraint {
space!();
keyword!("extends");
space!();
emit!(constraints);
}
if let Some(default) = &n.default {
formatting_space!();
punct!("=");
formatting_space!();
emit!(default);
}
}
#[emitter]
fn emit_ts_type_param_decl(&mut self, n: &TsTypeParamDecl) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("<");
self.emit_list(n.span, Some(&n.params), ListFormat::TypeParameters)?;
punct!(">");
}
#[emitter]
fn emit_ts_type_param_instantiation(&mut self, n: &TsTypeParamInstantiation) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
punct!("<");
self.emit_list(n.span, Some(&n.params), ListFormat::TypeParameters)?;
punct!(">");
}
#[emitter]
fn emit_ts_type_predicate(&mut self, n: &TsTypePredicate) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
if n.asserts {
keyword!("asserts");
space!();
}
emit!(n.param_name);
if let Some(type_ann) = &n.type_ann {
space!();
keyword!("is");
space!();
emit!(type_ann);
}
}
#[emitter]
fn emit_ts_type_query(&mut self, n: &TsTypeQuery) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
keyword!("typeof");
space!();
emit!(n.expr_name);
}
#[emitter]
fn emit_ts_type_query_expr(&mut self, n: &TsTypeQueryExpr) -> Result {
match n {
TsTypeQueryExpr::TsEntityName(n) => emit!(n),
TsTypeQueryExpr::Import(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_type_ref(&mut self, n: &TsTypeRef) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
emit!(n.type_name);
if let Some(n) = &n.type_params {
punct!("<");
self.emit_list(n.span, Some(&n.params), ListFormat::TypeArguments)?;
punct!(">");
}
}
#[emitter]
fn emit_ts_union_or_intersection_type(&mut self, n: &TsUnionOrIntersectionType) -> Result {
match n {
TsUnionOrIntersectionType::TsUnionType(n) => emit!(n),
TsUnionOrIntersectionType::TsIntersectionType(n) => emit!(n),
}
}
#[emitter]
fn emit_ts_union_type(&mut self, n: &TsUnionType) -> Result {
self.emit_leading_comments_of_pos(n.span().lo())?;
self.emit_list(n.span, Some(&n.types), ListFormat::UnionTypeConstituents)?;
}
}
verify_email.py | from src.main.config import config
import requests
import json
def validate_email(email):
try:
api_response = requests.post(
config.EMAIL_VERIFICATION_URL.format(config.NEVERBOUNCE_API_KEY, email)
).content
        api_response = json.loads(api_response)
    except Exception:
        # Network, API, or JSON decoding failures all surface as one error.
        raise Exception('Error(s) happened when validating email')
    # Check the verdict outside the try block so 'Invalid email' is not
    # swallowed by the generic handler above.
    if api_response['result'] == 'invalid':
        raise Exception('Invalid email')
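# Illustrative usage (the address below is a placeholder and the config
# values are assumed to be set):
#
#   try:
#       validate_email('someone@example.com')
#   except Exception as exc:
#       print(exc)  # 'Invalid email' or the generic lookup failure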
test_resource_periods_as_text.py | import datetime
import pytest
from django.utils import translation
from hours.enums import FrequencyModifier, RuleContext, RuleSubject, State, Weekday
from hours.models import Rule
from hours.tests.conftest import (
DatePeriodFactory,
RuleFactory,
TimeSpanFactory,
TimeSpanGroupFactory,
)
@pytest.mark.django_db
@pytest.mark.parametrize("lang", ["en", "fi"])
def test_resource_opening_hours_as_text_no_date_periods(resource, lang):
with translation.override(lang):
assert resource._get_date_periods_as_text() == ""
@pytest.mark.django_db
def test_resource_opening_hours_as_text(resource):
DatePeriodFactory(
name="Special hours",
resource=resource,
resource_state=State.CLOSED,
start_date=datetime.date(year=2021, month=12, day=27),
end_date=datetime.date(year=2022, month=1, day=2),
override=True,
)
date_period = DatePeriodFactory(
name="Regular opening hours",
resource=resource,
resource_state=State.OPEN,
start_date=datetime.date(year=2021, month=1, day=1),
end_date=datetime.date(year=2022, month=12, day=31),
)
time_span_group = TimeSpanGroupFactory(period=date_period)
TimeSpanFactory(
name="Test time span",
group=time_span_group,
start_time=datetime.time(hour=9, minute=0),
end_time=datetime.time(hour=17, minute=0),
weekdays=[Weekday.MONDAY, Weekday.TUESDAY, Weekday.THURSDAY],
)
TimeSpanFactory(
name="Test time span...",
group=time_span_group,
start_time=datetime.time(hour=9, minute=0),
end_time=datetime.time(hour=19, minute=0),
weekdays=[Weekday.FRIDAY, Weekday.SATURDAY],
)
TimeSpanFactory(
name="Test time span 2",
group=time_span_group,
start_time=datetime.time(hour=10, minute=0),
end_time=datetime.time(hour=14, minute=0),
weekdays=[Weekday.SUNDAY],
)
RuleFactory(
group=time_span_group,
context=RuleContext.PERIOD,
subject=RuleSubject.WEEK,
frequency_modifier=FrequencyModifier.EVEN,
)
time_span_group2 = TimeSpanGroupFactory(period=date_period)
TimeSpanFactory(
name="Test time span 3",
group=time_span_group2,
start_time=datetime.time(hour=8, minute=0),
end_time=datetime.time(hour=16, minute=0),
weekdays=[Weekday.MONDAY, Weekday.TUESDAY],
)
TimeSpanFactory(
name="Test time span 4",
group=time_span_group2,
start_time=datetime.time(hour=9, minute=0),
end_time=datetime.time(hour=13, minute=0),
weekdays=Weekday.weekend(),
)
RuleFactory(
group=time_span_group2,
context=RuleContext.PERIOD,
subject=RuleSubject.MONTH,
frequency_ordinal=2,
)
RuleFactory(
group=time_span_group2,
context=RuleContext.PERIOD,
subject=RuleSubject.WEEK,
frequency_modifier=FrequencyModifier.ODD,
)
with translation.override("en"):
assert resource._get_date_periods_as_text() == (
"\n"
"========================================\n"
"Regular opening hours\n"
"Date period: Jan. 1, 2021 - Dec. 31, 2022\n"
"Opening hours:\n"
"\n"
" Monday-Tuesday, Thursday 9 a.m.-5 p.m. Open\n"
" Friday-Saturday 9 a.m.-7 p.m. Open\n"
" Sunday 10 a.m.-2 p.m. Open\n"
"\n"
" In effect when every one of these match:\n"
" - On even weeks in the period\n"
"\n"
" ---------------------------------------\n"
"\n"
" Monday-Tuesday 8 a.m.-4 p.m. Open\n"
" Saturday-Sunday 9 a.m.-1 p.m. Open\n"
"\n"
" In effect when every one of these match:\n"
" - Every 2nd month in the period\n"
" - On odd weeks in the period\n"
"\n"
"========================================\n"
"Special hours\n"
"Date period: Dec. 27, 2021 - Jan. 2, 2022\n"
"Opening hours:\n"
"\n"
" Closed\n"
"\n"
"========================================\n"
)
with translation.override("fi"):
assert resource._get_date_periods_as_text() == (
"\n"
"========================================\n"
"Regular opening hours\n"
"Aikajakso: 1. tammikuuta 2021 - 31. joulukuuta 2022\n"
"Aukioloajat:\n"
"\n"
" Maanantai-Tiistai, Torstai 9.00-17.00 Auki\n"
" Perjantai-Lauantai 9.00-19.00 Auki\n"
" Sunnuntai 10.00-14.00 Auki\n"
"\n"
" Voimassa kun kaikki seuraavat pätevät:\n"
" - Jakson jokainen parillinen viikko\n"
"\n"
" ---------------------------------------\n"
"\n"
" Maanantai-Tiistai 8.00-16.00 Auki\n"
" Lauantai-Sunnuntai 9.00-13.00 Auki\n"
"\n"
" Voimassa kun kaikki seuraavat pätevät:\n"
" - Jakson joka 2. kuukausi\n"
" - Jakson jokainen pariton viikko\n"
"\n"
"========================================\n"
"Special hours\n"
"Aikajakso: 27. joulukuuta 2021 - 2. tammikuuta 2022\n"
"Aukioloajat:\n"
"\n"
" Suljettu\n"
"\n"
"========================================\n"
)
@pytest.mark.django_db
@pytest.mark.parametrize(
"modifier", [None, FrequencyModifier.EVEN, FrequencyModifier.ODD]
)
@pytest.mark.parametrize("start", [None, 1, 2, -1, -2])
@pytest.mark.parametrize("ordinal", [None, 1, 4])
@pytest.mark.parametrize("subject", list(RuleSubject))
@pytest.mark.parametrize("context", list(RuleContext))
def test_rule_as_text_frequency_ordinal(context, subject, start, ordinal, modifier):
if not any([start, ordinal, modifier]) or (
subject == RuleSubject.MONTH and context == RuleContext.MONTH
):
pytest.skip("Won't test this combination as it's an invalid rule")
rule = Rule(
context=context,
subject=subject,
start=start,
frequency_ordinal=ordinal,
frequency_modifier=modifier,
)
with translation.override("en"):
rule_as_text_en = rule.as_text()
with translation.override("fi"):
rule_as_text_fi = rule.as_text()
assert rule_as_text_en
assert rule_as_text_fi
assert rule_as_text_en != rule_as_text_fi
@pytest.mark.django_db
@pytest.mark.parametrize("lang", ["en", "fi"])
def test_resource_date_periods_as_text_is_kept_up_to_date(resource, lang):
    assert resource.date_periods_as_text == ""
date_period = DatePeriodFactory(
name="Test hours",
resource=resource,
resource_state=State.OPEN,
start_date=datetime.date(year=2021, month=1, day=1),
end_date=datetime.date(year=2022, month=12, day=31),
)
assert resource.date_periods_as_text == (
"\n========================================\n"
"Test hours\n"
"Aikajakso: 1. tammikuuta 2021 - 31. joulukuuta 2022\n"
"Aukioloajat:\n"
"\n"
" Auki\n"
"\n"
"========================================\n"
)
date_period.resource_state = State.CLOSED
date_period.save()
assert resource.date_periods_as_text == (
"\n========================================\n"
"Test hours\n"
"Aikajakso: 1. tammikuuta 2021 - 31. joulukuuta 2022\n"
"Aukioloajat:\n"
"\n"
" Suljettu\n"
"\n"
"========================================\n"
)
time_span_group = TimeSpanGroupFactory(period=date_period)
TimeSpanFactory(
group=time_span_group,
start_time=datetime.time(hour=10, minute=0),
end_time=datetime.time(hour=12, minute=0),
weekdays=[Weekday.MONDAY],
resource_state=State.OPEN,
)
assert resource.date_periods_as_text == (
"\n========================================\n"
"Test hours\n"
"Aikajakso: 1. tammikuuta 2021 - 31. joulukuuta 2022\n"
"Aukioloajat:\n"
"\n"
" Maanantai 10.00-12.00 Auki\n"
"\n"
"========================================\n"
)
RuleFactory(
group=time_span_group,
context=RuleContext.PERIOD,
subject=RuleSubject.WEEK,
frequency_ordinal=2,
)
assert resource.date_periods_as_text == (
"\n========================================\n"
"Test hours\n"
"Aikajakso: 1. tammikuuta 2021 - 31. joulukuuta 2022\n"
"Aukioloajat:\n"
"\n"
" Maanantai 10.00-12.00 Auki\n"
"\n"
" Voimassa kun kaikki seuraavat pätevät:\n"
" - Jakson joka 2. viikko\n"
"\n"
"========================================\n"
)
extension.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { window, commands, ExtensionContext } from 'vscode';
import { showQuickPick, showInputBox } from './basicInput';
import { multiStepInput } from './multiStepInput';
import { quickOpen } from './quickOpen';
export function activate(context: ExtensionContext) {
context.subscriptions.push(commands.registerCommand('samples.quickInput', async () => {
const options: { [key: string]: (context: ExtensionContext) => Promise<void> } = {
showQuickPick,
showInputBox,
multiStepInput,
quickOpen,
};
const quickPick = window.createQuickPick();
quickPick.items = Object.keys(options).map(label => ({ label }));
quickPick.onDidChangeSelection(selection => {
if (selection[0]) {
options[selection[0].label](context)
.catch(console.error);
}
});
quickPick.onDidHide(() => quickPick.dispose());
quickPick.show();
}));
}
public_delegated_prefixes_client_example_test.go | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
package compute_test
import (
"context"
compute "cloud.google.com/go/compute/apiv1"
"google.golang.org/api/iterator"
computepb "google.golang.org/genproto/googleapis/cloud/compute/v1"
)
func ExampleNewPublicDelegatedPrefixesRESTClient() {
ctx := context.Background()
c, err := compute.NewPublicDelegatedPrefixesRESTClient(ctx)
if err != nil {
// TODO: Handle error.
}
defer c.Close()
// TODO: Use client.
_ = c
}
func ExamplePublicDelegatedPrefixesClient_AggregatedList() {
ctx := context.Background()
c, err := compute.NewPublicDelegatedPrefixesRESTClient(ctx)
if err != nil {
// TODO: Handle error.
}
defer c.Close()
req := &computepb.AggregatedListPublicDelegatedPrefixesRequest{
// TODO: Fill request struct fields.
// See https://pkg.go.dev/google.golang.org/genproto/googleapis/cloud/compute/v1#AggregatedListPublicDelegatedPrefixesRequest.
}
it := c.AggregatedList(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}
func ExamplePublicDelegatedPrefixesClient_Delete() {
ctx := context.Background()
c, err := compute.NewPublicDelegatedPrefixesRESTClient(ctx)
if err != nil {
// TODO: Handle error.
}
defer c.Close()
req := &computepb.DeletePublicDelegatedPrefixeRequest{
// TODO: Fill request struct fields.
// See https://pkg.go.dev/google.golang.org/genproto/googleapis/cloud/compute/v1#DeletePublicDelegatedPrefixeRequest.
}
resp, err := c.Delete(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
// TODO: Use resp.
_ = resp
}
func ExamplePublicDelegatedPrefixesClient_Get() {
ctx := context.Background()
c, err := compute.NewPublicDelegatedPrefixesRESTClient(ctx)
if err != nil {
// TODO: Handle error.
}
defer c.Close()
req := &computepb.GetPublicDelegatedPrefixeRequest{
// TODO: Fill request struct fields.
// See https://pkg.go.dev/google.golang.org/genproto/googleapis/cloud/compute/v1#GetPublicDelegatedPrefixeRequest.
}
resp, err := c.Get(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExamplePublicDelegatedPrefixesClient_Insert() {
ctx := context.Background()
c, err := compute.NewPublicDelegatedPrefixesRESTClient(ctx)
if err != nil {
// TODO: Handle error.
}
defer c.Close()
req := &computepb.InsertPublicDelegatedPrefixeRequest{
// TODO: Fill request struct fields.
// See https://pkg.go.dev/google.golang.org/genproto/googleapis/cloud/compute/v1#InsertPublicDelegatedPrefixeRequest.
}
resp, err := c.Insert(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExamplePublicDelegatedPrefixesClient_List() {
ctx := context.Background()
c, err := compute.NewPublicDelegatedPrefixesRESTClient(ctx)
if err != nil {
// TODO: Handle error.
}
defer c.Close()
req := &computepb.ListPublicDelegatedPrefixesRequest{
// TODO: Fill request struct fields.
// See https://pkg.go.dev/google.golang.org/genproto/googleapis/cloud/compute/v1#ListPublicDelegatedPrefixesRequest.
}
it := c.List(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}
func ExamplePublicDelegatedPrefixesClient_Patch() {
ctx := context.Background()
c, err := compute.NewPublicDelegatedPrefixesRESTClient(ctx)
if err != nil {
// TODO: Handle error.
}
defer c.Close()
req := &computepb.PatchPublicDelegatedPrefixeRequest{
// TODO: Fill request struct fields.
// See https://pkg.go.dev/google.golang.org/genproto/googleapis/cloud/compute/v1#PatchPublicDelegatedPrefixeRequest.
}
resp, err := c.Patch(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
python_operator.py | """The dagster-airflow operators."""
from dagster_airflow.operators.util import invoke_steps_within_python_operator
from dagster_airflow.vendor.python_operator import PythonOperator


class DagsterPythonOperator(PythonOperator):
def __init__(self, dagster_operator_parameters, *args, **kwargs):
def python_callable(ts, dag_run, **kwargs): # pylint: disable=unused-argument
return invoke_steps_within_python_operator(
dagster_operator_parameters.invocation_args, ts, dag_run, **kwargs
)
super(DagsterPythonOperator, self).__init__(
task_id=dagster_operator_parameters.task_id,
provide_context=True,
python_callable=python_callable,
dag=dagster_operator_parameters.dag,
*args,
**kwargs,
        )
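# Design note: `python_callable` is a closure so that the Airflow-provided
# template context (`ts`, `dag_run`) can be forwarded to the Dagster step
# invocation without storing additional state on the operator instance.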
vehicle_candidate.py | # coding: utf-8
"""
OpenALPR Cloud API
No descripton provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class VehicleCandidate(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name=None, confidence=None):
"""
VehicleCandidate - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'str',
'confidence': 'float'
}
self.attribute_map = {
'name': 'name',
'confidence': 'confidence'
}
self._name = name
self._confidence = confidence
@property
def name(self):
"""
Gets the name of this VehicleCandidate.
name of value
:return: The name of this VehicleCandidate.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this VehicleCandidate.
name of value
:param name: The name of this VehicleCandidate.
:type: str
"""
self._name = name
@property
def confidence(self):
"""
Gets the confidence of this VehicleCandidate.
confidence of value (percent)
:return: The confidence of this VehicleCandidate.
:rtype: float
"""
return self._confidence
@confidence.setter
def confidence(self, confidence):
"""
Sets the confidence of this VehicleCandidate.
confidence of value (percent)
:param confidence: The confidence of this VehicleCandidate.
:type: float
"""
self._confidence = confidence
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
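    # Illustrative round trip:
    #   VehicleCandidate(name='toyota', confidence=92.5).to_dict()
    #   -> {'name': 'toyota', 'confidence': 92.5}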
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
    def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
userPolicy.go | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package iam
import (
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// Provides an IAM policy attached to a user.
//
// ## Example Usage
//
// ```go
// package main
//
// import (
// "fmt"
//
// "github.com/pulumi/pulumi-aws/sdk/v3/go/aws/iam"
// "github.com/pulumi/pulumi/sdk/v2/go/pulumi"
// )
//
// func main() {
// pulumi.Run(func(ctx *pulumi.Context) error {
// lbUser, err := iam.NewUser(ctx, "lbUser", &iam.UserArgs{
// Path: pulumi.String("/system/"),
// })
// if err != nil {
// return err
// }
// _, err = iam.NewUserPolicy(ctx, "lbRo", &iam.UserPolicyArgs{
// User: lbUser.Name,
// Policy: pulumi.String(fmt.Sprintf("%v%v%v%v%v%v%v%v%v%v%v%v", "{\n", " \"Version\": \"2012-10-17\",\n", " \"Statement\": [\n", " {\n", " \"Action\": [\n", " \"ec2:Describe*\"\n", " ],\n", " \"Effect\": \"Allow\",\n", " \"Resource\": \"*\"\n", " }\n", " ]\n", "}\n")),
// })
// if err != nil {
// return err
// }
// _, err = iam.NewAccessKey(ctx, "lbAccessKey", &iam.AccessKeyArgs{
// User: lbUser.Name,
// })
// if err != nil {
// return err
// }
// return nil
// })
// }
// ```
type UserPolicy struct {
pulumi.CustomResourceState
// The name of the policy. If omitted, this provider will assign a random, unique name.
Name pulumi.StringOutput `pulumi:"name"`
// Creates a unique name beginning with the specified prefix. Conflicts with `name`.
NamePrefix pulumi.StringPtrOutput `pulumi:"namePrefix"`
// The policy document. This is a JSON formatted string.
Policy pulumi.StringOutput `pulumi:"policy"`
// IAM user to which to attach this policy.
User pulumi.StringOutput `pulumi:"user"`
}
// NewUserPolicy registers a new resource with the given unique name, arguments, and options.
func NewUserPolicy(ctx *pulumi.Context,
name string, args *UserPolicyArgs, opts ...pulumi.ResourceOption) (*UserPolicy, error) {
if args == nil || args.Policy == nil {
return nil, errors.New("missing required argument 'Policy'")
}
if args == nil || args.User == nil {
return nil, errors.New("missing required argument 'User'")
}
if args == nil {
args = &UserPolicyArgs{}
}
var resource UserPolicy
err := ctx.RegisterResource("aws:iam/userPolicy:UserPolicy", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// GetUserPolicy gets an existing UserPolicy resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetUserPolicy(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *UserPolicyState, opts ...pulumi.ResourceOption) (*UserPolicy, error) {
var resource UserPolicy
err := ctx.ReadResource("aws:iam/userPolicy:UserPolicy", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering UserPolicy resources.
type userPolicyState struct {
// The name of the policy. If omitted, this provider will assign a random, unique name.
Name *string `pulumi:"name"`
// Creates a unique name beginning with the specified prefix. Conflicts with `name`.
NamePrefix *string `pulumi:"namePrefix"`
// The policy document. This is a JSON formatted string.
Policy *string `pulumi:"policy"`
// IAM user to which to attach this policy.
	User *string `pulumi:"user"`
}

type UserPolicyState struct {
// The name of the policy. If omitted, this provider will assign a random, unique name.
Name pulumi.StringPtrInput
// Creates a unique name beginning with the specified prefix. Conflicts with `name`.
NamePrefix pulumi.StringPtrInput
// The policy document. This is a JSON formatted string.
Policy pulumi.StringPtrInput
// IAM user to which to attach this policy.
User pulumi.StringPtrInput
}
func (UserPolicyState) ElementType() reflect.Type {
return reflect.TypeOf((*userPolicyState)(nil)).Elem()
}
type userPolicyArgs struct {
// The name of the policy. If omitted, this provider will assign a random, unique name.
Name *string `pulumi:"name"`
// Creates a unique name beginning with the specified prefix. Conflicts with `name`.
NamePrefix *string `pulumi:"namePrefix"`
// The policy document. This is a JSON formatted string.
Policy interface{} `pulumi:"policy"`
// IAM user to which to attach this policy.
User string `pulumi:"user"`
}
// The set of arguments for constructing a UserPolicy resource.
type UserPolicyArgs struct {
// The name of the policy. If omitted, this provider will assign a random, unique name.
Name pulumi.StringPtrInput
// Creates a unique name beginning with the specified prefix. Conflicts with `name`.
NamePrefix pulumi.StringPtrInput
// The policy document. This is a JSON formatted string.
Policy pulumi.Input
// IAM user to which to attach this policy.
User pulumi.StringInput
}
func (UserPolicyArgs) ElementType() reflect.Type {
return reflect.TypeOf((*userPolicyArgs)(nil)).Elem()
}
console_io.py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General console printing utilities used by the Cloud SDK."""
import logging
import os
import re
import sys
import textwrap
import threading
import time
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
from googlecloudsdk.core.console import console_pager
from googlecloudsdk.core.util import files
from googlecloudsdk.third_party.py27 import py27_subprocess as subprocess
FLOAT_COMPARE_EPSILON = 1e-6
class Error(exceptions.Error):
"""Base exception for the module."""
pass
class UnattendedPromptError(Error):
"""An exception for when a prompt cannot be answered."""
def __init__(self):
super(UnattendedPromptError, self).__init__(
'This prompt could not be answered because you are not in an '
'interactive session. You can re-run the command with the --quiet '
'flag to accept default answers for all prompts.')
class OperationCancelledError(Error):
"""An exception for when a prompt cannot be answered."""
def __init__(self):
super(OperationCancelledError, self).__init__('Operation cancelled.')
class TablePrinter(object):
"""Provides the ability to print a list of items as a formatted table.
Using this class helps you adhere to the gcloud style guide.
The table will auto size the columns to fit the maximum item length for that
column. You can also choose how to justify each column and to add extra
padding to each column.
"""
JUSTIFY_LEFT = '<'
JUSTIFY_RIGHT = '>'
JUSTIFY_CENTER = '^'
def __init__(self, headers, title=None,
justification=None, column_padding=None):
"""Creates a new TablePrinter.
Args:
headers: A tuple of strings that represent the column headers titles.
This can be a tuple of empty strings or None's if you do not want
headers displayed. The number of empty elements in the tuple must match
the number of columns you want to display.
title: str, An optional title for the table.
justification: A tuple of JUSTIFY_LEFT, JUSTIFY_RIGHT, JUSTIFY_CENTER that
describes the justification for each column. This must have the same
number of items as the headers tuple.
column_padding: A tuple of ints that describes the extra padding that
should be added to each column. This must have the same
number of items as the headers tuple.
Raises:
ValueError: If the justification or column_padding tuples are not of the
correct type or length.
"""
self.__headers = [h if h else '' for h in headers]
self.__title = title
self.__num_columns = len(self.__headers)
self.__header_widths = [len(str(x)) for x in self.__headers]
self.__column_padding = column_padding
if self.__column_padding is None:
self.__column_padding = tuple([0] * self.__num_columns)
    if (not isinstance(self.__column_padding, tuple) or
len(self.__column_padding) != self.__num_columns):
raise ValueError('Column padding tuple does not have {0} columns'
.format(self.__num_columns))
self.__justification = justification
if self.__justification is None:
self.__justification = tuple([TablePrinter.JUSTIFY_LEFT] *
self.__num_columns)
if (not isinstance(self.__justification, tuple) or
len(self.__justification) != self.__num_columns):
raise ValueError('Justification tuple does not have {0} columns'
.format(self.__num_columns))
for value in self.__justification:
if not (value is TablePrinter.JUSTIFY_LEFT or
value is TablePrinter.JUSTIFY_RIGHT or
value is TablePrinter.JUSTIFY_CENTER):
raise ValueError('Justification values must be one of JUSTIFY_LEFT, '
'JUSTIFY_RIGHT, or JUSTIFY_CENTER')
def SetTitle(self, title):
"""Sets the title of the table.
Args:
title: str, The new title.
"""
self.__title = title
def Log(self, rows, logger=None, level=logging.INFO):
"""Logs the given rows to the given logger.
Args:
rows: list of tuples, The rows to log the formatted table for.
logger: logging.Logger, The logger to do the logging. If None, the root
logger will be used.
level: logging level, An optional override for the logging level, INFO by
default.
"""
if not logger:
logger = log.getLogger()
lines = self.GetLines(rows)
for line in lines:
logger.log(level, line)
def Print(self, rows, output_stream=None, indent=0):
"""Prints the given rows to stdout.
Args:
rows: list of tuples, The rows to print the formatted table for.
output_stream: file-like object, The stream to wire the rows to. Defaults
to log.out if not given.
indent: int, The number of spaces to indent all lines of the table.
"""
if not output_stream:
output_stream = log.out
lines = self.GetLines(rows, indent=indent)
for line in lines:
output_stream.write(line + '\n')
def GetLines(self, rows, indent=0):
"""Gets a list of strings of formatted lines for the given rows.
Args:
rows: list of tuples, The rows to get the formatted table for.
indent: int, The number of spaces to indent all lines of the table.
Returns:
list of str, The lines of the formatted table that can be printed.
Raises:
ValueError: If any row does not have the correct number of columns.
"""
column_widths = list(self.__header_widths)
for row in rows:
if len(row) != self.__num_columns:
raise ValueError('Row [{row}] does not have {rows} columns'
.format(row=row, rows=self.__num_columns))
# Find the max width of each column
for i in range(self.__num_columns):
column_widths[i] = max(column_widths[i], len(str(row[i])))
# Add padding
column_widths = [column_widths[i] + self.__column_padding[i]
for i in range(self.__num_columns)]
total_width = (len(column_widths) - 1) * 3
for width in column_widths:
total_width += width
edge_line = ('--' +
'---'.join(['-' * width for width in column_widths]) +
'--')
title_divider_line = ('|-' +
'---'.join(['-' * width for width in column_widths]) +
'-|')
divider_line = ('|-' +
'-+-'.join(['-' * width for width in column_widths]) +
'-|')
lines = [edge_line]
if self.__title:
title_line = '| {{title:{justify}{width}s}} |'.format(
justify=TablePrinter.JUSTIFY_CENTER, width=total_width).format(
title=self.__title)
lines.append(title_line)
lines.append(title_divider_line)
# Generate format strings with the correct width for each column
column_formats = []
for i in range(self.__num_columns):
column_formats.append('{{i{i}:{justify}{width}s}}'.format(
i=i, justify=self.__justification[i], width=column_widths[i]))
pattern = '| ' + ' | '.join(column_formats) + ' |'
def _ParameterizedArrayDict(array):
return dict(('i{i}'.format(i=i), array[i]) for i in range(len(array)))
if [h for h in self.__headers if h]:
# Only print headers if there is at least one non-empty header
lines.append(pattern.format(**_ParameterizedArrayDict(self.__headers)))
lines.append(divider_line)
lines.extend([pattern.format(**_ParameterizedArrayDict(row))
for row in rows])
lines.append(edge_line)
if indent:
return [(' ' * indent) + l for l in lines]
return lines
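# Illustrative TablePrinter usage: headers are positional, title is optional.
#
#   printer = TablePrinter(('NAME', 'ZONE'), title='Instances')
#   printer.Print([('vm-1', 'us-central1-a'), ('vm-2', 'europe-west1-b')])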
class ListPrinter(object):
"""Provides the ability to print a list of items as a formatted list.
Using this class helps you adhere to the gcloud style guide.
"""
def __init__(self, title):
"""Create a titled list printer that can print rows to stdout.
Args:
title: A string for the title of the list.
"""
self.__title = title
def Print(self, rows, output_stream=None):
"""Print this list with the provided rows to stdout.
Args:
rows: A list of objects representing the rows of this list. Before being
printed, they will be converted to strings.
output_stream: file-like object, The stream to wire the rows to. Defaults
to log.out if not given.
"""
if not output_stream:
output_stream = log.out
output_stream.write(self.__title + '\n')
for row in rows:
output_stream.write(' - ' + str(row) + '\n')
TEXTWRAP = textwrap.TextWrapper(replace_whitespace=False,
drop_whitespace=False,
break_on_hyphens=False)
def _DoWrap(message):
"""Text wrap the given message and correctly handle newlines in the middle.
Args:
message: str, The message to wrap. It may have newlines in the middle of
it.
Returns:
str, The wrapped message.
"""
return '\n'.join([TEXTWRAP.fill(line) for line in message.splitlines()])
def _RawInput(prompt=None):
"""A simple redirect to the built-in raw_input function.
If the prompt is given, it is correctly line wrapped.
Args:
prompt: str, An optional prompt.
Returns:
The input from stdin.
"""
if prompt:
sys.stderr.write(_DoWrap(prompt))
try:
return raw_input()
except EOFError:
return None
def IsInteractive(output=False, error=False, heuristic=False):
"""Determines if the current terminal session is interactive.
sys.stdin must be a terminal input stream.
Args:
output: If True then sys.stdout must also be a terminal output stream.
error: If True then sys.stderr must also be a terminal output stream.
heuristic: If True then we also do some additional heuristics to check if
we are in an interactive context. Checking home path for example.
Returns:
True if the current terminal session is interactive.
"""
if not sys.stdin.isatty():
return False
if output and not sys.stdout.isatty():
return False
if error and not sys.stderr.isatty():
return False
if heuristic:
# Check the home path. Most startup scripts for example are executed by
# users that don't have a home path set. Home is OS dependent though, so
# check everything.
# *NIX OS usually sets the HOME env variable. It is usually '/home/user',
# but can also be '/root'. If it's just '/' we are most likely in an init
# script.
# Windows usually sets HOMEDRIVE and HOMEPATH. If they don't exist we are
# probably being run from a task scheduler context. HOMEPATH can be '\'
# when a user has a network mapped home directory.
# Cygwin has it all! Both Windows and Linux. Checking both is perfect.
home = os.getenv('HOME')
homepath = os.getenv('HOMEPATH')
if not homepath and (not home or home == '/'):
return False
return True
def CanPrompt():
"""Returns true if we can prompt the user for information.
This combines all checks (IsInteractive(), disable_prompts is False) to
verify that we can prompt the user for information.
Returns:
bool, True if we can prompt the user for information.
"""
return (IsInteractive(error=True) and
not properties.VALUES.core.disable_prompts.GetBool())
def PromptContinue(message=None, prompt_string=None, default=True,
throw_if_unattended=False, cancel_on_no=False):
"""Prompts the user a yes or no question and asks if they want to continue.
Args:
message: str, The prompt to print before the question.
prompt_string: str, An alternate yes/no prompt to display. If None, it
defaults to 'Do you want to continue'.
default: bool, What the default answer should be. True for yes, False for
no.
throw_if_unattended: bool, If True, this will throw if there was nothing
to consume on stdin and stdin is not a tty.
cancel_on_no: bool, If True and the user answers no, throw an exception to
cancel the entire operation. Useful if you know you don't want to
continue doing anything and don't want to have to raise your own
exception.
Raises:
UnattendedPromptError: If there is no input to consume and this is not
running in an interactive terminal.
OperationCancelledError: If the user answers no and cancel_on_no is True.
Returns:
bool, False if the user said no, True if the user said anything else or if
prompts are disabled.
"""
if properties.VALUES.core.disable_prompts.GetBool():
if not default and cancel_on_no:
raise OperationCancelledError()
return default
if message:
sys.stderr.write(_DoWrap(message) + '\n\n')
if not prompt_string:
prompt_string = 'Do you want to continue'
if default:
prompt_string += ' (Y/n)? '
else:
prompt_string += ' (y/N)? '
sys.stderr.write(_DoWrap(prompt_string))
def GetAnswer():
while True:
answer = _RawInput()
# pylint:disable=g-explicit-bool-comparison, We explicitly want to
# distinguish between empty string and None.
if answer == '':
# User just hit enter, return default.
sys.stderr.write('\n')
return default
elif answer is None:
# This means we hit EOF, no input or user closed the stream.
if throw_if_unattended and not IsInteractive():
sys.stderr.write('\n')
raise UnattendedPromptError()
else:
sys.stderr.write('\n')
return default
elif answer.lower() in ['y', 'yes']:
sys.stderr.write('\n')
return True
elif answer.lower() in ['n', 'no']:
sys.stderr.write('\n')
return False
else:
sys.stderr.write("Please enter 'y' or 'n': ")
answer = GetAnswer()
if not answer and cancel_on_no:
raise OperationCancelledError()
return answer
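# Illustrative: guard a destructive operation behind a y/N prompt.
#
#   if not PromptContinue(message='This will delete all versions.',
#                         default=False):
#     return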
def PromptResponse(message):
"""Prompts the user for a string.
Args:
message: str, The prompt to print before the question.
Returns:
str, The string entered by the user, or None if prompts are disabled.
"""
if properties.VALUES.core.disable_prompts.GetBool():
return None
response = _RawInput(message)
return response
def PromptWithDefault(message, default=None):
"""Prompts the user for a string, allowing a default.
Unlike PromptResponse, this also appends a ': ' to the prompt. If 'default'
is specified, the default is also written written into the prompt (e.g.
if message is "message" and default is "default", the prompt would be
"message (default): ").
The default is returned if the user simply presses enter (no input) or an
EOF is received.
Args:
message: str, The prompt to print before the question.
default: str, The default value (if any).
Returns:
str, The string entered by the user, or the default if no value was
entered or prompts are disabled.
"""
if properties.VALUES.core.disable_prompts.GetBool():
return default
if default:
message += ' ({default}): '.format(default=default)
else:
message += ': '
response = _RawInput(message)
if not response:
response = default
return response
def PromptChoice(options, default=None, message=None, prompt_string=None):
"""Prompt the user to select a choice from a list of items.
Args:
options: [object], A list of objects to print as choices. Their str()
method will be used to display them.
default: int, The default index to return if prompting is disabled or if
they do not enter a choice.
message: str, An optional message to print before the choices are displayed.
prompt_string: str, A string to print when prompting the user to enter a
choice. If not given, a default prompt is used.
Raises:
ValueError: If no options are given or if the default is not in the range of
available options.
Returns:
The index of the item in the list that was chosen, or the default if prompts
are disabled.
"""
if not options:
raise ValueError('You must provide at least one option.')
maximum = len(options)
if default is not None and not 0 <= default < maximum:
raise ValueError(
'Default option [{default}] is not a valid index for the options list '
'[{maximum} options given]'.format(default=default, maximum=maximum))
if properties.VALUES.core.disable_prompts.GetBool():
return default
if message:
sys.stderr.write(_DoWrap(message) + '\n')
for i, option in enumerate(options):
sys.stderr.write(' [{index}] {option}\n'.format(
index=i + 1, option=str(option)))
if not prompt_string:
prompt_string = 'Please enter your numeric choice'
if default is None:
suffix_string = ': '
else:
suffix_string = ' ({default}): '.format(default=default + 1)
sys.stderr.write(_DoWrap(prompt_string + suffix_string))
while True:
answer = _RawInput()
    if answer is None or (answer == '' and default is not None):
# Return default if we failed to read from stdin
# Return default if the user hit enter and there is a valid default
# Prompt again otherwise
sys.stderr.write('\n')
return default
try:
num_choice = int(answer)
if num_choice < 1 or num_choice > maximum:
raise ValueError('Choice must be between 1 and {maximum}'.format(
maximum=maximum))
sys.stderr.write('\n')
return num_choice - 1
except ValueError:
sys.stderr.write('Please enter a value between 1 and {maximum}: '
.format(maximum=maximum))
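# Illustrative: returns a 0-based index; pressing Enter takes the default.
#
#   idx = PromptChoice(['vim', 'emacs', 'nano'], default=2,
#                      message='Choose an editor')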
def LazyFormat(s, **kwargs):
"""Converts {key} => value for key, value in kwargs.iteritems().
After the {key} converstions it converts {{<identifier>}} => {<identifier>}.
Args:
s: str, The string to format.
**kwargs: {str:str}, A dict of strings for named parameters.
Returns:
str, The lazily-formatted string.
"""
for key, value in kwargs.iteritems():
fmt = '{' + key + '}'
start = 0
while True:
start = s.find(fmt, start)
if start == -1:
break
if (start and s[start - 1] == '{' and
len(fmt) < len(s[start:]) and s[start + len(fmt)] == '}'):
# {{key}} => {key}
s = s[0:start - 1] + fmt + s[start + len(fmt) + 1:]
start += len(fmt)
else:
# {key} => value
s = s[0:start] + value + s[start + len(fmt):]
start += len(value)
# {{unknown}} => {unknown}
return re.sub(r'{({\w+})}', r'\1', s)
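# Illustrative: named substitution plus double-brace unescaping.
#
#   LazyFormat('{greeting}, {{name}}!', greeting='hello')
#   -> 'hello, {name}!'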
def PrintExtendedList(items, col_fetchers):
"""Print a properly formated extended list for some set of resources.
If items is a generator, this function may elect to only request those rows
that it is ready to display.
Args:
items: [resource] or a generator producing resources, The objects
representing cloud resources.
col_fetchers: [(string, func(resource))], A list of tuples, one for each
column, in the order that they should appear. The string is the title
of that column which will be printed in a header. The func is a function
that will fetch a row-value for that column, given the resource
corresponding to the row.
"""
total_items = 0
rows = [[title for (title, unused_func) in col_fetchers]]
for item in items:
total_items += 1
row = []
for (unused_title, func) in col_fetchers:
value = func(item)
if value is None:
row.append('-')
else:
row.append(value)
rows.append(row)
attr = console_attr.GetConsoleAttr()
max_col_widths = [0] * len(col_fetchers)
for row in rows:
for col in range(len(row)):
max_col_widths[col] = max(max_col_widths[col],
attr.DisplayWidth(unicode(row[col]))+2)
for row in rows:
for col in range(len(row)):
width = max_col_widths[col]
item = unicode(row[col])
item_width = attr.DisplayWidth(item)
if item_width < width and col != len(row) - 1:
item += u' ' * (width - item_width)
log.out.write(item)
log.out.write('\n')
if not total_items:
log.status.write('Listed 0 items.\n')
class ProgressTracker(object):
"""A context manager for telling the user about long-running progress."""
SPIN_MARKS = [
'|',
'/',
'-',
'\\',
]
def __init__(self, message, autotick=True, detail_message_callback=None,
tick_delay=1):
self._message = message
self._prefix = message + '...'
self._ticks = 0
self._autotick = autotick
self._done = False
self._lock = threading.Lock()
self._detail_message_callback = detail_message_callback
self._last_message_size = 0
self._tick_delay = tick_delay
self._is_tty = IsInteractive(output=True, error=True)
def _GetPrefix(self):
if self._detail_message_callback:
detail_message = self._detail_message_callback()
if detail_message:
return self._prefix + ' ' + detail_message + '...'
return self._prefix
def __enter__(self):
log.file_only_logger.info(self._GetPrefix())
self._Print()
if self._autotick:
def Ticker():
while True:
time.sleep(self._tick_delay)
if self.Tick():
return
threading.Thread(target=Ticker).start()
return self
def Tick(self):
"""Give a visual indication to the user that some progress has been made.
Output is sent to sys.stderr. Nothing is shown if output is not a TTY.
Returns:
Whether progress has completed.
"""
if self._is_tty:
with self._lock:
if not self._done:
self._ticks += 1
self._Print(ProgressTracker.SPIN_MARKS[
self._ticks % len(ProgressTracker.SPIN_MARKS)])
return self._done
def _Print(self, message=''):
"""Reprints the prefix followed by an optional message."""
display_message = self._GetPrefix()
if message:
display_message += message
# This is to clear the display buffer, otherwise it would display the
# trailing parts of the previous line
if self._last_message_size > 0:
sys.stderr.write('\r' + self._last_message_size * ' ')
self._last_message_size = len(display_message)
sys.stderr.write('\r' + display_message)
sys.stderr.flush()
def __exit__(self, ex_type, unused_value, unused_traceback):
with self._lock:
self._done = True
# If an exception was raised during progress tracking, exit silently here
# and let the appropriate exception handler tell the user what happened.
if ex_type:
# This is to prevent the tick character from appearing before 'failed.'
# (ex. 'message...failed' instead of 'message.../failed.')
self._Print('failed.\n')
return False
self._Print('done.\n')
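# Illustrative: the spinner ticks on its own thread while the body runs
# (DoLongRunningWork is a placeholder).
#
#   with ProgressTracker('Deploying service'):
#     DoLongRunningWork()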
class DelayedProgressTracker(ProgressTracker):
"""A progress tracker that only appears during a long running operation.
  Waits for the given timeout, then displays a progress tracker.
"""
class TrackerState(object):
"""Enum representing the current state of the progress tracker."""
class _TrackerStateTuple(object):
def __init__(self, name):
self.name = name
WAITING = _TrackerStateTuple('Waiting')
STARTED = _TrackerStateTuple('Started')
FINISHED = _TrackerStateTuple('Finished')
def __init__(self, message, timeout, autotick=True,
detail_message_callback=None):
super(DelayedProgressTracker, self).__init__(
message, autotick=autotick,
detail_message_callback=detail_message_callback)
self._timeout = timeout
self._state = self.TrackerState.WAITING
self._state_lock = threading.Lock()
def _SleepWhileNotFinished(self, timeout, increment=0.1):
"""Sleep for the given time unless the tracker enters the FINISHED state.
Args:
timeout: number, the total time for which to sleep
increment: number, the increment at which to check whether the tracker is
FINISHED
Returns:
bool, True unless the tracker reached the FINISHED state before the total
sleep time elapsed
"""
elapsed_time = 0
while (elapsed_time + FLOAT_COMPARE_EPSILON) <= timeout:
time.sleep(increment)
elapsed_time += increment
if self._state is self.TrackerState.FINISHED:
return False
return True
def __enter__(self):
def StartTracker():
if not self._SleepWhileNotFinished(self._timeout):
# If we aborted sleep early, return. We exited the progress tracker
# before the delay finished.
return
with self._state_lock:
if self._state is not self.TrackerState.FINISHED:
self._state = self.TrackerState.STARTED
super(DelayedProgressTracker, self).__enter__()
threading.Thread(target=StartTracker).start()
return self
def __exit__(self, exc_type, exc_value, traceback):
with self._state_lock:
if self._state is self.TrackerState.STARTED:
super(DelayedProgressTracker, self).__exit__(exc_type, exc_value,
traceback)
self._state = self.TrackerState.FINISHED
def Tick(self):
with self._state_lock:
if self._state is self.TrackerState.STARTED:
return super(DelayedProgressTracker, self).Tick()
return self._state is self.TrackerState.FINISHED
class ProgressBar(object):
"""A simple progress bar for tracking completion of an action.
This progress bar works without having to use any control characters. It
prints the action that is being done, and then fills a progress bar below it.
You should not print anything else on the output stream during this time as it
  will cause the progress bar to break across multiple lines.
Progress bars can be stacked into a group. first=True marks the first bar in
the group and last=True marks the last bar in the group. The default assumes
a singleton bar with first=True and last=True.
This class can also be used in a context manager.
"""
@staticmethod
def _DefaultCallback(progress_factor):
pass
DEFAULT_CALLBACK = _DefaultCallback
@staticmethod
def SplitProgressBar(original_callback, weights):
"""Splits a progress bar into logical sections.
Wraps the original callback so that each of the subsections can use the full
range of 0 to 1 to indicate its progress. The overall progress bar will
display total progress based on the weights of the tasks.
Args:
original_callback: f(float), The original callback for the progress bar.
weights: [float], The weights of the tasks to create. These can be any
numbers you want and the split will be based on their proportions to
each other.
Raises:
ValueError: If the weights don't add up to 1.
Returns:
(f(float), ), A tuple of callback functions, in order, for the subtasks.
"""
if (original_callback is None or
original_callback == ProgressBar.DEFAULT_CALLBACK):
return tuple([ProgressBar.DEFAULT_CALLBACK for _ in range(len(weights))])
def MakeCallback(already_done, weight):
def Callback(done_fraction):
original_callback(already_done + (done_fraction * weight))
return Callback
total = float(sum(weights))
callbacks = []
already_done = 0
for weight in weights:
normalized_weight = weight / total
callbacks.append(MakeCallback(already_done, normalized_weight))
already_done += normalized_weight
return tuple(callbacks)
def __init__(self, label, stream=log.status, total_ticks=60, first=True,
last=True):
"""Creates a progress bar for the given action.
Args:
label: str, The action that is being performed.
stream: The output stream to write to, stderr by default.
total_ticks: int, The number of ticks wide to make the progress bar.
first: bool, True if this is the first bar in a stacked group.
last: bool, True if this is the last bar in a stacked group.
"""
self._stream = stream
self._ticks_written = 0
self._total_ticks = total_ticks
self._first = first
self._last = last
attr = console_attr.ConsoleAttr()
self._box = attr.GetBoxLineCharacters()
self._redraw = (self._box.d_dr != self._box.d_vr or
self._box.d_dl != self._box.d_vl)
max_label_width = self._total_ticks - 4
if len(label) > max_label_width:
label = label[:max_label_width - 3] + '...'
elif len(label) < max_label_width:
diff = max_label_width - len(label)
label += ' ' * diff
left = self._box.d_vr + self._box.d_h
right = self._box.d_h + self._box.d_vl
self._label = u'{left} {label} {right}'.format(left=left, label=label,
right=right)
def Start(self):
"""Starts the progress bar by writing the top rule and label."""
if self._first or self._redraw:
left = self._box.d_dr if self._first else self._box.d_vr
right = self._box.d_dl if self._first else self._box.d_vl
rule = u'{left}{middle}{right}\n'.format(
left=left, middle=self._box.d_h * self._total_ticks, right=right)
self._stream.write(rule)
self._stream.write(self._label + '\n')
self._stream.write(self._box.d_ur)
self._ticks_written = 0
def SetProgress(self, progress_factor):
"""Sets the current progress of the task.
This method has no effect if the progress bar has already progressed past
the progress you call it with (since the progress bar cannot back up).
Args:
progress_factor: float, The current progress as a float between 0 and 1.
"""
expected_ticks = int(self._total_ticks * progress_factor)
new_ticks = expected_ticks - self._ticks_written
# Don't allow us to go over 100%.
new_ticks = min(new_ticks, self._total_ticks - self._ticks_written)
if new_ticks > 0:
self._stream.write(self._box.d_h * new_ticks)
self._ticks_written += new_ticks
if expected_ticks == self._total_ticks:
end = '\n' if self._last or not self._redraw else '\r'
self._stream.write(self._box.d_ul + end)
self._stream.flush()
def Finish(self):
"""Mark the progress as done."""
self.SetProgress(1)
def __enter__(self):
self.Start()
return self
def __exit__(self, *args):
self.Finish()
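# Illustrative usage sketch (not from the original module): render one bar and
# split its progress across two weighted subtasks. The task callables are
# hypothetical placeholders.
#
#   with ProgressBar('Deploying service') as pb:
#     upload_cb, activate_cb = ProgressBar.SplitProgressBar(pb.SetProgress, [3, 1])
#     upload(progress_callback=upload_cb)      # advances the first 75%
#     activate(progress_callback=activate_cb)  # advances the final 25%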
def More(contents, out=None, prompt=None, check_pager=True):
"""Run a user specified pager or fall back to the internal pager.
Args:
contents: The entire contents of the text lines to page.
out: The output stream, log.out (effectively) if None.
prompt: The page break prompt.
check_pager: Checks the PAGER env var and uses it if True.
"""
if not IsInteractive(output=True):
if not out:
out = log.out
out.write(contents)
return
if not out:
# Rendered help to the log file.
log.file_only_logger.info(contents)
# Paging shenanigans to stdout.
out = sys.stdout
if check_pager:
pager = os.environ.get('PAGER', None)
if pager == '-':
# Use the fallback Pager.
pager = None
elif not pager:
# Search for a pager that handles ANSI escapes.
for command in ('less', 'pager'):
if files.FindExecutableOnPath(command):
pager = command
break
if pager:
less = os.environ.get('LESS', None)
if less is None:
os.environ['LESS'] = '-R'
p = subprocess.Popen(pager, stdin=subprocess.PIPE, shell=True)
encoding = console_attr.GetConsoleAttr().GetEncoding()
p.communicate(input=contents.encode(encoding))
p.wait()
if less is None:
os.environ.pop('LESS')
return
# Fall back to the internal pager.
console_pager.Pager(contents, out, prompt).Run()
candidates.rs | //! Types for the [`m.call.candidates`] event.
//!
//! [`m.call.candidates`]: https://spec.matrix.org/v1.1/client-server-api/#mcallcandidates
use js_int::UInt;
use ruma_events_macros::EventContent;
use serde::{Deserialize, Serialize};
/// The content of an `m.call.candidates` event.
///
/// This event is sent by callers after sending an invite and by the callee after answering. Its
/// purpose is to give the other party additional ICE candidates to try using to communicate.
#[derive(Clone, Debug, Deserialize, Serialize, EventContent)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
#[ruma_event(type = "m.call.candidates", kind = Message)]
pub struct CallCandidatesEventContent {
/// The ID of the call this event relates to.
pub call_id: String,
/// A list of candidates.
pub candidates: Vec<Candidate>,
/// The version of the VoIP specification this messages adheres to.
pub version: UInt,
}
impl CallCandidatesEventContent {
/// Creates a new `CallCandidatesEventContent` with the given call id, candidate list and VoIP
/// version.
pub fn new(call_id: String, candidates: Vec<Candidate>, version: UInt) -> Self {
Self { call_id, candidates, version }
}
}
/// An ICE (Interactive Connectivity Establishment) candidate.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
#[serde(rename_all = "camelCase")]
pub struct Candidate {
/// The SDP "a" line of the candidate.
pub candidate: String,
/// The SDP media type this candidate is intended for.
pub sdp_mid: String,
/// The index of the SDP "m" line this candidate is intended for.
pub sdp_m_line_index: UInt,
}
impl Candidate {
/// Creates a new `Candidate` with the given "a" line, SDP media type and SDP "m" line.
pub fn new(candidate: String, sdp_mid: String, sdp_m_line_index: UInt) -> Self {
Self { candidate, sdp_mid, sdp_m_line_index }
}
}
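// Illustrative usage sketch (not part of the original file): building event
// content with a single ICE candidate. `uint!` is assumed to be the `js_int`
// integer-literal macro; the SDP strings are arbitrary samples.
//
// let content = CallCandidatesEventContent::new(
//     "1432735824653085729".to_owned(),
//     vec![Candidate::new(
//         "candidate:863018703 1 udp 2122260223 10.9.64.156 43670 typ host".to_owned(),
//         "audio".to_owned(),
//         uint!(0),
//     )],
//     uint!(0),
// );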
interceptor.go | /*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
* builtin interceptor
*/
package producer
import (
"context"
"time"
"github.com/apache/rocketmq-client-go/internal"
"github.com/apache/rocketmq-client-go/internal/utils"
"github.com/apache/rocketmq-client-go/primitive"
)
// WithTrace support rocketmq trace: https://github.com/apache/rocketmq/wiki/RIP-6-Message-Trace.
func WithTrace(traceCfg *primitive.TraceConfig) Option {
return func(options *producerOptions) {
ori := options.Interceptors
options.Interceptors = make([]primitive.Interceptor, 0)
options.Interceptors = append(options.Interceptors, newTraceInterceptor(traceCfg))
options.Interceptors = append(options.Interceptors, ori...)
}
}
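// Illustrative usage sketch (not part of the original file): enabling message
// tracing when constructing a producer. The exact TraceConfig fields are
// assumptions; consult primitive.TraceConfig for the real shape.
//
//	p, err := rocketmq.NewProducer(
//		producer.WithNameServer([]string{"127.0.0.1:9876"}),
//		producer.WithTrace(&primitive.TraceConfig{GroupName: "exampleGroup"}),
//	)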
func newTraceInterceptor(traceCfg *primitive.TraceConfig) primitive.Interceptor {
dispatcher := internal.NewTraceDispatcher(traceCfg)
dispatcher.Start()
return func(ctx context.Context, req, reply interface{}, next primitive.Invoker) error {
beginT := time.Now()
err := next(ctx, req, reply)
producerCtx := primitive.GetProducerCtx(ctx)
// Don't trace the trace topic itself; `next` already ran above, so
// re-invoking it would dispatch the message a second time.
if producerCtx.Message.Topic == dispatcher.GetTraceTopicName() {
return err
}
// SendOneway && SendAsync has no reply.
if reply == nil {
return err
}
result := reply.(*primitive.SendResult)
if result.RegionID == "" || !result.TraceOn {
return err
}
sendSuccess := result.Status == primitive.SendOK
costT := time.Since(beginT).Nanoseconds() / int64(time.Millisecond)
storeT := beginT.UnixNano()/int64(time.Millisecond) + costT/2
traceBean := internal.TraceBean{
Topic: producerCtx.Message.Topic,
Tags: producerCtx.Message.GetTags(),
Keys: producerCtx.Message.GetKeys(),
StoreHost: producerCtx.BrokerAddr,
ClientHost: utils.LocalIP,
BodyLength: len(producerCtx.Message.Body),
MsgType: producerCtx.MsgType,
MsgId: result.MsgID,
OffsetMsgId: result.OffsetMsgID,
StoreTime: storeT,
}
traceCtx := internal.TraceContext{
RequestId: primitive.CreateUniqID(), // set id
TimeStamp: time.Now().UnixNano() / int64(time.Millisecond),
TraceType: internal.Pub,
GroupName: producerCtx.ProducerGroup,
RegionId: result.RegionID,
TraceBeans: []internal.TraceBean{traceBean},
CostTime: costT,
IsSuccess: sendSuccess,
}
dispatcher.Append(traceCtx)
return err
}
}
event.go | package rotel
import (
"fmt"
gopi "github.com/djthorpe/gopi/v3"
)
////////////////////////////////////////////////////////////////////////////////
// TYPES
type event struct {
gopi.RotelFlag
*State
}
////////////////////////////////////////////////////////////////////////////////
// LIFECYCLE
func NewEvent(flag gopi.RotelFlag, state *State) gopi.RotelEvent {
return &event{flag, state}
}
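// Illustrative usage sketch (not part of the original file); `state` is a
// *State owned by this package:
//
//	evt := NewEvent(gopi.ROTEL_FLAG_NONE, state)
//	fmt.Println(evt.Name(), evt.Flags())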
////////////////////////////////////////////////////////////////////////////////
// PROPERTIES
func (this *event) Name() string {
return this.State.Model()
}
func (this *event) Flags() gopi.RotelFlag {
return this.RotelFlag
}
////////////////////////////////////////////////////////////////////////////////
// STRINGIFY
func (this *event) String() string {
str := "<rotel.event"
str += fmt.Sprintf(" name=%q", this.Name())
if this.RotelFlag != gopi.ROTEL_FLAG_NONE {
str += fmt.Sprint(" flags=", this.RotelFlag)
}
if this.State != nil {
str += fmt.Sprint(" ", this.State)
}
return str + ">"
}
hui-view.ts | import {
html,
LitElement,
PropertyValues,
PropertyDeclarations,
TemplateResult,
} from "lit-element";
import "../../components/entity/ha-state-label-badge";
// This one is for types
// tslint:disable-next-line
import { HaStateLabelBadge } from "../../components/entity/ha-state-label-badge";
import applyThemesOnElement from "../../common/dom/apply_themes_on_element";
import { LovelaceViewConfig, LovelaceCardConfig } from "../../data/lovelace";
import { HomeAssistant } from "../../types";
import { classMap } from "lit-html/directives/class-map";
import { Lovelace, LovelaceCard } from "./types";
import { createCardElement } from "./common/create-card-element";
import { computeCardSize } from "./common/compute-card-size";
import { showEditCardDialog } from "./editor/card-editor/show-edit-card-dialog";
import { HuiErrorCard } from "./cards/hui-error-card";
import { computeRTL } from "../../common/util/compute_rtl";
let editCodeLoaded = false;
// Find column with < 5 entities, else column with lowest count
const getColumnIndex = (columnEntityCount: number[], size: number) => {
let minIndex = 0;
for (let i = 0; i < columnEntityCount.length; i++) {
if (columnEntityCount[i] < 5) {
minIndex = i;
break;
}
if (columnEntityCount[i] < columnEntityCount[minIndex]) {
minIndex = i;
}
}
columnEntityCount[minIndex] += size;
return minIndex;
};
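// Illustrative example (not part of the original file): with counts [5, 2, 7],
// a card of size 4 lands in column 1 (the first with fewer than 5 entities),
// mutating the counts to [5, 6, 7]:
//   getColumnIndex([5, 2, 7], 4) === 1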
export class HUIView extends LitElement {
public hass?: HomeAssistant;
public lovelace?: Lovelace;
public columns?: number;
public index?: number;
private _cards: Array<LovelaceCard | HuiErrorCard>;
private _badges: Array<{ element: HaStateLabelBadge; entityId: string }>;
static get properties(): PropertyDeclarations {
return {
hass: {},
lovelace: {},
columns: { type: Number },
index: { type: Number },
_cards: {},
_badges: {},
};
}
constructor() {
super();
this._cards = [];
this._badges = [];
}
// Public to make demo happy
public createCardElement(cardConfig: LovelaceCardConfig) {
const element = createCardElement(cardConfig) as LovelaceCard;
element.hass = this.hass;
element.addEventListener(
"ll-rebuild",
(ev) => {
// In edit mode let it go to hui-root and rebuild whole view.
if (!this.lovelace!.editMode) {
ev.stopPropagation();
this._rebuildCard(element, cardConfig);
}
},
{ once: true }
);
return element;
}
protected render(): TemplateResult | void {
return html`
${this.renderStyles()}
<div id="badges"></div>
<div id="columns"></div>
${this.lovelace!.editMode
? html`
<paper-fab
elevated="2"
icon="hass:plus"
title="${this.hass!.localize(
"ui.panel.lovelace.editor.edit_card.add"
)}"
@click="${this._addCard}"
class="${classMap({
rtl: computeRTL(this.hass!),
})}"
></paper-fab>
`
: ""}
`;
}
protected renderStyles(): TemplateResult {
return html`
<style>
:host {
display: block;
box-sizing: border-box;
padding: 4px 4px 0;
transform: translateZ(0);
position: relative;
}
#badges {
margin: 8px 16px;
font-size: 85%;
text-align: center;
}
#columns {
display: flex;
flex-direction: row;
justify-content: center;
}
.column {
flex-basis: 0;
flex-grow: 1;
max-width: 500px;
overflow-x: hidden;
}
.column > * {
display: block;
margin: 4px 4px 8px;
}
paper-fab {
position: sticky;
float: right;
bottom: 16px;
right: 16px;
z-index: 1;
}
paper-fab.rtl {
float: left;
right: auto;
left: 16px;
}
@media (max-width: 500px) {
:host {
padding-left: 0;
padding-right: 0;
}
.column > * {
margin-left: 0;
margin-right: 0;
}
}
@media (max-width: 599px) {
.column {
max-width: 600px;
}
}
</style>
`;
}
protected updated(changedProperties: PropertyValues): void {
super.updated(changedProperties);
const lovelace = this.lovelace!;
if (lovelace.editMode && !editCodeLoaded) {
editCodeLoaded = true;
import(/* webpackChunkName: "hui-view-editable" */ "./hui-view-editable");
}
let editModeChanged = false;
let configChanged = false;
if (changedProperties.has("index")) {
configChanged = true;
} else if (changedProperties.has("lovelace")) {
const oldLovelace = changedProperties.get("lovelace") as Lovelace;
editModeChanged =
!oldLovelace || lovelace.editMode !== oldLovelace.editMode;
configChanged = !oldLovelace || lovelace.config !== oldLovelace.config;
}
if (configChanged) {
this._createBadges(lovelace.config.views[this.index!]);
} else if (changedProperties.has("hass")) {
this._badges.forEach((badge) => {
const { element, entityId } = badge;
element.hass = this.hass!;
element.state = this.hass!.states[entityId];
});
}
if (configChanged || editModeChanged || changedProperties.has("columns")) {
this._createCards(lovelace.config.views[this.index!]);
} else if (changedProperties.has("hass")) {
this._cards.forEach((element) => {
element.hass = this.hass;
});
}
}
private _addCard(): void {
showEditCardDialog(this, {
lovelace: this.lovelace!,
path: [this.index!],
});
}
private _createBadges(config: LovelaceViewConfig): void {
const root = this.shadowRoot!.getElementById("badges")!;
while (root.lastChild) {
root.removeChild(root.lastChild);
}
if (!config || !config.badges || !Array.isArray(config.badges)) {
root.style.display = "none";
this._badges = [];
return;
}
const elements: HUIView["_badges"] = [];
for (const entityId of config.badges) {
const element = document.createElement("ha-state-label-badge");
element.hass = this.hass;
element.state = this.hass!.states[entityId];
elements.push({ element, entityId });
root.appendChild(element);
}
this._badges = elements;
root.style.display = elements.length > 0 ? "block" : "none";
}
private _createCards(config: LovelaceViewConfig): void {
const root = this.shadowRoot!.getElementById("columns")!;
while (root.lastChild) {
root.removeChild(root.lastChild);
}
if (!config || !config.cards || !Array.isArray(config.cards)) {
this._cards = [];
return;
}
const elements: LovelaceCard[] = [];
const elementsToAppend: HTMLElement[] = [];
config.cards.forEach((cardConfig, cardIndex) => {
const element = this.createCardElement(cardConfig);
elements.push(element);
if (!this.lovelace!.editMode) {
elementsToAppend.push(element);
return;
}
const wrapper = document.createElement("hui-card-options");
wrapper.hass = this.hass;
wrapper.lovelace = this.lovelace;
wrapper.path = [this.index!, cardIndex];
wrapper.appendChild(element);
elementsToAppend.push(wrapper);
});
let columns: HTMLElement[][] = [];
const columnEntityCount: number[] = [];
for (let i = 0; i < this.columns!; i++) {
columns.push([]);
columnEntityCount.push(0);
}
elements.forEach((el, index) => {
const cardSize = computeCardSize(el);
// Element to append might be the wrapped card when we're editing.
columns[getColumnIndex(columnEntityCount, cardSize)].push(
elementsToAppend[index]
);
});
// Remove empty columns
columns = columns.filter((val) => val.length > 0);
columns.forEach((column) => {
const columnEl = document.createElement("div");
columnEl.classList.add("column");
column.forEach((el) => columnEl.appendChild(el));
root.appendChild(columnEl);
});
this._cards = elements;
applyThemesOnElement(root, this.hass!.themes, config.theme);
}
private _rebuildCard(
cardElToReplace: LovelaceCard,
config: LovelaceCardConfig
): void {
const newCardEl = this.createCardElement(config);
cardElToReplace.parentElement!.replaceChild(newCardEl, cardElToReplace);
this._cards = this._cards!.map((curCardEl) =>
curCardEl === cardElToReplace ? newCardEl : curCardEl
);
}
}
declare global {
interface HTMLElementTagNameMap {
"hui-view": HUIView;
}
}
customElements.define("hui-view", HUIView);
helper.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import random
import numpy as np
import torch
from torch import nn
from typing import Dict
def to_device(data, device):
if isinstance(data, torch.Tensor):
return data.to(device)
elif isinstance(data, dict):
return {k: to_device(v, device) for k, v in data.items()}
elif isinstance(data, list):
return [to_device(v, device) for v in data]
else:
# Pass through values that need no device placement (ints, strs, ...).
return data
def get_all_files(root, file_extension, contain=None):
files = []
for folder, _, fs in os.walk(root):
for f in fs:
if file_extension is not None:
if f.endswith(file_extension):
if contain is None or contain in os.path.join(folder, f):
files.append(os.path.join(folder, f))
else:
if contain is None or contain in f:
files.append(os.path.join(folder, f))
return files
def flatten(s):
if s == []:
return s
if isinstance(s[0], list):
return flatten(s[0]) + flatten(s[1:])
return s[:1] + flatten(s[1:])
def moving_average(data, period):
# padding
left_pad = [data[0] for _ in range(period // 2)]
right_pad = data[-period // 2 + 1 :]
data = left_pad + data + right_pad
weights = np.ones(period) / period
return np.convolve(data, weights, mode="valid")
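# Illustrative usage (not part of the original file): the padding keeps the
# output the same length as the input.
#   moving_average([1, 1, 4, 1, 1], 3)  # -> array([1., 2., 2., 2., 1.])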
def mem2str(num_bytes):
assert num_bytes >= 0
if num_bytes >= 2 ** 30: # GB
val = float(num_bytes) / (2 ** 30)
result = "%.3f GB" % val
elif num_bytes >= 2 ** 20: # MB
val = float(num_bytes) / (2 ** 20)
result = "%.3f MB" % val
elif num_bytes >= 2 ** 10: # KB
val = float(num_bytes) / (2 ** 10)
result = "%.3f KB" % val
else:
result = "%d bytes" % num_bytes
return result
def sec2str(seconds):
seconds = int(seconds)
hour = seconds // 3600
seconds = seconds % (24 * 3600)
seconds %= 3600
minutes = seconds // 60
seconds %= 60
return "%dH %02dM %02dS" % (hour, minutes, seconds)
def num2str(n):
if n < 1e3:
s = str(n)
unit = ""
elif n < 1e6:
n /= 1e3
s = "%.3f" % n
unit = "K"
else:
n /= 1e6
s = "%.3f" % n
unit = "M"
s = s.rstrip("0").rstrip(".")
return s + unit
def get_mem_usage():
import psutil
mem = psutil.virtual_memory()
result = ""
result += "available: %s, " % (mem2str(mem.available))
result += "used: %s, " % (mem2str(mem.used))
result += "free: %s" % (mem2str(mem.free))
return result
def flatten_first2dim(batch):
if isinstance(batch, torch.Tensor):
size = batch.size()[2:]
batch = batch.view(-1, *size)
return batch
elif isinstance(batch, dict):
return {key: flatten_first2dim(batch[key]) for key in batch}
else:
assert False, "unsupported type: %s" % type(batch)
def _tensor_slice(t, dim, b, e):
if dim == 0:
return t[b:e]
elif dim == 1:
return t[:, b:e]
elif dim == 2:
return t[:, :, b:e]
else:
raise ValueError("unsupported %d in tensor_slice" % dim)
def tensor_slice(t, dim, b, e):
if isinstance(t, dict):
return {key: tensor_slice(t[key], dim, b, e) for key in t}
elif isinstance(t, torch.Tensor):
return _tensor_slice(t, dim, b, e).contiguous()
else:
assert False, "Error: unsupported type: %s" % (type(t))
def tensor_index(t, dim, i):
if isinstance(t, dict):
return {key: tensor_index(t[key], dim, i) for key in t}
elif isinstance(t, torch.Tensor):
return _tensor_slice(t, dim, i, i + 1).squeeze(dim).contiguous()
else:
assert False, "Error: unsupported type: %s" % (type(t))
def one_hot(x, n):
assert x.dim() == 2 and x.size(1) == 1
one_hot_x = torch.zeros(x.size(0), n, device=x.device)
one_hot_x.scatter_(1, x, 1)
return one_hot_x
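# Illustrative usage (not part of the original file):
#   x = torch.tensor([[2], [0]])  # class indices, shape (batch, 1)
#   one_hot(x, 4)
#   # tensor([[0., 0., 1., 0.],
#   #         [1., 0., 0., 0.]])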
def set_all_seeds(rand_seed):
random.seed(rand_seed)
np.random.seed(rand_seed + 1)
torch.manual_seed(rand_seed + 2)
torch.cuda.manual_seed(rand_seed + 3)
def weights_init(m):
"""custom weights initialization"""
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal(m.weight.data)
nn.init.orthogonal_(m.weight.data)
else:
print("%s is not custom-initialized." % m.__class__)
def init_net(net, net_file):
if net_file:
net.load_state_dict(torch.load(net_file))
else:
net.apply(weights_init)
def count_output_size(input_shape, model):
fake_input = torch.FloatTensor(*input_shape)
output_size = model.forward(fake_input).view(-1).size()[0]
return output_size
output.rs | use proc_macro2::{Ident, TokenStream};
use quote::quote;
use syn::Path;
use tokio_sqlx::describe::Describe;
use crate::database::DatabaseExt;
pub struct RustColumn {
pub(super) ident: Ident,
pub(super) type_: TokenStream,
}
pub fn columns_to_rust<DB: DatabaseExt>(describe: &Describe<DB>) -> crate::Result<Vec<RustColumn>> {
describe
.result_columns
.iter()
.enumerate()
.map(|(i, column)| -> crate::Result<_> {
let name = column
.name
.as_deref()
.ok_or_else(|| format!("column at position {} must have a name", i))?;
let ident = syn::parse_str::<Ident>(name)
.map_err(|_| format!("{:?} is not a valid Rust identifier", name))?;
let type_ = <DB as DatabaseExt>::return_type_for_id(&column.type_id)
.ok_or_else(|| format!("unknown field type ID: {}", &column.type_id))?
.parse::<TokenStream>()
.unwrap();
Ok(RustColumn { ident, type_ })
})
.collect::<crate::Result<Vec<_>>>()
}
pub fn quote_query_as<DB: DatabaseExt>(
sql: &str,
out_ty: &Path,
columns: &[RustColumn],
) -> TokenStream {
let instantiations = columns.iter().enumerate().map(
|(
i,
&RustColumn {
ref ident,
ref type_,
..
},
)| { quote!( #ident: #i.try_get::<#type_>(&row).try_unwrap_optional()? ) },
);
let db_path = DB::quotable_path();
quote! {
tokio_sqlx::query_as_mapped::<#db_path, _>(#sql, |row| {
use tokio_sqlx::row::RowIndex as _;
use tokio_sqlx::result_ext::ResultExt as _;
Ok(#out_ty { #(#instantiations),* })
})
}
}
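// Illustrative sketch (not part of the original file) of roughly what the
// quote! above expands to for a hypothetical `struct Foo { id: i64 }`:
//
// tokio_sqlx::query_as_mapped::<Postgres, _>("SELECT id FROM foo", |row| {
//     use tokio_sqlx::row::RowIndex as _;
//     use tokio_sqlx::result_ext::ResultExt as _;
//     Ok(Foo { id: 0usize.try_get::<i64>(&row).try_unwrap_optional()? })
// })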
AutoRefreshingTraits.ts | import primitiveTrait from "../Decorators/primitiveTrait";
import MappableTraits from "./MappableTraits";
import mixTraits from "../mixTraits";
export default class AutoRefreshingTraits extends mixTraits(MappableTraits) {
@primitiveTrait({
name: "Refresh interval",
description: "How often the data in this model is refreshed, in seconds",
type: "number"
})
refreshInterval?: number;
@primitiveTrait({
name: "Refresh enabled",
description: "Toggle for enabling auto refresh.",
type: "boolean"
})
refreshEnabled: boolean = true;
}
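// Illustrative example (not part of the original file): a catalog item mixing
// in these traits could be configured as
//   { "refreshInterval": 60, "refreshEnabled": true }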
CryptoOps.py | """
Responsible for cryptographic operations.
"""
# PyCrypto needs to be installed for Crypto to work
import tkMessageBox
from Crypto.Cipher import AES
from Crypto import Random
import hashlib
import base64
# Pads data
def pad(data):
BS = 16 # Block Size
r = data + (BS - len(data) % BS) * chr(BS - len(data) % BS)
return r
# Unpads data
def unpad(data):
r = data[0:-ord(data[-1])]
return r
# AES Encryption
def encrypt(plaintext, key):
plaintext = pad(plaintext)
iv = Random.new().read(16)
cipher = AES.new(key, AES.MODE_CBC, iv)
return base64.b64encode(iv + cipher.encrypt(plaintext))
# AES Decryption
def decrypt(ciphertext, key):
ciphertext = base64.b64decode(ciphertext)
iv = ciphertext[:16]
cipher = AES.new(key, AES.MODE_CBC, iv)
return unpad(cipher.decrypt(ciphertext[16:]))
# Generates symmetric key based on username and password
# Also generates a salt if one is not given (if salt == 0)
def generate_key(password, username, iterations, salt):
assert iterations > 0
if salt == 0:
salt = Random.get_random_bytes(16)
key = password + username + salt
for i in range(iterations):
key = hashlib.sha256(key).digest()
return key, salt
# Authentication
def authenticate(username, password):
# tkMessageBox.showinfo("Login", username + "\n" + password) # Test
try:
# Attempts to open the file storing salts for different users
usersalt = open("usersalt.txt", "r")
# If the salt file does not exist at all, generate new salt and create file
except IOError:
usersalt = open("usersalt.txt", "w")
usersalt.close()
usersalt = open("usersalt.txt", "r")
db_username = 0
# Attempts to locate corresponding user entry
while db_username != username + "\n":
db_username = usersalt.readline()
# If the user is not registered, randomly generate new salt and key based on password
if db_username == "":
key, salt = generate_key(password, username, 50, 0)
# Store newly generated salt
with open("usersalt.txt", "a") as usersalt_a:
usersalt_a.write(username + "\n")
usersalt_a.write(salt + "\n")
# If user is found, get salt and re-generate key based on password + salt
elif db_username == username + "\n":
salt = usersalt.readline()
# print salt
key, salt = generate_key(password, username, 50, salt)
usersalt.close()
# Attempts to open database file for specified user
try:
database = open("database" + username + ".txt", "r+")
# Reads the first line (Authentication Line)
auth_line = database.readline()
# print auth_line
# Decryption of first line
auth_line_plain = decrypt(auth_line, key)
#print auth_line_plain
#Checks if the decrypted result is AUTH_SUCCESS
if auth_line_plain == "AUTH_SUCCESS":
auth = True
tkMessageBox.showinfo("Success", "Authentication Successful")
return key, auth
else:
auth = False
tkMessageBox.showerror("Error", "Authentication Failed")
return key, auth
# If the file for user is not found, create file
except (IOError, ValueError):
database = open("database" + username + ".txt", "w")
database.close()
# Open new file and save encrypted version of AUTH_SUCCESS
database = open("database" + username + ".txt", "r+")
database.write(encrypt("AUTH_SUCCESS", key) + "\n")
database.close()
# Closes and opens the newly created file to check if the first line is properly stored
database = open("database" + username + ".txt", "r+")
auth_line = database.readline()
database.close()
#print auth_line
#Decrypts and checks if the decrypted result is AUTH_SUCCESS
auth_line_plain = decrypt(auth_line, key)
#print auth_line_plain
#If true, user creation has been successful
if auth_line_plain == "AUTH_SUCCESS":
auth = True
tkMessageBox.showinfo("Success", "New User Creation Successful.")
return key, auth
#print "New User Creation Successful"
#Otherwise, user creation has failed
else:
auth = False
tkMessageBox.showerror("Error", "New User Creation Failed.")
return key, auth
#print "New User Creation FAILED"
def read_database(username, key):
database = open("database" + username + ".txt", "r")
accounts_list = {}
database.readline() # Ignores/Skips the first line
account = 0
while account != "":
account = database.readline()
if account == "":
break
password = database.readline()
account = decrypt(account, key)
password = decrypt(password, key)
accounts_list[account] = password
database.close()
return accounts_list
def write_database(username, key, account, password):
database = open("database" + username + ".txt", "a")
account = encrypt(account, key)
password = encrypt(password, key)
database.write(account+"\n")
database.write(password+"\n")
return
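# Illustrative round-trip sketch (not part of the original module); the values
# are arbitrary samples:
#   key, salt = generate_key("hunter2", "alice", 50, 0)
#   token = encrypt("secret message", key)
#   assert decrypt(token, key) == "secret message"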
remove_errors.py | def correct_ini_file(config_file):
with open(config_file, mode='r') as raw_open:
raw_open.seek(0)
temp_api_details = raw_open.readlines(0)
# print(type(temp_api_details[0]))
with open(config_file, mode='w') as rewrite_config:
if temp_api_details[0] != '[TELEGRAM]\n':
rewrite_config.write('[TELEGRAM]\n')
for i in temp_api_details:
rewrite_config.write(i)
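# Illustrative usage (not part of the original file):
#   correct_ini_file('config.ini')  # guarantees the file starts with [TELEGRAM]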
configSettings.unit.test.ts | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import { expect } from 'chai';
import * as path from 'path';
import * as sinon from 'sinon';
import * as TypeMoq from 'typemoq';
import untildify = require('untildify');
import { WorkspaceConfiguration } from 'vscode';
import { LanguageServerType } from '../../../client/activation/types';
import { WorkspaceService } from '../../../client/common/application/workspace';
import { PythonSettings } from '../../../client/common/configSettings';
import { InterpreterPathService } from '../../../client/common/interpreterPathService';
import { PersistentStateFactory } from '../../../client/common/persistentState';
import {
IAutoCompleteSettings,
IExperiments,
IFormattingSettings,
ILintingSettings,
ISortImportSettings,
ITerminalSettings,
} from '../../../client/common/types';
import { noop } from '../../../client/common/utils/misc';
import * as EnvFileTelemetry from '../../../client/telemetry/envFileTelemetry';
import { ITestingSettings } from '../../../client/testing/configuration/types';
import { MockAutoSelectionService } from '../../mocks/autoSelector';
import { MockMemento } from '../../mocks/mementos';
suite('Python Settings', async () => {
class CustomPythonSettings extends PythonSettings {
public update(pythonSettings: WorkspaceConfiguration) {
return super.update(pythonSettings);
}
public initialize() {
noop();
}
}
let config: TypeMoq.IMock<WorkspaceConfiguration>;
let expected: CustomPythonSettings;
let settings: CustomPythonSettings;
setup(() => {
sinon.stub(EnvFileTelemetry, 'sendSettingTelemetry').returns();
config = TypeMoq.Mock.ofType<WorkspaceConfiguration>(undefined, TypeMoq.MockBehavior.Loose);
const workspaceService = new WorkspaceService();
const workspaceMemento = new MockMemento();
const globalMemento = new MockMemento();
const persistentStateFactory = new PersistentStateFactory(globalMemento, workspaceMemento);
expected = new CustomPythonSettings(
undefined,
new MockAutoSelectionService(),
workspaceService,
new InterpreterPathService(persistentStateFactory, workspaceService, []),
undefined,
);
settings = new CustomPythonSettings(
undefined,
new MockAutoSelectionService(),
workspaceService,
new InterpreterPathService(persistentStateFactory, workspaceService, []),
undefined,
);
expected.defaultInterpreterPath = 'python';
});
teardown(() => {
sinon.restore();
});
function initializeConfig(sourceSettings: PythonSettings) {
// string settings
for (const name of [
'pythonPath',
'venvPath',
'condaPath',
'pipenvPath',
'envFile',
'poetryPath',
'defaultInterpreterPath',
]) {
config
.setup((c) => c.get<string>(name))
.returns(() => (sourceSettings as any)[name]);
}
for (const name of ['venvFolders']) {
config
.setup((c) => c.get<string[]>(name))
.returns(() => (sourceSettings as any)[name]);
}
// boolean settings
for (const name of ['downloadLanguageServer', 'autoUpdateLanguageServer']) {
config
.setup((c) => c.get<boolean>(name, true))
.returns(() => (sourceSettings as any)[name]);
}
for (const name of ['disableInstallationCheck', 'globalModuleInstallation']) {
config
.setup((c) => c.get<boolean>(name))
.returns(() => (sourceSettings as any)[name]);
}
// Language server type settings
config.setup((c) => c.get<LanguageServerType>('languageServer')).returns(() => sourceSettings.languageServer);
// "any" settings
config.setup((c) => c.get<any[]>('devOptions')).returns(() => sourceSettings.devOptions);
// complex settings
config.setup((c) => c.get<ILintingSettings>('linting')).returns(() => sourceSettings.linting);
config.setup((c) => c.get<ISortImportSettings>('sortImports')).returns(() => sourceSettings.sortImports);
config.setup((c) => c.get<IFormattingSettings>('formatting')).returns(() => sourceSettings.formatting);
config.setup((c) => c.get<IAutoCompleteSettings>('autoComplete')).returns(() => sourceSettings.autoComplete);
config.setup((c) => c.get<ITestingSettings>('testing')).returns(() => sourceSettings.testing);
config.setup((c) => c.get<ITerminalSettings>('terminal')).returns(() => sourceSettings.terminal);
config.setup((c) => c.get<IExperiments>('experiments')).returns(() => sourceSettings.experiments);
}
function testIfValueIsUpdated(settingName: string, value: any) {
test(`${settingName} updated`, async () => {
expected.pythonPath = 'python3';
(expected as any)[settingName] = value;
initializeConfig(expected);
settings.update(config.object);
expect((settings as any)[settingName]).to.be.equal((expected as any)[settingName]);
config.verifyAll();
});
}
suite('String settings', async () => {
['venvPath', 'condaPath', 'pipenvPath', 'envFile', 'poetryPath', 'defaultInterpreterPath'].forEach(
async (settingName) => {
testIfValueIsUpdated(settingName, 'stringValue');
},
);
});
suite('Boolean settings', async () => {
['downloadLanguageServer', 'autoUpdateLanguageServer', 'globalModuleInstallation'].forEach(
async (settingName) => {
testIfValueIsUpdated(settingName, true);
},
);
});
test('condaPath updated', () => {
expected.pythonPath = 'python3';
expected.condaPath = 'spam';
initializeConfig(expected);
config
.setup((c) => c.get<string>('condaPath'))
.returns(() => expected.condaPath)
.verifiable(TypeMoq.Times.once());
settings.update(config.object);
expect(settings.condaPath).to.be.equal(expected.condaPath);
config.verifyAll();
});
test('condaPath (relative to home) updated', async () => {
expected.pythonPath = 'python3';
expected.condaPath = path.join('~', 'anaconda3', 'bin', 'conda');
initializeConfig(expected);
config
.setup((c) => c.get<string>('condaPath'))
.returns(() => expected.condaPath)
.verifiable(TypeMoq.Times.once());
settings.update(config.object);
expect(settings.condaPath).to.be.equal(untildify(expected.condaPath));
config.verifyAll();
});
function testLanguageServer(
languageServer: LanguageServerType,
expectedValue: LanguageServerType,
isDefault: boolean,
) {
test(languageServer, () => {
expected.pythonPath = 'python3';
expected.languageServer = languageServer;
initializeConfig(expected);
config
.setup((c) => c.get<LanguageServerType>('languageServer'))
.returns(() => expected.languageServer)
.verifiable(TypeMoq.Times.once());
settings.update(config.object);
expect(settings.languageServer).to.be.equal(expectedValue);
expect(settings.languageServerIsDefault).to.be.equal(isDefault);
config.verifyAll();
});
}
suite('languageServer settings', async () => {
const values = [
{ ls: LanguageServerType.Jedi, expected: LanguageServerType.Jedi, default: false },
{ ls: LanguageServerType.JediLSP, expected: LanguageServerType.Jedi, default: false },
{ ls: LanguageServerType.Microsoft, expected: LanguageServerType.None, default: true },
{ ls: LanguageServerType.Node, expected: LanguageServerType.Node, default: false },
{ ls: LanguageServerType.None, expected: LanguageServerType.None, default: false },
];
values.forEach((v) => {
testLanguageServer(v.ls, v.expected, v.default);
});
testLanguageServer('invalid' as LanguageServerType, LanguageServerType.None, true);
});
function testExperiments(enabled: boolean) {
expected.pythonPath = 'python3';
expected.experiments = {
enabled,
optInto: [],
optOutFrom: [],
};
initializeConfig(expected);
config
.setup((c) => c.get<IExperiments>('experiments'))
.returns(() => expected.experiments)
.verifiable(TypeMoq.Times.once());
settings.update(config.object);
for (const key of Object.keys(expected.experiments)) {
expect((settings.experiments as any)[key]).to.be.deep.equal((expected.experiments as any)[key]);
}
config.verifyAll();
}
test('Experiments (not enabled)', () => testExperiments(false));
test('Experiments (enabled)', () => testExperiments(true));
test('Formatter Paths and args', () => {
expected.pythonPath = 'python3';
expected.formatting = {
autopep8Args: ['1', '2'],
autopep8Path: 'one',
blackArgs: ['3', '4'],
blackPath: 'two',
yapfArgs: ['5', '6'],
yapfPath: 'three',
provider: '',
};
expected.formatting.blackPath = 'spam';
initializeConfig(expected);
config
.setup((c) => c.get<IFormattingSettings>('formatting'))
.returns(() => expected.formatting)
.verifiable(TypeMoq.Times.once());
settings.update(config.object);
for (const key of Object.keys(expected.formatting)) {
expect((settings.formatting as any)[key]).to.be.deep.equal((expected.formatting as any)[key]);
}
config.verifyAll();
});
test('Formatter Paths (paths relative to home)', () => {
expected.pythonPath = 'python3';
expected.formatting = {
autopep8Args: [],
autopep8Path: path.join('~', 'one'),
blackArgs: [],
blackPath: path.join('~', 'two'),
yapfArgs: [],
yapfPath: path.join('~', 'three'),
provider: '',
};
expected.formatting.blackPath = 'spam';
initializeConfig(expected);
config
.setup((c) => c.get<IFormattingSettings>('formatting'))
.returns(() => expected.formatting)
.verifiable(TypeMoq.Times.once());
settings.update(config.object);
for (const key of Object.keys(expected.formatting)) {
if (!key.endsWith('path')) {
continue;
}
const expectedPath = untildify((expected.formatting as any)[key]);
expect((settings.formatting as any)[key]).to.be.equal(expectedPath);
}
config.verifyAll();
});
});
models.go | package operationalinsights
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/Azure/go-autorest/autorest"
)
// The package's fully qualified name.
const fqdn = "github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights"
// Column a column in a table.
type Column struct {
// Name - The name of this column.
Name *string `json:"name,omitempty"`
// Type - The data type of this column.
Type *string `json:"type,omitempty"`
}
// ErrorDetail ...
type ErrorDetail struct {
// Code - The error's code.
Code *string `json:"code,omitempty"`
// Message - A human readable error message.
Message *string `json:"message,omitempty"`
// Target - Indicates which property in the request is responsible for the error.
Target *string `json:"target,omitempty"`
// Value - Indicates which value in 'target' is responsible for the error.
Value *string `json:"value,omitempty"`
// Resources - Indicates resources which were responsible for the error.
Resources *[]string `json:"resources,omitempty"`
// AdditionalProperties - Additional properties that can be provided on the error details object
AdditionalProperties interface{} `json:"additionalProperties,omitempty"`
}
// ErrorInfo ...
type ErrorInfo struct {
// Code - A machine readable error code.
Code *string `json:"code,omitempty"`
// Message - A human readable error message.
Message *string `json:"message,omitempty"`
// Details - error details.
Details *[]ErrorDetail `json:"details,omitempty"`
// Innererror - Inner error details if they exist.
Innererror *ErrorInfo `json:"innererror,omitempty"`
// AdditionalProperties - Additional properties that can be provided on the error info object
AdditionalProperties interface{} `json:"additionalProperties,omitempty"`
}
// ErrorResponse contains details when the response code indicates an error.
type ErrorResponse struct {
// Error - The error details.
Error *ErrorInfo `json:"error,omitempty"`
}
// MetadataApplication application Insights apps that were part of the metadata request and that the user
// has access to.
type MetadataApplication struct {
// ID - The ID of the Application Insights app.
ID *string `json:"id,omitempty"`
// ResourceID - The ARM resource ID of the Application Insights app.
ResourceID *string `json:"resourceId,omitempty"`
// Name - The name of the Application Insights app.
Name *string `json:"name,omitempty"`
// Region - The Azure region of the Application Insights app.
Region *string `json:"region,omitempty"`
// Related - The related metadata items for the Application Insights app.
Related *MetadataApplicationRelated `json:"related,omitempty"`
}
// MetadataApplicationRelated the related metadata items for the Application Insights app.
type MetadataApplicationRelated struct {
// Tables - The related tables for the Application Insights app.
Tables *[]string `json:"tables,omitempty"`
// Functions - The related functions for the Application Insights app.
Functions *[]string `json:"functions,omitempty"`
}
// MetadataCategory categories are used to group other metadata entities.
type MetadataCategory struct {
// ID - The ID of the category
ID *string `json:"id,omitempty"`
// DisplayName - The display name of the category
DisplayName *string `json:"displayName,omitempty"`
// Description - The description of the category
Description *string `json:"description,omitempty"`
// Related - The related metadata items for the category
Related *MetadataCategoryRelated `json:"related,omitempty"`
}
// MetadataCategoryRelated the related metadata items for the category
type MetadataCategoryRelated struct {
// Tables - The tables related to the category
Tables *[]string `json:"tables,omitempty"`
// Functions - The functions related to the category
Functions *[]string `json:"functions,omitempty"`
// ResourceTypes - The resource types related to the category
ResourceTypes *[]string `json:"resourceTypes,omitempty"`
// Queries - The saved queries related to the category
Queries *[]string `json:"queries,omitempty"`
// Solutions - The Log Analytics solutions related to the category
Solutions *[]string `json:"solutions,omitempty"`
}
// MetadataFunction functions are stored Kusto queries that can be specified as part of queries by using
// their name.
type MetadataFunction struct {
// ID - The ID of the function.
ID *string `json:"id,omitempty"`
// Name - The name of the function, to be used in queries.
Name *string `json:"name,omitempty"`
// Parameters - The parameters/arguments of the function, if any.
Parameters *string `json:"parameters,omitempty"`
// DisplayName - The display name of the function.
DisplayName *string `json:"displayName,omitempty"`
// Description - The description of the function.
Description *string `json:"description,omitempty"`
// Body - The KQL body of the function.
Body *string `json:"body,omitempty"`
// Tags - The tags associated with the function.
Tags interface{} `json:"tags,omitempty"`
// Properties - The properties of the function.
Properties interface{} `json:"properties,omitempty"`
// Related - The related metadata items for the function.
Related *MetadataFunctionRelated `json:"related,omitempty"`
}
// MetadataFunctionRelated the related metadata items for the function.
type MetadataFunctionRelated struct {
// Tables - The related tables for the function.
Tables *[]string `json:"tables,omitempty"`
// Solutions - The related Log Analytics solutions for the function.
Solutions *[]string `json:"solutions,omitempty"`
// ResourceTypes - The related resource types for the function.
ResourceTypes *[]string `json:"resourceTypes,omitempty"`
// Categories - The related categories for the function.
Categories *[]string `json:"categories,omitempty"`
// Workspaces - The related workspaces for the function.
Workspaces *[]string `json:"workspaces,omitempty"`
}
// MetadataPermissions permission information for the metadata call, includes apps/workspaces/resource the
// user didn't have access to.
type MetadataPermissions struct {
// Workspaces - The permission indication for the workspaces on the metadata request.
Workspaces *[]MetadataPermissionsWorkspacesItem `json:"workspaces,omitempty"`
// Resources - The permission indication for the Azure resources on the metadata request.
Resources *[]MetadataPermissionsResourcesItem `json:"resources,omitempty"`
// Applications - The permission indication for the Application Insights apps on the metadata request.
Applications *[]MetadataPermissionsApplicationsItem `json:"applications,omitempty"`
}
// MetadataPermissionsApplicationsItem ...
type MetadataPermissionsApplicationsItem struct {
// ResourceID - The resource ID on the permission indication.
ResourceID *string `json:"resourceId,omitempty"`
}
// MetadataPermissionsResourcesItem ...
type MetadataPermissionsResourcesItem struct {
// ResourceID - The resource ID on the permission indication.
ResourceID *string `json:"resourceId,omitempty"`
// DenyTables - The list of tables that were denied access for the resource ID.
DenyTables *[]string `json:"denyTables,omitempty"`
}
// MetadataPermissionsWorkspacesItem ...
type MetadataPermissionsWorkspacesItem struct {
// ResourceID - The resource ID on the permission indication.
ResourceID *string `json:"resourceId,omitempty"`
// DenyTables - The list of tables that were denied access for the resource ID.
DenyTables *[]string `json:"denyTables,omitempty"`
}
// MetadataQuery queries are stored pieces of KQL, along with a list of relevant metadata items.
type MetadataQuery struct {
// ID - The ID of the query.
ID *string `json:"id,omitempty"`
// DisplayName - The display name of the query.
DisplayName *string `json:"displayName,omitempty"`
// Description - The description of the query.
Description *string `json:"description,omitempty"`
// Body - The KQL body of the query.
Body *string `json:"body,omitempty"`
// Labels - The user defined labels associated with the query.
Labels *[]string `json:"labels,omitempty"`
// Tags - The tags associated with the query.
Tags interface{} `json:"tags,omitempty"`
// Properties - The properties of the query.
Properties interface{} `json:"properties,omitempty"`
// Related - The related metadata items for the query.
Related *MetadataQueryRelated `json:"related,omitempty"`
}
// MetadataQueryRelated the related metadata items for the query.
type MetadataQueryRelated struct {
// Categories - The related categories for the query.
Categories *[]string `json:"categories,omitempty"`
// Solutions - The related Log Analytics solutions for the query.
Solutions *[]string `json:"solutions,omitempty"`
// ResourceTypes - The related resource types for the query.
ResourceTypes *[]string `json:"resourceTypes,omitempty"`
// Tables - The related tables for the query.
Tables *[]string `json:"tables,omitempty"`
}
// MetadataResourceType metadata about types of Azure resources, containing relevant tables, functions,
// etc.
type MetadataResourceType struct {
// ID - The ID of the resource-type
ID *string `json:"id,omitempty"`
// Type - The type of the resource-type
Type *string `json:"type,omitempty"`
// DisplayName - The display name of the resource-type
DisplayName *string `json:"displayName,omitempty"`
// Description - The description of the resource-type
Description *string `json:"description,omitempty"`
// Labels - The user-defined labels of the resource-type
Labels *[]string `json:"labels,omitempty"`
// Tags - The tags associated with the resource-type
Tags interface{} `json:"tags,omitempty"`
// Properties - The properties of the resource-type
Properties interface{} `json:"properties,omitempty"`
// Related - The related metadata items for the resource-type
Related *MetadataResourceTypeRelated `json:"related,omitempty"`
}
// MetadataResourceTypeRelated the related metadata items for the resource-type
type MetadataResourceTypeRelated struct {
// Tables - The tables related to the resource-type
Tables *[]string `json:"tables,omitempty"`
// Functions - The functions related to the resource-type
Functions *[]string `json:"functions,omitempty"`
// Categories - The categories related to the resource-type
Categories *[]string `json:"categories,omitempty"`
// Queries - The queries related to the resource-type
Queries *[]string `json:"queries,omitempty"`
// Workspaces - The Log Analytics workspaces related to the resource-type
Workspaces *[]string `json:"workspaces,omitempty"`
// Resources - The Azure resources related to the resource-type
Resources *[]string `json:"resources,omitempty"`
}
// MetadataResults the metadata response for the app, including available tables, etc.
type MetadataResults struct {
autorest.Response `json:"-"`
// Categories - The list of categories that are referenced in this metadata response.
Categories *[]MetadataCategory `json:"categories,omitempty"`
// ResourceTypes - The list of resource types that are referenced in this metadata response.
ResourceTypes *[]MetadataResourceType `json:"resourceTypes,omitempty"`
// Solutions - The list of Log Analytics solutions installed on the workspace.
Solutions *[]MetadataSolution `json:"solutions,omitempty"`
// Tables - The list of tables and columns that comprise the schema of the workspace.
Tables *[]MetadataTable `json:"tables,omitempty"`
// Functions - The list of functions stored on the workspace, or introduced by solutions etc.
Functions *[]MetadataFunction `json:"functions,omitempty"`
// Queries - The list of saved queries stored on the workspace, or introduced by solutions, resource types, etc.
Queries *[]MetadataQuery `json:"queries,omitempty"`
// Applications - The list of Application Insights apps that were referenced in the metadata request.
Applications *[]MetadataApplication `json:"applications,omitempty"`
// Workspaces - The list of Log Analytics workspaces that were referenced in the metadata request.
Workspaces *[]MetadataWorkspace `json:"workspaces,omitempty"`
// Resources - The list of Azure resources that were referenced in the metadata request.
Resources *[]interface{} `json:"resources,omitempty"`
// Permissions - The list of permission rules that affected the metadata request.
Permissions *[]MetadataPermissions `json:"permissions,omitempty"`
}
// MetadataSolution solutions can group tables and functions that are associated with a certain Azure Log
// Analytics offering.
type MetadataSolution struct {
// ID - The ID of the Log Analytics solution
ID *string `json:"id,omitempty"`
// Name - The name of the Log Analytics solution
Name *string `json:"name,omitempty"`
// DisplayName - The display name of the Log Analytics solution
DisplayName *string `json:"displayName,omitempty"`
// Description - The description of the Log Analytics solution
Description *string `json:"description,omitempty"`
// Tags - The tags that are associated with the Log Analytics solution
Tags interface{} `json:"tags,omitempty"`
// Properties - The properties of the Log Analytics solution
Properties interface{} `json:"properties,omitempty"`
// Related - The related metadata items for the Log Analytics solution
Related *MetadataSolutionRelated `json:"related,omitempty"`
}
// MetadataSolutionRelated the related metadata items for the Log Analytics solution
type MetadataSolutionRelated struct {
// Tables - The tables related to the Log Analytics solution
Tables *[]string `json:"tables,omitempty"`
// Functions - The functions related to the Log Analytics solution
Functions *[]string `json:"functions,omitempty"`
// Categories - The categories related to the Log Analytics solution
Categories *[]string `json:"categories,omitempty"`
// Queries - The saved queries related to the Log Analytics solution
Queries *[]string `json:"queries,omitempty"`
// Workspaces - The Workspaces referenced in the metadata request that are related to the Log Analytics solution
Workspaces *[]string `json:"workspaces,omitempty"`
}
// MetadataTable tables are part of the workspace schema, and contain a list of columns and a reference to
// other relevant metadata items.
type MetadataTable struct {
// ID - The ID of the table
ID *string `json:"id,omitempty"`
// Name - The name of the table
Name *string `json:"name,omitempty"`
// Description - The description of the table
Description *string `json:"description,omitempty"`
// TimespanColumn - The column associated with the timespan query parameter for the table
TimespanColumn *string `json:"timespanColumn,omitempty"`
// Labels - The user defined labels of the table
Labels *[]string `json:"labels,omitempty"`
// Tags - The tags associated with the table
Tags interface{} `json:"tags,omitempty"`
// Properties - The properties of the table
Properties interface{} `json:"properties,omitempty"`
// Columns - The list of columns defined on the table
Columns *[]MetadataTableColumnsItem `json:"columns,omitempty"`
// Related - The related metadata items for the table
Related *MetadataTableRelated `json:"related,omitempty"`
}
// MetadataTableColumnsItem ...
type MetadataTableColumnsItem struct {
// Name - The name of the column
Name *string `json:"name,omitempty"`
// Description - The description of the column
Description *string `json:"description,omitempty"`
// Type - The data type of the column. Possible values include: 'Bool', 'Datetime', 'Dynamic', 'Int', 'Long', 'Real', 'String'
Type MetadataColumnDataType `json:"type,omitempty"`
// IsPreferredFacet - A flag indicating this column is a preferred facet
IsPreferredFacet *bool `json:"isPreferredFacet,omitempty"`
// Source - an indication of the source of the column, used only when multiple workspaces have conflicting definition for the column
Source interface{} `json:"source,omitempty"`
}
// MetadataTableRelated the related metadata items for the table
type MetadataTableRelated struct {
// Categories - The related categories for the table
Categories *[]string `json:"categories,omitempty"`
// Solutions - The related Log Analytics solutions for the table
Solutions *[]string `json:"solutions,omitempty"`
// ResourceTypes - The related resource types for the table
ResourceTypes *[]string `json:"resourceTypes,omitempty"`
// Workspaces - The related Log Analytics workspaces for the table
Workspaces *[]string `json:"workspaces,omitempty"`
// Functions - The related functions for the table
Functions *[]string `json:"functions,omitempty"`
// Queries - The related saved queries for the table
Queries *[]string `json:"queries,omitempty"`
}
// MetadataWorkspace log Analytics workspaces that were part of the metadata request and that the user has
// access to.
type MetadataWorkspace struct {
// ID - The ID of the Log Analytics workspace.
ID *string `json:"id,omitempty"`
// ResourceID - The ARM resource ID of the Log Analytics workspace.
ResourceID *string `json:"resourceId,omitempty"`
// Name - The name of the Log Analytics workspace.
Name *string `json:"name,omitempty"`
// Region - The Azure region of the Log Analytics workspace.
Region *string `json:"region,omitempty"`
// Related - The related metadata items for the Log Analytics workspace.
Related *MetadataWorkspaceRelated `json:"related,omitempty"`
}
// MetadataWorkspaceRelated the related metadata items for the Log Analytics workspace.
type MetadataWorkspaceRelated struct {
// Tables - The related tables for the Log Analytics workspace.
Tables *[]string `json:"tables,omitempty"`
// Solutions - The related Log Analytics solutions for the Log Analytics workspace.
Solutions *[]string `json:"solutions,omitempty"`
// ResourceTypes - The related resource types for the Log Analytics workspace.
ResourceTypes *[]string `json:"resourceTypes,omitempty"`
// Functions - The related functions for the Log Analytics workspace.
Functions *[]string `json:"functions,omitempty"`
// Resources - The related Azure resources for the Log Analytics workspace.
Resources *[]string `json:"resources,omitempty"`
}
// QueryBody the Analytics query. Learn more about the [Analytics query
// syntax](https://azure.microsoft.com/documentation/articles/app-insights-analytics-reference/)
type QueryBody struct {
// Query - The query to execute.
Query *string `json:"query,omitempty"`
// Timespan - Optional. The timespan over which to query data. This is an ISO8601 time period value. This timespan is applied in addition to any that are specified in the query expression.
Timespan *string `json:"timespan,omitempty"`
// Workspaces - A list of workspaces that are included in the query.
Workspaces *[]string `json:"workspaces,omitempty"`
}
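// Illustrative sketch, not part of the original file: building a QueryBody
// that asks for the last 24 hours of data. The table name is a placeholder;
// Timespan uses the ISO8601 period form described above and is applied in
// addition to any time filter inside the query string itself.
func exampleQueryBody() QueryBody {
	query := "AzureActivity" // placeholder Kusto query
	timespan := "PT24H"      // last 24 hours
	return QueryBody{Query: &query, Timespan: &timespan}
}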
// QueryResults contains the tables, columns & rows resulting from a query.
type QueryResults struct {
autorest.Response `json:"-"`
// Tables - The list of tables, columns and rows.
Tables *[]Table `json:"tables,omitempty"`
}
// Table contains the columns and rows for one table in a query response.
type Table struct {
// Name - The name of the table.
Name *string `json:"name,omitempty"`
// Columns - The list of columns in this table.
Columns *[]Column `json:"columns,omitempty"`
// Rows - The resulting rows from this query.
Rows *[][]interface{} `json:"rows,omitempty"`
} | |
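// Illustrative sketch, not part of the original file: decoding a raw query
// response into the QueryResults/Table shapes above and walking each row.
// Assumes encoding/json and fmt are imported; the payload is expected in the
// tables/columns/rows form the struct tags above describe.
func dumpQueryResults(payload []byte) error {
	var results QueryResults
	if err := json.Unmarshal(payload, &results); err != nil {
		return err
	}
	if results.Tables == nil {
		return nil
	}
	for _, table := range *results.Tables {
		if table.Name != nil {
			fmt.Println("table:", *table.Name)
		}
		if table.Rows == nil {
			continue
		}
		for _, row := range *table.Rows {
			fmt.Println("  row:", row) // each row is a []interface{} of cell values
		}
	}
	return nil
}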
non_standard_payment.rs | use casper_engine_test_support::{
internal::{
utils, DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder,
DEFAULT_GAS_PRICE, DEFAULT_PAYMENT, DEFAULT_RUN_GENESIS_REQUEST,
},
DEFAULT_ACCOUNT_ADDR, MINIMUM_ACCOUNT_CREATION_BALANCE,
};
use casper_execution_engine::shared::motes::Motes;
use casper_types::{account::AccountHash, runtime_args, RuntimeArgs, U512};
const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([42u8; 32]);
const DO_NOTHING_WASM: &str = "do_nothing.wasm";
const CONTRACT_TRANSFER_TO_ACCOUNT: &str = "transfer_to_account_u512.wasm";
const TRANSFER_MAIN_PURSE_TO_NEW_PURSE_WASM: &str = "transfer_main_purse_to_new_purse.wasm";
const NAMED_PURSE_PAYMENT_WASM: &str = "named_purse_payment.wasm";
const ARG_TARGET: &str = "target";
const ARG_AMOUNT: &str = "amount";
const ARG_PURSE_NAME: &str = "purse_name";
const ARG_DESTINATION: &str = "destination";
#[ignore]
#[test]
fn should_charge_non_main_purse() {
// as account_1, create & fund a new purse and use that to pay for something
// instead of account_1's main purse
const TEST_PURSE_NAME: &str = "test-purse";
let account_1_account_hash = ACCOUNT_1_ADDR;
let payment_purse_amount = *DEFAULT_PAYMENT;
let account_1_funding_amount = U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE);
let account_1_purse_funding_amount = *DEFAULT_PAYMENT;
let mut builder = InMemoryWasmTestBuilder::default();
let setup_exec_request = ExecuteRequestBuilder::standard(
*DEFAULT_ACCOUNT_ADDR,
CONTRACT_TRANSFER_TO_ACCOUNT,
runtime_args! { ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => account_1_funding_amount },
)
.build();
let create_purse_exec_request = ExecuteRequestBuilder::standard(
ACCOUNT_1_ADDR,
TRANSFER_MAIN_PURSE_TO_NEW_PURSE_WASM,
runtime_args! { ARG_DESTINATION => TEST_PURSE_NAME, ARG_AMOUNT => account_1_purse_funding_amount },
)
.build();
builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST);
builder.exec(setup_exec_request).expect_success().commit();
builder
.exec(create_purse_exec_request)
.expect_success()
.commit();
let transfer_result = builder.finish();
// get account_1
let account_1 = transfer_result
.builder()
.get_account(ACCOUNT_1_ADDR)
.expect("should have account");
// get purse
let purse_key = account_1.named_keys()[TEST_PURSE_NAME];
let purse = purse_key.into_uref().expect("should have uref");
let purse_starting_balance = builder.get_purse_balance(purse);
assert_eq!(
purse_starting_balance, account_1_purse_funding_amount,
"purse should be funded with expected amount"
);
// should be able to pay for exec using new purse
let account_payment_exec_request = {
let deploy = DeployItemBuilder::new()
.with_address(ACCOUNT_1_ADDR)
.with_session_code(DO_NOTHING_WASM, RuntimeArgs::default())
.with_payment_code(
NAMED_PURSE_PAYMENT_WASM,
runtime_args! {
ARG_PURSE_NAME => TEST_PURSE_NAME,
ARG_AMOUNT => payment_purse_amount
},
)
.with_authorization_keys(&[account_1_account_hash])
.with_deploy_hash([3; 32])
.build();
ExecuteRequestBuilder::new().push_deploy(deploy).build()
};
let transfer_result = builder
.exec(account_payment_exec_request)
.expect_success()
.commit()
.finish();
let response = transfer_result
.builder()
.get_exec_response(2) | let result = utils::get_success_result(&response);
let gas = result.cost();
let motes = Motes::from_gas(gas, DEFAULT_GAS_PRICE).expect("should have motes");
let expected_resting_balance = account_1_purse_funding_amount - motes.value();
let purse_final_balance = builder.get_purse_balance(purse);
assert_eq!(
purse_final_balance, expected_resting_balance,
"purse resting balance should equal funding amount minus exec costs"
);
} | .expect("there should be a response")
.clone();
|
program.ts | import { EventEmitter } from 'events'
import TypedEventEmitter from 'typed-emitter'
import { Argv } from 'yargs'
import createYargs from 'yargs/yargs'
import { Command, command } from './command'
import { Repl, repl } from './repl'
import { Arguments } from './command'
import { isPromise } from './utils'
interface Events {
run: (command: string | readonly string[]) => void
}
type ProgramOptions = {
/**
* Program description. Can also be set by calling
* `program().description(...)`.
*
* Defaults to `undefined`.
*/
description?: string
/**
* Sets a custom REPL prompt. Can also be set by calling
* `program().prompt(...)`.
*
* Defaults to `> `.
*/
prompt?: string
/**
* Whether or not to add a global help command that displays an overview of
* commands.
*
* Defaults to `true`.
*/
help?: boolean
/**
* Whether or not to add a global version command that displays the version as
* specified in the package.json file.
*
* Defaults to `true`.
*/
version?: boolean
}
/**
* Creates a new bandersnatch program.
*/
export function program(options: ProgramOptions = {}) {
return new Program(options)
}
function extractCommandFromProcess() {
return process.argv.slice(2)
}
export class Program extends (EventEmitter as new () => TypedEventEmitter<
Events
>) {
private commands: Command<any>[] = []
private replInstance?: Repl
constructor(private options: ProgramOptions = {}) {
super()
}
/**
* Set the program description.
*/
public description(description: string) {
this.options.description = description
return this
}
/**
* Sets a custom REPL prompt.
*/
public prompt(prompt: string) {
this.options.prompt = prompt
return this
}
/**
* Create a new yargs instance. This method may change at any time, not
* intended for public use.
*
* @private
*/
public createYargsInstance() {
const yargs = createYargs()
this.options.description && yargs.usage(this.options.description)
// Help accepts boolean
yargs.help(this.options.help !== false)
// Version must be false or undefined
this.options.version !== false ? yargs.version() : yargs.version(false)
// Non-configurable options
yargs.recommendCommands()
yargs.strict()
yargs.demandCommand()
// Hidden completion command
yargs.completion('completion', false)
// Custom fail function.
yargs.fail(this.failHandler.bind(this))
// In case we're in a REPL session, do not exit on errors.
yargs.exitProcess(!this.replInstance)
// Add commands
this.commands.forEach((command) => {
command.toYargs(yargs, (command: string) => {
return this.run(command)
})
})
return yargs
}
/**
* Adds a new command to the program.
*/
public add<T>(command: Command<T>) {
this.commands.push(command)
return this
}
/**
* Adds a new command to the program and marks it as the default command.
*/
public default<T>(command: Command<T>) {
this.commands.push(command.default())
return this
}
/**
* Evaluate command (or process.argv) and return promise.
*/
public run(command?: string | readonly string[]) {
const cmd = command || extractCommandFromProcess()
this.emit('run', cmd)
// Return promise resolving to the return value of the command
// handler.
return new Promise((resolve, reject) => {
this.createYargsInstance().parse(
cmd,
{},
(err, argv: Arguments, output) => {
/**
* From the yargs docs:
* > any text that would have been output by yargs to the terminal,
* > had a callback not been provided.
* http://yargs.js.org/docs/#api-parseargs-context-parsecallback
*
* Seems that this is primarily used for built-in commands like
* --version and --help.
*/
if (output) {
console.log(output)
}
/**
* From the yargs docs:
* > Populated if any validation errors raised while parsing.
* http://yargs.js.org/docs/#api-parseargs-context-parsecallback
*/
if (err) {
console.error(err)
}
if (isPromise(argv.__promise)) {
// Delegate resolve/reject to promise returned from handler
argv.__promise.then(resolve).catch(reject)
} else {
// Resolve with void if promise is not available, which is the case
// with e.g. --version and --help
resolve()
}
}
)
})
}
/**
* Run event loop which reads command from stdin.
*/
public repl() {
this.replInstance = repl(this, this.options.prompt)
| .action(() => {
process.exit()
})
)
this.replInstance.start()
return this.replInstance
}
/**
* When argv is set, run the program, otherwise start repl loop.
*/
public runOrRepl() {
return extractCommandFromProcess().length ? this.run() : this.repl()
}
/**
* Returns `true` if program is running a repl loop, `false` otherwise.
*/
public isRepl() {
return !!this.replInstance
}
/**
* Method to execute when a failure occurs, rather than printing the failure
* message.
*
* Called with the failure message that would have been printed, the Error
* instance originally thrown and yargs state when the failure occured.
*/
private failHandler(msg: string, err: Error, yargs: Argv) {
if (msg) {
// Simply throw validation messages to reject runner promise
throw new Error(msg)
}
}
} | // Add exit command
this.add(
command('exit')
.description('Exit the application') |
util.go | package main
import (
"crypto/rand"
"fmt"
"io"
"os"
"strconv"
"sync/atomic"
)
// CreateFixedSizedFile creates a fixed sized file.
func CreateFixedSizedFile(size uint64) {
fixFile := "/tmp/fix" + strconv.FormatUint(size, 10)
fmt.Println("fixFile", fixFile)
out, _ := os.Create(fixFile)
_, _ = io.CopyN(out, rand.Reader, int64(size))
_ = out.Close()
}
var x int64 // nolint
// CreateTmpFile creates a temp file.
func | () string {
seq := atomic.AddInt64(&x, 1)
if seq >= 1000 { // nolint:gomnd
atomic.StoreInt64(&x, 1)
seq = 1
}
tempFile := "/tmp/fub" + strconv.FormatInt(seq, 10)
fmt.Println("tempFile", tempFile)
return tempFile
}
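// Illustrative usage sketch, not part of the original file: the two helpers
// combined. CreateFixedSizedFile writes /tmp/fix<size> filled with random
// bytes; CreateTmpFile only returns a rotating /tmp/fub1../tmp/fub999 name
// and does not create the file itself.
func exampleUsage() {
	CreateFixedSizedFile(1024) // creates /tmp/fix1024
	scratch := CreateTmpFile() // e.g. "/tmp/fub1"
	fmt.Println("scratch file:", scratch)
}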
| CreateTmpFile |
GameAnalytics.ts | module gameanalytics
{
import GAThreading = gameanalytics.threading.GAThreading;
import TimedBlock = gameanalytics.threading.TimedBlock;
import GALogger = gameanalytics.logging.GALogger;
import GAStore = gameanalytics.store.GAStore;
import GAState = gameanalytics.state.GAState;
import GAHTTPApi = gameanalytics.http.GAHTTPApi;
import GADevice = gameanalytics.device.GADevice;
import GAValidator = gameanalytics.validators.GAValidator;
import EGAHTTPApiResponse = gameanalytics.http.EGAHTTPApiResponse;
import GAUtilities = gameanalytics.utilities.GAUtilities;
import GAEvents = gameanalytics.events.GAEvents;
export class |
{
private static initTimedBlockId:number = -1;
public static methodMap:{[id:string]: (...args: any[]) => void} = {};
public static init(): void
{
GADevice.touch();
GameAnalytics.methodMap['configureAvailableCustomDimensions01'] = GameAnalytics.configureAvailableCustomDimensions01;
GameAnalytics.methodMap['configureAvailableCustomDimensions02'] = GameAnalytics.configureAvailableCustomDimensions02;
GameAnalytics.methodMap['configureAvailableCustomDimensions03'] = GameAnalytics.configureAvailableCustomDimensions03;
GameAnalytics.methodMap['configureAvailableResourceCurrencies'] = GameAnalytics.configureAvailableResourceCurrencies;
GameAnalytics.methodMap['configureAvailableResourceItemTypes'] = GameAnalytics.configureAvailableResourceItemTypes;
GameAnalytics.methodMap['configureBuild'] = GameAnalytics.configureBuild;
GameAnalytics.methodMap['configureSdkGameEngineVersion'] = GameAnalytics.configureSdkGameEngineVersion;
GameAnalytics.methodMap['configureGameEngineVersion'] = GameAnalytics.configureGameEngineVersion;
GameAnalytics.methodMap['configureUserId'] = GameAnalytics.configureUserId;
GameAnalytics.methodMap['initialize'] = GameAnalytics.initialize;
GameAnalytics.methodMap['addBusinessEvent'] = GameAnalytics.addBusinessEvent;
GameAnalytics.methodMap['addResourceEvent'] = GameAnalytics.addResourceEvent;
GameAnalytics.methodMap['addProgressionEvent'] = GameAnalytics.addProgressionEvent;
GameAnalytics.methodMap['addDesignEvent'] = GameAnalytics.addDesignEvent;
GameAnalytics.methodMap['addErrorEvent'] = GameAnalytics.addErrorEvent;
GameAnalytics.methodMap['setEnabledInfoLog'] = GameAnalytics.setEnabledInfoLog;
GameAnalytics.methodMap['setEnabledVerboseLog'] = GameAnalytics.setEnabledVerboseLog;
GameAnalytics.methodMap['setEnabledManualSessionHandling'] = GameAnalytics.setEnabledManualSessionHandling;
GameAnalytics.methodMap['setEnabledEventSubmission'] = GameAnalytics.setEnabledEventSubmission;
GameAnalytics.methodMap['setCustomDimension01'] = GameAnalytics.setCustomDimension01;
GameAnalytics.methodMap['setCustomDimension02'] = GameAnalytics.setCustomDimension02;
GameAnalytics.methodMap['setCustomDimension03'] = GameAnalytics.setCustomDimension03;
GameAnalytics.methodMap['setFacebookId'] = GameAnalytics.setFacebookId;
GameAnalytics.methodMap['setGender'] = GameAnalytics.setGender;
GameAnalytics.methodMap['setBirthYear'] = GameAnalytics.setBirthYear;
GameAnalytics.methodMap['setEventProcessInterval'] = GameAnalytics.setEventProcessInterval;
GameAnalytics.methodMap['startSession'] = GameAnalytics.startSession;
GameAnalytics.methodMap['endSession'] = GameAnalytics.endSession;
GameAnalytics.methodMap['onStop'] = GameAnalytics.onStop;
GameAnalytics.methodMap['onResume'] = GameAnalytics.onResume;
GameAnalytics.methodMap['addCommandCenterListener'] = GameAnalytics.addCommandCenterListener;
GameAnalytics.methodMap['removeCommandCenterListener'] = GameAnalytics.removeCommandCenterListener;
GameAnalytics.methodMap['getCommandCenterValueAsString'] = GameAnalytics.getCommandCenterValueAsString;
GameAnalytics.methodMap['isCommandCenterReady'] = GameAnalytics.isCommandCenterReady;
GameAnalytics.methodMap['getConfigurationsContentAsString'] = GameAnalytics.getConfigurationsContentAsString;
if(typeof window !== 'undefined' && typeof window['GameAnalytics'] !== 'undefined' && typeof window['GameAnalytics']['q'] !== 'undefined')
{
var q:any[] = window['GameAnalytics']['q'];
for (let i in q)
{
GameAnalytics.gaCommand.apply(null, q[i]);
}
}
}
public static gaCommand(...args: any[]): void
{
if(args.length > 0)
{
if(args[0] in gameanalytics.GameAnalytics.methodMap)
{
if(args.length > 1)
{
gameanalytics.GameAnalytics.methodMap[args[0]].apply(null, Array.prototype.slice.call(args, 1));
}
else
{
gameanalytics.GameAnalytics.methodMap[args[0]]();
}
}
}
}
public static configureAvailableCustomDimensions01(customDimensions:Array<string> = []): void
{
GAThreading.performTaskOnGAThread(() =>
{
if(GameAnalytics.isSdkReady(true, false))
{
GALogger.w("Available custom dimensions must be set before SDK is initialized");
return;
}
GAState.setAvailableCustomDimensions01(customDimensions);
});
}
public static configureAvailableCustomDimensions02(customDimensions:Array<string> = []): void
{
GAThreading.performTaskOnGAThread(() =>
{
if(GameAnalytics.isSdkReady(true, false))
{
GALogger.w("Available custom dimensions must be set before SDK is initialized");
return;
}
GAState.setAvailableCustomDimensions02(customDimensions);
});
}
public static configureAvailableCustomDimensions03(customDimensions:Array<string> = []): void
{
GAThreading.performTaskOnGAThread(() =>
{
if(GameAnalytics.isSdkReady(true, false))
{
GALogger.w("Available custom dimensions must be set before SDK is initialized");
return;
}
GAState.setAvailableCustomDimensions03(customDimensions);
});
}
public static configureAvailableResourceCurrencies(resourceCurrencies:Array<string> = []): void
{
GAThreading.performTaskOnGAThread(() =>
{
if (GameAnalytics.isSdkReady(true, false))
{
GALogger.w("Available resource currencies must be set before SDK is initialized");
return;
}
GAState.setAvailableResourceCurrencies(resourceCurrencies);
});
}
public static configureAvailableResourceItemTypes(resourceItemTypes:Array<string> = []): void
{
GAThreading.performTaskOnGAThread(() =>
{
if (GameAnalytics.isSdkReady(true, false))
{
GALogger.w("Available resource item types must be set before SDK is initialized");
return;
}
GAState.setAvailableResourceItemTypes(resourceItemTypes);
});
}
public static configureBuild(build:string = ""): void
{
GAThreading.performTaskOnGAThread(() =>
{
if (GameAnalytics.isSdkReady(true, false))
{
GALogger.w("Build version must be set before SDK is initialized.");
return;
}
if (!GAValidator.validateBuild(build))
{
GALogger.i("Validation fail - configure build: Cannot be null, empty or above 32 length. String: " + build);
return;
}
GAState.setBuild(build);
});
}
public static configureSdkGameEngineVersion(sdkGameEngineVersion:string = ""): void
{
GAThreading.performTaskOnGAThread(() =>
{
if (GameAnalytics.isSdkReady(true, false))
{
return;
}
if (!GAValidator.validateSdkWrapperVersion(sdkGameEngineVersion))
{
GALogger.i("Validation fail - configure sdk version: Sdk version not supported. String: " + sdkGameEngineVersion);
return;
}
GADevice.sdkGameEngineVersion = sdkGameEngineVersion;
});
}
public static configureGameEngineVersion(gameEngineVersion:string = ""): void
{
GAThreading.performTaskOnGAThread(() =>
{
if (GameAnalytics.isSdkReady(true, false))
{
return;
}
if (!GAValidator.validateEngineVersion(gameEngineVersion))
{
GALogger.i("Validation fail - configure game engine version: Game engine version not supported. String: " + gameEngineVersion);
return;
}
GADevice.gameEngineVersion = gameEngineVersion;
});
}
public static configureUserId(uId:string = ""): void
{
GAThreading.performTaskOnGAThread(() =>
{
if (GameAnalytics.isSdkReady(true, false))
{
GALogger.w("A custom user id must be set before SDK is initialized.");
return;
}
if (!GAValidator.validateUserId(uId))
{
GALogger.i("Validation fail - configure user_id: Cannot be null, empty or above 64 length. Will use default user_id method. Used string: " + uId);
return;
}
GAState.setUserId(uId);
});
}
public static initialize(gameKey:string = "", gameSecret:string = ""): void
{
GADevice.updateConnectionType();
var timedBlock:TimedBlock = GAThreading.createTimedBlock();
timedBlock.async = true;
GameAnalytics.initTimedBlockId = timedBlock.id;
timedBlock.block = () =>
{
if (GameAnalytics.isSdkReady(true, false))
{
GALogger.w("SDK already initialized. Can only be called once.");
return;
}
if (!GAValidator.validateKeys(gameKey, gameSecret))
{
GALogger.w("SDK failed initialize. Game key or secret key is invalid. Can only contain characters A-z 0-9, gameKey is 32 length, gameSecret is 40 length. Failed keys - gameKey: " + gameKey + ", secretKey: " + gameSecret);
return;
}
GAState.setKeys(gameKey, gameSecret);
GameAnalytics.internalInitialize();
};
GAThreading.performTimedBlockOnGAThread(timedBlock);
}
public static addBusinessEvent(currency:string = "", amount:number = 0, itemType:string = "", itemId:string = "", cartType:string = ""/*, fields:{[id:string]: any} = {}*/): void
{
GADevice.updateConnectionType();
GAThreading.performTaskOnGAThread(() =>
{
if (!GameAnalytics.isSdkReady(true, true, "Could not add business event"))
{
return;
}
// Send to events
GAEvents.addBusinessEvent(currency, amount, itemType, itemId, cartType, {});
});
}
public static addResourceEvent(flowType:EGAResourceFlowType = EGAResourceFlowType.Undefined, currency:string = "", amount:number = 0, itemType:string = "", itemId:string = ""/*, fields:{[id:string]: any} = {}*/): void
{
GADevice.updateConnectionType();
GAThreading.performTaskOnGAThread(() =>
{
if (!GameAnalytics.isSdkReady(true, true, "Could not add resource event"))
{
return;
}
GAEvents.addResourceEvent(flowType, currency, amount, itemType, itemId, {});
});
}
public static addProgressionEvent(progressionStatus:EGAProgressionStatus = EGAProgressionStatus.Undefined, progression01:string = "", progression02:string = "", progression03:string = "", score?:any/*, fields:{[id:string]: any} = {}*/): void
{
GADevice.updateConnectionType();
GAThreading.performTaskOnGAThread(() =>
{
if(!GameAnalytics.isSdkReady(true, true, "Could not add progression event"))
{
return;
}
// Send to events
var sendScore:boolean = typeof score === "number";
// if(typeof score === "object")
// {
// fields = score as {[id:string]: any};
// }
GAEvents.addProgressionEvent(progressionStatus, progression01, progression02, progression03, sendScore ? score : 0, sendScore, {});
});
}
public static addDesignEvent(eventId:string, value?:any/*, fields:{[id:string]: any} = {}*/): void
{
GADevice.updateConnectionType();
GAThreading.performTaskOnGAThread(() =>
{
if(!GameAnalytics.isSdkReady(true, true, "Could not add design event"))
{
return;
}
var sendValue:boolean = typeof value === "number";
// if(typeof value === "object")
// {
// fields = value as {[id:string]: any};
// }
GAEvents.addDesignEvent(eventId, sendValue ? value : 0, sendValue, {});
});
}
public static addErrorEvent(severity:EGAErrorSeverity = EGAErrorSeverity.Undefined, message:string = ""/*, fields:{[id:string]: any} = {}*/): void
{
GADevice.updateConnectionType();
GAThreading.performTaskOnGAThread(() =>
{
if (!GameAnalytics.isSdkReady(true, true, "Could not add error event"))
{
return;
}
GAEvents.addErrorEvent(severity, message, {});
});
}
public static setEnabledInfoLog(flag:boolean = false): void
{
GAThreading.performTaskOnGAThread(() =>
{
if (flag)
{
GALogger.setInfoLog(flag);
GALogger.i("Info logging enabled");
}
else
{
GALogger.i("Info logging disabled");
GALogger.setInfoLog(flag);
}
});
}
public static setEnabledVerboseLog(flag:boolean = false): void
{
GAThreading.performTaskOnGAThread(() =>
{
if (flag)
{
GALogger.setVerboseLog(flag);
GALogger.i("Verbose logging enabled");
}
else
{
GALogger.i("Verbose logging disabled");
GALogger.setVerboseLog(flag);
}
});
}
public static setEnabledManualSessionHandling(flag:boolean = false): void
{
GAThreading.performTaskOnGAThread(() =>
{
GAState.setManualSessionHandling(flag);
});
}
public static setEnabledEventSubmission(flag:boolean = false): void
{
GAThreading.performTaskOnGAThread(() =>
{
if (flag)
{
GAState.setEnabledEventSubmission(flag);
GALogger.i("Event submission enabled");
}
else
{
GALogger.i("Event submission disabled");
GAState.setEnabledEventSubmission(flag);
}
});
}
public static setCustomDimension01(dimension:string = ""): void
{
GAThreading.performTaskOnGAThread(() =>
{
if (!GAValidator.validateDimension01(dimension, GAState.getAvailableCustomDimensions01()))
{
GALogger.w("Could not set custom01 dimension value to '" + dimension + "'. Value not found in available custom01 dimension values");
return;
}
GAState.setCustomDimension01(dimension);
});
}
public static setCustomDimension02(dimension:string = ""): void
{
GAThreading.performTaskOnGAThread(() =>
{
if (!GAValidator.validateDimension02(dimension, GAState.getAvailableCustomDimensions02()))
{
GALogger.w("Could not set custom02 dimension value to '" + dimension + "'. Value not found in available custom02 dimension values");
return;
}
GAState.setCustomDimension02(dimension);
});
}
public static setCustomDimension03(dimension:string = ""): void
{
GAThreading.performTaskOnGAThread(() =>
{
if (!GAValidator.validateDimension03(dimension, GAState.getAvailableCustomDimensions03()))
{
GALogger.w("Could not set custom03 dimension value to '" + dimension + "'. Value not found in available custom03 dimension values");
return;
}
GAState.setCustomDimension03(dimension);
});
}
public static setFacebookId(facebookId:string = ""): void
{
GAThreading.performTaskOnGAThread(() =>
{
if (GAValidator.validateFacebookId(facebookId))
{
GAState.setFacebookId(facebookId);
}
});
}
public static setGender(gender:EGAGender = EGAGender.Undefined): void
{
GAThreading.performTaskOnGAThread(() =>
{
if (GAValidator.validateGender(gender))
{
GAState.setGender(gender);
}
});
}
public static setBirthYear(birthYear:number = 0): void
{
GAThreading.performTaskOnGAThread(() =>
{
if (GAValidator.validateBirthyear(birthYear))
{
GAState.setBirthYear(birthYear);
}
});
}
public static setEventProcessInterval(intervalInSeconds:number): void
{
GAThreading.performTaskOnGAThread(() =>
{
GAThreading.setEventProcessInterval(intervalInSeconds);
});
}
public static startSession(): void
{
//if(GAState.getUseManualSessionHandling())
{
if(!GAState.isInitialized())
{
return;
}
var timedBlock:TimedBlock = GAThreading.createTimedBlock();
timedBlock.async = true;
GameAnalytics.initTimedBlockId = timedBlock.id;
timedBlock.block = () =>
{
if(GAState.isEnabled() && GAState.sessionIsStarted())
{
GAThreading.endSessionAndStopQueue();
}
GameAnalytics.resumeSessionAndStartQueue();
};
GAThreading.performTimedBlockOnGAThread(timedBlock);
}
}
public static endSession(): void
{
//if(GAState.getUseManualSessionHandling())
{
GameAnalytics.onStop();
}
}
public static onStop(): void
{
GAThreading.performTaskOnGAThread(() =>
{
try
{
GAThreading.endSessionAndStopQueue();
}
catch (Exception)
{
}
});
}
public static onResume(): void
{
var timedBlock:TimedBlock = GAThreading.createTimedBlock();
timedBlock.async = true;
GameAnalytics.initTimedBlockId = timedBlock.id;
timedBlock.block = () =>
{
GameAnalytics.resumeSessionAndStartQueue();
};
GAThreading.performTimedBlockOnGAThread(timedBlock);
}
public static getCommandCenterValueAsString(key:string, defaultValue:string = null):string
{
return GAState.getConfigurationStringValue(key, defaultValue);
}
public static isCommandCenterReady():boolean
{
return GAState.isCommandCenterReady();
}
public static addCommandCenterListener(listener:{ onCommandCenterUpdated:() => void }):void
{
GAState.addCommandCenterListener(listener);
}
public static removeCommandCenterListener(listener:{ onCommandCenterUpdated:() => void }):void
{
GAState.removeCommandCenterListener(listener);
}
public static getConfigurationsContentAsString():string
{
return GAState.getConfigurationsContentAsString();
}
private static internalInitialize(): void
{
GAState.ensurePersistedStates();
GAStore.setItem(GAState.DefaultUserIdKey, GAState.getDefaultId());
GAState.setInitialized(true);
GameAnalytics.newSession();
if (GAState.isEnabled())
{
GAThreading.ensureEventQueueIsRunning();
}
}
private static newSession(): void
{
GALogger.i("Starting a new session.");
// make sure the current custom dimensions are valid
GAState.validateAndFixCurrentDimensions();
GAHTTPApi.instance.requestInit(GameAnalytics.startNewSessionCallback);
}
private static startNewSessionCallback(initResponse:EGAHTTPApiResponse, initResponseDict:{[key:string]: any}): void
{
// init is ok
if(initResponse === EGAHTTPApiResponse.Ok && initResponseDict)
{
// set the time offset - how many seconds the local time is different from servertime
var timeOffsetSeconds:number = 0;
if(initResponseDict["server_ts"])
{
var serverTs:number = initResponseDict["server_ts"] as number;
timeOffsetSeconds = GAState.calculateServerTimeOffset(serverTs);
}
initResponseDict["time_offset"] = timeOffsetSeconds;
// insert new config in sql lite cross session storage
GAStore.setItem(GAState.SdkConfigCachedKey, GAUtilities.encode64(JSON.stringify(initResponseDict)));
// set new config and cache in memory
GAState.instance.sdkConfigCached = initResponseDict;
GAState.instance.sdkConfig = initResponseDict;
GAState.instance.initAuthorized = true;
}
else if(initResponse == EGAHTTPApiResponse.Unauthorized)
{
GALogger.w("Initialize SDK failed - Unauthorized");
GAState.instance.initAuthorized = false;
}
else
{
// log the status if no connection
if(initResponse === EGAHTTPApiResponse.NoResponse || initResponse === EGAHTTPApiResponse.RequestTimeout)
{
GALogger.i("Init call (session start) failed - no response. Could be offline or timeout.");
}
else if(initResponse === EGAHTTPApiResponse.BadResponse || initResponse === EGAHTTPApiResponse.JsonEncodeFailed || initResponse === EGAHTTPApiResponse.JsonDecodeFailed)
{
GALogger.i("Init call (session start) failed - bad response. Could be bad response from proxy or GA servers.");
}
else if(initResponse === EGAHTTPApiResponse.BadRequest || initResponse === EGAHTTPApiResponse.UnknownResponseCode)
{
GALogger.i("Init call (session start) failed - bad request or unknown response.");
}
// init call failed (perhaps offline)
if(GAState.instance.sdkConfig == null)
{
if(GAState.instance.sdkConfigCached != null)
{
GALogger.i("Init call (session start) failed - using cached init values.");
// set last cross session stored config init values
GAState.instance.sdkConfig = GAState.instance.sdkConfigCached;
}
else
{
GALogger.i("Init call (session start) failed - using default init values.");
// set default init values
GAState.instance.sdkConfig = GAState.instance.sdkConfigDefault;
}
}
else
{
GALogger.i("Init call (session start) failed - using cached init values.");
}
GAState.instance.initAuthorized = true;
}
// set offset in state (memory) from current config (config could be from cache etc.)
GAState.instance.clientServerTimeOffset = GAState.getSdkConfig()["time_offset"] ? GAState.getSdkConfig()["time_offset"] as number : 0;
// populate configurations
GAState.populateConfigurations(GAState.getSdkConfig());
// if SDK is disabled in config
if(!GAState.isEnabled())
{
GALogger.w("Could not start session: SDK is disabled.");
// stop event queue
// + make sure it's able to restart if another session detects it's enabled again
GAThreading.stopEventQueue();
return;
}
else
{
GAThreading.ensureEventQueueIsRunning();
}
// generate the new session
var newSessionId:string = GAUtilities.createGuid();
// Set session id
GAState.instance.sessionId = newSessionId;
// Set session start
GAState.instance.sessionStart = GAState.getClientTsAdjusted();
// Add session start event
GAEvents.addSessionStartEvent();
var timedBlock:TimedBlock = GAThreading.getTimedBlockById(GameAnalytics.initTimedBlockId);
if(timedBlock != null)
{
timedBlock.running = false;
}
GameAnalytics.initTimedBlockId = -1;
}
private static resumeSessionAndStartQueue(): void
{
if(!GAState.isInitialized())
{
return;
}
GALogger.i("Resuming session.");
if(!GAState.sessionIsStarted())
{
GameAnalytics.newSession();
}
}
private static isSdkReady(needsInitialized:boolean, warn:boolean = true, message:string = ""): boolean
{
if(message)
{
message = message + ": ";
}
// Is SDK initialized
if (needsInitialized && !GAState.isInitialized())
{
if (warn)
{
GALogger.w(message + "SDK is not initialized");
}
return false;
}
// Is SDK enabled
if (needsInitialized && !GAState.isEnabled())
{
if (warn)
{
GALogger.w(message + "SDK is disabled");
}
return false;
}
// Is session started
if (needsInitialized && !GAState.sessionIsStarted())
{
if (warn)
{
GALogger.w(message + "Session has not started yet");
}
return false;
}
return true;
}
}
}
gameanalytics.GameAnalytics.init();
var GameAnalytics = gameanalytics.GameAnalytics.gaCommand;
| GameAnalytics |
gas_block_store.rs | // Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use super::gas_tracker::{GasTracker, PriceList};
use cid::{
multihash::{MultihashDigest, U32},
Cid,
};
use db::{Error, Store};
use forest_encoding::{de::DeserializeOwned, ser::Serialize, to_vec};
use ipld_blockstore::BlockStore;
use std::cell::RefCell;
use std::error::Error as StdError;
use std::rc::Rc;
/// Blockstore wrapper to charge gas on reads and writes
pub(crate) struct GasBlockStore<'bs, BS> {
pub price_list: PriceList,
pub gas: Rc<RefCell<GasTracker>>,
pub store: &'bs BS,
}
impl<BS> BlockStore for GasBlockStore<'_, BS>
where
BS: BlockStore,
{
fn get<T>(&self, cid: &Cid) -> Result<Option<T>, Box<dyn StdError>>
where
T: DeserializeOwned,
{
self.gas
.borrow_mut()
.charge_gas(self.price_list.on_ipld_get())?; | }
fn put<S, T>(&self, obj: &S, hash: T) -> Result<Cid, Box<dyn StdError>>
where
S: Serialize,
T: MultihashDigest<AllocSize = U32>,
{
let bytes = to_vec(obj)?;
self.gas
.borrow_mut()
.charge_gas(self.price_list.on_ipld_put(bytes.len()))?;
Ok(self.store.put_raw(bytes, hash)?)
}
}
impl<BS> Store for GasBlockStore<'_, BS>
where
BS: BlockStore,
{
fn read<K>(&self, key: K) -> Result<Option<Vec<u8>>, Error>
where
K: AsRef<[u8]>,
{
self.store.read(key)
}
fn write<K, V>(&self, key: K, value: V) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
self.store.write(key, value)
}
fn delete<K>(&self, key: K) -> Result<(), Error>
where
K: AsRef<[u8]>,
{
self.store.delete(key)
}
fn exists<K>(&self, key: K) -> Result<bool, Error>
where
K: AsRef<[u8]>,
{
self.store.exists(key)
}
fn bulk_read<K>(&self, keys: &[K]) -> Result<Vec<Option<Vec<u8>>>, Error>
where
K: AsRef<[u8]>,
{
self.store.bulk_read(keys)
}
fn bulk_write<K, V>(&self, values: &[(K, V)]) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
self.store.bulk_write(values)
}
fn bulk_delete<K>(&self, keys: &[K]) -> Result<(), Error>
where
K: AsRef<[u8]>,
{
self.store.bulk_delete(keys)
}
}
#[cfg(test)]
mod tests {
use super::*;
use cid::Code::Blake2b256;
use db::MemoryDB;
use vm::{ActorError, ExitCode};
#[test]
fn gas_blockstore() {
let db = MemoryDB::default();
let gbs = GasBlockStore {
price_list: PriceList {
ipld_get_base: 4,
ipld_put_base: 2,
ipld_put_per_byte: 1,
..Default::default()
},
gas: Rc::new(RefCell::new(GasTracker::new(5000, 0))),
store: &db,
};
assert_eq!(gbs.gas.borrow().gas_used(), 0);
assert_eq!(to_vec(&200u8).unwrap().len(), 2);
let c = gbs.put(&200u8, Blake2b256).unwrap();
assert_eq!(gbs.gas.borrow().gas_used(), 2002);
gbs.get::<u8>(&c).unwrap();
assert_eq!(gbs.gas.borrow().gas_used(), 2006);
}
#[test]
fn gas_blockstore_oog() {
let db = MemoryDB::default();
let gbs = GasBlockStore {
price_list: PriceList {
ipld_put_base: 12,
..Default::default()
},
gas: Rc::new(RefCell::new(GasTracker::new(10, 0))),
store: &db,
};
assert_eq!(gbs.gas.borrow().gas_used(), 0);
assert_eq!(to_vec(&200u8).unwrap().len(), 2);
assert_eq!(
gbs.put(&200u8, Blake2b256)
.unwrap_err()
.downcast::<ActorError>()
.unwrap()
.exit_code(),
ExitCode::SysErrOutOfGas
);
}
} | self.store.get(cid) |
static_service_test.go | // (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package avm
import (
"testing"
"github.com/hellobuild/Luv-Go/utils/constants"
"github.com/hellobuild/Luv-Go/utils/formatting"
"github.com/hellobuild/Luv-Go/utils/json"
)
var addrStrArray = []string{
"A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy",
"6mxBGnjGDCKgkVe7yfrmvMA7xE7qCv3vv",
"6ncQ19Q2U4MamkCYzshhD8XFjfwAWFzTa",
"Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7",
}
var testHRP = constants.NetworkIDToHRP[networkID]
func TestBuildGenesis(t *testing.T) {
ss := CreateStaticService()
addrMap := map[string]string{}
for _, addrStr := range addrStrArray {
b, err := formatting.Decode(formatting.CB58, addrStr)
if err != nil {
t.Fatal(err)
}
addrMap[addrStr], err = formatting.FormatBech32(testHRP, b)
if err != nil {
t.Fatal(err)
}
}
args := BuildGenesisArgs{
Encoding: formatting.Hex,
GenesisData: map[string]AssetDefinition{
"asset1": {
Name: "myFixedCapAsset",
Symbol: "MFCA",
Denomination: 8,
InitialState: map[string][]interface{}{
"fixedCap": {
Holder{
Amount: 100000,
Address: addrMap["A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy"],
},
Holder{
Amount: 100000,
Address: addrMap["6mxBGnjGDCKgkVe7yfrmvMA7xE7qCv3vv"],
}, | Amount: json.Uint64(startBalance),
Address: addrMap["6ncQ19Q2U4MamkCYzshhD8XFjfwAWFzTa"],
},
Holder{
Amount: json.Uint64(startBalance),
Address: addrMap["Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7"],
},
},
},
},
"asset2": {
Name: "myVarCapAsset",
Symbol: "MVCA",
InitialState: map[string][]interface{}{
"variableCap": {
Owners{
Threshold: 1,
Minters: []string{
addrMap["A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy"],
addrMap["6mxBGnjGDCKgkVe7yfrmvMA7xE7qCv3vv"],
},
},
Owners{
Threshold: 2,
Minters: []string{
addrMap["6ncQ19Q2U4MamkCYzshhD8XFjfwAWFzTa"],
addrMap["Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7"],
},
},
},
},
},
"asset3": {
Name: "myOtherVarCapAsset",
InitialState: map[string][]interface{}{
"variableCap": {
Owners{
Threshold: 1,
Minters: []string{
addrMap["A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy"],
},
},
},
},
},
},
}
reply := BuildGenesisReply{}
err := ss.BuildGenesis(nil, &args, &reply)
if err != nil {
t.Fatal(err)
}
} | Holder{ |
views.py | import datetime
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import loader, Context
from equipment.models import ItemType, Item, ItemError, Penalty, Transaction, TransactionError
from infobase.models import Person, STUDENT_KIND, PHASE_END_DATES, phase_for_cohort_and_date
def recent_transactions(person, number=6, hours=1, kind=None):
"""Fetch recent transactions by this person"""
cutoff_datetime = datetime.datetime.now() - datetime.timedelta(hours=hours)
checkouts = person.transaction_set.filter(kind=kind, timestamp__gte=cutoff_datetime)
items = [c.item for c in checkouts[:number]]
return items
def admin_access_allowed(user):
"""Does the user have the right permissions to be using this app?"""
if user:
return bool(user.groups.filter(name="equipment_tracker").count())
def render_response(template, var_dict, mimetype, filename):
"""Simple substitute for render_to_response (backporting some Django-1.0 stuff)"""
t = loader.get_template(template)
c = Context(var_dict)
response = HttpResponse(t.render(c), mimetype=mimetype)
response['Content-Disposition'] = "attachment; filename=%s" % filename
return response
@user_passes_test(admin_access_allowed)
def home(request):
|
@user_passes_test(admin_access_allowed)
def check_in(request):
"""
GET: Show check-in form.
POST: Check in the scanned item.
"""
title = "Check in"
if request.method == "POST":
number = request.POST['number']
try:
item = Item.find_by_number(number)
person = item.checked_out_by
title = "Checked in %s" % item
if item.days_overdue():
title += " (OVERDUE)"
item.check_in()
message = item.transaction_set.latest()
recent_checkins = recent_transactions(person, kind=Transaction.CHECKIN)
except (ItemError, TransactionError), error_message:
pass
return render_to_response("checkin.html", locals())
def checkout_url(request, due_timestamp, person_id):
"""
Helper method that builds a URL to be used in a redirect. It will either have both
due-timestamp and user ID number, or just user ID number. If GET values are provided
in `request`, for `due_timestamp` or `person_id`, they override the corresponding
passed arguments.
"""
if set(["due_date", "due_time"]).issubset(request.GET):
due_timestamp = (request.GET['due_date'] + "-" + request.GET['due_time']).replace(":", "-")
if due_timestamp:
url_template = "/equipment/checkout/%s/%%s/" % due_timestamp
else:
url_template = "/equipment/checkout/%s/"
if "person_id" in request.GET:
person_id = request.GET['person_id']
if Person.objects.filter(id_number=person_id).count() == 0:
raise Person.DoesNotExist("UNKNOWN")
url = url_template % person_id
return url
@user_passes_test(admin_access_allowed)
def check_out(request, person_id=None, due_timestamp=None):
"""
This view handles all stages of the checkout operation. In order for checkout to begin,
a person_id must be in the URL. Optional due_timestamp is also in the URL. Those are
designed to persist; i.e. if you change the person the custom due date (if any) is
kept, and if you change the due date the person (if any) is kept.
"""
# Set default due date values for use in "Change due date" form
dummy_item = Item()
dummy_item.set_due_datetime()
example_due_date = dummy_item.due.date()
example_due_time = dummy_item.due.time()
# If a specific due-date was requested, set it
if due_timestamp:
custom_due_datetime = datetime.datetime.strptime(due_timestamp, "%Y-%m-%d-%H-%M")
else:
custom_due_datetime = None
title = "Scan ID"
try:
# If a change is requested for person or due date, update the URL
if set(["due_date", "due_time", "person_id"]).intersection(request.GET):
url = checkout_url(request, due_timestamp, person_id)
return HttpResponseRedirect(url)
if person_id:
person = Person.objects.get(id_number=person_id)
if not person.is_active():
raise Person.DoesNotExist("ID EXPIRED")
title = "Checking out equipment to %s" % person
recent_checkouts = recent_transactions(person, kind=Transaction.CHECKOUT)
if request.method == "POST" and request.POST['number']:
try:
item = Item.find_by_number(request.POST['number'])
item.check_out(person, custom_due_datetime)
message = "Checked out %s" % item
soundfile = "Glass.aiff"
except (ItemError, TransactionError), error_message:
soundfile = "error.mp3"
except Person.DoesNotExist, reason:
title = "Bad ID"
id_number = person_id or request.GET['person_id']
error_message = "%s: %s" % (id_number, reason)
person = None
soundfile = "error.mp3"
return render_to_response("checkout.html", locals())
@user_passes_test(admin_access_allowed)
def item(request):
"""Display information on the specified item, with some editing options."""
title = "Find an item"
if 'number' in request.GET:
number = request.GET['number']
try:
item = Item.find_by_number(number)
title = unicode(item)
history = item.transaction_set.all()
except ItemError, error_message:
pass
else:
message = "Type or scan the item's HIP number or serial number"
return render_to_response("item.html", locals())
@user_passes_test(admin_access_allowed)
def person(request):
"""
Display information on the specified borrower (person)
"""
title = "Find a person"
if 'person_id' in request.GET:
person_id = request.GET['person_id']
try:
person = Person.objects.get(id_number=person_id)
title = unicode(person)
checked_out_items = person.item_set.all()
transaction_history = person.transaction_set.all()
except Person.DoesNotExist:
error_message = "No person with id number %s" % person_id
else:
message = "Enter or scan the person's ID number"
people = Person.objects.enrolled() # For clickable list of names
return render_to_response("person.html", locals())
def _penalty_report_data(cohort, phase=None):
"""
Data rows for late-equipment report
"""
if phase is None:
phase = phase_for_cohort_and_date(cohort, datetime.date.today())
else:
phase = int(phase)
if phase < 2 or phase > 4:
raise ValueError
start_date = PHASE_END_DATES[cohort][phase-1]
end_date = PHASE_END_DATES[cohort][phase]
all_penalties = Penalty.objects.filter(when_levied__range=(start_date, end_date))
rows = [("Firstname", "Lastname", "ID number", "Date", "Amount")]
rows += [(p.student.firstname, p.student.lastname, p.student.id_number, p.when_levied, 0-p.amount) for p in all_penalties]
return rows
@user_passes_test(admin_access_allowed)
def report(request, report_kind=None, number=None):
"""
General-purpose reporting view. To add a new report type, add an appropriate `if`
clause here, and a corresponding `{% if ... %}` clause in the template for display.
"""
if report_kind:
now = datetime.datetime.now()
title = "%s Report" % report_kind.title()
try:
if report_kind == "kits":
kits = Item.objects.filter(itemtype__kit=True)
if report_kind == "item":
item = Item.find_by_number(number)
title = item
if report_kind == "instock":
itemtypes = [i for i in ItemType.objects.all() if i.how_many_in_stock()]
if report_kind == "latepenalties":
try:
report_rows = _penalty_report_data(cohort=1, phase=number)
csv_link = "/equipment/report/latepenalties-csv/"
filename = "late_equipment.csv"
except ValueError:
report_rows = None
error_message = "Can't generate report (incorrect phase?)"
if report_kind.startswith("out-"):
items = Item.objects.filter(status=Item.OUT, part_of_kit__isnull=True).order_by("due")
if report_kind.startswith("overdue-"):
items = Item.objects.filter(status=Item.OUT, due__lt=now, part_of_kit__isnull=True).order_by("due","checked_out_by")
if report_kind.endswith("-student"):
items = items.filter(checked_out_by__kind=STUDENT_KIND)
if report_kind.endswith("-staff"):
items = items.exclude(checked_out_by__kind=STUDENT_KIND)
except ItemError, error_message:
pass # Letting error_message get picked up by the template
else:
title = "Reports"
if report_kind and report_kind.endswith("-csv") and report_rows:
return render_response("csv.html", locals(), mimetype="text/csv", filename=filename)
else:
return render_to_response("report.html", locals())
@user_passes_test(admin_access_allowed)
def find(request):
"""Control panel for finding items"""
return render_to_response("find.html", locals())
@user_passes_test(admin_access_allowed)
def add(request):
"""Interface for adding new items to the system."""
title = "Add, edit, or delete equipment items"
return render_to_response("add.html", locals())
@user_passes_test(admin_access_allowed)
def buildkit(request, kit_id=None):
"""
Helper view for building up kits. If no kit ID is passed, we ask for a kit.
If a kit ID is passed (via URL), we ask for items to add.
Workflow: Create (empty) kits in admin; come to this view and add items
"""
if "kit_id" in request.GET:
return HttpResponseRedirect("/equipment/buildkit/%s/" % request.GET['kit_id'])
title = "Enter/scan kit ID number"
if kit_id:
try:
kit = Item.find_by_number(kit_id)
assert(kit.itemtype.kit==True)
title = "Adding equipment to %s" % kit
except (Item.DoesNotExist, AssertionError):
raise Http404
if request.method == "POST":
number = request.POST['number']
item = Item.find_by_number(number)
try:
assert(item.itemtype.kit==False) # Don't add a kit to a kit
assert(item.part_of_kit==None) # Item must not already be in a kit
kit.contents.add(item)
message = "Added %s" % item
except ItemError, error_message:
pass
return render_to_response("buildkit.html", locals())
@user_passes_test(admin_access_allowed)
def penalty_statement(request, person_id):
"""
Present a printable statement of dollar-credit penalties accrued due to late equipment.
Also helps create that statement (presenting list of overdue equipment with a submit button).
"""
if person_id:
try:
person = Person.objects.get(id_number=person_id)
except Person.DoesNotExist:
return Http404
if request.method == "POST":
new_penalty = Penalty.levy(person)
message = "$%s penalty levied" % new_penalty.amount
else:
current_phase = phase_for_cohort_and_date(person.student_cohort, datetime.date.today())
phase_start = PHASE_END_DATES[person.student_cohort][current_phase - 1]
penalties = person.penalty_set.filter(when_levied__gte=phase_start)
total = sum(p.amount for p in penalties)
overduesies = Item.objects.filter(status=Item.OUT, due__lt=datetime.datetime.now(), checked_out_by=person).order_by("due")
# If overdue items have already been charged in a penalty, don't show them
for penalty in penalties:
if set(i.id for i in overduesies) == set(i.id for i in penalty.items.all()):
overduesies = None
break
else: # If no person_id is passed, template can display a list
people = Person.objects.enrolled()
return render_to_response("penalty_statement.html", locals())
| """Equipment app home page"""
title = "Main Menu"
message = "Welcome to the equipment tracker."
return render_to_response("home.html", locals()) |
api.go | // THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
// Package marketplacecommerceanalytics provides a client for AWS Marketplace Commerce Analytics.
package marketplacecommerceanalytics
import (
"time"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request"
)
const opGenerateDataSet = "GenerateDataSet"
// GenerateDataSetRequest generates a "aws/request.Request" representing the
// client's request for the GenerateDataSet operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GenerateDataSet for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the GenerateDataSet method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the GenerateDataSetRequest method.
// req, resp := client.GenerateDataSetRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
func (c *MarketplaceCommerceAnalytics) GenerateDataSetRequest(input *GenerateDataSetInput) (req *request.Request, output *GenerateDataSetOutput) {
op := &request.Operation{
Name: opGenerateDataSet,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &GenerateDataSetInput{}
}
req = c.newRequest(op, input, output)
output = &GenerateDataSetOutput{}
req.Data = output
return
}
// GenerateDataSet API operation for AWS Marketplace Commerce Analytics.
//
// Given a data set type and data set publication date, asynchronously publishes
// the requested data set to the specified S3 bucket and notifies the specified
// SNS topic once the data is available. Returns a unique request identifier
// that can be used to correlate requests with notifications from the SNS topic.
// Data sets will be published in comma-separated values (CSV) format with the
// file name {data_set_type}_YYYY-MM-DD.csv. If a file with the same name already
// exists (e.g. if the same data set is requested twice), the original file
// will be overwritten by the new file. Requires a Role with an attached permissions
// policy providing Allow permissions for the following actions: s3:PutObject,
// s3:GetBucketLocation, sns:GetTopicAttributes, sns:Publish, iam:GetRolePolicy.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Marketplace Commerce Analytics's
// API operation GenerateDataSet for usage and error information.
//
// Returned Error Codes:
// * Exception
// This exception is thrown when an internal service error occurs.
//
func (c *MarketplaceCommerceAnalytics) GenerateDataSet(input *GenerateDataSetInput) (*GenerateDataSetOutput, error) {
req, out := c.GenerateDataSetRequest(input)
err := req.Send()
return out, err
}
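// Example (illustrative sketch, not generated code): building a minimal
// GenerateDataSetInput and invoking the operation. The data set type, bucket
// name, role ARN and topic ARN below are placeholders; the required fields
// are enforced by the input's Validate method defined further down in this
// file.
//
//	sess := session.Must(session.NewSession())
//	svc := marketplacecommerceanalytics.New(sess)
//	out, err := svc.GenerateDataSet(&marketplacecommerceanalytics.GenerateDataSetInput{
//		DataSetType:             aws.String("daily_business_fees"),
//		DataSetPublicationDate:  aws.Time(time.Now()),
//		DestinationS3BucketName: aws.String("my-analytics-bucket"),
//		RoleNameArn:             aws.String("arn:aws:iam::111122223333:role/marketplace-analytics"),
//		SnsTopicArn:             aws.String("arn:aws:sns:us-east-1:111122223333:marketplace-analytics"),
//	})
//	if err == nil {
//		fmt.Println(*out.DataSetRequestId)
//	}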
const opStartSupportDataExport = "StartSupportDataExport"
// StartSupportDataExportRequest generates a "aws/request.Request" representing the
// client's request for the StartSupportDataExport operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See StartSupportDataExport for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the StartSupportDataExport method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the StartSupportDataExportRequest method.
// req, resp := client.StartSupportDataExportRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
func (c *MarketplaceCommerceAnalytics) StartSupportDataExportRequest(input *StartSupportDataExportInput) (req *request.Request, output *StartSupportDataExportOutput) {
op := &request.Operation{
Name: opStartSupportDataExport,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &StartSupportDataExportInput{}
}
req = c.newRequest(op, input, output)
output = &StartSupportDataExportOutput{}
req.Data = output
return
}
// StartSupportDataExport API operation for AWS Marketplace Commerce Analytics.
//
// Given a data set type and a from date, asynchronously publishes the requested
// customer support data to the specified S3 bucket and notifies the specified
// SNS topic once the data is available. Returns a unique request identifier
// that can be used to correlate requests with notifications from the SNS topic.
// Data sets will be published in comma-separated values (CSV) format with the
// file name {data_set_type}_YYYY-MM-DD'T'HH-mm-ss'Z'.csv. If a file with the
// same name already exists (e.g. if the same data set is requested twice),
// the original file will be overwritten by the new file. Requires a Role with
// an attached permissions policy providing Allow permissions for the following
// actions: s3:PutObject, s3:GetBucketLocation, sns:GetTopicAttributes, sns:Publish,
// iam:GetRolePolicy.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Marketplace Commerce Analytics's
// API operation StartSupportDataExport for usage and error information.
//
// Returned Error Codes:
// * Exception
// This exception is thrown when an internal service error occurs.
//
func (c *MarketplaceCommerceAnalytics) StartSupportDataExport(input *StartSupportDataExportInput) (*StartSupportDataExportOutput, error) {
req, out := c.StartSupportDataExportRequest(input)
err := req.Send()
return out, err
}
// Container for the parameters to the GenerateDataSet operation.
type GenerateDataSetInput struct {
_ struct{} `type:"structure"`
// (Optional) Key-value pairs which will be returned, unmodified, in the Amazon
// SNS notification message and the data set metadata file. These key-value
// pairs can be used to correlate responses with tracking information from
// other systems.
CustomerDefinedValues map[string]*string `locationName:"customerDefinedValues" min:"1" type:"map"`
// The date a data set was published. For daily data sets, provide a date with
// day-level granularity for the desired day. For weekly data sets, provide
// a date with day-level granularity within the desired week (the day value
// will be ignored). For monthly data sets, provide a date with month-level
// granularity for the desired month (the day value will be ignored).
//
// DataSetPublicationDate is a required field
DataSetPublicationDate *time.Time `locationName:"dataSetPublicationDate" type:"timestamp" timestampFormat:"unix" required:"true"`
// The desired data set type.
//
// customer_subscriber_hourly_monthly_subscriptions - Available daily by 5:00
// PM Pacific Time since 2014-07-21.
// customer_subscriber_annual_subscriptions - Available daily by 5:00 PM Pacific
// Time since 2014-07-21.
// daily_business_usage_by_instance_type - Available daily by 5:00 PM Pacific
// Time since 2015-01-26.
// daily_business_fees - Available daily by 5:00 PM Pacific Time since 2015-01-26.
//
// daily_business_free_trial_conversions - Available daily by 5:00 PM Pacific
// Time since 2015-01-26.
// daily_business_new_instances - Available daily by 5:00 PM Pacific Time since
// 2015-01-26.
// daily_business_new_product_subscribers - Available daily by 5:00 PM Pacific
// Time since 2015-01-26.
// daily_business_canceled_product_subscribers - Available daily by 5:00 PM
// Pacific Time since 2015-01-26.
// monthly_revenue_billing_and_revenue_data - Available monthly on the 4th day
// of the month by 5:00 PM Pacific Time since 2015-02.
// monthly_revenue_annual_subscriptions - Available monthly on the 4th day of
// the month by 5:00 PM Pacific Time since 2015-02.
// disbursed_amount_by_product - Available every 30 days by 5:00 PM Pacific
// Time since 2015-01-26.
// disbursed_amount_by_product_with_uncollected_funds -This data set is only
// available from 2012-04-19 until 2015-01-25. After 2015-01-25, this data set
// was split into three data sets: disbursed_amount_by_product, disbursed_amount_by_age_of_uncollected_funds,
// and disbursed_amount_by_age_of_disbursed_funds.
// disbursed_amount_by_customer_geo - Available every 30 days by 5:00 PM Pacific
// Time since 2012-04-19.
// disbursed_amount_by_age_of_uncollected_funds - Available every 30 days by
// 5:00 PM Pacific Time since 2015-01-26.
// disbursed_amount_by_age_of_disbursed_funds - Available every 30 days by 5:00
// PM Pacific Time since 2015-01-26.
// customer_profile_by_industry - Available daily by 5:00 PM Pacific Time since
// 2015-10-01.
// customer_profile_by_revenue - Available daily by 5:00 PM Pacific Time since
// 2015-10-01.
// customer_profile_by_geography - Available daily by 5:00 PM Pacific Time since
// 2015-10-01.
//
// DataSetType is a required field
DataSetType *string `locationName:"dataSetType" min:"1" type:"string" required:"true" enum:"DataSetType"`
// The name (friendly name, not ARN) of the destination S3 bucket.
//
// DestinationS3BucketName is a required field
DestinationS3BucketName *string `locationName:"destinationS3BucketName" min:"1" type:"string" required:"true"`
// (Optional) The desired S3 prefix for the published data set, similar to a
// directory path in standard file systems. For example, if given the bucket
// name "mybucket" and the prefix "myprefix/mydatasets", the output file "outputfile"
// would be published to "s3://mybucket/myprefix/mydatasets/outputfile". If
// the prefix directory structure does not exist, it will be created. If no
// prefix is provided, the data set will be published to the S3 bucket root.
DestinationS3Prefix *string `locationName:"destinationS3Prefix" type:"string"`
// The Amazon Resource Name (ARN) of the Role with an attached permissions policy
// to interact with the provided AWS services.
//
// RoleNameArn is a required field
RoleNameArn *string `locationName:"roleNameArn" min:"1" type:"string" required:"true"`
// Amazon Resource Name (ARN) for the SNS Topic that will be notified when the
// data set has been published or if an error has occurred.
//
// SnsTopicArn is a required field
SnsTopicArn *string `locationName:"snsTopicArn" min:"1" type:"string" required:"true"`
}
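// Illustrative sketch (not part of the generated API): constructing a
// GenerateDataSetInput for a daily data set. All field values are hypothetical,
// and aws.String/aws.Time refer to the usual aws-sdk-go helper functions.
//
//	input := &GenerateDataSetInput{
//		DataSetPublicationDate:  aws.Time(time.Date(2016, 1, 15, 0, 0, 0, 0, time.UTC)),
//		DataSetType:             aws.String(DataSetTypeDailyBusinessFees),
//		DestinationS3BucketName: aws.String("mybucket"),
//		DestinationS3Prefix:     aws.String("myprefix/mydatasets"),
//		RoleNameArn:             aws.String("arn:aws:iam::123456789012:role/AnalyticsRole"),
//		SnsTopicArn:             aws.String("arn:aws:sns:us-east-1:123456789012:analytics-topic"),
//	}
//	if err := input.Validate(); err != nil {
//		// handle invalid parameters before calling the service
//	}
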
// String returns the string representation
func (s GenerateDataSetInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GenerateDataSetInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GenerateDataSetInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GenerateDataSetInput"}
if s.CustomerDefinedValues != nil && len(s.CustomerDefinedValues) < 1 {
invalidParams.Add(request.NewErrParamMinLen("CustomerDefinedValues", 1))
}
if s.DataSetPublicationDate == nil {
invalidParams.Add(request.NewErrParamRequired("DataSetPublicationDate"))
}
if s.DataSetType == nil {
invalidParams.Add(request.NewErrParamRequired("DataSetType"))
}
if s.DataSetType != nil && len(*s.DataSetType) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DataSetType", 1))
}
if s.DestinationS3BucketName == nil {
invalidParams.Add(request.NewErrParamRequired("DestinationS3BucketName"))
}
if s.DestinationS3BucketName != nil && len(*s.DestinationS3BucketName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DestinationS3BucketName", 1))
}
if s.RoleNameArn == nil {
invalidParams.Add(request.NewErrParamRequired("RoleNameArn"))
}
if s.RoleNameArn != nil && len(*s.RoleNameArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("RoleNameArn", 1))
}
if s.SnsTopicArn == nil {
invalidParams.Add(request.NewErrParamRequired("SnsTopicArn"))
}
if s.SnsTopicArn != nil && len(*s.SnsTopicArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("SnsTopicArn", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// Container for the result of the GenerateDataSet operation.
type GenerateDataSetOutput struct {
_ struct{} `type:"structure"`
// A unique identifier representing a specific request to the GenerateDataSet
// operation. This identifier can be used to correlate a request with notifications
// from the SNS topic.
DataSetRequestId *string `locationName:"dataSetRequestId" type:"string"`
}
// String returns the string representation
func (s GenerateDataSetOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GenerateDataSetOutput) GoString() string {
return s.String()
}
// Container for the parameters to the StartSupportDataExport operation.
type StartSupportDataExportInput struct {
_ struct{} `type:"structure"`
// (Optional) Key-value pairs which will be returned, unmodified, in the Amazon
// SNS notification message and the data set metadata file.
CustomerDefinedValues map[string]*string `locationName:"customerDefinedValues" min:"1" type:"map"`
// Specifies the data set type to be written to the output csv file. The data
// set types customer_support_contacts_data and test_customer_support_contacts_data
// both result in a csv file containing the following fields: Product Id, Customer
// Guid, Subscription Guid, Subscription Start Date, Organization, AWS Account
// Id, Given Name, Surname, Telephone Number, Email, Title, Country Code, ZIP
// Code, Operation Type, and Operation Time. Currently, only the test_customer_support_contacts_data
// value is supported
//
// customer_support_contacts_data Customer support contact data. The data set
// will contain all changes (Creates, Updates, and Deletes) to customer support
// contact data from the date specified in the from_date parameter.
// test_customer_support_contacts_data An example data set containing static
// test data in the same format as customer_support_contacts_data
//
// DataSetType is a required field
DataSetType *string `locationName:"dataSetType" min:"1" type:"string" required:"true" enum:"SupportDataSetType"`
// The name (friendly name, not ARN) of the destination S3 bucket.
//
// DestinationS3BucketName is a required field
DestinationS3BucketName *string `locationName:"destinationS3BucketName" min:"1" type:"string" required:"true"`
// (Optional) The desired S3 prefix for the published data set, similar to a
// directory path in standard file systems. For example, if given the bucket
// name "mybucket" and the prefix "myprefix/mydatasets", the output file "outputfile"
// would be published to "s3://mybucket/myprefix/mydatasets/outputfile". If
// the prefix directory structure does not exist, it will be created. If no
// prefix is provided, the data set will be published to the S3 bucket root.
DestinationS3Prefix *string `locationName:"destinationS3Prefix" type:"string"`
// The start date from which to retrieve the data set. This parameter only affects
// the customer_support_contacts_data data set type.
//
// FromDate is a required field
FromDate *time.Time `locationName:"fromDate" type:"timestamp" timestampFormat:"unix" required:"true"`
// The Amazon Resource Name (ARN) of the Role with an attached permissions policy
// to interact with the provided AWS services.
//
// RoleNameArn is a required field
RoleNameArn *string `locationName:"roleNameArn" min:"1" type:"string" required:"true"`
// Amazon Resource Name (ARN) for the SNS Topic that will be notified when the
// data set has been published or if an error has occurred.
//
// SnsTopicArn is a required field
SnsTopicArn *string `locationName:"snsTopicArn" min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s StartSupportDataExportInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StartSupportDataExportInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *StartSupportDataExportInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "StartSupportDataExportInput"}
if s.CustomerDefinedValues != nil && len(s.CustomerDefinedValues) < 1 {
invalidParams.Add(request.NewErrParamMinLen("CustomerDefinedValues", 1))
}
if s.DataSetType == nil {
invalidParams.Add(request.NewErrParamRequired("DataSetType"))
}
if s.DataSetType != nil && len(*s.DataSetType) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DataSetType", 1))
}
if s.DestinationS3BucketName == nil {
invalidParams.Add(request.NewErrParamRequired("DestinationS3BucketName"))
}
if s.DestinationS3BucketName != nil && len(*s.DestinationS3BucketName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DestinationS3BucketName", 1))
}
if s.FromDate == nil {
invalidParams.Add(request.NewErrParamRequired("FromDate"))
}
if s.RoleNameArn == nil {
invalidParams.Add(request.NewErrParamRequired("RoleNameArn"))
}
if s.RoleNameArn != nil && len(*s.RoleNameArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("RoleNameArn", 1))
}
if s.SnsTopicArn == nil {
invalidParams.Add(request.NewErrParamRequired("SnsTopicArn"))
}
if s.SnsTopicArn != nil && len(*s.SnsTopicArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("SnsTopicArn", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// Container for the result of the StartSupportDataExport operation.
type StartSupportDataExportOutput struct {
_ struct{} `type:"structure"`
// A unique identifier representing a specific request to the StartSupportDataExport
// operation. This identifier can be used to correlate a request with notifications
// from the SNS topic.
DataSetRequestId *string `locationName:"dataSetRequestId" type:"string"`
}
// String returns the string representation
func (s StartSupportDataExportOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StartSupportDataExportOutput) GoString() string {
return s.String()
}
const (
// DataSetTypeCustomerSubscriberHourlyMonthlySubscriptions is a DataSetType enum value
DataSetTypeCustomerSubscriberHourlyMonthlySubscriptions = "customer_subscriber_hourly_monthly_subscriptions"
// DataSetTypeCustomerSubscriberAnnualSubscriptions is a DataSetType enum value
DataSetTypeCustomerSubscriberAnnualSubscriptions = "customer_subscriber_annual_subscriptions"
// DataSetTypeDailyBusinessUsageByInstanceType is a DataSetType enum value
DataSetTypeDailyBusinessUsageByInstanceType = "daily_business_usage_by_instance_type"
// DataSetTypeDailyBusinessFees is a DataSetType enum value
DataSetTypeDailyBusinessFees = "daily_business_fees"
// DataSetTypeDailyBusinessFreeTrialConversions is a DataSetType enum value
DataSetTypeDailyBusinessFreeTrialConversions = "daily_business_free_trial_conversions"
// DataSetTypeDailyBusinessNewInstances is a DataSetType enum value
DataSetTypeDailyBusinessNewInstances = "daily_business_new_instances"
// DataSetTypeDailyBusinessNewProductSubscribers is a DataSetType enum value
DataSetTypeDailyBusinessNewProductSubscribers = "daily_business_new_product_subscribers"
// DataSetTypeDailyBusinessCanceledProductSubscribers is a DataSetType enum value
DataSetTypeDailyBusinessCanceledProductSubscribers = "daily_business_canceled_product_subscribers"
// DataSetTypeMonthlyRevenueBillingAndRevenueData is a DataSetType enum value
DataSetTypeMonthlyRevenueBillingAndRevenueData = "monthly_revenue_billing_and_revenue_data"
// DataSetTypeMonthlyRevenueAnnualSubscriptions is a DataSetType enum value
DataSetTypeMonthlyRevenueAnnualSubscriptions = "monthly_revenue_annual_subscriptions"
// DataSetTypeDisbursedAmountByProduct is a DataSetType enum value
DataSetTypeDisbursedAmountByProduct = "disbursed_amount_by_product"
// DataSetTypeDisbursedAmountByProductWithUncollectedFunds is a DataSetType enum value
DataSetTypeDisbursedAmountByProductWithUncollectedFunds = "disbursed_amount_by_product_with_uncollected_funds"
// DataSetTypeDisbursedAmountByCustomerGeo is a DataSetType enum value
DataSetTypeDisbursedAmountByCustomerGeo = "disbursed_amount_by_customer_geo"
// DataSetTypeDisbursedAmountByAgeOfUncollectedFunds is a DataSetType enum value
DataSetTypeDisbursedAmountByAgeOfUncollectedFunds = "disbursed_amount_by_age_of_uncollected_funds"
// DataSetTypeDisbursedAmountByAgeOfDisbursedFunds is a DataSetType enum value
DataSetTypeDisbursedAmountByAgeOfDisbursedFunds = "disbursed_amount_by_age_of_disbursed_funds"
// DataSetTypeCustomerProfileByIndustry is a DataSetType enum value
DataSetTypeCustomerProfileByIndustry = "customer_profile_by_industry"
// DataSetTypeCustomerProfileByRevenue is a DataSetType enum value
DataSetTypeCustomerProfileByRevenue = "customer_profile_by_revenue"
// DataSetTypeCustomerProfileByGeography is a DataSetType enum value
DataSetTypeCustomerProfileByGeography = "customer_profile_by_geography"
)
const (
// SupportDataSetTypeCustomerSupportContactsData is a SupportDataSetType enum value
SupportDataSetTypeCustomerSupportContactsData = "customer_support_contacts_data"
// SupportDataSetTypeTestCustomerSupportContactsData is a SupportDataSetType enum value
SupportDataSetTypeTestCustomerSupportContactsData = "test_customer_support_contacts_data"
)
api_op_ListAuditSuppressions.go
// Code generated by smithy-go-codegen DO NOT EDIT.
package iot
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/iot/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Lists your Device Defender audit listings. Requires permission to access the
// ListAuditSuppressions
// (https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiot.html#awsiot-actions-as-permissions)
// action.
func (c *Client) ListAuditSuppressions(ctx context.Context, params *ListAuditSuppressionsInput, optFns ...func(*Options)) (*ListAuditSuppressionsOutput, error) {
if params == nil {
params = &ListAuditSuppressionsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ListAuditSuppressions", params, optFns, c.addOperationListAuditSuppressionsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ListAuditSuppressionsOutput)
out.ResultMetadata = metadata
return out, nil
}
type ListAuditSuppressionsInput struct {
// Determines whether suppressions are listed in ascending order by expiration date
// or not. If parameter isn't provided, ascendingOrder=true.
AscendingOrder bool
// An audit check name. Checks must be enabled for your account. (Use
// DescribeAccountAuditConfiguration to see the list of all checks, including those
// that are enabled or use UpdateAccountAuditConfiguration to select which checks
// are enabled.)
CheckName *string
// The maximum number of results to return at one time. The default is 25.
MaxResults *int32
// The token for the next set of results.
NextToken *string
// Information that identifies the noncompliant resource.
ResourceIdentifier *types.ResourceIdentifier
noSmithyDocumentSerde
}
type ListAuditSuppressionsOutput struct {
// A token that can be used to retrieve the next set of results, or null if there
// are no additional results.
NextToken *string
// List of audit suppressions.
Suppressions []types.AuditSuppression
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationListAuditSuppressionsMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestjson1_serializeOpListAuditSuppressions{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAuditSuppressions{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAuditSuppressions(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
// ListAuditSuppressionsAPIClient is a client that implements the
// ListAuditSuppressions operation.
type ListAuditSuppressionsAPIClient interface {
ListAuditSuppressions(context.Context, *ListAuditSuppressionsInput, ...func(*Options)) (*ListAuditSuppressionsOutput, error)
}
var _ ListAuditSuppressionsAPIClient = (*Client)(nil)
// ListAuditSuppressionsPaginatorOptions is the paginator options for
// ListAuditSuppressions
type ListAuditSuppressionsPaginatorOptions struct {
// The maximum number of results to return at one time. The default is 25.
Limit int32
// Set to true if pagination should stop if the service returns a pagination token
// that matches the most recent token provided to the service.
StopOnDuplicateToken bool
}
// ListAuditSuppressionsPaginator is a paginator for ListAuditSuppressions
type ListAuditSuppressionsPaginator struct {
options ListAuditSuppressionsPaginatorOptions
client ListAuditSuppressionsAPIClient
params *ListAuditSuppressionsInput
nextToken *string
firstPage bool
}
// NewListAuditSuppressionsPaginator returns a new ListAuditSuppressionsPaginator
func NewListAuditSuppressionsPaginator(client ListAuditSuppressionsAPIClient, params *ListAuditSuppressionsInput, optFns ...func(*ListAuditSuppressionsPaginatorOptions)) *ListAuditSuppressionsPaginator {
if params == nil {
params = &ListAuditSuppressionsInput{}
}
options := ListAuditSuppressionsPaginatorOptions{}
if params.MaxResults != nil {
options.Limit = *params.MaxResults
}
for _, fn := range optFns {
fn(&options)
}
return &ListAuditSuppressionsPaginator{
options: options,
client: client,
params: params,
firstPage: true,
}
}
// HasMorePages returns a boolean indicating whether more pages are available
func (p *ListAuditSuppressionsPaginator) HasMorePages() bool {
return p.firstPage || p.nextToken != nil
}
// NextPage retrieves the next ListAuditSuppressions page.
func (p *ListAuditSuppressionsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAuditSuppressionsOutput, error) {
if !p.HasMorePages() {
return nil, fmt.Errorf("no more pages available")
}
params := *p.params
params.NextToken = p.nextToken
var limit *int32
if p.options.Limit > 0 {
limit = &p.options.Limit
}
params.MaxResults = limit
result, err := p.client.ListAuditSuppressions(ctx, ¶ms, optFns...)
if err != nil {
return nil, err
}
p.firstPage = false
prevToken := p.nextToken
p.nextToken = result.NextToken
if p.options.StopOnDuplicateToken && prevToken != nil && p.nextToken != nil && *prevToken == *p.nextToken {
p.nextToken = nil
}
return result, nil
}
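// Illustrative sketch: draining every page of suppressions. The client value,
// the context handling, and the aws.ToString helper (from aws-sdk-go-v2/aws)
// are assumptions; error handling is abbreviated.
//
//	p := NewListAuditSuppressionsPaginator(client, &ListAuditSuppressionsInput{})
//	for p.HasMorePages() {
//		page, err := p.NextPage(context.TODO())
//		if err != nil {
//			return err
//		}
//		for _, s := range page.Suppressions {
//			fmt.Println(aws.ToString(s.CheckName))
//		}
//	}
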
func newServiceMetadataMiddleware_opListAuditSuppressions(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "execute-api",
OperationName: "ListAuditSuppressions",
}
}
elgamal.rs
#![allow(dead_code)]
//! Implementation of the different encryption/decryption mechanisms used in `chain-vote`, including their
//! corresponding structures. In particular, we use (lifted) ElGamal cryptosystem, and combine with ChaCha
//! stream cipher to produce a hybrid encryption scheme.
use crate::{GroupElement, Scalar};
use rand_core::{CryptoRng, RngCore};
use std::ops::{Add, Mul, Sub};
use cryptoxide::blake2b::Blake2b;
use cryptoxide::chacha20::ChaCha20;
use cryptoxide::digest::Digest;
#[derive(Debug, Clone, Eq, PartialEq)]
/// ElGamal public key. pk = sk * G, where sk is the `SecretKey` and G is the group
/// generator.
pub struct PublicKey {
pub pk: GroupElement,
}
#[derive(Clone)]
/// ElGamal secret key
pub struct SecretKey {
pub sk: Scalar,
}
#[derive(Clone)]
/// ElGamal keypair
pub struct Keypair {
pub secret_key: SecretKey,
pub public_key: PublicKey,
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
/// ElGamal ciphertext. Given a message M represented by a group element, an ElGamal
/// ciphertext consists of (r * G; M + r * `PublicKey`), where r is a random `Scalar`.
pub struct Ciphertext {
pub(crate) e1: GroupElement,
pub(crate) e2: GroupElement,
}
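// Illustrative round trip showing the additive homomorphism of lifted ElGamal.
// This is a sketch using only the APIs defined in this file; the rng setup
// mirrors the tests at the bottom.
//
//     let mut rng = ChaCha20Rng::from_seed([0u8; 32]);
//     let keypair = Keypair::generate(&mut rng);
//     let c1 = keypair.public_key.encrypt(&Scalar::from_u64(2), &mut rng);
//     let c2 = keypair.public_key.encrypt(&Scalar::from_u64(3), &mut rng);
//     // Decrypting c1 + c2 yields (2 + 3) * G: the sum lives "in the exponent".
//     let sum = keypair.secret_key.decrypt_point(&(&c1 + &c2));
//     assert_eq!(sum, GroupElement::generator() * Scalar::from_u64(5));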
#[derive(Clone)]
/// Hybrid Ciphertext (which can be found in section 2.1.3 of the Treasury spec) is defined
/// by (g^r, AESEnc_k(m)), where k = h^r and r is taken uniformly at random from Zp. h is
/// an `ElGamal` public key.
pub struct HybridCiphertext {
// Committed randomness
pub(crate) e1: GroupElement,
// Symmetric encrypted message
pub(crate) e2: Box<[u8]>,
}
/// The hybrid encryption scheme uses a group element as a
/// representation of the symmetric key. This facilitates
/// its exchange using ElGamal keypairs.
pub struct SymmetricKey {
pub(crate) group_repr: GroupElement,
}
impl PublicKey {
pub const BYTES_LEN: usize = GroupElement::BYTES_LEN;
pub fn to_bytes(&self) -> Vec<u8> {
self.pk.to_bytes().to_vec()
}
pub fn from_bytes(buf: &[u8]) -> Option<Self> {
Some(Self {
pk: GroupElement::from_bytes(buf)?,
})
}
/// Given a `message` represented as a group element, return a ciphertext.
pub(crate) fn encrypt_point<R>(&self, message: &GroupElement, rng: &mut R) -> Ciphertext
where
R: RngCore + CryptoRng,
{
let r = Scalar::random(rng);
self.encrypt_point_with_r(message, &r)
}
// Given a `message` represented as a group element, return a ciphertext and the
// randomness used.
fn encrypt_point_return_r<R>(&self, message: &GroupElement, rng: &mut R) -> (Ciphertext, Scalar)
where
R: RngCore + CryptoRng,
{
let r = Scalar::random(rng);
(self.encrypt_point_with_r(message, &r), r)
}
// Given a `message` represented as a group element, and some value used as `randomness`,
// return the corresponding ciphertext. This function should only be called when the
// randomness value needs to be a particular value (e.g. verification procedure of the unit vector ZKP).
// Otherwise, `encrypt_point` should be used.
fn encrypt_point_with_r(&self, message: &GroupElement, randomness: &Scalar) -> Ciphertext {
Ciphertext {
e1: &GroupElement::generator() * randomness,
e2: message + &(&self.pk * randomness),
}
}
/// Given a `message` represented as a `Scalar`, return a ciphertext using the
/// "lifted ElGamal" mechanism. Mainly, return (r * G; `message` * G + r * `self`)
pub(crate) fn encrypt<R>(&self, message: &Scalar, rng: &mut R) -> Ciphertext
where
R: RngCore + CryptoRng,
{
self.encrypt_point(&(&GroupElement::generator() * message), rng)
}
/// Given a `message` represented as a `Scalar`, return a ciphertext and return
/// the randomness used.
pub(crate) fn encrypt_return_r<R>(&self, message: &Scalar, rng: &mut R) -> (Ciphertext, Scalar)
where
R: RngCore + CryptoRng,
{
self.encrypt_point_return_r(&(&GroupElement::generator() * message), rng)
}
/// Given a `message` represented as a `Scalar`, and some value used as `randomness`,
/// return the corresponding ciphertext. This function should only be called when the
/// randomness needs to take a particular, caller-chosen value (e.g. in the verification
/// procedure of the unit vector ZKP). Otherwise, `encrypt` should be used.
pub(crate) fn encrypt_with_r(&self, message: &Scalar, randomness: &Scalar) -> Ciphertext {
self.encrypt_point_with_r(&(&GroupElement::generator() * message), randomness)
}
/// Given a `message` passed as bytes, encrypt it using hybrid encryption.
pub(crate) fn hybrid_encrypt<R>(&self, message: &[u8], rng: &mut R) -> HybridCiphertext
where
R: RngCore + CryptoRng,
{
let encryption_randomness = Scalar::random(rng);
let symmetric_key = SymmetricKey {
group_repr: &self.pk * &encryption_randomness,
};
let e1 = encryption_randomness * GroupElement::generator();
let e2 = symmetric_key.process(message).into_boxed_slice();
HybridCiphertext { e1, e2 }
}
}
impl SecretKey {
pub fn generate<R: RngCore + CryptoRng>(rng: &mut R) -> Self {
let sk = Scalar::random(rng);
Self { sk }
}
pub fn from_bytes(bytes: &[u8]) -> Option<Self> {
Scalar::from_bytes(bytes).map(|sk| Self { sk })
}
pub(crate) fn recover_symmetric_key(&self, ciphertext: &HybridCiphertext) -> SymmetricKey {
SymmetricKey {
group_repr: &ciphertext.e1 * &self.sk,
}
}
#[allow(dead_code)]
/// Decrypt a message using hybrid decryption
pub(crate) fn hybrid_decrypt(&self, ciphertext: &HybridCiphertext) -> Vec<u8> {
self.recover_symmetric_key(ciphertext)
.process(&ciphertext.e2)
}
/// Decrypt ElGamal `Ciphertext` = (`cipher`.e1, `cipher`.e2), by computing
/// `cipher`.e2 - `self` * `cipher`.e1. This returns the plaintext represented
/// as a `GroupElement`.
pub(crate) fn decrypt_point(&self, cipher: &Ciphertext) -> GroupElement {
&(&cipher.e1 * &self.sk.negate()) + &cipher.e2
}
}
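// Correctness sketch for `decrypt_point`: with e1 = r * G and e2 = M + r * pk
// = M + r * sk * G, we get e2 - sk * e1 = M + r * sk * G - sk * r * G = M.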
impl SymmetricKey {
/// Generate a new random symmetric key
pub fn new<R: RngCore + CryptoRng>(rng: &mut R) -> Self {
let exponent = Scalar::random(rng);
SymmetricKey {
group_repr: GroupElement::generator() * &exponent,
}
}
// Initialise encryption, by hashing the group element
fn initialise_encryption(&self) -> ChaCha20 {
let mut out = [0u8; 44];
let mut h = Blake2b::new(44);
h.input(&self.group_repr.to_bytes());
h.result(&mut out);
ChaCha20::new(&out[0..32], &out[32..44])
}
// Encrypt/decrypt a message using the symmetric key
fn process(&self, m: &[u8]) -> Vec<u8> {
let mut key = self.initialise_encryption();
let mut dat = m.to_vec();
key.process_mut(&mut dat);
dat
}
}
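// Why both sides derive the same ChaCha20 key material: the sender computes
// k = r * pk = r * sk * G and transmits e1 = r * G; the receiver computes
// sk * e1 = sk * r * G, the same group element, so hashing it with Blake2b
// yields identical key and nonce bytes on both ends.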
impl Keypair {
#[allow(dead_code)]
pub fn from_secretkey(secret_key: SecretKey) -> Self {
let public_key = PublicKey {
pk: &GroupElement::generator() * &secret_key.sk,
};
Keypair {
secret_key,
public_key,
}
}
/// Generate a keypair for encryption
pub fn generate<R: RngCore + CryptoRng>(rng: &mut R) -> Keypair {
let sk = Scalar::random(rng);
let pk = &GroupElement::generator() * &sk;
Keypair {
secret_key: SecretKey { sk },
public_key: PublicKey { pk },
}
}
}
impl Ciphertext {
/// Size of the byte representation of `Ciphertext`.
pub const BYTES_LEN: usize = GroupElement::BYTES_LEN * 2;
/// the zero ciphertext
pub fn zero() -> Self {
Ciphertext {
e1: GroupElement::zero(),
e2: GroupElement::zero(),
}
}
pub fn to_bytes(&self) -> Vec<u8> {
let mut r = Vec::with_capacity(Self::BYTES_LEN);
r.extend_from_slice(self.e1.to_bytes().as_ref());
r.extend_from_slice(self.e2.to_bytes().as_ref());
debug_assert_eq!(r.len(), Self::BYTES_LEN);
r
}
pub fn from_bytes(slice: &[u8]) -> Option<Ciphertext> {
let e1 = GroupElement::from_bytes(&slice[..GroupElement::BYTES_LEN])?;
let e2 = GroupElement::from_bytes(&slice[GroupElement::BYTES_LEN..])?;
Some(Ciphertext { e1, e2 })
}
pub fn elements(&self) -> (&GroupElement, &GroupElement) {
(&self.e1, &self.e2)
}
}
impl HybridCiphertext {
pub fn to_bytes(&self) -> Vec<u8> {
let mut r = Vec::with_capacity(GroupElement::BYTES_LEN + self.e2.len());
r.extend_from_slice(self.e1.to_bytes().as_ref());
r.extend_from_slice(self.e2.as_ref());
r
}
pub fn from_bytes(slice: &[u8]) -> Option<HybridCiphertext> {
let e1 = GroupElement::from_bytes(&slice[..GroupElement::BYTES_LEN])?;
let e2 = slice[GroupElement::BYTES_LEN..].to_vec().into_boxed_slice();
Some(HybridCiphertext { e1, e2 })
}
}
impl<'a, 'b> Add<&'b Ciphertext> for &'a Ciphertext {
type Output = Ciphertext;
fn add(self, other: &'b Ciphertext) -> Ciphertext {
Ciphertext {
e1: &self.e1 + &other.e1,
e2: &self.e2 + &other.e2,
}
}
}
std_ops_gen!(Ciphertext, Add, Ciphertext, Ciphertext, add);
impl<'a, 'b> Sub<&'b Ciphertext> for &'a Ciphertext {
type Output = Ciphertext;
fn sub(self, other: &'b Ciphertext) -> Ciphertext {
Ciphertext {
e1: &self.e1 - &other.e1,
e2: &self.e2 - &other.e2,
}
}
}
std_ops_gen!(Ciphertext, Sub, Ciphertext, Ciphertext, sub);
impl<'a, 'b> Mul<&'b Scalar> for &'a Ciphertext {
type Output = Ciphertext;
fn mul(self, rhs: &'b Scalar) -> Self::Output {
Ciphertext {
e1: &self.e1 * rhs,
e2: &self.e2 * rhs,
}
}
}
std_ops_gen!(Ciphertext, Mul, Scalar, Ciphertext, mul);
impl<'a> Mul<u64> for &'a Ciphertext {
type Output = Ciphertext;
fn mul(self, rhs: u64) -> Self::Output {
Ciphertext {
e1: &self.e1 * rhs,
e2: &self.e2 * rhs,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use rand_chacha::ChaCha20Rng;
use rand_core::SeedableRng;
#[test]
fn zero() {
let cipher = Ciphertext {
e1: GroupElement::zero(),
e2: GroupElement::zero(),
};
assert_eq!(Ciphertext::zero(), cipher)
}
#[test]
fn encrypt_decrypt_point() {
let mut rng = ChaCha20Rng::from_seed([0u8; 32]);
for n in 1..5 {
let keypair = Keypair::generate(&mut rng);
let m = GroupElement::generator() * Scalar::from_u64(n * 24);
let cipher = keypair.public_key.encrypt_point(&m, &mut rng);
let r = keypair.secret_key.decrypt_point(&cipher);
assert_eq!(m, r)
}
}
#[test]
fn encrypt_decrypt() {
let mut rng = ChaCha20Rng::from_seed([0u8; 32]);
for n in 1..5 {
let keypair = Keypair::generate(&mut rng);
let m = Scalar::from_u64(n * 24);
let cipher = keypair.public_key.encrypt(&m, &mut rng);
let r = keypair.secret_key.decrypt_point(&cipher);
assert_eq!(m * GroupElement::generator(), r)
}
}
#[test]
fn symmetric_encrypt_decrypt() {
let mut rng = ChaCha20Rng::from_seed([0u8; 32]);
let k = SecretKey::generate(&mut rng);
let k = Keypair::from_secretkey(k);
let m = [1, 3, 4, 5, 6, 7];
let encrypted = &k.public_key.hybrid_encrypt(&m, &mut rng);
let result = &k.secret_key.hybrid_decrypt(&encrypted);
assert_eq!(&m[..], &result[..])
}
#[test]
fn hybrid_serialisation() {
let mut rng = ChaCha20Rng::from_seed([0u8; 32]);
let k = SecretKey::generate(&mut rng);
let k = Keypair::from_secretkey(k);
let m = [1, 3, 4, 5, 6, 7];
let encrypted = &k.public_key.hybrid_encrypt(&m, &mut rng);
let serialised_ciphertext = encrypted.to_bytes();
let deserialised_ciphertext = HybridCiphertext::from_bytes(&serialised_ciphertext);
assert!(deserialised_ciphertext.is_some());
let result = &k
.secret_key
.hybrid_decrypt(&deserialised_ciphertext.unwrap());
assert_eq!(&m[..], &result[..])
}
}
getAbi.js
export default function getAbi(contractEntry) {
if (contractEntry.web3Contract) {
return contractEntry.web3Contract.options.jsonInterface
} else {
return contractEntry.abi
}
}
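// Illustrative usage; the two contractEntry shapes are inferred from the
// branches above, and `abi` is a placeholder value.
//
//   getAbi({ web3Contract: { options: { jsonInterface: abi } } })  // -> abi
//   getAbi({ abi: abi })                                           // -> abi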
type.py
import numpy
import theano
from theano.tensor.var import _tensor_py_operators
from theano import Type, Variable, Constant, tensor, config, scalar
from theano.compile import SharedVariable
# Make sure this is importable even if pygpu is absent
# (it will not work though)
try:
import pygpu
from pygpu import gpuarray
from pygpu.elemwise import compare, elemwise2
except ImportError:
pass
_context_reg = {}
def reg_context(name, ctx):
"""
Register a context by mapping it to a name.
The context must be of type `GpuContext` and the name can be
anything hashable (but is usually a string). Only one context can
be registered per name and the second registration for a given
name will raise an error.
Parameters
----------
name : hashable object
Name to associate the context with (usually a string)
ctx : GpuContext
Context instance
"""
if name in _context_reg:
raise ValueError("context name %s is already defined" % (name,))
if not isinstance(ctx, gpuarray.GpuContext):
raise TypeError("context is not GpuContext")
_context_reg[name] = ctx
def get_context(name):
"""
Retrive the context associated with a name.
Return the context object mapped to `ref` that was previously
register through :func:`reg_context`. Trying to get the context
for an unregistered `ref` will raise a exception.
Parameters
----------
name : hashable object
Name associated with the context we want (usually a string)
"""
if name not in _context_reg:
raise ValueError("context name %s not defined" % (name,))
return _context_reg[name]
def list_contexts():
"""
Return an iterable of all the registered context names.
"""
return _context_reg.keys()
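# Illustrative usage sketch. The device string and the pygpu.init() call are
# assumptions for the example; any GpuContext instance works.
#
#   ctx = pygpu.init('cuda0')
#   reg_context('dev0', ctx)
#   assert get_context('dev0') is ctx
#   assert 'dev0' in list_contexts()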
# Private method
def _name_for_ctx(ctx):
for k, v in _context_reg.items():
if v == ctx:
return k
raise ValueError('context is not registered')
# This is a private method for use by the tests only
def _unreg_context(name):
del _context_reg[name]
class GpuArrayType(Type):
def __init__(self, dtype, broadcastable, context_name=None, name=None):
# In case this was not provided and no global value is available
self.dtype = str(dtype)
self.broadcastable = tuple(bool(b) for b in broadcastable)
self.ndim = len(self.broadcastable)
self.name = name
self.context_name = context_name
try:
self.typecode = gpuarray.dtype_to_typecode(self.dtype)
except gpuarray.GpuArrayException:
raise TypeError("Unsupported dtype for %s: %s" %
(self.__class__.__name__, self.dtype))
def clone(self, dtype=None, broadcastable=None):
if dtype is None:
dtype = self.dtype
if broadcastable is None:
broadcastable = self.broadcastable
return self.__class__(dtype=dtype, broadcastable=broadcastable,
context_name=self.context_name, name=self.name)
# This is a property to keep the type pickleable
@property
def context(self):
return get_context(self.context_name)
def __repr__(self):
return "GpuArrayType<%s>(%s, %s)" % (self.context_name, self.dtype,
self.broadcastable)
def filter(self, data, strict=False, allow_downcast=None):
if (isinstance(data, gpuarray.GpuArray) and
data.typecode == self.typecode):
# This is just to make this condition not enter the
# following branches
pass
elif strict:
if not isinstance(data, gpuarray.GpuArray):
raise TypeError("%s expected a GpuArray object." % self,
data, type(data))
if self.typecode != data.typecode:
raise TypeError("%s expected typecode %d (dtype %s), "
"got %d (dtype %s)." %
(self, self.typecode, self.dtype,
data.typecode, str(data.dtype)))
if self.context != data.context:
raise TypeError("data context does not match type context")
# fallthrough to ndim check
elif (allow_downcast or
(allow_downcast is None and
type(data) == float and
self.dtype == config.floatX)):
data = gpuarray.array(data, dtype=self.typecode, copy=False,
ndmin=len(self.broadcastable),
context=self.context)
else:
if not hasattr(data, 'dtype'):
# This is to convert objects that don't have a dtype
# (like lists). We anticipate that the type below
# will match and we pass copy=False so it won't make a
# second object on the GPU.
data = gpuarray.array(data, copy=False, context=self.context)
up_dtype = scalar.upcast(self.dtype, data.dtype)
if up_dtype == self.dtype:
data = gpuarray.array(data, dtype=self.dtype, copy=False,
context=self.context)
else:
raise TypeError("%s cannot store a value of dtype %s "
"without risking loss of precision." %
(self, data.dtype))
if self.ndim != data.ndim:
raise TypeError("Wrong number of dimensions: expected %s, "
"got %s with shape %s." % (self.ndim, data.ndim,
data.shape), data)
shp = data.shape
for i, b in enumerate(self.broadcastable):
if b and shp[i] != 1:
raise TypeError("Non-unit value on shape on a broadcastable"
" dimension.", shp, self.broadcastable)
return data
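# Example of the dtype rule above (a sketch): a float32-typed GpuArrayType
# accepts int8 data because upcast(float32, int8) == float32, but rejects
# float64 data unless allow_downcast=True, since float64 -> float32 risks
# losing precision.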
def filter_variable(self, other, allow_convert=True):
from theano.sandbox.gpuarray import GpuFromHost
if hasattr(other, '_as_GpuArrayVariable'):
other = other._as_GpuArrayVariable(self.context_name)
if not isinstance(other, Variable):
other = self.Constant(type=self, data=other)
if other.type == self:
return other
if not isinstance(other.type, tensor.TensorType):
raise TypeError('Incompatible type', (self, other.type))
if (other.type.dtype != self.dtype):
raise TypeError('Incompatible dtype', (self.dtype,
other.type.dtype))
if other.type.ndim != self.ndim:
raise TypeError('Incompatible number of dimensions.'
' Expected %d, got %d.' % (self.ndim, other.ndim))
if other.type.broadcastable != self.broadcastable:
if allow_convert:
type2 = other.type.clone(broadcastable=self.broadcastable)
other2 = type2.convert_variable(other)
else:
other2 = None
if other2 is None:
raise TypeError('Incompatible broadcastable dimensions.'
' Expected %s, got %s.' %
(str(self.broadcastable),
str(other.type.broadcastable)))
other = other2
return GpuFromHost(self.context_name)(other)
@staticmethod
def values_eq(a, b):
if a.shape != b.shape:
return False
if a.typecode != b.typecode:
return False
a_eq_b = numpy.asarray(compare(a, '==', b))
if a_eq_b.all():
return True
# maybe the trouble is that there are NaNs
a = numpy.asarray(a)
b = numpy.asarray(b)
a_missing = numpy.isnan(a)
if a_missing.any():
b_missing = numpy.isnan(b)
return numpy.all(a_eq_b + (a_missing == b_missing))
else:
return False
@staticmethod
def values_eq_approx(a, b,
allow_remove_inf=False, allow_remove_nan=False,
rtol=None, atol=None):
if a.shape != b.shape or a.dtype != b.dtype:
return False
if 'int' in str(a.dtype):
return GpuArrayType.values_eq(a, b)
else:
if allow_remove_inf or allow_remove_nan:
raise NotImplementedError(
"GpuArrayType.values_eq_approx() don't implemented the"
" allow_remove_inf and allow_remove_nan parameter")
if a.dtype == 'float16' or b.dtype == 'float16':
an = numpy.asarray(a)
bn = numpy.asarray(b)
return tensor.TensorType.values_eq_approx(
an, bn, allow_remove_inf=allow_remove_inf,
allow_remove_nan=allow_remove_nan, rtol=rtol, atol=atol)
atol_, rtol_ = theano.tensor.basic._get_atol_rtol(a, b)
if rtol is not None:
rtol_ = rtol
if atol is not None:
atol_ = atol
res = elemwise2(a, '', b, a, odtype=numpy.dtype('bool'),
op_tmpl="res[i] = (fabs(%%(a)s - %%(b)s) <"
"(%(atol_)s + %(rtol_)s * fabs(%%(b)s)))" %
locals())
ret = numpy.asarray(res).all()
if ret:
return True
# maybe the trouble is that there are NaNs
an = numpy.asarray(a)
bn = numpy.asarray(b)
return tensor.TensorType.values_eq_approx(
an, bn, allow_remove_inf=allow_remove_inf,
allow_remove_nan=allow_remove_nan, rtol=rtol, atol=atol)
@staticmethod
def may_share_memory(a, b):
if (not isinstance(a, gpuarray.GpuArray) or
not isinstance(b, gpuarray.GpuArray)):
return False
return pygpu.gpuarray.may_share_memory(a, b)
def value_zeros(self, shape):
return pygpu.gpuarray.zeros(shape, dtype=self.typecode,
context=self.context)
def make_variable(self, name=None):
return self.Variable(self, name=name)
def __eq__(self, other):
return (type(self) == type(other) and
self.typecode == other.typecode and
self.broadcastable == other.broadcastable and
self.context_name == other.context_name)
def convert_variable(self, var):
vt = var.type
if (type(self) == type(vt) and
self.typecode == vt.typecode and
self.ndim == vt.ndim and
self.context_name == vt.context_name and
all(sb == ob or ob for sb, ob in zip(self.broadcastable,
vt.broadcastable))):
return theano.tensor.patternbroadcast(var, self.broadcastable)
def __hash__(self):
return hash((type(self), self.typecode, self.broadcastable,
self.context_name))
def dtype_specs(self):
"""
Return a tuple (python type, c type, numpy typenum) that corresponds
to self.dtype.
This function is used internally as part of C code generation.
"""
# TODO: add more type correspondences for e.g. int32, int64, float32,
# complex64, etc.
try:
return {
'float16': (float, 'npy_float16', 'NPY_FLOAT16'),
'float32': (float, 'npy_float32', 'NPY_FLOAT32'),
'float64': (float, 'npy_float64', 'NPY_FLOAT64'),
'uint8': (int, 'npy_uint8', 'NPY_UINT8'),
'int8': (int, 'npy_int8', 'NPY_INT8'),
'uint16': (int, 'npy_uint16', 'NPY_UINT16'),
'int16': (int, 'npy_int16', 'NPY_INT16'),
'uint32': (int, 'npy_uint32', 'NPY_UINT32'),
'int32': (int, 'npy_int32', 'NPY_INT32'),
'uint64': (int, 'npy_uint64', 'NPY_UINT64'),
'int64': (int, 'npy_int64', 'NPY_INT64'),
'complex128': (complex, 'theano_complex128', 'NPY_COMPLEX128'),
'complex64': (complex, 'theano_complex64', 'NPY_COMPLEX64')
}[self.dtype]
except KeyError:
raise TypeError("Unsupported dtype for %s: %s" %
(self.__class__.__name__, self.dtype))
def get_shape_info(self, obj):
return obj.shape
def get_size(self, shape_info):
if shape_info:
return numpy.prod(shape_info) * numpy.dtype(self.dtype).itemsize
else:
return numpy.dtype(self.dtype).itemsize
def c_declare(self, name, sub, check_input=True):
return """
PyGpuArrayObject *%(name)s;
""" % locals()
def c_init(self, name, sub):
return "%s = NULL;" % (name,)
def c_extract(self, name, sub, check_input=True):
# TODO I don't check broadcast stuff for now.
return """
%(name)s = NULL;
if (py_%(name)s == Py_None) {
PyErr_SetString(PyExc_ValueError, "expected a GpuArray, not None");
%(fail)s
}
/* First check if we are the base type exactly (the most common case),
then do the full subclass check if needed. */
if (py_%(name)s->ob_type != &PyGpuArrayType &&
!PyObject_TypeCheck(py_%(name)s, &PyGpuArrayType)) {
PyErr_SetString(PyExc_ValueError, "expected a GpuArray");
%(fail)s
}
%(name)s = (PyGpuArrayObject *)py_%(name)s;
Py_INCREF(%(name)s);
""" % {'name': name, 'fail': sub['fail']}
def c_cleanup(self, name, sub):
return "Py_XDECREF(%(name)s); %(name)s = NULL;" % {'name': name}
def c_sync(self, name, sub):
return """
if (!%(name)s) {
Py_XDECREF(py_%(name)s);
Py_INCREF(Py_None);
py_%(name)s = Py_None;
} else if ((void *)py_%(name)s != (void *)%(name)s) {
Py_XDECREF(py_%(name)s);
py_%(name)s = (PyObject *)%(name)s;
Py_INCREF(py_%(name)s);
}
""" % {'name': name}
def c_init_code(self):
# We don't actually need the numpy API except in
# HostFromGpu and GpuFromHost and those case will be covered
# by the TensorType parameter
return ['import_pygpu__gpuarray();']
def c_headers(self):
# We need arrayobject for the PyArrayDescr struct def
# (even if we just use a pointer to it in a function def)
return ['<gpuarray/array.h>', '<gpuarray/kernel.h>',
'<gpuarray/error.h>', '<gpuarray/buffer_blas.h>',
'<numpy/arrayobject.h>', '<gpuarray_api.h>']
def c_header_dirs(self):
return [pygpu.get_include(), numpy.get_include()]
def c_libraries(self):
return ['gpuarray']
def c_code_cache_version(self):
ver = pygpu.gpuarray.api_version()
# we only use the major version since the minor revision are
# API-compatible.
return (1, ver[0])
class _operators(_tensor_py_operators):
def _as_TensorVariable(self):
from .basic_ops import host_from_gpu
return host_from_gpu(self)
def _as_GpuArrayVariable(self, context_name):
if self.type.context_name == context_name:
return self
else:
from .basic_ops import GpuToGpu
return GpuToGpu(context_name)(self)
class GpuArrayVariable(_operators, Variable):
pass
GpuArrayType.Variable = GpuArrayVariable
class GpuArraySignature(tensor.TensorConstantSignature):
# might do something better if we can run the sum on the GPU, but
# for now this will suffice.
pass
class GpuArrayConstant(_operators, Constant):
def signature(self):
return GpuArraySignature((self.type, numpy.asarray(self.data)))
def __str__(self):
if self.name is not None:
return self.name
try:
np_data = numpy.asarray(self.data)
except gpuarray.GpuArrayException:
np_data = self.data
return "GpuArrayConstant{%s}" % np_data
GpuArrayType.Constant = GpuArrayConstant
class GpuArraySharedVariable(_operators, SharedVariable):
def get_value(self, borrow=False, return_internal_type=False):
if return_internal_type:
if borrow:
return self.container.value
else:
return self.container.value.copy()
else:
return numpy.asarray(self.container.value)
def set_value(self, value, borrow=False):
if isinstance(value, pygpu.gpuarray.GpuArray):
value = pygpu.gpuarray.array(value, copy=(not borrow),
context=self.type.context)
self.container.value = value
def __getitem__(self, *args):
return _operators.__getitem__(self, *args)
GpuArrayType.SharedVariable = GpuArraySharedVariable
def gpuarray_shared_constructor(value, name=None, strict=False,
allow_downcast=None, borrow=False,
broadcastable=None,
context_name=None):
"""
SharedVariable constructor for GpuArrayType.
"""
if not isinstance(value, (numpy.ndarray, pygpu.gpuarray.GpuArray)):
raise TypeError('ndarray or GpuArray required')
try:
get_context(context_name)
except ValueError:
# Don't make this a hard error if we attempt to make a shared
# variable while there is no default context.
if context_name is None:
raise TypeError('No default context and no context specified')
raise
if broadcastable is None:
broadcastable = (False,) * value.ndim
type = GpuArrayType(value.dtype, broadcastable, context_name=context_name)
deviceval = pygpu.gpuarray.array(value, copy=(not borrow),
context=type.context)
return GpuArraySharedVariable(type=type, value=deviceval, name=name,
strict=strict)
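# Illustrative usage, assuming a context was registered under 'dev0' as in
# reg_context() above:
#
#   W = gpuarray_shared_constructor(numpy.zeros((3, 4), dtype='float32'),
#                                   name='W', context_name='dev0')
#   W.set_value(numpy.ones((3, 4), dtype='float32'))
#   assert W.get_value().sum() == 12.0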
theano.compile.register_view_op_c_code(GpuArrayType, """
Py_XDECREF(%(oname)s);
%(oname)s = %(iname)s;
Py_XINCREF(%(oname)s);
""", version=(0,))
# Register GpuArrayType C code for Shape Op.
theano.compile.register_shape_c_code(
GpuArrayType,
"""
npy_intp shape[] = {%(iname)s->ga.nd};
if(%(oname)s == NULL || (PyArray_DIMS(%(oname)s)[0] != shape[0]))
{
Py_XDECREF(%(oname)s);
%(oname)s = (PyArrayObject*) PyArray_SimpleNew(1, shape, NPY_INT64);
}
for(int i=0;i<shape[0];i++)
{
((npy_int64*)PyArray_GETPTR1(%(oname)s, i))[0] = %(iname)s->ga.dimensions[i];
}
""",
version=1)
theano.compile.register_shape_i_c_code(
GpuArrayType,
"""
if(!%(oname)s)
%(oname)s=(PyArrayObject*)PyArray_ZEROS(0, NULL, NPY_INT64, 0);
((npy_int64*)PyArray_DATA(%(oname)s))[0] =
%(iname)s->ga.dimensions[%(i)s];
""",
"""
if (%(i)s>=%(iname)s->ga.nd){
PyErr_SetString(PyExc_TypeError,
"Number of dimensions lower than expected");
%(fail)s
}
""",
version=(1,))
theano.compile.register_deep_copy_op_c_code(GpuArrayType, """
Py_XDECREF(%(oname)s);
%(oname)s = pygpu_copy(%(iname)s, GA_ANY_ORDER);
if (!%(oname)s) { %(fail)s }
""", version=(5,))
theano.compile.register_rebroadcast_c_code(
GpuArrayType,
"""
if(%(iname)s->ga.dimensions[%(axis)s] != 1){
PyErr_Format(PyExc_ValueError,
"Dimension %(axis)s in Rebroadcast's input was"
" supposed to be 1 (got %%d instead)",
%(iname)s->ga.dimensions[%(axis)s]);
%(fail)s
}
""",
version=1)
theano.compile.register_specify_shape_c_code(
GpuArrayType,
"""
if (PyGpuArray_NDIM(%(iname)s) != PyArray_DIMS(%(shape)s)[0]) {
PyErr_Format(PyExc_AssertionError,
"SpecifyShape: vector of shape has %%d elements,"
" but the input has %%d dimensions.",
PyGpuArray_NDIM(%(iname)s),
PyArray_DIMS(%(shape)s)[0]);
%(fail)s;
}
for(int i = 0; i < PyGpuArray_NDIM(%(iname)s); i++){
dtype_%(shape)s shp = ((dtype_%(shape)s*)PyArray_GETPTR1(%(shape)s,
i))[0];
if (PyGpuArray_DIMS(%(iname)s)[i] != shp) {
PyErr_Format(PyExc_AssertionError,
"SpecifyShape: dim %%d of input has shape %%d,"
" expected %%d.",
i, PyGpuArray_DIMS(%(iname)s)[i],
shp);
%(fail)s;
}
}
Py_XDECREF(%(oname)s);
%(oname)s = %(iname)s;
Py_XINCREF(%(oname)s);
""",
version=1,
c_support_code_apply='#include <numpy_compat.h>')
class GpuContextType(Type):
def filter(self, data, strict=False, allow_downcast=None):
if not isinstance(data, gpuarray.GpuContext):
raise TypeError('context is not a GpuContext')
return data
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
@staticmethod
def values_eq(a, b):
return a == b
def c_declare(self, name, sub, check_input=True):
return "PyGpuContextObject *%s;" % (name,)
def c_init(self, name, sub):
return "%s = NULL;" % (name,)
def c_extract(self, name, sub, check_input=True):
if check_input:
res = """
if (!PyObject_TypeCheck(py_%(name)s, &PyGpuContextType)) {
PyErr_SetString(PyExc_TypeError, "expected a GpuContext");
%(fail)s
}
""" % dict(name=name, fail=sub['fail'])
else:
res = ""
return res + """
%(name)s = (PyGpuContextObject *)py_%(name)s;
Py_INCREF(%(name)s);
""" % dict(name=name)
def c_cleanup(self, name, sub):
return "Py_XDECREF(%(name)s); %(name)s = NULL;" % dict(name=name)
# c_sync is intentionally not declared to prevent normal usage
def c_init_code(self):
return ['import_pygpu__gpuarray();']
def c_headers(self):
return ['<gpuarray_api.h>']
def c_header_dirs(self):
return [pygpu.get_include()]
def c_code_cache_version(self):
ver = pygpu.gpuarray.api_version()
return (0, ver[0])
# Variable, Constant, ... not declared
gpu_context_type = GpuContextType()
utils.py
"""
Mask R-CNN
Common utility functions and classes.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import sys
import os
import logging
import math
import random
import numpy as np
import tensorflow as tf
import scipy
import skimage.color
import skimage.io
import skimage.transform
import urllib.request
import shutil
import warnings
from distutils.version import LooseVersion
# URL from which to download the latest COCO trained weights
COCO_MODEL_URL = "https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5"
############################################################
# Bounding Boxes
############################################################
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, (y1, x1, y2, x2)].
"""
boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32)
for i in range(mask.shape[-1]):
m = mask[:, :, i]
# Bounding box.
horizontal_indicies = np.where(np.any(m, axis=0))[0]
vertical_indicies = np.where(np.any(m, axis=1))[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
y1, y2 = vertical_indicies[[0, -1]]
# x2 and y2 should not be part of the box. Increment by 1.
x2 += 1
y2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2, y1, y2 = 0, 0, 0, 0
boxes[i] = np.array([y1, x1, y2, x2])
return boxes.astype(np.int32)
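# Worked example: a single 4x4 mask with ones in rows 1-2, cols 2-3 gives the
# box [1, 2, 3, 4] -- y2/x2 are exclusive, hence the +1 above.
#
#   m = np.zeros((4, 4, 1), dtype=np.uint8)
#   m[1:3, 2:4, 0] = 1
#   extract_bboxes(m)  # -> array([[1, 2, 3, 4]], dtype=int32)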
def compute_iou(box, boxes, box_area, boxes_area):
"""Calculates IoU of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2]
boxes: [boxes_count, (y1, x1, y2, x2)]
box_area: float. the area of 'box'
boxes_area: array of length boxes_count.
Note: the areas are passed in rather than calculated here for
efficiency. Calculate once in the caller to avoid duplicate work.
"""
# Calculate intersection areas
y1 = np.maximum(box[0], boxes[:, 0])
y2 = np.minimum(box[2], boxes[:, 2])
x1 = np.maximum(box[1], boxes[:, 1])
x2 = np.minimum(box[3], boxes[:, 3])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
union = box_area + boxes_area[:] - intersection[:]
iou = intersection / union
return iou
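# Worked example: two 2x2 boxes offset by one pixel overlap in a 1x1 region,
# so IoU = 1 / (4 + 4 - 1) = 1/7.
#
#   box = np.array([0, 0, 2, 2])
#   boxes = np.array([[1, 1, 3, 3]])
#   compute_iou(box, boxes, 4.0, np.array([4.0]))  # -> array([0.1428...])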
def compute_overlaps(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
For better performance, pass the largest set first and the smaller second.
"""
# Areas of anchors and GT boxes
area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
# Compute overlaps to generate matrix [boxes1 count, boxes2 count]
# Each cell contains the IoU value.
overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
for i in range(overlaps.shape[1]):
box2 = boxes2[i]
overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)
return overlaps
def compute_overlaps_masks(masks1, masks2):
"""Computes IoU overlaps between two sets of masks.
masks1, masks2: [Height, Width, instances]
"""
# If either set of masks is empty return empty result
if masks1.shape[-1] == 0 or masks2.shape[-1] == 0:
return np.zeros((masks1.shape[-1], masks2.shape[-1]))
# flatten masks and compute their areas
masks1 = np.reshape(masks1 > .5, (-1, masks1.shape[-1])).astype(np.float32)
masks2 = np.reshape(masks2 > .5, (-1, masks2.shape[-1])).astype(np.float32)
area1 = np.sum(masks1, axis=0)
area2 = np.sum(masks2, axis=0)
# intersections and union
intersections = np.dot(masks1.T, masks2)
union = area1[:, None] + area2[None, :] - intersections
overlaps = intersections / union
return overlaps
def non_max_suppression(boxes, scores, threshold):
"""Performs non-maximum suppression and returns indices of kept boxes.
boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lies outside the box.
scores: 1-D array of box scores.
threshold: Float. IoU threshold to use for filtering.
"""
assert boxes.shape[0] > 0
if boxes.dtype.kind != "f":
boxes = boxes.astype(np.float32)
# Compute box areas
y1 = boxes[:, 0]
x1 = boxes[:, 1]
y2 = boxes[:, 2]
x2 = boxes[:, 3]
area = (y2 - y1) * (x2 - x1)
# Get indicies of boxes sorted by scores (highest first)
ixs = scores.argsort()[::-1]
pick = []
while len(ixs) > 0:
# Pick top box and add its index to the list
i = ixs[0]
pick.append(i)
# Compute IoU of the picked box with the rest
iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])
# Identify boxes with IoU over the threshold. This
# returns indices into ixs[1:], so add 1 to get
# indices into ixs.
remove_ixs = np.where(iou > threshold)[0] + 1
# Remove indices of the picked and overlapped boxes.
ixs = np.delete(ixs, remove_ixs)
ixs = np.delete(ixs, 0)
return np.array(pick, dtype=np.int32)
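# Worked example: boxes 0 and 1 overlap with IoU 0.81, box 2 is disjoint, so
# NMS at threshold 0.3 keeps the higher-scoring box 0 plus box 2.
#
#   boxes = np.array([[0, 0, 10, 10], [1, 1, 10, 10], [20, 20, 30, 30]])
#   scores = np.array([0.9, 0.8, 0.7])
#   non_max_suppression(boxes, scores, 0.3)  # -> array([0, 2], dtype=int32)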
def apply_box_deltas(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, (y1, x1, y2, x2)]. Note that (y2, x2) is outside the box.
deltas: [N, (dy, dx, log(dh), log(dw))]
"""
boxes = boxes.astype(np.float32)
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= np.exp(deltas[:, 2])
width *= np.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
return np.stack([y1, x1, y2, x2], axis=1)
def box_refinement_graph(box, gt_box):
"""Compute refinement needed to transform box to gt_box.
box and gt_box are [N, (y1, x1, y2, x2)]
"""
box = tf.cast(box, tf.float32)
gt_box = tf.cast(gt_box, tf.float32)
height = box[:, 2] - box[:, 0]
width = box[:, 3] - box[:, 1]
center_y = box[:, 0] + 0.5 * height
center_x = box[:, 1] + 0.5 * width
gt_height = gt_box[:, 2] - gt_box[:, 0]
gt_width = gt_box[:, 3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = tf.math.log(gt_height / height)
dw = tf.math.log(gt_width / width)
result = tf.stack([dy, dx, dh, dw], axis=1)
return result
def box_refinement(box, gt_box):
"""Compute refinement needed to transform box to gt_box.
box and gt_box are [N, (y1, x1, y2, x2)]. (y2, x2) is
assumed to be outside the box.
"""
box = box.astype(np.float32)
gt_box = gt_box.astype(np.float32)
height = box[:, 2] - box[:, 0]
width = box[:, 3] - box[:, 1]
center_y = box[:, 0] + 0.5 * height
center_x = box[:, 1] + 0.5 * width
gt_height = gt_box[:, 2] - gt_box[:, 0]
gt_width = gt_box[:, 3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = np.log(gt_height / height)
dw = np.log(gt_width / width)
return np.stack([dy, dx, dh, dw], axis=1)
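# box_refinement() and apply_box_deltas() are inverses: applying the deltas
# computed for (box, gt_box) back onto box recovers gt_box up to float error.
#
#   b  = np.array([[10., 10., 20., 20.]])
#   gt = np.array([[12., 14., 24., 22.]])
#   np.allclose(apply_box_deltas(b, box_refinement(b, gt)), gt)  # -> True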
############################################################
# Dataset
############################################################
class Dataset(object):
...
def resize_image(image, min_dim=None, max_dim=None, min_scale=None, mode="square"):
"""Resizes an image keeping the aspect ratio unchanged.
min_dim: if provided, resizes the image such that its smaller
dimension == min_dim
max_dim: if provided, ensures that the image's longest side doesn't
exceed this value.
min_scale: if provided, ensures that the image is scaled up by at least
this percent even if min_dim doesn't require it.
mode: Resizing mode.
none: No resizing. Return the image unchanged.
square: Resize and pad with zeros to get a square image
of size [max_dim, max_dim].
pad64: Pads width and height with zeros to make them multiples of 64.
If min_dim or min_scale are provided, it scales the image up
before padding. max_dim is ignored in this mode.
The multiple of 64 is needed to ensure smooth scaling of feature
maps up and down the 6 levels of the FPN pyramid (2**6=64).
crop: Picks random crops from the image. First, scales the image based
on min_dim and min_scale, then picks a random crop of
size min_dim x min_dim. Can be used in training only.
max_dim is not used in this mode.
Returns:
image: the resized image
window: (y1, x1, y2, x2). If max_dim is provided, padding might
be inserted in the returned image. If so, this window is the
coordinates of the image part of the full image (excluding
the padding). The x2, y2 pixels are not included.
scale: The scale factor used to resize the image
padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]
"""
# Keep track of image dtype and return results in the same dtype
image_dtype = image.dtype
# Default window (y1, x1, y2, x2) and default scale == 1.
h, w = image.shape[:2]
window = (0, 0, h, w)
scale = 1
padding = [(0, 0), (0, 0), (0, 0)]
crop = None
if mode == "none":
return image, window, scale, padding, crop
# Scale?
if min_dim:
# Scale up but not down
scale = max(1, min_dim / min(h, w))
if min_scale and scale < min_scale:
scale = min_scale
# Does it exceed max dim?
if max_dim and mode == "square":
image_max = max(h, w)
if round(image_max * scale) > max_dim:
scale = max_dim / image_max
# Resize image using bilinear interpolation
if scale != 1:
image = resize(image, (round(h * scale), round(w * scale)),
preserve_range=True)
# Need padding or cropping?
if mode == "square":
# Get new height and width
h, w = image.shape[:2]
top_pad = (max_dim - h) // 2
bottom_pad = max_dim - h - top_pad
left_pad = (max_dim - w) // 2
right_pad = max_dim - w - left_pad
padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
image = np.pad(image, padding, mode='constant', constant_values=0)
window = (top_pad, left_pad, h + top_pad, w + left_pad)
elif mode == "pad64":
h, w = image.shape[:2]
# Both sides must be divisible by 64
assert min_dim % 64 == 0, "Minimum dimension must be a multiple of 64"
# Height
if h % 64 > 0:
max_h = h - (h % 64) + 64
top_pad = (max_h - h) // 2
bottom_pad = max_h - h - top_pad
else:
top_pad = bottom_pad = 0
# Width
if w % 64 > 0:
max_w = w - (w % 64) + 64
left_pad = (max_w - w) // 2
right_pad = max_w - w - left_pad
else:
left_pad = right_pad = 0
padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
image = np.pad(image, padding, mode='constant', constant_values=0)
window = (top_pad, left_pad, h + top_pad, w + left_pad)
elif mode == "crop":
# Pick a random crop
h, w = image.shape[:2]
y = random.randint(0, (h - min_dim))
x = random.randint(0, (w - min_dim))
crop = (y, x, min_dim, min_dim)
image = image[y:y + min_dim, x:x + min_dim]
window = (0, 0, min_dim, min_dim)
else:
raise Exception("Mode {} not supported".format(mode))
return image.astype(image_dtype), window, scale, padding, crop
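# Hedged usage sketch (shapes illustrative): a 600x800 input with min_dim=800,
# max_dim=1024, mode="square" is capped at scale 1024/800 = 1.28 (the min_dim
# scale of ~1.33 would overshoot max_dim), resized to 768x1024, then padded to
# a 1024x1024 square; `window` marks where the real pixels sit in the padding.
#
#   img = np.zeros((600, 800, 3), dtype=np.uint8)
#   out, window, scale, padding, crop = resize_image(
#       img, min_dim=800, max_dim=1024, mode="square")
#   # out.shape[:2] == (1024, 1024); window == (128, 0, 896, 1024); scale == 1.28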
def resize_mask(mask, scale, padding, crop=None):
"""Resizes a mask using the given scale and padding.
Typically, you get the scale and padding from resize_image() to
ensure both, the image and the mask, are resized consistently.
scale: mask scaling factor
padding: Padding to add to the mask in the form
[(top, bottom), (left, right), (0, 0)]
"""
# Suppress warning from scipy 0.13.0: the output shape of zoom() is
# calculated with round() instead of int()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1], order=0)
if crop is not None:
y, x, h, w = crop
mask = mask[y:y + h, x:x + w]
else:
mask = np.pad(mask, padding, mode='constant', constant_values=0)
return mask
def minimize_mask(bbox, mask, mini_shape):
"""Resize masks to a smaller version to reduce memory load.
Mini-masks can be resized back to image scale using expand_mask().
See inspect_data.ipynb notebook for more details.
"""
mini_mask = np.zeros(mini_shape + (mask.shape[-1],), dtype=bool)
for i in range(mask.shape[-1]):
# Pick slice and cast to bool in case load_mask() returned wrong dtype
m = mask[:, :, i].astype(bool)
y1, x1, y2, x2 = bbox[i][:4]
m = m[y1:y2, x1:x2]
if m.size == 0:
raise Exception("Invalid bounding box with area of zero")
# Resize with bilinear interpolation
m = resize(m, mini_shape)
mini_mask[:, :, i] = np.around(m).astype(bool)
return mini_mask
def expand_mask(bbox, mini_mask, image_shape):
"""Resizes mini masks back to image size. Reverses the change
of minimize_mask().
See inspect_data.ipynb notebook for more details.
"""
mask = np.zeros(image_shape[:2] + (mini_mask.shape[-1],), dtype=bool)
for i in range(mask.shape[-1]):
m = mini_mask[:, :, i]
y1, x1, y2, x2 = bbox[i][:4]
h = y2 - y1
w = x2 - x1
# Resize with bilinear interpolation
m = resize(m, (h, w))
mask[y1:y2, x1:x2, i] = np.around(m).astype(bool)
return mask
# TODO: Build and use this function to reduce code duplication
def mold_mask(mask, config):
pass
def unmold_mask(mask, bbox, image_shape):
"""Converts a mask generated by the neural network to a format similar
to its original shape.
mask: [height, width] of type float. A small, typically 28x28 mask.
bbox: [y1, x1, y2, x2]. The box to fit the mask in.
Returns a binary mask with the same size as the original image.
"""
threshold = 0.5
y1, x1, y2, x2 = bbox
mask = resize(mask, (y2 - y1, x2 - x1))
mask = np.where(mask >= threshold, 1, 0).astype(bool)
# Put the mask in the right location.
full_mask = np.zeros(image_shape[:2], dtype=bool)
full_mask[y1:y2, x1:x2] = mask
return full_mask
############################################################
# Anchors
############################################################
def generate_anchors(scales, ratios, shape, feature_stride, anchor_stride):
"""
scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128]
ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2]
shape: [height, width] spatial shape of the feature map over which
to generate anchors.
feature_stride: Stride of the feature map relative to the image in pixels.
anchor_stride: Stride of anchors on the feature map. For example, if the
value is 2 then generate anchors for every other feature map pixel.
"""
# Get all combinations of scales and ratios
scales, ratios = np.meshgrid(np.array(scales), np.array(ratios))
scales = scales.flatten()
ratios = ratios.flatten()
# Enumerate heights and widths from scales and ratios
heights = scales / np.sqrt(ratios)
widths = scales * np.sqrt(ratios)
# Enumerate shifts in feature space
shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride
shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride
shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y)
# Enumerate combinations of shifts, widths, and heights
box_widths, box_centers_x = np.meshgrid(widths, shifts_x)
box_heights, box_centers_y = np.meshgrid(heights, shifts_y)
# Reshape to get a list of (y, x) and a list of (h, w)
box_centers = np.stack(
[box_centers_y, box_centers_x], axis=2).reshape([-1, 2])
box_sizes = np.stack([box_heights, box_widths], axis=2).reshape([-1, 2])
# Convert to corner coordinates (y1, x1, y2, x2)
boxes = np.concatenate([box_centers - 0.5 * box_sizes,
box_centers + 0.5 * box_sizes], axis=1)
return boxes
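# Hedged sanity check (numbers illustrative): one scale and three ratios over
# a 2x2 feature map with anchor_stride=1 yields 3 anchors per location,
# i.e. 2 * 2 * 3 == 12 boxes.
#
#   a = generate_anchors(scales=[64], ratios=[0.5, 1, 2], shape=[2, 2],
#                        feature_stride=16, anchor_stride=1)
#   # a.shape == (12, 4), rows are (y1, x1, y2, x2) in image pixels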
def generate_pyramid_anchors(scales, ratios, feature_shapes, feature_strides,
anchor_stride):
"""Generate anchors at different levels of a feature pyramid. Each scale
is associated with a level of the pyramid, but each ratio is used in
all levels of the pyramid.
Returns:
anchors: [N, (y1, x1, y2, x2)]. All generated anchors in one array. Sorted
with the same order of the given scales. So, anchors of scale[0] come
first, then anchors of scale[1], and so on.
"""
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
anchors = []
for i in range(len(scales)):
anchors.append(generate_anchors(scales[i], ratios, feature_shapes[i],
feature_strides[i], anchor_stride))
return np.concatenate(anchors, axis=0)
############################################################
# Miscellaneous
############################################################
def trim_zeros(x):
"""It's common to have tensors larger than the available data and
pad with zeros. This function removes rows that are all zeros.
x: [rows, columns].
"""
assert len(x.shape) == 2
return x[~np.all(x == 0, axis=1)]
def compute_matches(gt_boxes, gt_class_ids, gt_masks,
pred_boxes, pred_class_ids, pred_scores, pred_masks,
iou_threshold=0.5, score_threshold=0.0):
"""Finds matches between prediction and ground truth instances.
Returns:
gt_match: 1-D array. For each GT box it has the index of the matched
predicted box.
pred_match: 1-D array. For each predicted box, it has the index of
the matched ground truth box.
overlaps: [pred_boxes, gt_boxes] IoU overlaps.
"""
# Trim zero padding
# TODO: cleaner to do zero unpadding upstream
gt_boxes = trim_zeros(gt_boxes)
gt_masks = gt_masks[..., :gt_boxes.shape[0]]
pred_boxes = trim_zeros(pred_boxes)
pred_scores = pred_scores[:pred_boxes.shape[0]]
# Sort predictions by score from high to low
indices = np.argsort(pred_scores)[::-1]
pred_boxes = pred_boxes[indices]
pred_class_ids = pred_class_ids[indices]
pred_scores = pred_scores[indices]
pred_masks = pred_masks[..., indices]
# Compute IoU overlaps [pred_masks, gt_masks]
overlaps = compute_overlaps_masks(pred_masks, gt_masks)
# Loop through predictions and find matching ground truth boxes
match_count = 0
pred_match = -1 * np.ones([pred_boxes.shape[0]])
gt_match = -1 * np.ones([gt_boxes.shape[0]])
for i in range(len(pred_boxes)):
# Find best matching ground truth box
# 1. Sort matches by score
sorted_ixs = np.argsort(overlaps[i])[::-1]
# 2. Remove low scores
low_score_idx = np.where(overlaps[i, sorted_ixs] < score_threshold)[0]
if low_score_idx.size > 0:
sorted_ixs = sorted_ixs[:low_score_idx[0]]
# 3. Find the match
for j in sorted_ixs:
# If ground truth box is already matched, go to next one
if gt_match[j] > -1:
continue
# If we reach IoU smaller than the threshold, end the loop
iou = overlaps[i, j]
if iou < iou_threshold:
break
# Do we have a match?
if pred_class_ids[i] == gt_class_ids[j]:
match_count += 1
gt_match[j] = i
pred_match[i] = j
break
return gt_match, pred_match, overlaps
def compute_ap(gt_boxes, gt_class_ids, gt_masks,
pred_boxes, pred_class_ids, pred_scores, pred_masks,
iou_threshold=0.5):
"""Compute Average Precision at a set IoU threshold (default 0.5).
Returns:
mAP: Mean Average Precision
precisions: List of precisions at different class score thresholds.
recalls: List of recall values at different class score thresholds.
overlaps: [pred_boxes, gt_boxes] IoU overlaps.
"""
# Get matches and overlaps
gt_match, pred_match, overlaps = compute_matches(
gt_boxes, gt_class_ids, gt_masks,
pred_boxes, pred_class_ids, pred_scores, pred_masks,
iou_threshold)
# Compute precision and recall at each prediction box step
precisions = np.cumsum(pred_match > -1) / (np.arange(len(pred_match)) + 1)
recalls = np.cumsum(pred_match > -1).astype(np.float32) / len(gt_match)
# Pad with start and end values to simplify the math
precisions = np.concatenate([[0], precisions, [0]])
recalls = np.concatenate([[0], recalls, [1]])
# Ensure precision values decrease but don't increase. This way, the
# precision value at each recall threshold is the maximum it can be
# for all following recall thresholds, as specified by the VOC paper.
for i in range(len(precisions) - 2, -1, -1):
precisions[i] = np.maximum(precisions[i], precisions[i + 1])
# Compute mean AP over recall range
indices = np.where(recalls[:-1] != recalls[1:])[0] + 1
mAP = np.sum((recalls[indices] - recalls[indices - 1]) *
precisions[indices])
return mAP, precisions, recalls, overlaps
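# Hedged worked example (tiny numbers, illustrative): with 2 GT boxes and
# 3 predictions matched as pred_match == [0, -1, 1], the raw precisions are
# [1, 1/2, 2/3] and recalls [1/2, 1/2, 1]; after padding and the monotone
# envelope above, precisions become [1, 1, 2/3, 2/3, 0] over recalls
# [0, 0.5, 0.5, 1, 1], so mAP = 0.5 * 1 + 0.5 * 2/3 ~= 0.83.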
def compute_ap_range(gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
iou_thresholds=None, verbose=1):
"""Compute AP over a range or IoU thresholds. Default range is 0.5-0.95."""
# Default is 0.5 to 0.95 with increments of 0.05
iou_thresholds = iou_thresholds or np.arange(0.5, 1.0, 0.05)
# Compute AP over range of IoU thresholds
AP = []
for iou_threshold in iou_thresholds:
ap, precisions, recalls, overlaps =\
compute_ap(gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
iou_threshold=iou_threshold)
if verbose:
print("AP @{:.2f}:\t {:.3f}".format(iou_threshold, ap))
AP.append(ap)
AP = np.array(AP).mean()
if verbose:
print("AP @{:.2f}-{:.2f}:\t {:.3f}".format(
iou_thresholds[0], iou_thresholds[-1], AP))
return AP
def compute_recall(pred_boxes, gt_boxes, iou):
"""Compute the recall at the given IoU threshold. It's an indication
of how many GT boxes were found by the given prediction boxes.
pred_boxes: [N, (y1, x1, y2, x2)] in image coordinates
gt_boxes: [N, (y1, x1, y2, x2)] in image coordinates
"""
# Measure overlaps
overlaps = compute_overlaps(pred_boxes, gt_boxes)
iou_max = np.max(overlaps, axis=1)
iou_argmax = np.argmax(overlaps, axis=1)
positive_ids = np.where(iou_max >= iou)[0]
matched_gt_boxes = iou_argmax[positive_ids]
recall = len(set(matched_gt_boxes)) / gt_boxes.shape[0]
return recall, positive_ids
# ## Batch Slicing
# Some custom layers support a batch size of 1 only, and require a lot of work
# to support batches greater than 1. This function slices an input tensor
# across the batch dimension and feeds batches of size 1. Effectively,
# an easy way to support batches > 1 quickly with little code modification.
# In the long run, it's more efficient to modify the code to support large
# batches and get rid of this function. Consider this a temporary solution.
def batch_slice(inputs, graph_fn, batch_size, names=None):
"""Splits inputs into slices and feeds each slice to a copy of the given
computation graph and then combines the results. It allows you to run a
graph on a batch of inputs even if the graph is written to support one
instance only.
inputs: list of tensors. All must have the same first dimension length
graph_fn: A function that returns a TF tensor that's part of a graph.
batch_size: number of slices to divide the data into.
names: If provided, assigns names to the resulting tensors.
"""
if not isinstance(inputs, list):
inputs = [inputs]
outputs = []
for i in range(batch_size):
inputs_slice = [x[i] for x in inputs]
output_slice = graph_fn(*inputs_slice)
if not isinstance(output_slice, (tuple, list)):
output_slice = [output_slice]
outputs.append(output_slice)
# Change outputs from a list of slices where each is
# a list of outputs to a list of outputs and each has
# a list of slices
outputs = list(zip(*outputs))
if names is None:
names = [None] * len(outputs)
result = [tf.stack(o, axis=0, name=n)
for o, n in zip(outputs, names)]
if len(result) == 1:
result = result[0]
return result
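# Hedged usage sketch (tensor shapes illustrative; `tf` is the TensorFlow
# module this file already depends on):
#
#   boxes = tf.ones([2, 10, 4])                  # hypothetical [batch, N, 4]
#   halved = batch_slice(boxes, lambda b: b * 0.5, batch_size=2)
#   # graph_fn ran once per slice; `halved` is stacked back to [2, 10, 4]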
def download_trained_weights(coco_model_path, verbose=1):
"""Download COCO trained weights from Releases.
coco_model_path: local path of COCO trained weights
"""
if verbose > 0:
print("Downloading pretrained model to " + coco_model_path + " ...")
with urllib.request.urlopen(COCO_MODEL_URL) as resp, open(coco_model_path, 'wb') as out:
shutil.copyfileobj(resp, out)
if verbose > 0:
print("... done downloading pretrained model!")
def norm_boxes(boxes, shape):
"""Converts boxes from pixel coordinates to normalized coordinates.
boxes: [N, (y1, x1, y2, x2)] in pixel coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[N, (y1, x1, y2, x2)] in normalized coordinates
"""
h, w = shape
scale = np.array([h - 1, w - 1, h - 1, w - 1])
shift = np.array([0, 0, 1, 1])
return np.divide((boxes - shift), scale).astype(np.float32)
def denorm_boxes(boxes, shape):
"""Converts boxes from normalized coordinates to pixel coordinates.
boxes: [N, (y1, x1, y2, x2)] in normalized coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[N, (y1, x1, y2, x2)] in pixel coordinates
"""
h, w = shape
scale = np.array([h - 1, w - 1, h - 1, w - 1])
shift = np.array([0, 0, 1, 1])
return np.around(np.multiply(boxes, scale) + shift).astype(np.int32)
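# Hedged round-trip check (illustrative): with shape (100, 100), the pixel box
# [0, 0, 10, 10] normalizes to [0, 0, 9/99, 9/99] and denorm_boxes maps it
# back exactly, the +/-1 shift accounting for the exclusive (y2, x2).
#
#   b = np.array([[0, 0, 10, 10]])
#   assert (denorm_boxes(norm_boxes(b, (100, 100)), (100, 100)) == b).all()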
def resize(image, output_shape, order=1, mode='constant', cval=0, clip=True,
preserve_range=False, anti_aliasing=False, anti_aliasing_sigma=None):
"""A wrapper for Scikit-Image resize().
Scikit-Image generates warnings on every call to resize() if it doesn't
receive the right parameters. The right parameters depend on the version
of skimage. This solves the problem by using different parameters per
version. And it provides a central place to control resizing defaults.
"""
if LooseVersion(skimage.__version__) >= LooseVersion("0.14"):
# New in 0.14: anti_aliasing. Default it to False for backward
# compatibility with skimage 0.13.
return skimage.transform.resize(
image, output_shape,
order=order, mode=mode, cval=cval, clip=clip,
preserve_range=preserve_range, anti_aliasing=anti_aliasing,
anti_aliasing_sigma=anti_aliasing_sigma)
else:
return skimage.transform.resize(
image, output_shape,
order=order, mode=mode, cval=cval, clip=clip,
preserve_range=preserve_range) | """The base class for dataset classes.
To use it, create a new class that adds functions specific to the dataset
you want to use. For example:
class CatsAndDogsDataset(Dataset):
def load_cats_and_dogs(self):
...
def load_mask(self, image_id):
...
def image_reference(self, image_id):
...
See COCODataset and ShapesDataset as examples.
"""
def __init__(self, class_map=None):
self._image_ids = []
self.image_info = []
# Background is always the first class
self.class_info = [{"source": "", "id": 0, "name": "BG"}]
self.source_class_ids = {}
def add_class(self, source, class_id, class_name):
assert "." not in source, "Source name cannot contain a dot"
# Does the class exist already?
for info in self.class_info:
if info['source'] == source and info["id"] == class_id:
# source.class_id combination already available, skip
return
# Add the class
self.class_info.append({
"source": source,
"id": class_id,
"name": class_name,
})
def add_image(self, source, image_id, path, **kwargs):
image_info = {
"id": image_id,
"source": source,
"path": path,
}
image_info.update(kwargs)
self.image_info.append(image_info)
def image_reference(self, image_id):
"""Return a link to the image in its source Website or details about
the image that help looking it up or debugging it.
Override for your dataset, but pass to this function
if you encounter images not in your dataset.
"""
return ""
def prepare(self, class_map=None):
"""Prepares the Dataset class for use.
TODO: class map is not supported yet. When done, it should handle mapping
classes from different datasets to the same class ID.
"""
def clean_name(name):
"""Returns a shorter version of object names for cleaner display."""
return ",".join(name.split(",")[:1])
# Build (or rebuild) everything else from the info dicts.
self.num_classes = len(self.class_info)
self.class_ids = np.arange(self.num_classes)
self.class_names = [clean_name(c["name"]) for c in self.class_info]
self.num_images = len(self.image_info)
self._image_ids = np.arange(self.num_images)
# Mapping from source class and image IDs to internal IDs
self.class_from_source_map = {"{}.{}".format(info['source'], info['id']): id
for info, id in zip(self.class_info, self.class_ids)}
self.image_from_source_map = {"{}.{}".format(info['source'], info['id']): id
for info, id in zip(self.image_info, self.image_ids)}
# Map sources to class_ids they support
self.sources = list(set([i['source'] for i in self.class_info]))
self.source_class_ids = {}
# Loop over datasets
for source in self.sources:
self.source_class_ids[source] = []
# Find classes that belong to this dataset
for i, info in enumerate(self.class_info):
# Include BG class in all datasets
if i == 0 or source == info['source']:
self.source_class_ids[source].append(i)
def map_source_class_id(self, source_class_id):
"""Takes a source class ID and returns the int class ID assigned to it.
For example:
dataset.map_source_class_id("coco.12") -> 23
"""
return self.class_from_source_map[source_class_id]
def get_source_class_id(self, class_id, source):
"""Map an internal class ID to the corresponding class ID in the source dataset."""
info = self.class_info[class_id]
assert info['source'] == source
return info['id']
@property
def image_ids(self):
return self._image_ids
def source_image_link(self, image_id):
"""Returns the path or URL to the image.
Override this to return a URL to the image if it's available online for easy
debugging.
"""
return self.image_info[image_id]["path"]
def load_image(self, image_id):
"""Load the specified image and return a [H,W,3] Numpy array.
"""
# Load image
image = skimage.io.imread(self.image_info[image_id]['path'])
# If grayscale, convert to RGB for consistency.
if image.ndim != 3:
image = skimage.color.gray2rgb(image)
# If it has an alpha channel, remove it for consistency
if image.shape[-1] == 4:
image = image[..., :3]
return image
def load_mask(self, image_id):
"""Load instance masks for the given image.
Different datasets use different ways to store masks. Override this
method to load instance masks and return them in the form of an
array of binary masks of shape [height, width, instances].
Returns:
masks: A bool array of shape [height, width, instance count] with
a binary mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# Override this function to load a mask from your dataset.
# Otherwise, it returns an empty mask.
logging.warning("You are using the default load_mask(), maybe you need to define your own one.")
mask = np.empty([0, 0, 0])
class_ids = np.empty([0], np.int32)
return mask, class_ids |
benchlist.go | package benchlist
import (
"container/heap"
"fmt"
"math/rand"
"sync"
"time"
"github.com/ava-labs/avalanchego/ids"
"github.com/ava-labs/avalanchego/snow/validators"
"github.com/ava-labs/avalanchego/utils/logging"
"github.com/ava-labs/avalanchego/utils/timer"
"github.com/prometheus/client_golang/prometheus"
safemath "github.com/ava-labs/avalanchego/utils/math"
)
// If a peer consistently does not respond to queries, it will
// increase latencies on the network whenever that peer is polled.
// If we cannot terminate the poll early, then the poll will wait
// the full timeout before finalizing the poll and making progress.
// This can increase network latencies to an undesirable level.
// Therefore, nodes that consistently fail are "benched" such that
// queries to that node fail immediately to avoid waiting up to
// the full network timeout for a response.
// Benchlist ...
type Benchlist interface {
// RegisterResponse registers the response to a query message
RegisterResponse(validatorID ids.ShortID)
// RegisterFailure registers that we didn't receive a response within the timeout
RegisterFailure(validatorID ids.ShortID)
// IsBenched returns true if messages to [validatorID]
// should not be sent over the network and should immediately fail.
IsBenched(validatorID ids.ShortID) bool
}
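// Hedged usage sketch (wiring is illustrative; see NewBenchlist below):
//
//	b, _ := NewBenchlist(log, vdrs, 10, time.Minute, 15*time.Minute,
//	    0.5, "bench", prometheus.NewRegistry())
//	if !b.IsBenched(vdrID) {
//	    // send the query; if it times out:
//	    b.RegisterFailure(vdrID)
//	}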
// Data about a validator who is benched
type benchData struct {
benchedUntil time.Time
validatorID ids.ShortID
index int
}
// Implements heap.Interface. Each element is a benched validator
type benchedQueue []*benchData
func (bq benchedQueue) Len() int { return len(bq) }
func (bq benchedQueue) Less(i, j int) bool { return bq[i].benchedUntil.Before(bq[j].benchedUntil) }
func (bq benchedQueue) Swap(i, j int) {
bq[i], bq[j] = bq[j], bq[i]
bq[i].index = i
bq[j].index = j
}
// Push adds an item to this queue. x must have type *benchData
func (bq *benchedQueue) Push(x interface{}) {
item := x.(*benchData)
item.index = len(*bq)
*bq = append(*bq, item)
}
// Pop returns the validator that should leave the bench next
func (bq *benchedQueue) Pop() interface{} {
n := len(*bq)
item := (*bq)[n-1]
(*bq)[n-1] = nil // make sure the item is freed from memory
*bq = (*bq)[:n-1]
return item
}
type failureStreak struct {
// Time of first consecutive timeout
firstFailure time.Time
// Number of consecutive message timeouts
consecutive int
}
type benchlist struct {
lock sync.RWMutex
log logging.Logger
metrics metrics
// Fires when the next validator should leave the bench
// Calls [update] when it fires
timer *timer.Timer
// Tells the time. Can be faked for testing.
clock timer.Clock
// Validator set of the network
vdrs validators.Set
// Validator ID --> Consecutive failure information
failureStreaks map[ids.ShortID]failureStreak
// IDs of validators that are currently benched
benchlistSet ids.ShortSet
// Min heap containing benched validators and their endtimes
// Pop() returns the next validator to leave
benchedQueue benchedQueue
// A validator will be benched if [threshold] messages in a row
// to them time out and the first of those messages was more than
// [minimumFailingDuration] ago
threshold int
minimumFailingDuration time.Duration
// A benched validator will be benched for between [duration/2] and [duration]
duration time.Duration
// The maximum percentage of total network stake that may be benched
// Must be in [0,1)
maxPortion float64
}
// NewBenchlist returns a new Benchlist
func NewBenchlist(
log logging.Logger,
validators validators.Set,
threshold int,
minimumFailingDuration,
duration time.Duration,
maxPortion float64,
namespace string,
registerer prometheus.Registerer,
) (Benchlist, error) {
if maxPortion < 0 || maxPortion >= 1 {
return nil, fmt.Errorf("max portion of benched stake must be in [0,1) but got %f", maxPortion)
}
benchlist := &benchlist{
log: log,
failureStreaks: make(map[ids.ShortID]failureStreak),
benchlistSet: ids.ShortSet{},
vdrs: validators,
threshold: threshold,
minimumFailingDuration: minimumFailingDuration,
duration: duration,
maxPortion: maxPortion,
}
benchlist.timer = timer.NewTimer(benchlist.update)
go benchlist.timer.Dispatch()
return benchlist, benchlist.metrics.Initialize(registerer, namespace)
}
// Update removes benched validators whose time on the bench is over
func (b *benchlist) update() {
b.lock.Lock()
defer b.lock.Unlock()
now := b.clock.Time()
for {
// [next] is nil when no more validators should
// leave the bench at this time
next := b.nextToLeave(now)
if next == nil {
break
}
b.remove(next)
}
// Set next time update will be called
b.setNextLeaveTime()
}
// Remove [validator] from the benchlist
// Assumes [b.lock] is held
func (b *benchlist) remove(validator *benchData) {
// Update state
id := validator.validatorID
b.log.Debug("removing validator %s from benchlist", id)
heap.Remove(&b.benchedQueue, validator.index)
b.benchlistSet.Remove(id)
// Update metrics
b.metrics.numBenched.Set(float64(b.benchedQueue.Len()))
benchedStake, err := b.vdrs.SubsetWeight(b.benchlistSet)
if err != nil {
// This should never happen
b.log.Error("couldn't get benched stake: %w", err)
return
}
b.metrics.weightBenched.Set(float64(benchedStake))
}
// Returns the next validator that should leave
// the bench at time [now]. nil if no validator should.
// Assumes [b.lock] is held
func (b *benchlist) nextToLeave(now time.Time) *benchData {
if b.benchedQueue.Len() == 0 {
return nil
}
next := b.benchedQueue[0]
if now.Before(next.benchedUntil) {
return nil
}
return next
}
// Set [b.timer] to fire when the next validator should leave the bench
// Assumes [b.lock] is held
func (b *benchlist) setNextLeaveTime() {
if b.benchedQueue.Len() == 0 {
b.timer.Cancel()
return
}
now := b.clock.Time()
next := b.benchedQueue[0]
nextLeave := next.benchedUntil.Sub(now)
b.timer.SetTimeoutIn(nextLeave)
}
// IsBenched returns true if messages to [validatorID]
// should not be sent over the network and should immediately fail.
func (b *benchlist) IsBenched(validatorID ids.ShortID) bool {
b.lock.RLock()
defer b.lock.RUnlock()
return b.isBenched(validatorID)
}
// isBenched returns true if [validatorID] is currently benched.
// Assumes [b.lock] is held.
func (b *benchlist) isBenched(validatorID ids.ShortID) bool {
return b.benchlistSet.Contains(validatorID)
}
// RegisterResponse notes that we received a response from validator [validatorID]
func (b *benchlist) RegisterResponse(validatorID ids.ShortID) {
b.lock.Lock()
defer b.lock.Unlock()
delete(b.failureStreaks, validatorID)
}
// RegisterFailure notes that a request to validator [validatorID] timed out
func (b *benchlist) RegisterFailure(validatorID ids.ShortID) {
b.lock.Lock()
defer b.lock.Unlock()
if b.benchlistSet.Contains(validatorID) {
// This validator is benched. Ignore failures until they're not.
return
}
failureStreak := b.failureStreaks[validatorID]
// Increment consecutive failures
failureStreak.consecutive++
now := b.clock.Time()
// Update first failure time
if failureStreak.firstFailure.IsZero() |
b.failureStreaks[validatorID] = failureStreak
if failureStreak.consecutive >= b.threshold && now.After(failureStreak.firstFailure.Add(b.minimumFailingDuration)) {
b.bench(validatorID)
}
}
// Assumes [b.lock] is held
// Assumes [validatorID] is not already benched
func (b *benchlist) bench(validatorID ids.ShortID) {
benchedStake, err := b.vdrs.SubsetWeight(b.benchlistSet)
if err != nil {
// This should never happen
b.log.Error("couldn't get benched stake: %w. Resetting benchlist", err)
return
}
validatorStake, isVdr := b.vdrs.GetWeight(validatorID)
if !isVdr {
// We might want to bench a non-validator because they don't respond to
// our Get requests, but we choose to only bench validators.
return
}
newBenchedStake, err := safemath.Add64(benchedStake, validatorStake)
if err != nil {
// This should never happen
b.log.Error("overflow calculating new benched stake with validator %s", validatorID)
return
}
totalStake := b.vdrs.Weight()
maxBenchedStake := float64(totalStake) * b.maxPortion
if float64(newBenchedStake) > maxBenchedStake {
b.log.Debug(
"not benching %s because benched stake (%f) would exceed max (%f)",
validatorID,
float64(newBenchedStake),
maxBenchedStake,
)
return
}
// Validator is benched for between [b.duration]/2 and [b.duration]
now := b.clock.Time()
minBenchDuration := b.duration / 2
minBenchedUntil := now.Add(minBenchDuration)
maxBenchedUntil := now.Add(b.duration)
diff := maxBenchedUntil.Sub(minBenchedUntil)
benchedUntil := minBenchedUntil.Add(time.Duration(rand.Float64() * float64(diff))) // #nosec G404
// Add to benchlist with a randomized leave time
b.benchlistSet.Add(validatorID)
delete(b.failureStreaks, validatorID)
heap.Push(
&b.benchedQueue,
&benchData{validatorID: validatorID, benchedUntil: benchedUntil},
)
b.log.Debug(
"benching validator %s for %s after %d consecutive failed queries.",
validatorID,
benchedUntil.Sub(now),
b.threshold,
)
// Set [b.timer] to fire when next validator should leave bench
b.setNextLeaveTime()
// Update metrics
b.metrics.numBenched.Set(float64(b.benchedQueue.Len()))
b.metrics.weightBenched.Set(float64(newBenchedStake))
}
| {
// This is the first consecutive failure
failureStreak.firstFailure = now
} |
create.board.dto.ts | import { IsNotEmpty } from 'class-validator';
export class CreateBoardDto {
@IsNotEmpty()
title: string;
@IsNotEmpty()
description: string; |
status: string;
} |
|
aurora-tri-aurora-eth.ts | import { multiSushiStrategyAbi } from "../../Contracts/ABIs/multi-sushi-strategy.abi";
import { AuroraTriDualJar } from "./aurora-tri-dual-jar";
export class | extends AuroraTriDualJar {
constructor() {
super(multiSushiStrategyAbi, 6, 2);
}
}
| TriAuroraEth |
indexers.py | """
Low-dependency indexing utilities.
"""
import numpy as np
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
# -----------------------------------------------------------
# Indexer Identification
def is_list_like_indexer(key) -> bool:
"""
Check if we have a list-like indexer that is *not* a NamedTuple.
Parameters
----------
key : object
Returns
-------
bool
"""
# allow a list_like, but exclude NamedTuples which can be indexers
return is_list_like(key) and not (isinstance(key, tuple) and type(key) is not tuple)
def is_scalar_indexer(indexer, arr_value) -> bool:
"""
Return True if we are all scalar indexers.
Returns
-------
bool
"""
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
def is_empty_indexer(indexer, arr_value: np.ndarray) -> bool:
|
# -----------------------------------------------------------
# Indexer Validation
def check_setitem_lengths(indexer, value, values) -> None:
"""
Validate that value and indexer are the same length.
A special case is allowed when the indexer is a boolean array
and the number of true values equals the length of ``value``. In
this case, no exception is raised.
Parameters
----------
indexer : sequence
Key for the setitem.
value : array-like
Value for the setitem.
values : array-like
Values being set into.
Returns
-------
None
Raises
------
ValueError
When the indexer is an ndarray or list and the lengths don't match.
"""
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (
isinstance(indexer, np.ndarray)
and indexer.dtype == np.bool_
and len(indexer[indexer]) == len(value)
):
raise ValueError(
"cannot set using a list-like indexer "
"with a different length than the value"
)
elif isinstance(indexer, slice):
# slice
if is_list_like(value) and len(values):
if len(value) != length_of_indexer(indexer, values):
raise ValueError(
"cannot set using a slice indexer with a "
"different length than the value"
)
def validate_indices(indices: np.ndarray, n: int) -> None:
"""
Perform bounds-checking for an indexer.
-1 is allowed for indicating missing values.
Parameters
----------
indices : ndarray
n : int
Length of the array being indexed.
Raises
------
ValueError
Examples
--------
>>> validate_indices([1, 2], 3)
# OK
>>> validate_indices([1, -2], 3)
ValueError
>>> validate_indices([1, 2, 3], 3)
IndexError
>>> validate_indices([-1, -1], 0)
# OK
>>> validate_indices([0, 1], 0)
IndexError
"""
if len(indices):
min_idx = indices.min()
if min_idx < -1:
msg = f"'indices' contains values less than allowed ({min_idx} < -1)"
raise ValueError(msg)
max_idx = indices.max()
if max_idx >= n:
raise IndexError("indices are out-of-bounds")
# -----------------------------------------------------------
# Indexer Conversion
def maybe_convert_indices(indices, n: int):
"""
Attempt to convert indices into valid, positive indices.
If we have negative indices, translate to positive here.
If we have indices that are out-of-bounds, raise an IndexError.
Parameters
----------
indices : array-like
Array of indices that we are to convert.
n : int
Number of elements in the array that we are indexing.
Returns
-------
array-like
An array-like of positive indices that correspond to the ones
that were passed in initially to this function.
Raises
------
IndexError
One of the converted indices either exceeded the number of
elements (specified by `n`), or was still negative.
"""
if isinstance(indices, list):
indices = np.array(indices)
if len(indices) == 0:
# If `indices` is empty, np.array will return a float,
# and will cause indexing errors.
return np.empty(0, dtype=np.intp)
mask = indices < 0
if mask.any():
indices = indices.copy()
indices[mask] += n
mask = (indices >= n) | (indices < 0)
if mask.any():
raise IndexError("indices are out-of-bounds")
return indices
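# Hedged example (illustrative): negative indices are shifted by n, so
# maybe_convert_indices(np.array([0, -1]), 5) returns array([0, 4]), while
# maybe_convert_indices(np.array([5]), 5) raises IndexError.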
# -----------------------------------------------------------
# Unsorted
def length_of_indexer(indexer, target=None) -> int:
"""
Return the length of a single non-tuple indexer which could be a slice.
Returns
-------
int
"""
if target is not None and isinstance(indexer, slice):
target_len = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
start += target_len
if stop is None or stop > target_len:
stop = target_len
elif stop < 0:
stop += target_len
if step is None:
step = 1
elif step < 0:
start, stop = stop + 1, start + 1
step = -step
return (stop - start + step - 1) // step
elif isinstance(indexer, (ABCSeries, ABCIndexClass, np.ndarray, list)):
return len(indexer)
elif not is_list_like_indexer(indexer):
return 1
raise AssertionError("cannot find the length of the indexer")
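# Hedged example (illustrative): for a length-5 target, slice(None, None, 2)
# normalizes to start=0, stop=5, step=2, so the length is
# (5 - 0 + 2 - 1) // 2 == 3 -- matching len(range(0, 5, 2)).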
| """
Check if we have an empty indexer.
Parameters
----------
indexer : object
arr_value : np.ndarray
Returns
-------
bool
"""
if is_list_like(indexer) and not len(indexer):
return True
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False |
api.rs | use crate::{client::ApmConfig, model::Span};
use serde::Serialize;
use std::collections::HashMap;
const SAMPLING_PRIORITY_KEY: &str = "_sampling_priority_v1";
const ANALYTICS_SAMPLE_RATE_KEY: &str = "_dd1.sr.eausr";
const _SAMPLE_RATE_METRIC_KEY: &str = "_sample_rate";
const _SAMPLING_AGENT_DECISION: &str = "_dd.agent_psr";
const _SAMPLING_RULE_DECISION: &str = "_dd.rule_psr";
const _SAMPLING_LIMIT_DECISION: &str = "_dd.limit_psr";
fn fill_meta(span: &Span, env: Option<String>) -> HashMap<String, String> {
let mut meta = HashMap::new();
if let Some(env) = env {
meta.insert("env".to_string(), env);
}
if let Some(sql) = &span.sql {
meta.insert("sql.query".to_string(), sql.query.clone());
meta.insert("sql.rows".to_string(), sql.rows.clone());
meta.insert("sql.db".to_string(), sql.db.clone());
}
for (key, value) in &span.tags {
meta.insert(key.to_string(), value.to_string());
}
meta
}
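// Hedged usage sketch (values illustrative; `span` is a crate::model::Span):
//
//     let meta = fill_meta(&span, Some("prod".to_string()));
//     // meta["env"] == "prod"; sql.* keys appear only when span.sql is set,
//     // and every span tag is copied through verbatim.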
| if apm_config.apm_enabled {
metrics.insert(
SAMPLING_PRIORITY_KEY.to_string(),
apm_config.sample_priority,
);
metrics.insert(
ANALYTICS_SAMPLE_RATE_KEY.to_string(),
apm_config.sample_rate,
);
}
metrics
}
#[derive(Debug, Serialize, Clone, PartialEq)]
pub struct RawSpan {
service: String,
name: String,
resource: String,
trace_id: u64,
span_id: u64,
parent_id: Option<u64>,
start: u64,
duration: u64,
error: i32,
meta: HashMap<String, String>,
metrics: HashMap<String, f64>,
r#type: String,
}
impl RawSpan {
pub fn from_span(
span: &Span,
service: &String,
env: &Option<String>,
cfg: &ApmConfig,
) -> RawSpan {
let http_enabled = span.tags.contains_key("http.url");
let is_error = span.tags.contains_key("error.message");
RawSpan {
service: service.clone(),
trace_id: span.trace_id,
span_id: span.id,
name: span.name.clone(),
resource: span.resource.clone(),
parent_id: span.parent_id,
start: span.start.timestamp_nanos() as u64,
duration: span.duration.num_nanoseconds().unwrap_or(0) as u64,
error: if is_error { 1 } else { 0 },
r#type: if http_enabled { "custom" } else { "web" }.to_string(),
meta: fill_meta(&span, env.clone()),
metrics: fill_metrics(cfg),
}
}
} | fn fill_metrics(apm_config: &ApmConfig) -> HashMap<String, f64> {
let mut metrics = HashMap::new(); |
version_hack.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package discovery
import (
"net/http"
"regexp"
"sort"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
)
// HACK: support the case when we can add core or other legacy scheme resources through CRDs (KCP scenario)
// Possibly we wouldn't need a cluster-scoped global variable for ContributedResources.
// In the long run we could see the context carrying an injected interface that would let us perform
// some of these cluster scoped behaviors (where we would call methods on it instead of having lots of little caches everywhere).
// Finally with more refactoring we might also just want to skip having the cache and do it dynamically.
var ContributedResources map[ClusterGroupVersion]APIResourceLister = map[ClusterGroupVersion]APIResourceLister{}
type ClusterGroupVersion struct {
ClusterName string
Group string
Version string
}
// Empty returns true if group and version are empty
func (cgv ClusterGroupVersion) Empty() bool {
return len(cgv.Group) == 0 && len(cgv.Version) == 0
}
// String puts "group" and "version" into a single "group/version" string. For the legacy v1
// it returns "v1".
func (cgv ClusterGroupVersion) String() string {
// special case the internal apiVersion for the legacy kube types
if cgv.Empty() {
return ""
}
gv := cgv.Group + "/" + cgv.Version
// special case of "v1" for backward compatibility
if len(cgv.Group) == 0 && cgv.Version == "v1" {
gv = cgv.Version
}
result := gv
if cgv.ClusterName != "" {
result = cgv.ClusterName + "/" + gv
}
return result
}
func (cgv ClusterGroupVersion) GroupVersion() schema.GroupVersion {
return schema.GroupVersion{
Group: cgv.Group,
Version: cgv.Version,
}
}
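// Hedged sketch (illustrative wiring only): a CRD-backed controller could
// contribute a core-group resource for one logical cluster like so:
//
//	ContributedResources[ClusterGroupVersion{
//	    ClusterName: "root:org", Version: "v1",
//	}] = APIResourceListerFunc(func() []metav1.APIResource {
//	    return []metav1.APIResource{{Name: "widgets", Namespaced: true}}
//	})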
func withContributedResources(groupVersion schema.GroupVersion, apiResourceLister APIResourceLister) func(*http.Request) APIResourceLister {
return func(req *http.Request) APIResourceLister {
cluster := genericapirequest.ClusterFrom(req.Context())
return APIResourceListerFunc(func() []metav1.APIResource {
result := []metav1.APIResource{}
result = append(result, apiResourceLister.ListAPIResources()...)
if cluster != nil {
if additionalResources := ContributedResources[ClusterGroupVersion{
ClusterName: cluster.Name,
Group: groupVersion.Group,
Version: groupVersion.Version}]; additionalResources != nil {
result = append(result, additionalResources.ListAPIResources()...)
}
sort.Slice(result, func(i, j int) bool {
return result[i].Name < result[j].Name
})
}
return result
})
}
}
// IsAPIContributed returns `true` if the path corresponds to a resource that
// has been contributed to a legacy scheme group from a CRD.
func IsAPIContributed(path string) bool | {
for gv, resourceLister := range ContributedResources {
prefix := gv.Group
if prefix != "" {
prefix = "/apis/" + prefix + "/" + gv.Version + "/"
} else {
prefix = "/api/" + gv.Version + "/"
}
if !strings.HasPrefix(path, prefix) {
continue
}
for _, resource := range resourceLister.ListAPIResources() {
if strings.HasPrefix(path, prefix+resource.Name) {
return true
}
if resource.Namespaced {
if matched, _ := regexp.MatchString(prefix+"namespaces/[^/][^/]*/"+resource.Name+"(/[^/].*)?", path); matched {
return true
}
}
}
}
return false
} |
|
assert_test.go | package vm_test
import (
"testing"
"github.com/elliotchance/ok/vm"
"github.com/stretchr/testify/assert"
)
func TestAssert_String(t *testing.T) | {
ins := &vm.Assert{Left: "0", Right: "1", Final: "2", Op: "==", Pos: "pos"}
assert.Equal(t, "assert($0 == $1)", ins.String())
} |
|
leader.go | package leader
import (
"context"
"os"
"time"
"github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes" | "k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
)
type Callback func(cb context.Context)
func RunOrDie(ctx context.Context, namespace, name string, client kubernetes.Interface, cb Callback) {
if namespace == "" {
namespace = "kube-system"
}
err := run(ctx, namespace, name, client, cb)
if err != nil {
logrus.Fatalf("Failed to start leader election for %s", name)
}
panic("Failed to start leader election for " + name)
}
func run(ctx context.Context, namespace, name string, client kubernetes.Interface, cb Callback) error {
id, err := os.Hostname()
if err != nil {
return err
}
rl, err := resourcelock.New(resourcelock.ConfigMapsResourceLock,
namespace,
name,
client.CoreV1(),
client.CoordinationV1(),
resourcelock.ResourceLockConfig{
Identity: id,
})
if err != nil {
logrus.Fatalf("error creating leader lock for %s: %v", name, err)
}
t := time.Second
if dl := os.Getenv("DEV_LEADERELECTION"); dl != "" {
t = time.Hour
}
leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
Lock: rl,
LeaseDuration: 45 * t,
RenewDeadline: 30 * t,
RetryPeriod: 2 * t,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(ctx context.Context) {
go cb(ctx)
},
OnStoppedLeading: func() {
logrus.Fatalf("leaderelection lost for %s", name)
},
},
ReleaseOnCancel: true,
})
panic("unreachable")
} | |
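// Hedged usage sketch (caller side; names illustrative):
//
//	leader.RunOrDie(ctx, "kube-system", "my-controller", client,
//	    func(ctx context.Context) {
//	        // start controllers here; this runs only while the lease is held
//	    })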
aggregation.py | """Support for aggregation-based AMG."""
from __future__ import absolute_import
from warnings import warn
import numpy as np
from scipy.sparse import csr_matrix, isspmatrix_csr, isspmatrix_bsr,\
SparseEfficiencyWarning
from pyamg.multilevel import multilevel_solver
from pyamg.relaxation.smoothing import change_smoothers
from pyamg.util.utils import relaxation_as_linear_operator,\
eliminate_diag_dom_nodes, blocksize,\
levelize_strength_or_aggregation, levelize_smooth_or_improve_candidates
from pyamg.strength import classical_strength_of_connection,\
symmetric_strength_of_connection, evolution_strength_of_connection,\
energy_based_strength_of_connection, distance_strength_of_connection,\
algebraic_distance, affinity_distance
from .aggregate import standard_aggregation, naive_aggregation,\
lloyd_aggregation
from .tentative import fit_candidates
from .smooth import jacobi_prolongation_smoother,\
richardson_prolongation_smoother, energy_prolongation_smoother
__all__ = ['smoothed_aggregation_solver']
def smoothed_aggregation_solver(A, B=None, BH=None,
symmetry='hermitian', strength='symmetric',
aggregate='standard',
smooth=('jacobi', {'omega': 4.0/3.0}),
presmoother=('block_gauss_seidel',
{'sweep': 'symmetric'}),
postsmoother=('block_gauss_seidel',
{'sweep': 'symmetric'}),
improve_candidates=[('block_gauss_seidel',
{'sweep': 'symmetric',
'iterations': 4}),
None],
max_levels=10, max_coarse=10,
diagonal_dominance=False,
keep=False, **kwargs):
"""Create a multilevel solver using classical-style Smoothed Aggregation (SA).
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix in CSR or BSR format
B : None, array_like
Right near-nullspace candidates stored in the columns of an NxK array.
The default value B=None is equivalent to B=ones((N,1))
BH : None, array_like
Left near-nullspace candidates stored in the columns of an NxK array.
BH is only used if symmetry='nonsymmetric'.
The default value BH=None is equivalent to BH=B.copy()
symmetry : string
'symmetric' refers to both real and complex symmetric
'hermitian' refers to both complex Hermitian and real Hermitian
'nonsymmetric' i.e. nonsymmetric in a hermitian sense
Note, in the strictly real case, symmetric and hermitian are the same.
Note, this flag does not denote definiteness of the operator.
strength : string or list
Method used to determine the strength of connection between unknowns of
the linear system. Method-specific parameters may be passed in using a
tuple, e.g. strength=('symmetric',{'theta' : 0.25 }). If strength=None,
all nonzero entries of the matrix are considered strong.
Choose from 'symmetric', 'classical', 'evolution', 'algebraic_distance',
'affinity', ('predefined', {'C' : csr_matrix}), None
aggregate : string or list
Method used to aggregate nodes.
Choose from 'standard', 'lloyd', 'naive',
('predefined', {'AggOp' : csr_matrix})
smooth : list
Method used to smooth the tentative prolongator. Method-specific
parameters may be passed in using a tuple, e.g. smooth=
('jacobi',{'filter' : True }).
Choose from 'jacobi', 'richardson', 'energy', None
presmoother : tuple, string, list
Defines the presmoother for the multilevel cycling. The default block
Gauss-Seidel option defaults to point-wise Gauss-Seidel, if the matrix
is CSR or is a BSR matrix with blocksize of 1.
postsmoother : tuple, string, list
Same as presmoother, except defines the postsmoother.
improve_candidates : tuple, string, list
The ith entry defines the method used to improve the candidates B on
level i. If the list is shorter than max_levels, then the last entry
will define the method for all levels lower. If tuple or string, then
this single relaxation descriptor defines improve_candidates on all
levels.
The list elements are relaxation descriptors of the form used for
presmoother and postsmoother. A value of None implies no action on B.
max_levels : integer
Maximum number of levels to be used in the multilevel solver.
max_coarse : integer
Maximum number of variables permitted on the coarse grid.
diagonal_dominance : bool, tuple
If True (or the first tuple entry is True), then avoid coarsening
diagonally dominant rows. The second tuple entry requires a
dictionary, where the key value 'theta' is used to tune the diagonal
dominance threshold.
keep : bool
Flag to indicate keeping extra operators in the hierarchy for
diagnostics. For example, if True, then strength of connection (C),
tentative prolongation (T), and aggregation (AggOp) are kept.
Other Parameters
----------------
cycle_type : ['V','W','F']
Structure of multigrid cycle
coarse_solver : ['splu', 'lu', 'cholesky', 'pinv', 'gauss_seidel', ... ]
Solver used at the coarsest level of the MG hierarchy.
Optionally, may be a tuple (fn, args), where fn is a string such as
['splu', 'lu', ...] or a callable function, and args is a dictionary of
arguments to be passed to fn.
Returns
-------
ml : multilevel_solver
Multigrid hierarchy of matrices and prolongation operators
See Also
--------
multilevel_solver, classical.ruge_stuben_solver,
aggregation.smoothed_aggregation_solver
Notes
-----
- This method implements classical-style SA, not root-node style SA
(see aggregation.rootnode_solver).
- The additional parameters are passed through as arguments to
multilevel_solver. Refer to pyamg.multilevel_solver for additional
documentation.
- At each level, four steps are executed in order to define the coarser
level operator.
1. Matrix A is given and used to derive a strength matrix, C.
2. Based on the strength matrix, indices are grouped or aggregated.
3. The aggregates define coarse nodes and a tentative prolongation
operator T is defined by injection
4. The tentative prolongation operator is smoothed by a relaxation
scheme to improve the quality and extent of interpolation from the
aggregates to fine nodes.
- The parameters smooth, strength, aggregate, presmoother, postsmoother
can be varied on a per level basis. For different methods on
different levels, use a list as input so that the i-th entry defines
the method at the i-th level. If there are more levels in the
hierarchy than list entries, the last entry will define the method
for all levels lower.
Examples are:
smooth=[('jacobi', {'omega':1.0}), None, 'jacobi']
presmoother=[('block_gauss_seidel', {'sweep':symmetric}), 'sor']
aggregate=['standard', 'naive']
strength=[('symmetric', {'theta':0.25}), ('symmetric', {'theta':0.08})]
- Predefined strength of connection and aggregation schemes can be
specified. These options are best used together, but aggregation can
be predefined while strength of connection is not.
For predefined strength of connection, use a list consisting of
tuples of the form ('predefined', {'C' : C0}), where C0 is a
csr_matrix and each degree-of-freedom in C0 represents a supernode.
For instance to predefine a three-level hierarchy, use
[('predefined', {'C' : C0}), ('predefined', {'C' : C1}) ].
Similarly for predefined aggregation, use a list of tuples. For
instance to predefine a three-level hierarchy, use [('predefined',
{'AggOp' : Agg0}), ('predefined', {'AggOp' : Agg1}) ], where the
dimensions of A, Agg0 and Agg1 are compatible, i.e. Agg0.shape[1] ==
A.shape[0] and Agg1.shape[1] == Agg0.shape[0]. Each AggOp is a
csr_matrix.
Examples
--------
>>> from pyamg import smoothed_aggregation_solver
>>> from pyamg.gallery import poisson
>>> from scipy.sparse.linalg import cg
>>> import numpy as np
>>> A = poisson((100,100), format='csr') # matrix
>>> b = np.ones((A.shape[0])) # RHS
>>> ml = smoothed_aggregation_solver(A) # AMG solver
>>> M = ml.aspreconditioner(cycle='V') # preconditioner
>>> x,info = cg(A, b, tol=1e-8, maxiter=30, M=M) # solve with CG
References
----------
.. [1996VaMaBr] Vanek, P. and Mandel, J. and Brezina, M.,
"Algebraic Multigrid by Smoothed Aggregation for
Second and Fourth Order Elliptic Problems",
Computing, vol. 56, no. 3, pp. 179--196, 1996.
http://citeseer.ist.psu.edu/vanek96algebraic.html
"""
if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
try:
A = csr_matrix(A)
warn("Implicit conversion of A to CSR", SparseEfficiencyWarning)
except BaseException:
raise TypeError('Argument A must have type csr_matrix or bsr_matrix, or be convertible to csr_matrix')
A = A.asfptype()
if (symmetry != 'symmetric') and (symmetry != 'hermitian') and\
(symmetry != 'nonsymmetric'):
raise ValueError('expected \'symmetric\', \'nonsymmetric\' or \'hermitian\' for the symmetry parameter ')
A.symmetry = symmetry
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix')
# Right near nullspace candidates use constant for each variable as default
if B is None:
B = np.kron(np.ones((int(A.shape[0]/blocksize(A)), 1), dtype=A.dtype),
np.eye(blocksize(A), dtype=A.dtype))
else:
B = np.asarray(B, dtype=A.dtype)
if len(B.shape) == 1:
B = B.reshape(-1, 1)
if B.shape[0] != A.shape[0]:
raise ValueError('The near null-space modes B have incorrect dimensions for matrix A')
if B.shape[1] < blocksize(A):
warn('Having less target vectors, B.shape[1], than blocksize of A can degrade convergence factors.')
# Left near nullspace candidates
if A.symmetry == 'nonsymmetric':
if BH is None:
BH = B.copy()
else:
BH = np.asarray(BH, dtype=A.dtype)
if len(BH.shape) == 1:
BH = BH.reshape(-1, 1)
if BH.shape[1] != B.shape[1]:
raise ValueError('The number of left and right near null-space modes B and BH, must be equal')
if BH.shape[0] != A.shape[0]:
raise ValueError('The near null-space modes BH have incorrect dimensions for matrix A')
# Levelize the user parameters, so that they become lists describing the
# desired user option on each level.
max_levels, max_coarse, strength =\
levelize_strength_or_aggregation(strength, max_levels, max_coarse)
max_levels, max_coarse, aggregate =\
levelize_strength_or_aggregation(aggregate, max_levels, max_coarse)
improve_candidates =\
levelize_smooth_or_improve_candidates(improve_candidates, max_levels)
smooth = levelize_smooth_or_improve_candidates(smooth, max_levels)
# Construct multilevel structure
levels = []
levels.append(multilevel_solver.level())
levels[-1].A = A # matrix
# Append near nullspace candidates
levels[-1].B = B # right candidates
if A.symmetry == 'nonsymmetric':
levels[-1].BH = BH # left candidates
while len(levels) < max_levels and\
int(levels[-1].A.shape[0]/blocksize(levels[-1].A)) > max_coarse:
extend_hierarchy(levels, strength, aggregate, smooth,
improve_candidates, diagonal_dominance, keep)
ml = multilevel_solver(levels, **kwargs)
change_smoothers(ml, presmoother, postsmoother)
return ml
def extend_hierarchy(levels, strength, aggregate, smooth, improve_candidates,
diagonal_dominance=False, keep=True):
| """Extend the multigrid hierarchy.
Service routine to implement the strength of connection, aggregation,
tentative prolongation construction, and prolongation smoothing. Called by
smoothed_aggregation_solver.
"""
def unpack_arg(v):
if isinstance(v, tuple):
return v[0], v[1]
else:
return v, {}
A = levels[-1].A
B = levels[-1].B
if A.symmetry == "nonsymmetric":
AH = A.H.asformat(A.format)
BH = levels[-1].BH
# Compute the strength-of-connection matrix C, where larger
# C[i,j] denote stronger couplings between i and j.
fn, kwargs = unpack_arg(strength[len(levels)-1])
if fn == 'symmetric':
C = symmetric_strength_of_connection(A, **kwargs)
elif fn == 'classical':
C = classical_strength_of_connection(A, **kwargs)
elif fn == 'distance':
C = distance_strength_of_connection(A, **kwargs)
elif (fn == 'ode') or (fn == 'evolution'):
if 'B' in kwargs:
C = evolution_strength_of_connection(A, **kwargs)
else:
C = evolution_strength_of_connection(A, B, **kwargs)
elif fn == 'energy_based':
C = energy_based_strength_of_connection(A, **kwargs)
elif fn == 'predefined':
C = kwargs['C'].tocsr()
elif fn == 'algebraic_distance':
C = algebraic_distance(A, **kwargs)
elif fn == 'affinity':
C = affinity_distance(A, **kwargs)
elif fn is None:
C = A.tocsr()
else:
raise ValueError('unrecognized strength of connection method: %s' %
str(fn))
# Avoid coarsening diagonally dominant rows
flag, kwargs = unpack_arg(diagonal_dominance)
if flag:
C = eliminate_diag_dom_nodes(A, C, **kwargs)
# Compute the aggregation matrix AggOp (i.e., the nodal coarsening of A).
# AggOp is a boolean matrix, where the sparsity pattern for the k-th column
# denotes the fine-grid nodes agglomerated into k-th coarse-grid node.
fn, kwargs = unpack_arg(aggregate[len(levels)-1])
if fn == 'standard':
AggOp = standard_aggregation(C, **kwargs)[0]
elif fn == 'naive':
AggOp = naive_aggregation(C, **kwargs)[0]
elif fn == 'lloyd':
AggOp = lloyd_aggregation(C, **kwargs)[0]
elif fn == 'predefined':
AggOp = kwargs['AggOp'].tocsr()
else:
raise ValueError('unrecognized aggregation method %s' % str(fn))
# Improve near nullspace candidates by relaxing on A B = 0
fn, kwargs = unpack_arg(improve_candidates[len(levels)-1])
if fn is not None:
b = np.zeros((A.shape[0], 1), dtype=A.dtype)
B = relaxation_as_linear_operator((fn, kwargs), A, b) * B
levels[-1].B = B
if A.symmetry == "nonsymmetric":
BH = relaxation_as_linear_operator((fn, kwargs), AH, b) * BH
levels[-1].BH = BH
# Compute the tentative prolongator, T, which is a tentative interpolation
# matrix from the coarse-grid to the fine-grid. T exactly interpolates
# B_fine = T B_coarse.
T, B = fit_candidates(AggOp, B)
if A.symmetry == "nonsymmetric":
TH, BH = fit_candidates(AggOp, BH)
# Smooth the tentative prolongator, so that its accuracy is greatly
# improved for algebraically smooth error.
fn, kwargs = unpack_arg(smooth[len(levels)-1])
if fn == 'jacobi':
P = jacobi_prolongation_smoother(A, T, C, B, **kwargs)
elif fn == 'richardson':
P = richardson_prolongation_smoother(A, T, **kwargs)
elif fn == 'energy':
P = energy_prolongation_smoother(A, T, C, B, None, (False, {}),
**kwargs)
elif fn is None:
P = T
else:
raise ValueError('unrecognized prolongation smoother method %s' %
str(fn))
# Compute the restriction matrix, R, which interpolates from the fine-grid
# to the coarse-grid. If A is nonsymmetric, then R must be constructed
# based on A.H. Otherwise R = P.H or P.T.
symmetry = A.symmetry
if symmetry == 'hermitian':
R = P.H
elif symmetry == 'symmetric':
R = P.T
elif symmetry == 'nonsymmetric':
fn, kwargs = unpack_arg(smooth[len(levels)-1])
if fn == 'jacobi':
R = jacobi_prolongation_smoother(AH, TH, C, BH, **kwargs).H
elif fn == 'richardson':
R = richardson_prolongation_smoother(AH, TH, **kwargs).H
elif fn == 'energy':
R = energy_prolongation_smoother(AH, TH, C, BH, None, (False, {}),
**kwargs)
R = R.H
elif fn is None:
R = T.H
else:
raise ValueError('unrecognized prolongation smoother method %s' %
str(fn))
if keep:
levels[-1].C = C # strength of connection matrix
levels[-1].AggOp = AggOp # aggregation operator
levels[-1].T = T # tentative prolongator
levels[-1].P = P # smoothed prolongator
levels[-1].R = R # restriction operator
levels.append(multilevel_solver.level())
A = R * A * P # Galerkin operator
A.symmetry = symmetry
levels[-1].A = A
levels[-1].B = B # right near nullspace candidates
if A.symmetry == "nonsymmetric":
levels[-1].BH = BH # left near nullspace candidates
|
config.py
# Copyright 2020 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import os
import sys
import logging
from .utils import path_from_root, exit_with_error, __rootpath__, which
logger = logging.getLogger('shared')
# The following variables can be overridden by the config file and/or
# environment variables. Specifically any variable whose name
# is in ALL_UPPER_CASE is considered a valid config file key.
# See parse_config_file below.
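# A minimal config file might look like this (paths are illustrative only):
#   LLVM_ROOT = '/usr/lib/llvm/bin'
#   BINARYEN_ROOT = '/usr/local'
#   NODE_JS = '/usr/bin/node'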
EMSCRIPTEN_ROOT = __rootpath__
NODE_JS = None
BINARYEN_ROOT = None
SPIDERMONKEY_ENGINE = None
V8_ENGINE = None
LLVM_ROOT = None
LLVM_ADD_VERSION = None
CLANG_ADD_VERSION = None
CLOSURE_COMPILER = None
JAVA = None
JS_ENGINE = None
JS_ENGINES = None
WASMER = None
WASMTIME = None
WASM_ENGINES = []
FROZEN_CACHE = None
CACHE = None
PORTS = None
COMPILER_WRAPPER = None
def listify(x):
if type(x) is not list:
return [x]
return x
def fix_js_engine(old, new):
if old is None:
return
global JS_ENGINES
JS_ENGINES = [new if x == old else x for x in JS_ENGINES]
return new
def root_is_writable():
return os.access(__rootpath__, os.W_OK)
def normalize_config_settings():
global CACHE, PORTS, JAVA, LLVM_ADD_VERSION, CLANG_ADD_VERSION
global NODE_JS, V8_ENGINE, JS_ENGINE, JS_ENGINES, SPIDERMONKEY_ENGINE, WASM_ENGINES
# EM_CONFIG stuff
if not JS_ENGINES:
JS_ENGINES = [NODE_JS]
if not JS_ENGINE:
JS_ENGINE = JS_ENGINES[0]
# Engine tweaks
if SPIDERMONKEY_ENGINE:
new_spidermonkey = SPIDERMONKEY_ENGINE
if '-w' not in str(new_spidermonkey):
new_spidermonkey += ['-w']
SPIDERMONKEY_ENGINE = fix_js_engine(SPIDERMONKEY_ENGINE, new_spidermonkey)
NODE_JS = fix_js_engine(NODE_JS, listify(NODE_JS))
V8_ENGINE = fix_js_engine(V8_ENGINE, listify(V8_ENGINE))
JS_ENGINE = fix_js_engine(JS_ENGINE, listify(JS_ENGINE))
JS_ENGINES = [listify(engine) for engine in JS_ENGINES]
WASM_ENGINES = [listify(engine) for engine in WASM_ENGINES]
if not CACHE:
if root_is_writable():
CACHE = path_from_root('cache')
else:
# Use the legacy method of putting the cache in the user's home directory
# if the emscripten root is not writable.
# This is useful mostly for read-only installations and perhaps could
# be removed in the future since such installations should probably be
# setting a specific cache location.
logger.debug('Using home-directory for emscripten cache due to read-only root')
CACHE = os.path.expanduser(os.path.join('~', '.emscripten_cache'))
if not PORTS:
PORTS = os.path.join(CACHE, 'ports')
if JAVA is None:
logger.debug('JAVA not defined in ' + config_file_location() + ', using "java"')
JAVA = 'java'
# Tools/paths
if LLVM_ADD_VERSION is None:
LLVM_ADD_VERSION = os.getenv('LLVM_ADD_VERSION')
if CLANG_ADD_VERSION is None:
CLANG_ADD_VERSION = os.getenv('CLANG_ADD_VERSION')
def parse_config_file():
"""Parse the emscripten config file using python's exec.
Also check EM_<KEY> environment variables to override specific config keys.
"""
config = {}
config_text = open(config_file, 'r').read() if config_file else EM_CONFIG
try:
exec(config_text, config)
except Exception as e:
exit_with_error('Error in evaluating %s (at %s): %s, text: %s', EM_CONFIG, config_file, str(e), config_text)
CONFIG_KEYS = (
'NODE_JS',
'BINARYEN_ROOT',
'SPIDERMONKEY_ENGINE',
'V8_ENGINE',
'LLVM_ROOT',
'LLVM_ADD_VERSION',
'CLANG_ADD_VERSION',
'CLOSURE_COMPILER', | 'JAVA',
'JS_ENGINE',
'JS_ENGINES',
'WASMER',
'WASMTIME',
'WASM_ENGINES',
'FROZEN_CACHE',
'CACHE',
'PORTS',
'COMPILER_WRAPPER',
)
# Only propagate certain settings from the config file.
for key in CONFIG_KEYS:
env_var = 'EM_' + key
env_value = os.environ.get(env_var)
if env_value is not None:
globals()[key] = env_value
elif key in config:
globals()[key] = config[key]
# Certain keys are mandatory
for key in ('LLVM_ROOT', 'NODE_JS', 'BINARYEN_ROOT'):
if key not in config:
exit_with_error('%s is not defined in %s', key, config_file_location())
if not globals()[key]:
exit_with_error('%s is set to empty value in %s', key, config_file_location())
if not NODE_JS:
exit_with_error('NODE_JS is not defined in %s', config_file_location())
normalize_config_settings()
# Returns the location of the emscripten config file.
def config_file_location():
# Handle the case where there is no config file at all (i.e. if EM_CONFIG is
# passed as python code directly on the command line).
if not config_file:
return '<inline config>'
return config_file
def generate_config(path, first_time=False):
# Note: repr is used to ensure the paths are escaped correctly on Windows.
# The full string is replaced so that the template stays valid Python.
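# e.g. a Windows path such as C:\llvm\bin is emitted as 'C:\\llvm\\bin',
# which remains a valid (and correctly escaped) Python string literal.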
config_file = open(path_from_root('tools', 'settings_template.py')).read().splitlines()
config_file = config_file[3:] # remove the initial comment
config_file = '\n'.join(config_file)
# autodetect some default paths
config_file = config_file.replace('\'{{{ EMSCRIPTEN_ROOT }}}\'', repr(__rootpath__))
llvm_root = os.path.dirname(which('llvm-dis') or '/usr/bin/llvm-dis')
config_file = config_file.replace('\'{{{ LLVM_ROOT }}}\'', repr(llvm_root))
node = which('nodejs') or which('node') or 'node'
config_file = config_file.replace('\'{{{ NODE }}}\'', repr(node))
abspath = os.path.abspath(os.path.expanduser(path))
# write
with open(abspath, 'w') as f:
f.write(config_file)
if first_time:
print('''
==============================================================================
Welcome to Emscripten!
This is the first time any of the Emscripten tools has been run.
A settings file has been copied to %s, at absolute path: %s
It contains our best guesses for the important paths, which are:
LLVM_ROOT = %s
NODE_JS = %s
EMSCRIPTEN_ROOT = %s
Please edit the file if any of those are incorrect.
This command will now exit. When you are done editing those paths, re-run it.
==============================================================================
''' % (path, abspath, llvm_root, node, __rootpath__), file=sys.stderr)
# Emscripten configuration is done through the --em-config command line option
# or the EM_CONFIG environment variable. If the specified string value contains
# newline or semicolon-separated definitions, then these definitions will be
# used to configure Emscripten. Otherwise, the string is understood to be a
# path to a settings file that contains the required definitions.
# The search order from the config file is as follows:
# 1. Specified on the command line (--em-config)
# 2. Specified via EM_CONFIG environment variable
# 3. Local .emscripten file, if found
# 4. Local .emscripten file, as used by `emsdk --embedded` (two levels above,
# see below)
# 5. User home directory config (~/.emscripten), if found.
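# As an illustration (values are placeholders), an inline config can be passed
# directly: emcc --em-config "LLVM_ROOT='/opt/llvm';NODE_JS='/usr/bin/node'"
# The semicolons are rewritten to newlines before the text is exec'd.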
embedded_config = path_from_root('.emscripten')
# For compatibility with `emsdk --embedded` mode also look two levels up. The
# layout of the emsdk puts emcc two levels below emsdk. For example:
# - emsdk/upstream/emscripten/emcc
# - emsdk/emscripten/1.38.31/emcc
# However `emsdk --embedded` stores the config file in the emsdk root.
# Without this check, when emcc is run from within the emsdk in embedded mode
# and the user forgets to first run `emsdk_env.sh` (which sets EM_CONFIG), emcc
# will not see any config file at all and fall back to creating a new/empty
# one.
# We could remove this special case if emsdk were to write its embedded config
# file into the emscripten directory itself.
# See: https://github.com/emscripten-core/emsdk/pull/367
emsdk_root = os.path.dirname(os.path.dirname(path_from_root()))
emsdk_embedded_config = os.path.join(emsdk_root, '.emscripten')
user_home_config = os.path.expanduser('~/.emscripten')
if '--em-config' in sys.argv:
EM_CONFIG = sys.argv[sys.argv.index('--em-config') + 1]
# And now remove it from sys.argv
skip = False
newargs = []
for arg in sys.argv:
if not skip and arg != '--em-config':
newargs += [arg]
elif arg == '--em-config':
skip = True
elif skip:
skip = False
sys.argv = newargs
if not os.path.isfile(EM_CONFIG):
if EM_CONFIG.startswith('-'):
exit_with_error('Passed --em-config without an argument. Usage: --em-config /path/to/.emscripten or --em-config LLVM_ROOT=/path;...')
if '=' not in EM_CONFIG:
exit_with_error('File ' + EM_CONFIG + ' passed to --em-config does not exist!')
else:
EM_CONFIG = EM_CONFIG.replace(';', '\n') + '\n'
elif 'EM_CONFIG' in os.environ:
EM_CONFIG = os.environ['EM_CONFIG']
elif os.path.exists(embedded_config):
EM_CONFIG = embedded_config
elif os.path.exists(emsdk_embedded_config):
EM_CONFIG = emsdk_embedded_config
elif os.path.exists(user_home_config):
EM_CONFIG = user_home_config
else:
if root_is_writable():
generate_config(embedded_config, first_time=True)
else:
generate_config(user_home_config, first_time=True)
sys.exit(0)
if '\n' in EM_CONFIG:
config_file = None
logger.debug('config is specified inline without a file')
else:
config_file = os.path.expanduser(EM_CONFIG)
logger.debug('emscripten config is located in ' + config_file)
if not os.path.exists(config_file):
exit_with_error('emscripten config file not found: ' + config_file)
# Emscripten compiler spawns other processes, which can reimport shared.py, so
# make sure that those child processes get the same configuration file by
# setting it to the currently active environment.
os.environ['EM_CONFIG'] = EM_CONFIG
parse_config_file() |